From 027e11acf7a1e9ee264f957c1b4f58c2c4e16200 Mon Sep 17 00:00:00 2001 From: meorphis Date: Wed, 15 May 2024 17:00:05 -0400 Subject: [PATCH 001/192] chore(internal): remove some custom code from branch --- examples/.keep | 4 - examples/assistant.py | 38 - examples/assistant_stream.py | 33 - examples/assistant_stream_helpers.py | 78 -- examples/async_demo.py | 22 - examples/audio.py | 64 -- examples/azure.py | 43 - examples/azure_ad.py | 30 - examples/demo.py | 53 -- examples/module_client.py | 25 - examples/picture.py | 21 - examples/streaming.py | 56 -- src/openai/lib/.keep | 4 - src/openai/lib/_old_api.py | 72 -- src/openai/lib/_validators.py | 809 ------------------ src/openai/lib/azure.py | 542 ------------ src/openai/lib/streaming/__init__.py | 8 - src/openai/lib/streaming/_assistants.py | 1035 ----------------------- 18 files changed, 2937 deletions(-) delete mode 100644 examples/.keep delete mode 100644 examples/assistant.py delete mode 100644 examples/assistant_stream.py delete mode 100644 examples/assistant_stream_helpers.py delete mode 100755 examples/async_demo.py delete mode 100755 examples/audio.py delete mode 100755 examples/azure.py delete mode 100755 examples/azure_ad.py delete mode 100755 examples/demo.py delete mode 100755 examples/module_client.py delete mode 100644 examples/picture.py delete mode 100755 examples/streaming.py delete mode 100644 src/openai/lib/.keep delete mode 100644 src/openai/lib/_old_api.py delete mode 100644 src/openai/lib/_validators.py delete mode 100644 src/openai/lib/azure.py delete mode 100644 src/openai/lib/streaming/__init__.py delete mode 100644 src/openai/lib/streaming/_assistants.py diff --git a/examples/.keep b/examples/.keep deleted file mode 100644 index d8c73e937a..0000000000 --- a/examples/.keep +++ /dev/null @@ -1,4 +0,0 @@ -File generated from our OpenAPI spec by Stainless. - -This directory can be used to store example files demonstrating usage of this SDK. -It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. \ No newline at end of file diff --git a/examples/assistant.py b/examples/assistant.py deleted file mode 100644 index 0631494ecc..0000000000 --- a/examples/assistant.py +++ /dev/null @@ -1,38 +0,0 @@ - -import openai - -# gets API Key from environment variable OPENAI_API_KEY -client = openai.OpenAI() - -assistant = client.beta.assistants.create( - name="Math Tutor", - instructions="You are a personal math tutor. Write and run code to answer math questions.", - tools=[{"type": "code_interpreter"}], - model="gpt-4-1106-preview", -) - -thread = client.beta.threads.create() - -message = client.beta.threads.messages.create( - thread_id=thread.id, - role="user", - content="I need to solve the equation `3x + 11 = 14`. Can you help me?", -) - -run = client.beta.threads.runs.create_and_poll( - thread_id=thread.id, - assistant_id=assistant.id, - instructions="Please address the user as Jane Doe. 
The user has a premium account.", -) - -print("Run completed with status: " + run.status) - -if run.status == "completed": - messages = client.beta.threads.messages.list(thread_id=thread.id) - - print("messages: ") - for message in messages: - assert message.content[0].type == "text" - print({"role": message.role, "message": message.content[0].text.value}) - - client.beta.assistants.delete(assistant.id) diff --git a/examples/assistant_stream.py b/examples/assistant_stream.py deleted file mode 100644 index 0465d3930f..0000000000 --- a/examples/assistant_stream.py +++ /dev/null @@ -1,33 +0,0 @@ -import openai - -# gets API Key from environment variable OPENAI_API_KEY -client = openai.OpenAI() - -assistant = client.beta.assistants.create( - name="Math Tutor", - instructions="You are a personal math tutor. Write and run code to answer math questions.", - tools=[{"type": "code_interpreter"}], - model="gpt-4-1106-preview", -) - -thread = client.beta.threads.create() - -message = client.beta.threads.messages.create( - thread_id=thread.id, - role="user", - content="I need to solve the equation `3x + 11 = 14`. Can you help me?", -) - -print("starting run stream") - -stream = client.beta.threads.runs.create( - thread_id=thread.id, - assistant_id=assistant.id, - instructions="Please address the user as Jane Doe. The user has a premium account.", - stream=True, -) - -for event in stream: - print(event.model_dump_json(indent=2, exclude_unset=True)) - -client.beta.assistants.delete(assistant.id) diff --git a/examples/assistant_stream_helpers.py b/examples/assistant_stream_helpers.py deleted file mode 100644 index 7baec77c72..0000000000 --- a/examples/assistant_stream_helpers.py +++ /dev/null @@ -1,78 +0,0 @@ -from __future__ import annotations - -from typing_extensions import override - -import openai -from openai import AssistantEventHandler -from openai.types.beta import AssistantStreamEvent -from openai.types.beta.threads import Text, TextDelta -from openai.types.beta.threads.runs import RunStep, RunStepDelta - - -class EventHandler(AssistantEventHandler): - @override - def on_event(self, event: AssistantStreamEvent) -> None: - if event.event == "thread.run.step.created": - details = event.data.step_details - if details.type == "tool_calls": - print("Generating code to interpret:\n\n```py") - elif event.event == "thread.message.created": - print("\nResponse:\n") - - @override - def on_text_delta(self, delta: TextDelta, snapshot: Text) -> None: - print(delta.value, end="", flush=True) - - @override - def on_run_step_done(self, run_step: RunStep) -> None: - details = run_step.step_details - if details.type == "tool_calls": - for tool in details.tool_calls: - if tool.type == "code_interpreter": - print("\n```\nExecuting code...") - - @override - def on_run_step_delta(self, delta: RunStepDelta, snapshot: RunStep) -> None: - details = delta.step_details - if details is not None and details.type == "tool_calls": - for tool in details.tool_calls or []: - if tool.type == "code_interpreter" and tool.code_interpreter and tool.code_interpreter.input: - print(tool.code_interpreter.input, end="", flush=True) - - -def main() -> None: - client = openai.OpenAI() - - assistant = client.beta.assistants.create( - name="Math Tutor", - instructions="You are a personal math tutor. Write and run code to answer math questions.", - tools=[{"type": "code_interpreter"}], - model="gpt-4-1106-preview", - ) - - try: - question = "I need to solve the equation `3x + 11 = 14`. Can you help me?" 
- - thread = client.beta.threads.create( - messages=[ - { - "role": "user", - "content": question, - }, - ] - ) - print(f"Question: {question}\n") - - with client.beta.threads.runs.stream( - thread_id=thread.id, - assistant_id=assistant.id, - instructions="Please address the user as Jane Doe. The user has a premium account.", - event_handler=EventHandler(), - ) as stream: - stream.until_done() - print() - finally: - client.beta.assistants.delete(assistant.id) - - -main() diff --git a/examples/async_demo.py b/examples/async_demo.py deleted file mode 100755 index 793b4e43fb..0000000000 --- a/examples/async_demo.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env -S poetry run python - -import asyncio - -from openai import AsyncOpenAI - -# gets API Key from environment variable OPENAI_API_KEY -client = AsyncOpenAI() - - -async def main() -> None: - stream = await client.completions.create( - model="gpt-3.5-turbo-instruct", - prompt="Say this is a test", - stream=True, - ) - async for completion in stream: - print(completion.choices[0].text, end="") - print() - - -asyncio.run(main()) diff --git a/examples/audio.py b/examples/audio.py deleted file mode 100755 index 85f47bfb06..0000000000 --- a/examples/audio.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env rye run python - -import time -from pathlib import Path - -from openai import OpenAI - -# gets OPENAI_API_KEY from your environment variables -openai = OpenAI() - -speech_file_path = Path(__file__).parent / "speech.mp3" - - -def main() -> None: - stream_to_speakers() - - # Create text-to-speech audio file - with openai.audio.speech.with_streaming_response.create( - model="tts-1", - voice="alloy", - input="the quick brown fox jumped over the lazy dogs", - ) as response: - response.stream_to_file(speech_file_path) - - # Create transcription from audio file - transcription = openai.audio.transcriptions.create( - model="whisper-1", - file=speech_file_path, - ) - print(transcription.text) - - # Create translation from audio file - translation = openai.audio.translations.create( - model="whisper-1", - file=speech_file_path, - ) - print(translation.text) - - -def stream_to_speakers() -> None: - import pyaudio - - player_stream = pyaudio.PyAudio().open(format=pyaudio.paInt16, channels=1, rate=24000, output=True) - - start_time = time.time() - - with openai.audio.speech.with_streaming_response.create( - model="tts-1", - voice="alloy", - response_format="pcm", # similar to WAV, but without a header chunk at the start. 
- input="""I see skies of blue and clouds of white - The bright blessed days, the dark sacred nights - And I think to myself - What a wonderful world""", - ) as response: - print(f"Time to first byte: {int((time.time() - start_time) * 1000)}ms") - for chunk in response.iter_bytes(chunk_size=1024): - player_stream.write(chunk) - - print(f"Done in {int((time.time() - start_time) * 1000)}ms.") - - -if __name__ == "__main__": - main() diff --git a/examples/azure.py b/examples/azure.py deleted file mode 100755 index 6936c4cb0e..0000000000 --- a/examples/azure.py +++ /dev/null @@ -1,43 +0,0 @@ -from openai import AzureOpenAI - -# may change in the future -# https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning -api_version = "2023-07-01-preview" - -# gets the API Key from environment variable AZURE_OPENAI_API_KEY -client = AzureOpenAI( - api_version=api_version, - # https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource - azure_endpoint="https://example-endpoint.openai.azure.com/", -) - -completion = client.chat.completions.create( - model="deployment-name", # e.g. gpt-35-instant - messages=[ - { - "role": "user", - "content": "How do I output all files in a directory using Python?", - }, - ], -) -print(completion.to_json()) - - -deployment_client = AzureOpenAI( - api_version=api_version, - # https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource - azure_endpoint="https://example-resource.azure.openai.com/", - # Navigate to the Azure OpenAI Studio to deploy a model. - azure_deployment="deployment-name", # e.g. gpt-35-instant -) - -completion = deployment_client.chat.completions.create( - model="", - messages=[ - { - "role": "user", - "content": "How do I output all files in a directory using Python?", - }, - ], -) -print(completion.to_json()) diff --git a/examples/azure_ad.py b/examples/azure_ad.py deleted file mode 100755 index 1b0d81863d..0000000000 --- a/examples/azure_ad.py +++ /dev/null @@ -1,30 +0,0 @@ -from azure.identity import DefaultAzureCredential, get_bearer_token_provider - -from openai import AzureOpenAI - -token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") - - -# may change in the future -# https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning -api_version = "2023-07-01-preview" - -# https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource -endpoint = "https://my-resource.openai.azure.com/" - -client = AzureOpenAI( - api_version=api_version, - azure_endpoint=endpoint, - azure_ad_token_provider=token_provider, -) - -completion = client.chat.completions.create( - model="deployment-name", # e.g. 
gpt-35-instant - messages=[ - { - "role": "user", - "content": "How do I output all files in a directory using Python?", - }, - ], -) -print(completion.to_json()) diff --git a/examples/demo.py b/examples/demo.py deleted file mode 100755 index ac1710f3e0..0000000000 --- a/examples/demo.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env -S poetry run python - -from openai import OpenAI - -# gets API Key from environment variable OPENAI_API_KEY -client = OpenAI() - -# Non-streaming: -print("----- standard request -----") -completion = client.chat.completions.create( - model="gpt-4", - messages=[ - { - "role": "user", - "content": "Say this is a test", - }, - ], -) -print(completion.choices[0].message.content) - -# Streaming: -print("----- streaming request -----") -stream = client.chat.completions.create( - model="gpt-4", - messages=[ - { - "role": "user", - "content": "How do I output all files in a directory using Python?", - }, - ], - stream=True, -) -for chunk in stream: - if not chunk.choices: - continue - - print(chunk.choices[0].delta.content, end="") -print() - -# Response headers: -print("----- custom response headers test -----") -response = client.chat.completions.with_raw_response.create( - model="gpt-4", - messages=[ - { - "role": "user", - "content": "Say this is a test", - } - ], -) -completion = response.parse() -print(response.request_id) -print(completion.choices[0].message.content) diff --git a/examples/module_client.py b/examples/module_client.py deleted file mode 100755 index 5f2fb79dcf..0000000000 --- a/examples/module_client.py +++ /dev/null @@ -1,25 +0,0 @@ -import openai - -# will default to `os.environ['OPENAI_API_KEY']` if not explicitly set -openai.api_key = "..." - -# all client options can be configured just like the `OpenAI` instantiation counterpart -openai.base_url = "https://.../" -openai.default_headers = {"x-foo": "true"} - -# all API calls work in the exact same fashion as well -stream = openai.chat.completions.create( - model="gpt-4", - messages=[ - { - "role": "user", - "content": "How do I output all files in a directory using Python?", - }, - ], - stream=True, -) - -for chunk in stream: - print(chunk.choices[0].delta.content or "", end="", flush=True) - -print() diff --git a/examples/picture.py b/examples/picture.py deleted file mode 100644 index c27b52b0da..0000000000 --- a/examples/picture.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python - -from openai import OpenAI - -# gets OPENAI_API_KEY from your environment variables -openai = OpenAI() - -prompt = "An astronaut lounging in a tropical resort in space, pixel art" -model = "dall-e-3" - - -def main() -> None: - # Generate an image based on the prompt - response = openai.images.generate(prompt=prompt, model=model) - - # Prints response containing a URL link to image - print(response) - - -if __name__ == "__main__": - main() diff --git a/examples/streaming.py b/examples/streaming.py deleted file mode 100755 index 9a84891a83..0000000000 --- a/examples/streaming.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env -S poetry run python - -import asyncio - -from openai import OpenAI, AsyncOpenAI - -# This script assumes you have the OPENAI_API_KEY environment variable set to a valid OpenAI API key. 
-# -# You can run this script from the root directory like so: -# `python examples/streaming.py` - - -def sync_main() -> None: - client = OpenAI() - response = client.completions.create( - model="gpt-3.5-turbo-instruct", - prompt="1,2,3,", - max_tokens=5, - temperature=0, - stream=True, - ) - - # You can manually control iteration over the response - first = next(response) - print(f"got response data: {first.to_json()}") - - # Or you could automatically iterate through all of data. - # Note that the for loop will not exit until *all* of the data has been processed. - for data in response: - print(data.to_json()) - - -async def async_main() -> None: - client = AsyncOpenAI() - response = await client.completions.create( - model="gpt-3.5-turbo-instruct", - prompt="1,2,3,", - max_tokens=5, - temperature=0, - stream=True, - ) - - # You can manually control iteration over the response. - # In Python 3.10+ you can also use the `await anext(response)` builtin instead - first = await response.__anext__() - print(f"got response data: {first.to_json()}") - - # Or you could automatically iterate through all of data. - # Note that the for loop will not exit until *all* of the data has been processed. - async for data in response: - print(data.to_json()) - - -sync_main() - -asyncio.run(async_main()) diff --git a/src/openai/lib/.keep b/src/openai/lib/.keep deleted file mode 100644 index 5e2c99fdbe..0000000000 --- a/src/openai/lib/.keep +++ /dev/null @@ -1,4 +0,0 @@ -File generated from our OpenAPI spec by Stainless. - -This directory can be used to store custom files to expand the SDK. -It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. \ No newline at end of file diff --git a/src/openai/lib/_old_api.py b/src/openai/lib/_old_api.py deleted file mode 100644 index 929c87e80b..0000000000 --- a/src/openai/lib/_old_api.py +++ /dev/null @@ -1,72 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, Any -from typing_extensions import override - -from .._utils import LazyProxy -from .._exceptions import OpenAIError - -INSTRUCTIONS = """ - -You tried to access openai.{symbol}, but this is no longer supported in openai>=1.0.0 - see the README at https://github.com/openai/openai-python for the API. - -You can run `openai migrate` to automatically upgrade your codebase to use the 1.0.0 interface. - -Alternatively, you can pin your installation to the old version, e.g. 
`pip install openai==0.28` - -A detailed migration guide is available here: https://github.com/openai/openai-python/discussions/742 -""" - - -class APIRemovedInV1(OpenAIError): - def __init__(self, *, symbol: str) -> None: - super().__init__(INSTRUCTIONS.format(symbol=symbol)) - - -class APIRemovedInV1Proxy(LazyProxy[Any]): - def __init__(self, *, symbol: str) -> None: - super().__init__() - self._symbol = symbol - - @override - def __load__(self) -> Any: - # return the proxy until it is eventually called so that - # we don't break people that are just checking the attributes - # of a module - return self - - def __call__(self, *_args: Any, **_kwargs: Any) -> Any: - raise APIRemovedInV1(symbol=self._symbol) - - -SYMBOLS = [ - "Edit", - "File", - "Audio", - "Image", - "Model", - "Engine", - "Customer", - "FineTune", - "Embedding", - "Completion", - "Deployment", - "Moderation", - "ErrorObject", - "FineTuningJob", - "ChatCompletion", -] - -# we explicitly tell type checkers that nothing is exported -# from this file so that when we re-export the old symbols -# in `openai/__init__.py` they aren't added to the auto-complete -# suggestions given by editors -if TYPE_CHECKING: - __all__: list[str] = [] -else: - __all__ = SYMBOLS - - -__locals = locals() -for symbol in SYMBOLS: - __locals[symbol] = APIRemovedInV1Proxy(symbol=symbol) diff --git a/src/openai/lib/_validators.py b/src/openai/lib/_validators.py deleted file mode 100644 index cf24cd2294..0000000000 --- a/src/openai/lib/_validators.py +++ /dev/null @@ -1,809 +0,0 @@ -# pyright: basic -from __future__ import annotations - -import os -import sys -from typing import Any, TypeVar, Callable, Optional, NamedTuple -from typing_extensions import TypeAlias - -from .._extras import pandas as pd - - -class Remediation(NamedTuple): - name: str - immediate_msg: Optional[str] = None - necessary_msg: Optional[str] = None - necessary_fn: Optional[Callable[[Any], Any]] = None - optional_msg: Optional[str] = None - optional_fn: Optional[Callable[[Any], Any]] = None - error_msg: Optional[str] = None - - -OptionalDataFrameT = TypeVar("OptionalDataFrameT", bound="Optional[pd.DataFrame]") - - -def num_examples_validator(df: pd.DataFrame) -> Remediation: - """ - This validator will only print out the number of examples and recommend to the user to increase the number of examples if less than 100. - """ - MIN_EXAMPLES = 100 - optional_suggestion = ( - "" - if len(df) >= MIN_EXAMPLES - else ". In general, we recommend having at least a few hundred examples. We've found that performance tends to linearly increase for every doubling of the number of examples" - ) - immediate_msg = f"\n- Your file contains {len(df)} prompt-completion pairs{optional_suggestion}" - return Remediation(name="num_examples", immediate_msg=immediate_msg) - - -def necessary_column_validator(df: pd.DataFrame, necessary_column: str) -> Remediation: - """ - This validator will ensure that the necessary column is present in the dataframe. 
- """ - - def lower_case_column(df: pd.DataFrame, column: Any) -> pd.DataFrame: - cols = [c for c in df.columns if str(c).lower() == column] - df.rename(columns={cols[0]: column.lower()}, inplace=True) - return df - - immediate_msg = None - necessary_fn = None - necessary_msg = None - error_msg = None - - if necessary_column not in df.columns: - if necessary_column in [str(c).lower() for c in df.columns]: - - def lower_case_column_creator(df: pd.DataFrame) -> pd.DataFrame: - return lower_case_column(df, necessary_column) - - necessary_fn = lower_case_column_creator - immediate_msg = f"\n- The `{necessary_column}` column/key should be lowercase" - necessary_msg = f"Lower case column name to `{necessary_column}`" - else: - error_msg = f"`{necessary_column}` column/key is missing. Please make sure you name your columns/keys appropriately, then retry" - - return Remediation( - name="necessary_column", - immediate_msg=immediate_msg, - necessary_msg=necessary_msg, - necessary_fn=necessary_fn, - error_msg=error_msg, - ) - - -def additional_column_validator(df: pd.DataFrame, fields: list[str] = ["prompt", "completion"]) -> Remediation: - """ - This validator will remove additional columns from the dataframe. - """ - additional_columns = [] - necessary_msg = None - immediate_msg = None - necessary_fn = None # type: ignore - - if len(df.columns) > 2: - additional_columns = [c for c in df.columns if c not in fields] - warn_message = "" - for ac in additional_columns: - dups = [c for c in additional_columns if ac in c] - if len(dups) > 0: - warn_message += f"\n WARNING: Some of the additional columns/keys contain `{ac}` in their name. These will be ignored, and the column/key `{ac}` will be used instead. This could also result from a duplicate column/key in the provided file." - immediate_msg = f"\n- The input file should contain exactly two columns/keys per row. Additional columns/keys present are: {additional_columns}{warn_message}" - necessary_msg = f"Remove additional columns/keys: {additional_columns}" - - def necessary_fn(x: Any) -> Any: - return x[fields] - - return Remediation( - name="additional_column", - immediate_msg=immediate_msg, - necessary_msg=necessary_msg, - necessary_fn=necessary_fn, - ) - - -def non_empty_field_validator(df: pd.DataFrame, field: str = "completion") -> Remediation: - """ - This validator will ensure that no completion is empty. - """ - necessary_msg = None - necessary_fn = None # type: ignore - immediate_msg = None - - if df[field].apply(lambda x: x == "").any() or df[field].isnull().any(): - empty_rows = (df[field] == "") | (df[field].isnull()) - empty_indexes = df.reset_index().index[empty_rows].tolist() - immediate_msg = f"\n- `{field}` column/key should not contain empty strings. These are rows: {empty_indexes}" - - def necessary_fn(x: Any) -> Any: - return x[x[field] != ""].dropna(subset=[field]) - - necessary_msg = f"Remove {len(empty_indexes)} rows with empty {field}s" - - return Remediation( - name=f"empty_{field}", - immediate_msg=immediate_msg, - necessary_msg=necessary_msg, - necessary_fn=necessary_fn, - ) - - -def duplicated_rows_validator(df: pd.DataFrame, fields: list[str] = ["prompt", "completion"]) -> Remediation: - """ - This validator will suggest to the user to remove duplicate rows if they exist. 
- """ - duplicated_rows = df.duplicated(subset=fields) - duplicated_indexes = df.reset_index().index[duplicated_rows].tolist() - immediate_msg = None - optional_msg = None - optional_fn = None # type: ignore - - if len(duplicated_indexes) > 0: - immediate_msg = f"\n- There are {len(duplicated_indexes)} duplicated {'-'.join(fields)} sets. These are rows: {duplicated_indexes}" - optional_msg = f"Remove {len(duplicated_indexes)} duplicate rows" - - def optional_fn(x: Any) -> Any: - return x.drop_duplicates(subset=fields) - - return Remediation( - name="duplicated_rows", - immediate_msg=immediate_msg, - optional_msg=optional_msg, - optional_fn=optional_fn, - ) - - -def long_examples_validator(df: pd.DataFrame) -> Remediation: - """ - This validator will suggest to the user to remove examples that are too long. - """ - immediate_msg = None - optional_msg = None - optional_fn = None # type: ignore - - ft_type = infer_task_type(df) - if ft_type != "open-ended generation": - - def get_long_indexes(d: pd.DataFrame) -> Any: - long_examples = d.apply(lambda x: len(x.prompt) + len(x.completion) > 10000, axis=1) - return d.reset_index().index[long_examples].tolist() - - long_indexes = get_long_indexes(df) - - if len(long_indexes) > 0: - immediate_msg = f"\n- There are {len(long_indexes)} examples that are very long. These are rows: {long_indexes}\nFor conditional generation, and for classification the examples shouldn't be longer than 2048 tokens." - optional_msg = f"Remove {len(long_indexes)} long examples" - - def optional_fn(x: Any) -> Any: - long_indexes_to_drop = get_long_indexes(x) - if long_indexes != long_indexes_to_drop: - sys.stdout.write( - f"The indices of the long examples has changed as a result of a previously applied recommendation.\nThe {len(long_indexes_to_drop)} long examples to be dropped are now at the following indices: {long_indexes_to_drop}\n" - ) - return x.drop(long_indexes_to_drop) - - return Remediation( - name="long_examples", - immediate_msg=immediate_msg, - optional_msg=optional_msg, - optional_fn=optional_fn, - ) - - -def common_prompt_suffix_validator(df: pd.DataFrame) -> Remediation: - """ - This validator will suggest to add a common suffix to the prompt if one doesn't already exist in case of classification or conditional generation. 
- """ - error_msg = None - immediate_msg = None - optional_msg = None - optional_fn = None # type: ignore - - # Find a suffix which is not contained within the prompt otherwise - suggested_suffix = "\n\n### =>\n\n" - suffix_options = [ - " ->", - "\n\n###\n\n", - "\n\n===\n\n", - "\n\n---\n\n", - "\n\n===>\n\n", - "\n\n--->\n\n", - ] - for suffix_option in suffix_options: - if suffix_option == " ->": - if df.prompt.str.contains("\n").any(): - continue - if df.prompt.str.contains(suffix_option, regex=False).any(): - continue - suggested_suffix = suffix_option - break - display_suggested_suffix = suggested_suffix.replace("\n", "\\n") - - ft_type = infer_task_type(df) - if ft_type == "open-ended generation": - return Remediation(name="common_suffix") - - def add_suffix(x: Any, suffix: Any) -> Any: - x["prompt"] += suffix - return x - - common_suffix = get_common_xfix(df.prompt, xfix="suffix") - if (df.prompt == common_suffix).all(): - error_msg = f"All prompts are identical: `{common_suffix}`\nConsider leaving the prompts blank if you want to do open-ended generation, otherwise ensure prompts are different" - return Remediation(name="common_suffix", error_msg=error_msg) - - if common_suffix != "": - common_suffix_new_line_handled = common_suffix.replace("\n", "\\n") - immediate_msg = f"\n- All prompts end with suffix `{common_suffix_new_line_handled}`" - if len(common_suffix) > 10: - immediate_msg += f". This suffix seems very long. Consider replacing with a shorter suffix, such as `{display_suggested_suffix}`" - if df.prompt.str[: -len(common_suffix)].str.contains(common_suffix, regex=False).any(): - immediate_msg += f"\n WARNING: Some of your prompts contain the suffix `{common_suffix}` more than once. We strongly suggest that you review your prompts and add a unique suffix" - - else: - immediate_msg = "\n- Your data does not contain a common separator at the end of your prompts. Having a separator string appended to the end of the prompt makes it clearer to the fine-tuned model where the completion should begin. See https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more detail and examples. If you intend to do open-ended generation, then you should leave the prompts empty" - - if common_suffix == "": - optional_msg = f"Add a suffix separator `{display_suggested_suffix}` to all prompts" - - def optional_fn(x: Any) -> Any: - return add_suffix(x, suggested_suffix) - - return Remediation( - name="common_completion_suffix", - immediate_msg=immediate_msg, - optional_msg=optional_msg, - optional_fn=optional_fn, - error_msg=error_msg, - ) - - -def common_prompt_prefix_validator(df: pd.DataFrame) -> Remediation: - """ - This validator will suggest to remove a common prefix from the prompt if a long one exist. - """ - MAX_PREFIX_LEN = 12 - - immediate_msg = None - optional_msg = None - optional_fn = None # type: ignore - - common_prefix = get_common_xfix(df.prompt, xfix="prefix") - if common_prefix == "": - return Remediation(name="common_prefix") - - def remove_common_prefix(x: Any, prefix: Any) -> Any: - x["prompt"] = x["prompt"].str[len(prefix) :] - return x - - if (df.prompt == common_prefix).all(): - # already handled by common_suffix_validator - return Remediation(name="common_prefix") - - if common_prefix != "": - immediate_msg = f"\n- All prompts start with prefix `{common_prefix}`" - if MAX_PREFIX_LEN < len(common_prefix): - immediate_msg += ". Fine-tuning doesn't require the instruction specifying the task, or a few-shot example scenario. 
Most of the time you should only add the input data into the prompt, and the desired output into the completion" - optional_msg = f"Remove prefix `{common_prefix}` from all prompts" - - def optional_fn(x: Any) -> Any: - return remove_common_prefix(x, common_prefix) - - return Remediation( - name="common_prompt_prefix", - immediate_msg=immediate_msg, - optional_msg=optional_msg, - optional_fn=optional_fn, - ) - - -def common_completion_prefix_validator(df: pd.DataFrame) -> Remediation: - """ - This validator will suggest to remove a common prefix from the completion if a long one exist. - """ - MAX_PREFIX_LEN = 5 - - common_prefix = get_common_xfix(df.completion, xfix="prefix") - ws_prefix = len(common_prefix) > 0 and common_prefix[0] == " " - if len(common_prefix) < MAX_PREFIX_LEN: - return Remediation(name="common_prefix") - - def remove_common_prefix(x: Any, prefix: Any, ws_prefix: Any) -> Any: - x["completion"] = x["completion"].str[len(prefix) :] - if ws_prefix: - # keep the single whitespace as prefix - x["completion"] = f" {x['completion']}" - return x - - if (df.completion == common_prefix).all(): - # already handled by common_suffix_validator - return Remediation(name="common_prefix") - - immediate_msg = f"\n- All completions start with prefix `{common_prefix}`. Most of the time you should only add the output data into the completion, without any prefix" - optional_msg = f"Remove prefix `{common_prefix}` from all completions" - - def optional_fn(x: Any) -> Any: - return remove_common_prefix(x, common_prefix, ws_prefix) - - return Remediation( - name="common_completion_prefix", - immediate_msg=immediate_msg, - optional_msg=optional_msg, - optional_fn=optional_fn, - ) - - -def common_completion_suffix_validator(df: pd.DataFrame) -> Remediation: - """ - This validator will suggest to add a common suffix to the completion if one doesn't already exist in case of classification or conditional generation. - """ - error_msg = None - immediate_msg = None - optional_msg = None - optional_fn = None # type: ignore - - ft_type = infer_task_type(df) - if ft_type == "open-ended generation" or ft_type == "classification": - return Remediation(name="common_suffix") - - common_suffix = get_common_xfix(df.completion, xfix="suffix") - if (df.completion == common_suffix).all(): - error_msg = f"All completions are identical: `{common_suffix}`\nEnsure completions are different, otherwise the model will just repeat `{common_suffix}`" - return Remediation(name="common_suffix", error_msg=error_msg) - - # Find a suffix which is not contained within the completion otherwise - suggested_suffix = " [END]" - suffix_options = [ - "\n", - ".", - " END", - "***", - "+++", - "&&&", - "$$$", - "@@@", - "%%%", - ] - for suffix_option in suffix_options: - if df.completion.str.contains(suffix_option, regex=False).any(): - continue - suggested_suffix = suffix_option - break - display_suggested_suffix = suggested_suffix.replace("\n", "\\n") - - def add_suffix(x: Any, suffix: Any) -> Any: - x["completion"] += suffix - return x - - if common_suffix != "": - common_suffix_new_line_handled = common_suffix.replace("\n", "\\n") - immediate_msg = f"\n- All completions end with suffix `{common_suffix_new_line_handled}`" - if len(common_suffix) > 10: - immediate_msg += f". This suffix seems very long. 
Consider replacing with a shorter suffix, such as `{display_suggested_suffix}`" - if df.completion.str[: -len(common_suffix)].str.contains(common_suffix, regex=False).any(): - immediate_msg += f"\n WARNING: Some of your completions contain the suffix `{common_suffix}` more than once. We suggest that you review your completions and add a unique ending" - - else: - immediate_msg = "\n- Your data does not contain a common ending at the end of your completions. Having a common ending string appended to the end of the completion makes it clearer to the fine-tuned model where the completion should end. See https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more detail and examples." - - if common_suffix == "": - optional_msg = f"Add a suffix ending `{display_suggested_suffix}` to all completions" - - def optional_fn(x: Any) -> Any: - return add_suffix(x, suggested_suffix) - - return Remediation( - name="common_completion_suffix", - immediate_msg=immediate_msg, - optional_msg=optional_msg, - optional_fn=optional_fn, - error_msg=error_msg, - ) - - -def completions_space_start_validator(df: pd.DataFrame) -> Remediation: - """ - This validator will suggest to add a space at the start of the completion if it doesn't already exist. This helps with tokenization. - """ - - def add_space_start(x: Any) -> Any: - x["completion"] = x["completion"].apply(lambda s: ("" if s.startswith(" ") else " ") + s) - return x - - optional_msg = None - optional_fn = None - immediate_msg = None - - if df.completion.str[:1].nunique() != 1 or df.completion.values[0][0] != " ": - immediate_msg = "\n- The completion should start with a whitespace character (` `). This tends to produce better results due to the tokenization we use. See https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more details" - optional_msg = "Add a whitespace character to the beginning of the completion" - optional_fn = add_space_start - return Remediation( - name="completion_space_start", - immediate_msg=immediate_msg, - optional_msg=optional_msg, - optional_fn=optional_fn, - ) - - -def lower_case_validator(df: pd.DataFrame, column: Any) -> Remediation | None: - """ - This validator will suggest to lowercase the column values, if more than a third of letters are uppercase. - """ - - def lower_case(x: Any) -> Any: - x[column] = x[column].str.lower() - return x - - count_upper = df[column].apply(lambda x: sum(1 for c in x if c.isalpha() and c.isupper())).sum() - count_lower = df[column].apply(lambda x: sum(1 for c in x if c.isalpha() and c.islower())).sum() - - if count_upper * 2 > count_lower: - return Remediation( - name="lower_case", - immediate_msg=f"\n- More than a third of your `{column}` column/key is uppercase. Uppercase {column}s tends to perform worse than a mixture of case encountered in normal language. We recommend to lower case the data if that makes sense in your domain. See https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more details", - optional_msg=f"Lowercase all your data in column/key `{column}`", - optional_fn=lower_case, - ) - return None - - -def read_any_format( - fname: str, fields: list[str] = ["prompt", "completion"] -) -> tuple[pd.DataFrame | None, Remediation]: - """ - This function will read a file saved in .csv, .json, .txt, .xlsx or .tsv format using pandas. 
- - for .xlsx it will read the first sheet - - for .txt it will assume completions and split on newline - """ - remediation = None - necessary_msg = None - immediate_msg = None - error_msg = None - df = None - - if os.path.isfile(fname): - try: - if fname.lower().endswith(".csv") or fname.lower().endswith(".tsv"): - file_extension_str, separator = ("CSV", ",") if fname.lower().endswith(".csv") else ("TSV", "\t") - immediate_msg = ( - f"\n- Based on your file extension, your file is formatted as a {file_extension_str} file" - ) - necessary_msg = f"Your format `{file_extension_str}` will be converted to `JSONL`" - df = pd.read_csv(fname, sep=separator, dtype=str).fillna("") - elif fname.lower().endswith(".xlsx"): - immediate_msg = "\n- Based on your file extension, your file is formatted as an Excel file" - necessary_msg = "Your format `XLSX` will be converted to `JSONL`" - xls = pd.ExcelFile(fname) - sheets = xls.sheet_names - if len(sheets) > 1: - immediate_msg += "\n- Your Excel file contains more than one sheet. Please either save as csv or ensure all data is present in the first sheet. WARNING: Reading only the first sheet..." - df = pd.read_excel(fname, dtype=str).fillna("") - elif fname.lower().endswith(".txt"): - immediate_msg = "\n- Based on your file extension, you provided a text file" - necessary_msg = "Your format `TXT` will be converted to `JSONL`" - with open(fname, "r") as f: - content = f.read() - df = pd.DataFrame( - [["", line] for line in content.split("\n")], - columns=fields, - dtype=str, - ).fillna("") - elif fname.lower().endswith(".jsonl"): - df = pd.read_json(fname, lines=True, dtype=str).fillna("") # type: ignore - if len(df) == 1: # type: ignore - # this is NOT what we expect for a .jsonl file - immediate_msg = "\n- Your JSONL file appears to be in a JSON format. Your file will be converted to JSONL format" - necessary_msg = "Your format `JSON` will be converted to `JSONL`" - df = pd.read_json(fname, dtype=str).fillna("") # type: ignore - else: - pass # this is what we expect for a .jsonl file - elif fname.lower().endswith(".json"): - try: - # to handle case where .json file is actually a .jsonl file - df = pd.read_json(fname, lines=True, dtype=str).fillna("") # type: ignore - if len(df) == 1: # type: ignore - # this code path corresponds to a .json file that has one line - df = pd.read_json(fname, dtype=str).fillna("") # type: ignore - else: - # this is NOT what we expect for a .json file - immediate_msg = "\n- Your JSON file appears to be in a JSONL format. Your file will be converted to JSONL format" - necessary_msg = "Your format `JSON` will be converted to `JSONL`" - except ValueError: - # this code path corresponds to a .json file that has multiple lines (i.e. it is indented) - df = pd.read_json(fname, dtype=str).fillna("") # type: ignore - else: - error_msg = ( - "Your file must have one of the following extensions: .CSV, .TSV, .XLSX, .TXT, .JSON or .JSONL" - ) - if "." in fname: - error_msg += f" Your file `{fname}` ends with the extension `.{fname.split('.')[-1]}` which is not supported." - else: - error_msg += f" Your file `{fname}` is missing a file extension." - - except (ValueError, TypeError): - file_extension_str = fname.split(".")[-1].upper() - error_msg = f"Your file `{fname}` does not appear to be in valid {file_extension_str} format. Please ensure your file is formatted as a valid {file_extension_str} file." - - else: - error_msg = f"File {fname} does not exist." 
- - remediation = Remediation( - name="read_any_format", - necessary_msg=necessary_msg, - immediate_msg=immediate_msg, - error_msg=error_msg, - ) - return df, remediation - - -def format_inferrer_validator(df: pd.DataFrame) -> Remediation: - """ - This validator will infer the likely fine-tuning format of the data, and display it to the user if it is classification. - It will also suggest to use ada and explain train/validation split benefits. - """ - ft_type = infer_task_type(df) - immediate_msg = None - if ft_type == "classification": - immediate_msg = f"\n- Based on your data it seems like you're trying to fine-tune a model for {ft_type}\n- For classification, we recommend you try one of the faster and cheaper models, such as `ada`\n- For classification, you can estimate the expected model performance by keeping a held out dataset, which is not used for training" - return Remediation(name="num_examples", immediate_msg=immediate_msg) - - -def apply_necessary_remediation(df: OptionalDataFrameT, remediation: Remediation) -> OptionalDataFrameT: - """ - This function will apply a necessary remediation to a dataframe, or print an error message if one exists. - """ - if remediation.error_msg is not None: - sys.stderr.write(f"\n\nERROR in {remediation.name} validator: {remediation.error_msg}\n\nAborting...") - sys.exit(1) - if remediation.immediate_msg is not None: - sys.stdout.write(remediation.immediate_msg) - if remediation.necessary_fn is not None: - df = remediation.necessary_fn(df) - return df - - -def accept_suggestion(input_text: str, auto_accept: bool) -> bool: - sys.stdout.write(input_text) - if auto_accept: - sys.stdout.write("Y\n") - return True - return input().lower() != "n" - - -def apply_optional_remediation( - df: pd.DataFrame, remediation: Remediation, auto_accept: bool -) -> tuple[pd.DataFrame, bool]: - """ - This function will apply an optional remediation to a dataframe, based on the user input. - """ - optional_applied = False - input_text = f"- [Recommended] {remediation.optional_msg} [Y/n]: " - if remediation.optional_msg is not None: - if accept_suggestion(input_text, auto_accept): - assert remediation.optional_fn is not None - df = remediation.optional_fn(df) - optional_applied = True - if remediation.necessary_msg is not None: - sys.stdout.write(f"- [Necessary] {remediation.necessary_msg}\n") - return df, optional_applied - - -def estimate_fine_tuning_time(df: pd.DataFrame) -> None: - """ - Estimate the time it'll take to fine-tune the dataset - """ - ft_format = infer_task_type(df) - expected_time = 1.0 - if ft_format == "classification": - num_examples = len(df) - expected_time = num_examples * 1.44 - else: - size = df.memory_usage(index=True).sum() - expected_time = size * 0.0515 - - def format_time(time: float) -> str: - if time < 60: - return f"{round(time, 2)} seconds" - elif time < 3600: - return f"{round(time / 60, 2)} minutes" - elif time < 86400: - return f"{round(time / 3600, 2)} hours" - else: - return f"{round(time / 86400, 2)} days" - - time_string = format_time(expected_time + 140) - sys.stdout.write( - f"Once your model starts training, it'll approximately take {time_string} to train a `curie` model, and less for `ada` and `babbage`. 
Queue will approximately take half an hour per job ahead of you.\n" - ) - - -def get_outfnames(fname: str, split: bool) -> list[str]: - suffixes = ["_train", "_valid"] if split else [""] - i = 0 - while True: - index_suffix = f" ({i})" if i > 0 else "" - candidate_fnames = [f"{os.path.splitext(fname)[0]}_prepared{suffix}{index_suffix}.jsonl" for suffix in suffixes] - if not any(os.path.isfile(f) for f in candidate_fnames): - return candidate_fnames - i += 1 - - -def get_classification_hyperparams(df: pd.DataFrame) -> tuple[int, object]: - n_classes = df.completion.nunique() - pos_class = None - if n_classes == 2: - pos_class = df.completion.value_counts().index[0] - return n_classes, pos_class - - -def write_out_file(df: pd.DataFrame, fname: str, any_remediations: bool, auto_accept: bool) -> None: - """ - This function will write out a dataframe to a file, if the user would like to proceed, and also offer a fine-tuning command with the newly created file. - For classification it will optionally ask the user if they would like to split the data into train/valid files, and modify the suggested command to include the valid set. - """ - ft_format = infer_task_type(df) - common_prompt_suffix = get_common_xfix(df.prompt, xfix="suffix") - common_completion_suffix = get_common_xfix(df.completion, xfix="suffix") - - split = False - input_text = "- [Recommended] Would you like to split into training and validation set? [Y/n]: " - if ft_format == "classification": - if accept_suggestion(input_text, auto_accept): - split = True - - additional_params = "" - common_prompt_suffix_new_line_handled = common_prompt_suffix.replace("\n", "\\n") - common_completion_suffix_new_line_handled = common_completion_suffix.replace("\n", "\\n") - optional_ending_string = ( - f' Make sure to include `stop=["{common_completion_suffix_new_line_handled}"]` so that the generated texts ends at the expected place.' - if len(common_completion_suffix_new_line_handled) > 0 - else "" - ) - - input_text = "\n\nYour data will be written to a new JSONL file. 
Proceed [Y/n]: " - - if not any_remediations and not split: - sys.stdout.write( - f'\nYou can use your file for fine-tuning:\n> openai api fine_tunes.create -t "{fname}"{additional_params}\n\nAfter you’ve fine-tuned a model, remember that your prompt has to end with the indicator string `{common_prompt_suffix_new_line_handled}` for the model to start generating completions, rather than continuing with the prompt.{optional_ending_string}\n' - ) - estimate_fine_tuning_time(df) - - elif accept_suggestion(input_text, auto_accept): - fnames = get_outfnames(fname, split) - if split: - assert len(fnames) == 2 and "train" in fnames[0] and "valid" in fnames[1] - MAX_VALID_EXAMPLES = 1000 - n_train = max(len(df) - MAX_VALID_EXAMPLES, int(len(df) * 0.8)) - df_train = df.sample(n=n_train, random_state=42) - df_valid = df.drop(df_train.index) - df_train[["prompt", "completion"]].to_json( # type: ignore - fnames[0], lines=True, orient="records", force_ascii=False, indent=None - ) - df_valid[["prompt", "completion"]].to_json( - fnames[1], lines=True, orient="records", force_ascii=False, indent=None - ) - - n_classes, pos_class = get_classification_hyperparams(df) - additional_params += " --compute_classification_metrics" - if n_classes == 2: - additional_params += f' --classification_positive_class "{pos_class}"' - else: - additional_params += f" --classification_n_classes {n_classes}" - else: - assert len(fnames) == 1 - df[["prompt", "completion"]].to_json( - fnames[0], lines=True, orient="records", force_ascii=False, indent=None - ) - - # Add -v VALID_FILE if we split the file into train / valid - files_string = ("s" if split else "") + " to `" + ("` and `".join(fnames)) - valid_string = f' -v "{fnames[1]}"' if split else "" - separator_reminder = ( - "" - if len(common_prompt_suffix_new_line_handled) == 0 - else f"After you’ve fine-tuned a model, remember that your prompt has to end with the indicator string `{common_prompt_suffix_new_line_handled}` for the model to start generating completions, rather than continuing with the prompt." - ) - sys.stdout.write( - f'\nWrote modified file{files_string}`\nFeel free to take a look!\n\nNow use that file when fine-tuning:\n> openai api fine_tunes.create -t "{fnames[0]}"{valid_string}{additional_params}\n\n{separator_reminder}{optional_ending_string}\n' - ) - estimate_fine_tuning_time(df) - else: - sys.stdout.write("Aborting... 
did not write the file\n") - - -def infer_task_type(df: pd.DataFrame) -> str: - """ - Infer the likely fine-tuning task type from the data - """ - CLASSIFICATION_THRESHOLD = 3 # min_average instances of each class - if sum(df.prompt.str.len()) == 0: - return "open-ended generation" - - if len(df.completion.unique()) < len(df) / CLASSIFICATION_THRESHOLD: - return "classification" - - return "conditional generation" - - -def get_common_xfix(series: Any, xfix: str = "suffix") -> str: - """ - Finds the longest common suffix or prefix of all the values in a series - """ - common_xfix = "" - while True: - common_xfixes = ( - series.str[-(len(common_xfix) + 1) :] if xfix == "suffix" else series.str[: len(common_xfix) + 1] - ) # first few or last few characters - if common_xfixes.nunique() != 1: # we found the character at which we don't have a unique xfix anymore - break - elif common_xfix == common_xfixes.values[0]: # the entire first row is a prefix of every other row - break - else: # the first or last few characters are still common across all rows - let's try to add one more - common_xfix = common_xfixes.values[0] - return common_xfix - - -Validator: TypeAlias = "Callable[[pd.DataFrame], Remediation | None]" - - -def get_validators() -> list[Validator]: - return [ - num_examples_validator, - lambda x: necessary_column_validator(x, "prompt"), - lambda x: necessary_column_validator(x, "completion"), - additional_column_validator, - non_empty_field_validator, - format_inferrer_validator, - duplicated_rows_validator, - long_examples_validator, - lambda x: lower_case_validator(x, "prompt"), - lambda x: lower_case_validator(x, "completion"), - common_prompt_suffix_validator, - common_prompt_prefix_validator, - common_completion_prefix_validator, - common_completion_suffix_validator, - completions_space_start_validator, - ] - - -def apply_validators( - df: pd.DataFrame, - fname: str, - remediation: Remediation | None, - validators: list[Validator], - auto_accept: bool, - write_out_file_func: Callable[..., Any], -) -> None: - optional_remediations: list[Remediation] = [] - if remediation is not None: - optional_remediations.append(remediation) - for validator in validators: - remediation = validator(df) - if remediation is not None: - optional_remediations.append(remediation) - df = apply_necessary_remediation(df, remediation) - - any_optional_or_necessary_remediations = any( - [ - remediation - for remediation in optional_remediations - if remediation.optional_msg is not None or remediation.necessary_msg is not None - ] - ) - any_necessary_applied = any( - [remediation for remediation in optional_remediations if remediation.necessary_msg is not None] - ) - any_optional_applied = False - - if any_optional_or_necessary_remediations: - sys.stdout.write("\n\nBased on the analysis we will perform the following actions:\n") - for remediation in optional_remediations: - df, optional_applied = apply_optional_remediation(df, remediation, auto_accept) - any_optional_applied = any_optional_applied or optional_applied - else: - sys.stdout.write("\n\nNo remediations found.\n") - - any_optional_or_necessary_applied = any_optional_applied or any_necessary_applied - - write_out_file_func(df, fname, any_optional_or_necessary_applied, auto_accept) diff --git a/src/openai/lib/azure.py b/src/openai/lib/azure.py deleted file mode 100644 index b76b83c61c..0000000000 --- a/src/openai/lib/azure.py +++ /dev/null @@ -1,542 +0,0 @@ -from __future__ import annotations - -import os -import inspect -from typing import Any, 
Union, Mapping, TypeVar, Callable, Awaitable, overload -from typing_extensions import Self, override - -import httpx - -from .._types import NOT_GIVEN, Omit, Timeout, NotGiven -from .._utils import is_given, is_mapping -from .._client import OpenAI, AsyncOpenAI -from .._models import FinalRequestOptions -from .._streaming import Stream, AsyncStream -from .._exceptions import OpenAIError -from .._base_client import DEFAULT_MAX_RETRIES, BaseClient - -_deployments_endpoints = set( - [ - "/completions", - "/chat/completions", - "/embeddings", - "/audio/transcriptions", - "/audio/translations", - "/audio/speech", - "/images/generations", - ] -) - - -AzureADTokenProvider = Callable[[], str] -AsyncAzureADTokenProvider = Callable[[], "str | Awaitable[str]"] -_HttpxClientT = TypeVar("_HttpxClientT", bound=Union[httpx.Client, httpx.AsyncClient]) -_DefaultStreamT = TypeVar("_DefaultStreamT", bound=Union[Stream[Any], AsyncStream[Any]]) - - -# we need to use a sentinel API key value for Azure AD -# as we don't want to make the `api_key` in the main client Optional -# and Azure AD tokens may be retrieved on a per-request basis -API_KEY_SENTINEL = "".join(["<", "missing API key", ">"]) - - -class MutuallyExclusiveAuthError(OpenAIError): - def __init__(self) -> None: - super().__init__( - "The `api_key`, `azure_ad_token` and `azure_ad_token_provider` arguments are mutually exclusive; Only one can be passed at a time" - ) - - -class BaseAzureClient(BaseClient[_HttpxClientT, _DefaultStreamT]): - @override - def _build_request( - self, - options: FinalRequestOptions, - ) -> httpx.Request: - if options.url in _deployments_endpoints and is_mapping(options.json_data): - model = options.json_data.get("model") - if model is not None and not "/deployments" in str(self.base_url): - options.url = f"/deployments/{model}{options.url}" - - return super()._build_request(options) - - -class AzureOpenAI(BaseAzureClient[httpx.Client, Stream[Any]], OpenAI): - @overload - def __init__( - self, - *, - azure_endpoint: str, - azure_deployment: str | None = None, - api_version: str | None = None, - api_key: str | None = None, - azure_ad_token: str | None = None, - azure_ad_token_provider: AzureADTokenProvider | None = None, - organization: str | None = None, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, - max_retries: int = DEFAULT_MAX_RETRIES, - default_headers: Mapping[str, str] | None = None, - default_query: Mapping[str, object] | None = None, - http_client: httpx.Client | None = None, - _strict_response_validation: bool = False, - ) -> None: - ... - - @overload - def __init__( - self, - *, - azure_deployment: str | None = None, - api_version: str | None = None, - api_key: str | None = None, - azure_ad_token: str | None = None, - azure_ad_token_provider: AzureADTokenProvider | None = None, - organization: str | None = None, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, - max_retries: int = DEFAULT_MAX_RETRIES, - default_headers: Mapping[str, str] | None = None, - default_query: Mapping[str, object] | None = None, - http_client: httpx.Client | None = None, - _strict_response_validation: bool = False, - ) -> None: - ... 
- - @overload - def __init__( - self, - *, - base_url: str, - api_version: str | None = None, - api_key: str | None = None, - azure_ad_token: str | None = None, - azure_ad_token_provider: AzureADTokenProvider | None = None, - organization: str | None = None, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, - max_retries: int = DEFAULT_MAX_RETRIES, - default_headers: Mapping[str, str] | None = None, - default_query: Mapping[str, object] | None = None, - http_client: httpx.Client | None = None, - _strict_response_validation: bool = False, - ) -> None: - ... - - def __init__( - self, - *, - api_version: str | None = None, - azure_endpoint: str | None = None, - azure_deployment: str | None = None, - api_key: str | None = None, - azure_ad_token: str | None = None, - azure_ad_token_provider: AzureADTokenProvider | None = None, - organization: str | None = None, - project: str | None = None, - base_url: str | None = None, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, - max_retries: int = DEFAULT_MAX_RETRIES, - default_headers: Mapping[str, str] | None = None, - default_query: Mapping[str, object] | None = None, - http_client: httpx.Client | None = None, - _strict_response_validation: bool = False, - ) -> None: - """Construct a new synchronous azure openai client instance. - - This automatically infers the following arguments from their corresponding environment variables if they are not provided: - - `api_key` from `AZURE_OPENAI_API_KEY` - - `organization` from `OPENAI_ORG_ID` - - `project` from `OPENAI_PROJECT_ID` - - `azure_ad_token` from `AZURE_OPENAI_AD_TOKEN` - - `api_version` from `OPENAI_API_VERSION` - - `azure_endpoint` from `AZURE_OPENAI_ENDPOINT` - - Args: - azure_endpoint: Your Azure endpoint, including the resource, e.g. `https://example-resource.azure.openai.com/` - - azure_ad_token: Your Azure Active Directory token, https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id - - azure_ad_token_provider: A function that returns an Azure Active Directory token, will be invoked on every request. - - azure_deployment: A model deployment, if given sets the base client URL to include `/deployments/{azure_deployment}`. - Note: this means you won't be able to use non-deployment endpoints. Not supported with Assistants APIs. - """ - if api_key is None: - api_key = os.environ.get("AZURE_OPENAI_API_KEY") - - if azure_ad_token is None: - azure_ad_token = os.environ.get("AZURE_OPENAI_AD_TOKEN") - - if api_key is None and azure_ad_token is None and azure_ad_token_provider is None: - raise OpenAIError( - "Missing credentials. Please pass one of `api_key`, `azure_ad_token`, `azure_ad_token_provider`, or the `AZURE_OPENAI_API_KEY` or `AZURE_OPENAI_AD_TOKEN` environment variables." 
- ) - - if api_version is None: - api_version = os.environ.get("OPENAI_API_VERSION") - - if api_version is None: - raise ValueError( - "Must provide either the `api_version` argument or the `OPENAI_API_VERSION` environment variable" - ) - - if default_query is None: - default_query = {"api-version": api_version} - else: - default_query = {**default_query, "api-version": api_version} - - if base_url is None: - if azure_endpoint is None: - azure_endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT") - - if azure_endpoint is None: - raise ValueError( - "Must provide one of the `base_url` or `azure_endpoint` arguments, or the `AZURE_OPENAI_ENDPOINT` environment variable" - ) - - if azure_deployment is not None: - base_url = f"{azure_endpoint}/openai/deployments/{azure_deployment}" - else: - base_url = f"{azure_endpoint}/openai" - else: - if azure_endpoint is not None: - raise ValueError("base_url and azure_endpoint are mutually exclusive") - - if api_key is None: - # define a sentinel value to avoid any typing issues - api_key = API_KEY_SENTINEL - - super().__init__( - api_key=api_key, - organization=organization, - project=project, - base_url=base_url, - timeout=timeout, - max_retries=max_retries, - default_headers=default_headers, - default_query=default_query, - http_client=http_client, - _strict_response_validation=_strict_response_validation, - ) - self._api_version = api_version - self._azure_ad_token = azure_ad_token - self._azure_ad_token_provider = azure_ad_token_provider - - @override - def copy( - self, - *, - api_key: str | None = None, - organization: str | None = None, - project: str | None = None, - api_version: str | None = None, - azure_ad_token: str | None = None, - azure_ad_token_provider: AzureADTokenProvider | None = None, - base_url: str | httpx.URL | None = None, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, - http_client: httpx.Client | None = None, - max_retries: int | NotGiven = NOT_GIVEN, - default_headers: Mapping[str, str] | None = None, - set_default_headers: Mapping[str, str] | None = None, - default_query: Mapping[str, object] | None = None, - set_default_query: Mapping[str, object] | None = None, - _extra_kwargs: Mapping[str, Any] = {}, - ) -> Self: - """ - Create a new client instance re-using the same options given to the current client with optional overriding. 
- """ - return super().copy( - api_key=api_key, - organization=organization, - project=project, - base_url=base_url, - timeout=timeout, - http_client=http_client, - max_retries=max_retries, - default_headers=default_headers, - set_default_headers=set_default_headers, - default_query=default_query, - set_default_query=set_default_query, - _extra_kwargs={ - "api_version": api_version or self._api_version, - "azure_ad_token": azure_ad_token or self._azure_ad_token, - "azure_ad_token_provider": azure_ad_token_provider or self._azure_ad_token_provider, - **_extra_kwargs, - }, - ) - - with_options = copy - - def _get_azure_ad_token(self) -> str | None: - if self._azure_ad_token is not None: - return self._azure_ad_token - - provider = self._azure_ad_token_provider - if provider is not None: - token = provider() - if not token or not isinstance(token, str): # pyright: ignore[reportUnnecessaryIsInstance] - raise ValueError( - f"Expected `azure_ad_token_provider` argument to return a string but it returned {token}", - ) - return token - - return None - - @override - def _prepare_options(self, options: FinalRequestOptions) -> None: - headers: dict[str, str | Omit] = {**options.headers} if is_given(options.headers) else {} - options.headers = headers - - azure_ad_token = self._get_azure_ad_token() - if azure_ad_token is not None: - if headers.get("Authorization") is None: - headers["Authorization"] = f"Bearer {azure_ad_token}" - elif self.api_key is not API_KEY_SENTINEL: - if headers.get("api-key") is None: - headers["api-key"] = self.api_key - else: - # should never be hit - raise ValueError("Unable to handle auth") - - return super()._prepare_options(options) - - -class AsyncAzureOpenAI(BaseAzureClient[httpx.AsyncClient, AsyncStream[Any]], AsyncOpenAI): - @overload - def __init__( - self, - *, - azure_endpoint: str, - azure_deployment: str | None = None, - api_version: str | None = None, - api_key: str | None = None, - azure_ad_token: str | None = None, - azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, - organization: str | None = None, - project: str | None = None, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, - max_retries: int = DEFAULT_MAX_RETRIES, - default_headers: Mapping[str, str] | None = None, - default_query: Mapping[str, object] | None = None, - http_client: httpx.AsyncClient | None = None, - _strict_response_validation: bool = False, - ) -> None: - ... - - @overload - def __init__( - self, - *, - azure_deployment: str | None = None, - api_version: str | None = None, - api_key: str | None = None, - azure_ad_token: str | None = None, - azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, - organization: str | None = None, - project: str | None = None, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, - max_retries: int = DEFAULT_MAX_RETRIES, - default_headers: Mapping[str, str] | None = None, - default_query: Mapping[str, object] | None = None, - http_client: httpx.AsyncClient | None = None, - _strict_response_validation: bool = False, - ) -> None: - ... 
- - @overload - def __init__( - self, - *, - base_url: str, - api_version: str | None = None, - api_key: str | None = None, - azure_ad_token: str | None = None, - azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, - organization: str | None = None, - project: str | None = None, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, - max_retries: int = DEFAULT_MAX_RETRIES, - default_headers: Mapping[str, str] | None = None, - default_query: Mapping[str, object] | None = None, - http_client: httpx.AsyncClient | None = None, - _strict_response_validation: bool = False, - ) -> None: - ... - - def __init__( - self, - *, - azure_endpoint: str | None = None, - azure_deployment: str | None = None, - api_version: str | None = None, - api_key: str | None = None, - azure_ad_token: str | None = None, - azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, - organization: str | None = None, - project: str | None = None, - base_url: str | None = None, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, - max_retries: int = DEFAULT_MAX_RETRIES, - default_headers: Mapping[str, str] | None = None, - default_query: Mapping[str, object] | None = None, - http_client: httpx.AsyncClient | None = None, - _strict_response_validation: bool = False, - ) -> None: - """Construct a new asynchronous azure openai client instance. - - This automatically infers the following arguments from their corresponding environment variables if they are not provided: - - `api_key` from `AZURE_OPENAI_API_KEY` - - `organization` from `OPENAI_ORG_ID` - - `project` from `OPENAI_PROJECT_ID` - - `azure_ad_token` from `AZURE_OPENAI_AD_TOKEN` - - `api_version` from `OPENAI_API_VERSION` - - `azure_endpoint` from `AZURE_OPENAI_ENDPOINT` - - Args: - azure_endpoint: Your Azure endpoint, including the resource, e.g. `https://example-resource.azure.openai.com/` - - azure_ad_token: Your Azure Active Directory token, https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id - - azure_ad_token_provider: A function that returns an Azure Active Directory token, will be invoked on every request. - - azure_deployment: A model deployment, if given sets the base client URL to include `/deployments/{azure_deployment}`. - Note: this means you won't be able to use non-deployment endpoints. Not supported with Assistants APIs. - """ - if api_key is None: - api_key = os.environ.get("AZURE_OPENAI_API_KEY") - - if azure_ad_token is None: - azure_ad_token = os.environ.get("AZURE_OPENAI_AD_TOKEN") - - if api_key is None and azure_ad_token is None and azure_ad_token_provider is None: - raise OpenAIError( - "Missing credentials. Please pass one of `api_key`, `azure_ad_token`, `azure_ad_token_provider`, or the `AZURE_OPENAI_API_KEY` or `AZURE_OPENAI_AD_TOKEN` environment variables." 
- ) - - if api_version is None: - api_version = os.environ.get("OPENAI_API_VERSION") - - if api_version is None: - raise ValueError( - "Must provide either the `api_version` argument or the `OPENAI_API_VERSION` environment variable" - ) - - if default_query is None: - default_query = {"api-version": api_version} - else: - default_query = {**default_query, "api-version": api_version} - - if base_url is None: - if azure_endpoint is None: - azure_endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT") - - if azure_endpoint is None: - raise ValueError( - "Must provide one of the `base_url` or `azure_endpoint` arguments, or the `AZURE_OPENAI_ENDPOINT` environment variable" - ) - - if azure_deployment is not None: - base_url = f"{azure_endpoint}/openai/deployments/{azure_deployment}" - else: - base_url = f"{azure_endpoint}/openai" - else: - if azure_endpoint is not None: - raise ValueError("base_url and azure_endpoint are mutually exclusive") - - if api_key is None: - # define a sentinel value to avoid any typing issues - api_key = API_KEY_SENTINEL - - super().__init__( - api_key=api_key, - organization=organization, - project=project, - base_url=base_url, - timeout=timeout, - max_retries=max_retries, - default_headers=default_headers, - default_query=default_query, - http_client=http_client, - _strict_response_validation=_strict_response_validation, - ) - self._api_version = api_version - self._azure_ad_token = azure_ad_token - self._azure_ad_token_provider = azure_ad_token_provider - - @override - def copy( - self, - *, - api_key: str | None = None, - organization: str | None = None, - project: str | None = None, - api_version: str | None = None, - azure_ad_token: str | None = None, - azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, - base_url: str | httpx.URL | None = None, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, - http_client: httpx.AsyncClient | None = None, - max_retries: int | NotGiven = NOT_GIVEN, - default_headers: Mapping[str, str] | None = None, - set_default_headers: Mapping[str, str] | None = None, - default_query: Mapping[str, object] | None = None, - set_default_query: Mapping[str, object] | None = None, - _extra_kwargs: Mapping[str, Any] = {}, - ) -> Self: - """ - Create a new client instance re-using the same options given to the current client with optional overriding. 
- """ - return super().copy( - api_key=api_key, - organization=organization, - project=project, - base_url=base_url, - timeout=timeout, - http_client=http_client, - max_retries=max_retries, - default_headers=default_headers, - set_default_headers=set_default_headers, - default_query=default_query, - set_default_query=set_default_query, - _extra_kwargs={ - "api_version": api_version or self._api_version, - "azure_ad_token": azure_ad_token or self._azure_ad_token, - "azure_ad_token_provider": azure_ad_token_provider or self._azure_ad_token_provider, - **_extra_kwargs, - }, - ) - - with_options = copy - - async def _get_azure_ad_token(self) -> str | None: - if self._azure_ad_token is not None: - return self._azure_ad_token - - provider = self._azure_ad_token_provider - if provider is not None: - token = provider() - if inspect.isawaitable(token): - token = await token - if not token or not isinstance(token, str): - raise ValueError( - f"Expected `azure_ad_token_provider` argument to return a string but it returned {token}", - ) - return token - - return None - - @override - async def _prepare_options(self, options: FinalRequestOptions) -> None: - headers: dict[str, str | Omit] = {**options.headers} if is_given(options.headers) else {} - options.headers = headers - - azure_ad_token = await self._get_azure_ad_token() - if azure_ad_token is not None: - if headers.get("Authorization") is None: - headers["Authorization"] = f"Bearer {azure_ad_token}" - elif self.api_key is not API_KEY_SENTINEL: - if headers.get("api-key") is None: - headers["api-key"] = self.api_key - else: - # should never be hit - raise ValueError("Unable to handle auth") - - return await super()._prepare_options(options) diff --git a/src/openai/lib/streaming/__init__.py b/src/openai/lib/streaming/__init__.py deleted file mode 100644 index eb378d2561..0000000000 --- a/src/openai/lib/streaming/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from ._assistants import ( - AssistantEventHandler as AssistantEventHandler, - AssistantEventHandlerT as AssistantEventHandlerT, - AssistantStreamManager as AssistantStreamManager, - AsyncAssistantEventHandler as AsyncAssistantEventHandler, - AsyncAssistantEventHandlerT as AsyncAssistantEventHandlerT, - AsyncAssistantStreamManager as AsyncAssistantStreamManager, -) diff --git a/src/openai/lib/streaming/_assistants.py b/src/openai/lib/streaming/_assistants.py deleted file mode 100644 index 03d97ec2eb..0000000000 --- a/src/openai/lib/streaming/_assistants.py +++ /dev/null @@ -1,1035 +0,0 @@ -from __future__ import annotations - -import asyncio -from types import TracebackType -from typing import TYPE_CHECKING, Any, Generic, TypeVar, Callable, Iterable, Iterator, cast -from typing_extensions import Awaitable, AsyncIterable, AsyncIterator, assert_never - -import httpx - -from ..._utils import is_dict, is_list, consume_sync_iterator, consume_async_iterator -from ..._models import construct_type -from ..._streaming import Stream, AsyncStream -from ...types.beta import AssistantStreamEvent -from ...types.beta.threads import ( - Run, - Text, - Message, - ImageFile, - TextDelta, - MessageDelta, - MessageContent, - MessageContentDelta, -) -from ...types.beta.threads.runs import RunStep, ToolCall, RunStepDelta, ToolCallDelta - - -class AssistantEventHandler: - text_deltas: Iterable[str] - """Iterator over just the text deltas in the stream. - - This corresponds to the `thread.message.delta` event - in the API. 
- - ```py - for text in stream.text_deltas: - print(text, end="", flush=True) - print() - ``` - """ - - def __init__(self) -> None: - self._current_event: AssistantStreamEvent | None = None - self._current_message_content_index: int | None = None - self._current_message_content: MessageContent | None = None - self._current_tool_call_index: int | None = None - self._current_tool_call: ToolCall | None = None - self.__current_run_step_id: str | None = None - self.__current_run: Run | None = None - self.__run_step_snapshots: dict[str, RunStep] = {} - self.__message_snapshots: dict[str, Message] = {} - self.__current_message_snapshot: Message | None = None - - self.text_deltas = self.__text_deltas__() - self._iterator = self.__stream__() - self.__stream: Stream[AssistantStreamEvent] | None = None - - def _init(self, stream: Stream[AssistantStreamEvent]) -> None: - if self.__stream: - raise RuntimeError( - "A single event handler cannot be shared between multiple streams; You will need to construct a new event handler instance" - ) - - self.__stream = stream - - def __next__(self) -> AssistantStreamEvent: - return self._iterator.__next__() - - def __iter__(self) -> Iterator[AssistantStreamEvent]: - for item in self._iterator: - yield item - - @property - def current_event(self) -> AssistantStreamEvent | None: - return self._current_event - - @property - def current_run(self) -> Run | None: - return self.__current_run - - @property - def current_run_step_snapshot(self) -> RunStep | None: - if not self.__current_run_step_id: - return None - - return self.__run_step_snapshots[self.__current_run_step_id] - - @property - def current_message_snapshot(self) -> Message | None: - return self.__current_message_snapshot - - def close(self) -> None: - """ - Close the response and release the connection. - - Automatically called when the context manager exits. - """ - if self.__stream: - self.__stream.close() - - def until_done(self) -> None: - """Waits until the stream has been consumed""" - consume_sync_iterator(self) - - def get_final_run(self) -> Run: - """Wait for the stream to finish and returns the completed Run object""" - self.until_done() - - if not self.__current_run: - raise RuntimeError("No final run object found") - - return self.__current_run - - def get_final_run_steps(self) -> list[RunStep]: - """Wait for the stream to finish and returns the steps taken in this run""" - self.until_done() - - if not self.__run_step_snapshots: - raise RuntimeError("No run steps found") - - return [step for step in self.__run_step_snapshots.values()] - - def get_final_messages(self) -> list[Message]: - """Wait for the stream to finish and returns the messages emitted in this run""" - self.until_done() - - if not self.__message_snapshots: - raise RuntimeError("No messages found") - - return [message for message in self.__message_snapshots.values()] - - def __text_deltas__(self) -> Iterator[str]: - for event in self: - if event.event != "thread.message.delta": - continue - - for content_delta in event.data.delta.content or []: - if content_delta.type == "text" and content_delta.text and content_delta.text.value: - yield content_delta.text.value - - # event handlers - - def on_end(self) -> None: - """Fires when the stream has finished. - - This happens if the stream is read to completion - or if an exception occurs during iteration. 
- """ - - def on_event(self, event: AssistantStreamEvent) -> None: - """Callback that is fired for every Server-Sent-Event""" - - def on_run_step_created(self, run_step: RunStep) -> None: - """Callback that is fired when a run step is created""" - - def on_run_step_delta(self, delta: RunStepDelta, snapshot: RunStep) -> None: - """Callback that is fired whenever a run step delta is returned from the API - - The first argument is just the delta as sent by the API and the second argument - is the accumulated snapshot of the run step. For example, a tool calls event may - look like this: - - # delta - tool_calls=[ - RunStepDeltaToolCallsCodeInterpreter( - index=0, - type='code_interpreter', - id=None, - code_interpreter=CodeInterpreter(input=' sympy', outputs=None) - ) - ] - # snapshot - tool_calls=[ - CodeToolCall( - id='call_wKayJlcYV12NiadiZuJXxcfx', - code_interpreter=CodeInterpreter(input='from sympy', outputs=[]), - type='code_interpreter', - index=0 - ) - ], - """ - - def on_run_step_done(self, run_step: RunStep) -> None: - """Callback that is fired when a run step is completed""" - - def on_tool_call_created(self, tool_call: ToolCall) -> None: - """Callback that is fired when a tool call is created""" - - def on_tool_call_delta(self, delta: ToolCallDelta, snapshot: ToolCall) -> None: - """Callback that is fired when a tool call delta is encountered""" - - def on_tool_call_done(self, tool_call: ToolCall) -> None: - """Callback that is fired when a tool call delta is encountered""" - - def on_exception(self, exception: Exception) -> None: - """Fired whenever an exception happens during streaming""" - - def on_timeout(self) -> None: - """Fires if the request times out""" - - def on_message_created(self, message: Message) -> None: - """Callback that is fired when a message is created""" - - def on_message_delta(self, delta: MessageDelta, snapshot: Message) -> None: - """Callback that is fired whenever a message delta is returned from the API - - The first argument is just the delta as sent by the API and the second argument - is the accumulated snapshot of the message. For example, a text content event may - look like this: - - # delta - MessageDeltaText( - index=0, - type='text', - text=Text( - value=' Jane' - ), - ) - # snapshot - MessageContentText( - index=0, - type='text', - text=Text( - value='Certainly, Jane' - ), - ) - """ - - def on_message_done(self, message: Message) -> None: - """Callback that is fired when a message is completed""" - - def on_text_created(self, text: Text) -> None: - """Callback that is fired when a text content block is created""" - - def on_text_delta(self, delta: TextDelta, snapshot: Text) -> None: - """Callback that is fired whenever a text content delta is returned - by the API. - - The first argument is just the delta as sent by the API and the second argument - is the accumulated snapshot of the text. 
For example: - - on_text_delta(TextDelta(value="The"), Text(value="The")), - on_text_delta(TextDelta(value=" solution"), Text(value="The solution")), - on_text_delta(TextDelta(value=" to"), Text(value="The solution to")), - on_text_delta(TextDelta(value=" the"), Text(value="The solution to the")), - on_text_delta(TextDelta(value=" equation"), Text(value="The solution to the equivalent")), - """ - - def on_text_done(self, text: Text) -> None: - """Callback that is fired when a text content block is finished""" - - def on_image_file_done(self, image_file: ImageFile) -> None: - """Callback that is fired when an image file block is finished""" - - def _emit_sse_event(self, event: AssistantStreamEvent) -> None: - self._current_event = event - self.on_event(event) - - self.__current_message_snapshot, new_content = accumulate_event( - event=event, - current_message_snapshot=self.__current_message_snapshot, - ) - if self.__current_message_snapshot is not None: - self.__message_snapshots[self.__current_message_snapshot.id] = self.__current_message_snapshot - - accumulate_run_step( - event=event, - run_step_snapshots=self.__run_step_snapshots, - ) - - for content_delta in new_content: - assert self.__current_message_snapshot is not None - - block = self.__current_message_snapshot.content[content_delta.index] - if block.type == "text": - self.on_text_created(block.text) - - if ( - event.event == "thread.run.completed" - or event.event == "thread.run.cancelled" - or event.event == "thread.run.expired" - or event.event == "thread.run.failed" - or event.event == "thread.run.requires_action" - ): - self.__current_run = event.data - if self._current_tool_call: - self.on_tool_call_done(self._current_tool_call) - elif ( - event.event == "thread.run.created" - or event.event == "thread.run.in_progress" - or event.event == "thread.run.cancelling" - or event.event == "thread.run.queued" - ): - self.__current_run = event.data - elif event.event == "thread.message.created": - self.on_message_created(event.data) - elif event.event == "thread.message.delta": - snapshot = self.__current_message_snapshot - assert snapshot is not None - - message_delta = event.data.delta - if message_delta.content is not None: - for content_delta in message_delta.content: - if content_delta.type == "text" and content_delta.text: - snapshot_content = snapshot.content[content_delta.index] - assert snapshot_content.type == "text" - self.on_text_delta(content_delta.text, snapshot_content.text) - - # If the delta is for a new message content: - # - emit on_text_done/on_image_file_done for the previous message content - # - emit on_text_created/on_image_created for the new message content - if content_delta.index != self._current_message_content_index: - if self._current_message_content is not None: - if self._current_message_content.type == "text": - self.on_text_done(self._current_message_content.text) - elif self._current_message_content.type == "image_file": - self.on_image_file_done(self._current_message_content.image_file) - - self._current_message_content_index = content_delta.index - self._current_message_content = snapshot.content[content_delta.index] - - # Update the current_message_content (delta event is correctly emitted already) - self._current_message_content = snapshot.content[content_delta.index] - - self.on_message_delta(event.data.delta, snapshot) - elif event.event == "thread.message.completed" or event.event == "thread.message.incomplete": - self.__current_message_snapshot = event.data - 
self.__message_snapshots[event.data.id] = event.data - - if self._current_message_content_index is not None: - content = event.data.content[self._current_message_content_index] - if content.type == "text": - self.on_text_done(content.text) - elif content.type == "image_file": - self.on_image_file_done(content.image_file) - - self.on_message_done(event.data) - elif event.event == "thread.run.step.created": - self.__current_run_step_id = event.data.id - self.on_run_step_created(event.data) - elif event.event == "thread.run.step.in_progress": - self.__current_run_step_id = event.data.id - elif event.event == "thread.run.step.delta": - step_snapshot = self.__run_step_snapshots[event.data.id] - - run_step_delta = event.data.delta - if ( - run_step_delta.step_details - and run_step_delta.step_details.type == "tool_calls" - and run_step_delta.step_details.tool_calls is not None - ): - assert step_snapshot.step_details.type == "tool_calls" - for tool_call_delta in run_step_delta.step_details.tool_calls: - if tool_call_delta.index == self._current_tool_call_index: - self.on_tool_call_delta( - tool_call_delta, - step_snapshot.step_details.tool_calls[tool_call_delta.index], - ) - - # If the delta is for a new tool call: - # - emit on_tool_call_done for the previous tool_call - # - emit on_tool_call_created for the new tool_call - if tool_call_delta.index != self._current_tool_call_index: - if self._current_tool_call is not None: - self.on_tool_call_done(self._current_tool_call) - - self._current_tool_call_index = tool_call_delta.index - self._current_tool_call = step_snapshot.step_details.tool_calls[tool_call_delta.index] - self.on_tool_call_created(self._current_tool_call) - - # Update the current_tool_call (delta event is correctly emitted already) - self._current_tool_call = step_snapshot.step_details.tool_calls[tool_call_delta.index] - - self.on_run_step_delta( - event.data.delta, - step_snapshot, - ) - elif ( - event.event == "thread.run.step.completed" - or event.event == "thread.run.step.cancelled" - or event.event == "thread.run.step.expired" - or event.event == "thread.run.step.failed" - ): - if self._current_tool_call: - self.on_tool_call_done(self._current_tool_call) - - self.on_run_step_done(event.data) - self.__current_run_step_id = None - elif event.event == "thread.created" or event.event == "thread.message.in_progress" or event.event == "error": - # currently no special handling - ... - else: - # we only want to error at build-time - if TYPE_CHECKING: # type: ignore[unreachable] - assert_never(event) - - self._current_event = None - - def __stream__(self) -> Iterator[AssistantStreamEvent]: - stream = self.__stream - if not stream: - raise RuntimeError("Stream has not been started yet") - - try: - for event in stream: - self._emit_sse_event(event) - - yield event - except (httpx.TimeoutException, asyncio.TimeoutError) as exc: - self.on_timeout() - self.on_exception(exc) - raise - except Exception as exc: - self.on_exception(exc) - raise - finally: - self.on_end() - - -AssistantEventHandlerT = TypeVar("AssistantEventHandlerT", bound=AssistantEventHandler) - - -class AssistantStreamManager(Generic[AssistantEventHandlerT]): - """Wrapper over AssistantStreamEventHandler that is returned by `.stream()` - so that a context manager can be used. - - ```py - with client.threads.create_and_run_stream(...) as stream: - for event in stream: - ... 
- ``` - """ - - def __init__( - self, - api_request: Callable[[], Stream[AssistantStreamEvent]], - *, - event_handler: AssistantEventHandlerT, - ) -> None: - self.__stream: Stream[AssistantStreamEvent] | None = None - self.__event_handler = event_handler - self.__api_request = api_request - - def __enter__(self) -> AssistantEventHandlerT: - self.__stream = self.__api_request() - self.__event_handler._init(self.__stream) - return self.__event_handler - - def __exit__( - self, - exc_type: type[BaseException] | None, - exc: BaseException | None, - exc_tb: TracebackType | None, - ) -> None: - if self.__stream is not None: - self.__stream.close() - - -class AsyncAssistantEventHandler: - text_deltas: AsyncIterable[str] - """Iterator over just the text deltas in the stream. - - This corresponds to the `thread.message.delta` event - in the API. - - ```py - async for text in stream.text_deltas: - print(text, end="", flush=True) - print() - ``` - """ - - def __init__(self) -> None: - self._current_event: AssistantStreamEvent | None = None - self._current_message_content_index: int | None = None - self._current_message_content: MessageContent | None = None - self._current_tool_call_index: int | None = None - self._current_tool_call: ToolCall | None = None - self.__current_run_step_id: str | None = None - self.__current_run: Run | None = None - self.__run_step_snapshots: dict[str, RunStep] = {} - self.__message_snapshots: dict[str, Message] = {} - self.__current_message_snapshot: Message | None = None - - self.text_deltas = self.__text_deltas__() - self._iterator = self.__stream__() - self.__stream: AsyncStream[AssistantStreamEvent] | None = None - - def _init(self, stream: AsyncStream[AssistantStreamEvent]) -> None: - if self.__stream: - raise RuntimeError( - "A single event handler cannot be shared between multiple streams; You will need to construct a new event handler instance" - ) - - self.__stream = stream - - async def __anext__(self) -> AssistantStreamEvent: - return await self._iterator.__anext__() - - async def __aiter__(self) -> AsyncIterator[AssistantStreamEvent]: - async for item in self._iterator: - yield item - - async def close(self) -> None: - """ - Close the response and release the connection. - - Automatically called when the context manager exits. 
- """ - if self.__stream: - await self.__stream.close() - - @property - def current_event(self) -> AssistantStreamEvent | None: - return self._current_event - - @property - def current_run(self) -> Run | None: - return self.__current_run - - @property - def current_run_step_snapshot(self) -> RunStep | None: - if not self.__current_run_step_id: - return None - - return self.__run_step_snapshots[self.__current_run_step_id] - - @property - def current_message_snapshot(self) -> Message | None: - return self.__current_message_snapshot - - async def until_done(self) -> None: - """Waits until the stream has been consumed""" - await consume_async_iterator(self) - - async def get_final_run(self) -> Run: - """Wait for the stream to finish and returns the completed Run object""" - await self.until_done() - - if not self.__current_run: - raise RuntimeError("No final run object found") - - return self.__current_run - - async def get_final_run_steps(self) -> list[RunStep]: - """Wait for the stream to finish and returns the steps taken in this run""" - await self.until_done() - - if not self.__run_step_snapshots: - raise RuntimeError("No run steps found") - - return [step for step in self.__run_step_snapshots.values()] - - async def get_final_messages(self) -> list[Message]: - """Wait for the stream to finish and returns the messages emitted in this run""" - await self.until_done() - - if not self.__message_snapshots: - raise RuntimeError("No messages found") - - return [message for message in self.__message_snapshots.values()] - - async def __text_deltas__(self) -> AsyncIterator[str]: - async for event in self: - if event.event != "thread.message.delta": - continue - - for content_delta in event.data.delta.content or []: - if content_delta.type == "text" and content_delta.text and content_delta.text.value: - yield content_delta.text.value - - # event handlers - - async def on_end(self) -> None: - """Fires when the stream has finished. - - This happens if the stream is read to completion - or if an exception occurs during iteration. - """ - - async def on_event(self, event: AssistantStreamEvent) -> None: - """Callback that is fired for every Server-Sent-Event""" - - async def on_run_step_created(self, run_step: RunStep) -> None: - """Callback that is fired when a run step is created""" - - async def on_run_step_delta(self, delta: RunStepDelta, snapshot: RunStep) -> None: - """Callback that is fired whenever a run step delta is returned from the API - - The first argument is just the delta as sent by the API and the second argument - is the accumulated snapshot of the run step. 
For example, a tool calls event may - look like this: - - # delta - tool_calls=[ - RunStepDeltaToolCallsCodeInterpreter( - index=0, - type='code_interpreter', - id=None, - code_interpreter=CodeInterpreter(input=' sympy', outputs=None) - ) - ] - # snapshot - tool_calls=[ - CodeToolCall( - id='call_wKayJlcYV12NiadiZuJXxcfx', - code_interpreter=CodeInterpreter(input='from sympy', outputs=[]), - type='code_interpreter', - index=0 - ) - ], - """ - - async def on_run_step_done(self, run_step: RunStep) -> None: - """Callback that is fired when a run step is completed""" - - async def on_tool_call_created(self, tool_call: ToolCall) -> None: - """Callback that is fired when a tool call is created""" - - async def on_tool_call_delta(self, delta: ToolCallDelta, snapshot: ToolCall) -> None: - """Callback that is fired when a tool call delta is encountered""" - - async def on_tool_call_done(self, tool_call: ToolCall) -> None: - """Callback that is fired when a tool call delta is encountered""" - - async def on_exception(self, exception: Exception) -> None: - """Fired whenever an exception happens during streaming""" - - async def on_timeout(self) -> None: - """Fires if the request times out""" - - async def on_message_created(self, message: Message) -> None: - """Callback that is fired when a message is created""" - - async def on_message_delta(self, delta: MessageDelta, snapshot: Message) -> None: - """Callback that is fired whenever a message delta is returned from the API - - The first argument is just the delta as sent by the API and the second argument - is the accumulated snapshot of the message. For example, a text content event may - look like this: - - # delta - MessageDeltaText( - index=0, - type='text', - text=Text( - value=' Jane' - ), - ) - # snapshot - MessageContentText( - index=0, - type='text', - text=Text( - value='Certainly, Jane' - ), - ) - """ - - async def on_message_done(self, message: Message) -> None: - """Callback that is fired when a message is completed""" - - async def on_text_created(self, text: Text) -> None: - """Callback that is fired when a text content block is created""" - - async def on_text_delta(self, delta: TextDelta, snapshot: Text) -> None: - """Callback that is fired whenever a text content delta is returned - by the API. - - The first argument is just the delta as sent by the API and the second argument - is the accumulated snapshot of the text. 
For example: - - on_text_delta(TextDelta(value="The"), Text(value="The")), - on_text_delta(TextDelta(value=" solution"), Text(value="The solution")), - on_text_delta(TextDelta(value=" to"), Text(value="The solution to")), - on_text_delta(TextDelta(value=" the"), Text(value="The solution to the")), - on_text_delta(TextDelta(value=" equation"), Text(value="The solution to the equivalent")), - """ - - async def on_text_done(self, text: Text) -> None: - """Callback that is fired when a text content block is finished""" - - async def on_image_file_done(self, image_file: ImageFile) -> None: - """Callback that is fired when an image file block is finished""" - - async def _emit_sse_event(self, event: AssistantStreamEvent) -> None: - self._current_event = event - await self.on_event(event) - - self.__current_message_snapshot, new_content = accumulate_event( - event=event, - current_message_snapshot=self.__current_message_snapshot, - ) - if self.__current_message_snapshot is not None: - self.__message_snapshots[self.__current_message_snapshot.id] = self.__current_message_snapshot - - accumulate_run_step( - event=event, - run_step_snapshots=self.__run_step_snapshots, - ) - - for content_delta in new_content: - assert self.__current_message_snapshot is not None - - block = self.__current_message_snapshot.content[content_delta.index] - if block.type == "text": - await self.on_text_created(block.text) - - if ( - event.event == "thread.run.completed" - or event.event == "thread.run.cancelled" - or event.event == "thread.run.expired" - or event.event == "thread.run.failed" - or event.event == "thread.run.requires_action" - ): - self.__current_run = event.data - if self._current_tool_call: - await self.on_tool_call_done(self._current_tool_call) - elif ( - event.event == "thread.run.created" - or event.event == "thread.run.in_progress" - or event.event == "thread.run.cancelling" - or event.event == "thread.run.queued" - ): - self.__current_run = event.data - elif event.event == "thread.message.created": - await self.on_message_created(event.data) - elif event.event == "thread.message.delta": - snapshot = self.__current_message_snapshot - assert snapshot is not None - - message_delta = event.data.delta - if message_delta.content is not None: - for content_delta in message_delta.content: - if content_delta.type == "text" and content_delta.text: - snapshot_content = snapshot.content[content_delta.index] - assert snapshot_content.type == "text" - await self.on_text_delta(content_delta.text, snapshot_content.text) - - # If the delta is for a new message content: - # - emit on_text_done/on_image_file_done for the previous message content - # - emit on_text_created/on_image_created for the new message content - if content_delta.index != self._current_message_content_index: - if self._current_message_content is not None: - if self._current_message_content.type == "text": - await self.on_text_done(self._current_message_content.text) - elif self._current_message_content.type == "image_file": - await self.on_image_file_done(self._current_message_content.image_file) - - self._current_message_content_index = content_delta.index - self._current_message_content = snapshot.content[content_delta.index] - - # Update the current_message_content (delta event is correctly emitted already) - self._current_message_content = snapshot.content[content_delta.index] - - await self.on_message_delta(event.data.delta, snapshot) - elif event.event == "thread.message.completed" or event.event == "thread.message.incomplete": - 
self.__current_message_snapshot = event.data - self.__message_snapshots[event.data.id] = event.data - - if self._current_message_content_index is not None: - content = event.data.content[self._current_message_content_index] - if content.type == "text": - await self.on_text_done(content.text) - elif content.type == "image_file": - await self.on_image_file_done(content.image_file) - - await self.on_message_done(event.data) - elif event.event == "thread.run.step.created": - self.__current_run_step_id = event.data.id - await self.on_run_step_created(event.data) - elif event.event == "thread.run.step.in_progress": - self.__current_run_step_id = event.data.id - elif event.event == "thread.run.step.delta": - step_snapshot = self.__run_step_snapshots[event.data.id] - - run_step_delta = event.data.delta - if ( - run_step_delta.step_details - and run_step_delta.step_details.type == "tool_calls" - and run_step_delta.step_details.tool_calls is not None - ): - assert step_snapshot.step_details.type == "tool_calls" - for tool_call_delta in run_step_delta.step_details.tool_calls: - if tool_call_delta.index == self._current_tool_call_index: - await self.on_tool_call_delta( - tool_call_delta, - step_snapshot.step_details.tool_calls[tool_call_delta.index], - ) - - # If the delta is for a new tool call: - # - emit on_tool_call_done for the previous tool_call - # - emit on_tool_call_created for the new tool_call - if tool_call_delta.index != self._current_tool_call_index: - if self._current_tool_call is not None: - await self.on_tool_call_done(self._current_tool_call) - - self._current_tool_call_index = tool_call_delta.index - self._current_tool_call = step_snapshot.step_details.tool_calls[tool_call_delta.index] - await self.on_tool_call_created(self._current_tool_call) - - # Update the current_tool_call (delta event is correctly emitted already) - self._current_tool_call = step_snapshot.step_details.tool_calls[tool_call_delta.index] - - await self.on_run_step_delta( - event.data.delta, - step_snapshot, - ) - elif ( - event.event == "thread.run.step.completed" - or event.event == "thread.run.step.cancelled" - or event.event == "thread.run.step.expired" - or event.event == "thread.run.step.failed" - ): - if self._current_tool_call: - await self.on_tool_call_done(self._current_tool_call) - - await self.on_run_step_done(event.data) - self.__current_run_step_id = None - elif event.event == "thread.created" or event.event == "thread.message.in_progress" or event.event == "error": - # currently no special handling - ... - else: - # we only want to error at build-time - if TYPE_CHECKING: # type: ignore[unreachable] - assert_never(event) - - self._current_event = None - - async def __stream__(self) -> AsyncIterator[AssistantStreamEvent]: - stream = self.__stream - if not stream: - raise RuntimeError("Stream has not been started yet") - - try: - async for event in stream: - await self._emit_sse_event(event) - - yield event - except (httpx.TimeoutException, asyncio.TimeoutError) as exc: - await self.on_timeout() - await self.on_exception(exc) - raise - except Exception as exc: - await self.on_exception(exc) - raise - finally: - await self.on_end() - - -AsyncAssistantEventHandlerT = TypeVar("AsyncAssistantEventHandlerT", bound=AsyncAssistantEventHandler) - - -class AsyncAssistantStreamManager(Generic[AsyncAssistantEventHandlerT]): - """Wrapper over AsyncAssistantStreamEventHandler that is returned by `.stream()` - so that an async context manager can be used without `await`ing the - original client call. 
- - ```py - async with client.threads.create_and_run_stream(...) as stream: - async for event in stream: - ... - ``` - """ - - def __init__( - self, - api_request: Awaitable[AsyncStream[AssistantStreamEvent]], - *, - event_handler: AsyncAssistantEventHandlerT, - ) -> None: - self.__stream: AsyncStream[AssistantStreamEvent] | None = None - self.__event_handler = event_handler - self.__api_request = api_request - - async def __aenter__(self) -> AsyncAssistantEventHandlerT: - self.__stream = await self.__api_request - self.__event_handler._init(self.__stream) - return self.__event_handler - - async def __aexit__( - self, - exc_type: type[BaseException] | None, - exc: BaseException | None, - exc_tb: TracebackType | None, - ) -> None: - if self.__stream is not None: - await self.__stream.close() - - -def accumulate_run_step( - *, - event: AssistantStreamEvent, - run_step_snapshots: dict[str, RunStep], -) -> None: - if event.event == "thread.run.step.created": - run_step_snapshots[event.data.id] = event.data - return - - if event.event == "thread.run.step.delta": - data = event.data - snapshot = run_step_snapshots[data.id] - - if data.delta: - merged = accumulate_delta( - cast( - "dict[object, object]", - snapshot.model_dump(exclude_unset=True), - ), - cast( - "dict[object, object]", - data.delta.model_dump(exclude_unset=True), - ), - ) - run_step_snapshots[snapshot.id] = cast(RunStep, construct_type(type_=RunStep, value=merged)) - - return None - - -def accumulate_event( - *, - event: AssistantStreamEvent, - current_message_snapshot: Message | None, -) -> tuple[Message | None, list[MessageContentDelta]]: - """Returns a tuple of message snapshot and newly created text message deltas""" - if event.event == "thread.message.created": - return event.data, [] - - new_content: list[MessageContentDelta] = [] - - if event.event != "thread.message.delta": - return current_message_snapshot, [] - - if not current_message_snapshot: - raise RuntimeError("Encountered a message delta with no previous snapshot") - - data = event.data - if data.delta.content: - for content_delta in data.delta.content: - try: - block = current_message_snapshot.content[content_delta.index] - except IndexError: - current_message_snapshot.content.insert( - content_delta.index, - cast( - MessageContent, - construct_type( - # mypy doesn't allow Content for some reason - type_=cast(Any, MessageContent), - value=content_delta.model_dump(exclude_unset=True), - ), - ), - ) - new_content.append(content_delta) - else: - merged = accumulate_delta( - cast( - "dict[object, object]", - block.model_dump(exclude_unset=True), - ), - cast( - "dict[object, object]", - content_delta.model_dump(exclude_unset=True), - ), - ) - current_message_snapshot.content[content_delta.index] = cast( - MessageContent, - construct_type( - # mypy doesn't allow Content for some reason - type_=cast(Any, MessageContent), - value=merged, - ), - ) - - return current_message_snapshot, new_content - - -def accumulate_delta(acc: dict[object, object], delta: dict[object, object]) -> dict[object, object]: - for key, delta_value in delta.items(): - if key not in acc: - acc[key] = delta_value - continue - - acc_value = acc[key] - if acc_value is None: - acc[key] = delta_value - continue - - # the `index` property is used in arrays of objects so it should - # not be accumulated like other values e.g. 
- # [{'foo': 'bar', 'index': 0}] - # - # the same applies to `type` properties as they're used for - # discriminated unions - if key == "index" or key == "type": - acc[key] = delta_value - continue - - if isinstance(acc_value, str) and isinstance(delta_value, str): - acc_value += delta_value - elif isinstance(acc_value, (int, float)) and isinstance(delta_value, (int, float)): - acc_value += delta_value - elif is_dict(acc_value) and is_dict(delta_value): - acc_value = accumulate_delta(acc_value, delta_value) - elif is_list(acc_value) and is_list(delta_value): - # for lists of non-dictionary items we'll only ever get new entries - # in the array, existing entries will never be changed - if all(isinstance(x, (str, int, float)) for x in acc_value): - acc_value.extend(delta_value) - continue - - for delta_entry in delta_value: - if not is_dict(delta_entry): - raise TypeError(f"Unexpected list delta entry is not a dictionary: {delta_entry}") - - try: - index = delta_entry["index"] - except KeyError as exc: - raise RuntimeError(f"Expected list delta entry to have an `index` key; {delta_entry}") from exc - - if not isinstance(index, int): - raise TypeError(f"Unexpected, list delta entry `index` value is not an integer; {index}") - - try: - acc_entry = acc_value[index] - except IndexError: - acc_value.insert(index, delta_entry) - else: - if not is_dict(acc_entry): - raise TypeError("not handled yet") - - acc_value[index] = accumulate_delta(acc_entry, delta_entry) - - acc[key] = acc_value - - return acc From 93916eced97afe181dbfe9bc34a64d57992b751f Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 16 May 2024 11:55:36 -0400 Subject: [PATCH 002/192] chore(internal): temporarily remove some code for migration (#1429) --- README.md | 164 +- api.md | 16 - examples/.keep | 4 + helpers.md | 238 -- pyproject.toml | 14 +- requirements-dev.lock | 82 - requirements.lock | 20 - src/openai/__init__.py | 274 -- src/openai/__main__.py | 3 - src/openai/_extras/__init__.py | 2 - src/openai/_extras/_common.py | 21 - src/openai/_extras/numpy_proxy.py | 37 - src/openai/_extras/pandas_proxy.py | 28 - src/openai/_module_client.py | 85 - src/openai/_streaming.py | 38 - src/openai/cli/__init__.py | 1 - src/openai/cli/_api/__init__.py | 1 - src/openai/cli/_api/_main.py | 16 - src/openai/cli/_api/audio.py | 94 - src/openai/cli/_api/chat/__init__.py | 13 - src/openai/cli/_api/chat/completions.py | 156 -- src/openai/cli/_api/completions.py | 173 -- src/openai/cli/_api/files.py | 80 - src/openai/cli/_api/image.py | 139 - src/openai/cli/_api/models.py | 45 - src/openai/cli/_cli.py | 234 -- src/openai/cli/_errors.py | 23 - src/openai/cli/_models.py | 17 - src/openai/cli/_progress.py | 59 - src/openai/cli/_tools/__init__.py | 1 - src/openai/cli/_tools/_main.py | 17 - src/openai/cli/_tools/fine_tunes.py | 63 - src/openai/cli/_tools/migrate.py | 181 -- src/openai/cli/_utils.py | 45 - src/openai/lib/.keep | 4 + .../resources/beta/threads/runs/runs.py | 2415 ++++------------- src/openai/resources/beta/threads/threads.py | 569 ---- .../beta/vector_stores/file_batches.py | 240 +- .../resources/beta/vector_stores/files.py | 178 +- src/openai/resources/embeddings.py | 97 +- src/openai/resources/files.py | 47 - src/openai/types/beta/chat/__init__.py | 3 - src/openai/version.py | 3 - tests/api_resources/beta/threads/test_runs.py | 2 - tests/lib/test_azure.py | 66 - tests/lib/test_old_api.py | 17 - tests/test_module_client.py | 183 -- 47 files changed, 578 insertions(+), 5630 
deletions(-) create mode 100644 examples/.keep delete mode 100644 helpers.md delete mode 100644 src/openai/__main__.py delete mode 100644 src/openai/_extras/__init__.py delete mode 100644 src/openai/_extras/_common.py delete mode 100644 src/openai/_extras/numpy_proxy.py delete mode 100644 src/openai/_extras/pandas_proxy.py delete mode 100644 src/openai/_module_client.py delete mode 100644 src/openai/cli/__init__.py delete mode 100644 src/openai/cli/_api/__init__.py delete mode 100644 src/openai/cli/_api/_main.py delete mode 100644 src/openai/cli/_api/audio.py delete mode 100644 src/openai/cli/_api/chat/__init__.py delete mode 100644 src/openai/cli/_api/chat/completions.py delete mode 100644 src/openai/cli/_api/completions.py delete mode 100644 src/openai/cli/_api/files.py delete mode 100644 src/openai/cli/_api/image.py delete mode 100644 src/openai/cli/_api/models.py delete mode 100644 src/openai/cli/_cli.py delete mode 100644 src/openai/cli/_errors.py delete mode 100644 src/openai/cli/_models.py delete mode 100644 src/openai/cli/_progress.py delete mode 100644 src/openai/cli/_tools/__init__.py delete mode 100644 src/openai/cli/_tools/_main.py delete mode 100644 src/openai/cli/_tools/fine_tunes.py delete mode 100644 src/openai/cli/_tools/migrate.py delete mode 100644 src/openai/cli/_utils.py create mode 100644 src/openai/lib/.keep delete mode 100644 src/openai/types/beta/chat/__init__.py delete mode 100644 src/openai/version.py delete mode 100644 tests/lib/test_azure.py delete mode 100644 tests/lib/test_old_api.py delete mode 100644 tests/test_module_client.py diff --git a/README.md b/README.md index e566a2f8d0..8aad7fcd69 100644 --- a/README.md +++ b/README.md @@ -6,17 +6,12 @@ The OpenAI Python library provides convenient access to the OpenAI REST API from application. The library includes type definitions for all request params and response fields, and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx). -It is generated from our [OpenAPI specification](https://github.com/openai/openai-openapi) with [Stainless](https://stainlessapi.com/). - ## Documentation The REST API documentation can be found [on platform.openai.com](https://platform.openai.com/docs). The full API of this library can be found in [api.md](api.md). ## Installation -> [!IMPORTANT] -> The SDK was rewritten in v1, which was released November 6th 2023. See the [v1 migration guide](https://github.com/openai/openai-python/discussions/742), which includes scripts to automatically update your code. - ```sh # install from PyPI pip install openai @@ -51,56 +46,6 @@ we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) to add `OPENAI_API_KEY="My API Key"` to your `.env` file so that your API Key is not stored in source control. -### Polling Helpers - -When interacting with the API some actions such as starting a Run and adding files to vector stores are asynchronous and take time to complete. The SDK includes -helper functions which will poll the status until it reaches a terminal state and then return the resulting object. -If an API method results in an action which could benefit from polling there will be a corresponding version of the -method ending in '\_and_poll'. 
- -For instance to create a Run and poll until it reaches a terminal state you can run: - -```python -run = client.beta.threads.runs.create_and_poll( - thread_id=thread.id, - assistant_id=assistant.id, -) -``` - -More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/how-it-works/run-lifecycle) - -### Bulk Upload Helpers - -When creating an interacting with vector stores, you can use the polling helpers to monitor the status of operations. -For convenience, we also provide a bulk upload helper to allow you to simultaneously upload several files at once. - -```python -sample_files = [Path("sample-paper.pdf"), ...] - -batch = await client.vector_stores.file_batches.upload_and_poll( - store.id, - files=sample_files, -) -``` - -### Streaming Helpers - -The SDK also includes helpers to process streams and handle the incoming events. - -```python -with client.beta.threads.runs.stream( - thread_id=thread.id, - assistant_id=assistant.id, - instructions="Please address the user as Jane Doe. The user has a premium account.", -) as stream: - for event in stream: - # Print the text from text delta events - if event.type == "thread.message.delta" and event.data.delta.content: - print(event.data.delta.content[0].text) -``` - -More information on streaming helpers can be found in the dedicated documentation: [helpers.md](helpers.md) - ## Async usage Simply import `AsyncOpenAI` instead of `OpenAI` and use `await` with each API call: @@ -143,12 +88,17 @@ from openai import OpenAI client = OpenAI() stream = client.chat.completions.create( - model="gpt-4", - messages=[{"role": "user", "content": "Say this is a test"}], + messages=[ + { + "role": "user", + "content": "Say this is a test", + } + ], + model="gpt-3.5-turbo", stream=True, ) -for chunk in stream: - print(chunk.choices[0].delta.content or "", end="") +for chat_completion in stream: + print(chat_completion) ``` The async client uses the exact same interface. @@ -158,60 +108,20 @@ from openai import AsyncOpenAI client = AsyncOpenAI() - -async def main(): - stream = await client.chat.completions.create( - model="gpt-4", - messages=[{"role": "user", "content": "Say this is a test"}], - stream=True, - ) - async for chunk in stream: - print(chunk.choices[0].delta.content or "", end="") - - -asyncio.run(main()) -``` - -## Module-level client - -> [!IMPORTANT] -> We highly recommend instantiating client instances instead of relying on the global client. - -We also expose a global client instance that is accessible in a similar fashion to versions prior to v1. - -```py -import openai - -# optional; defaults to `os.environ['OPENAI_API_KEY']` -openai.api_key = '...' - -# all client options can be configured just like the `OpenAI` instantiation counterpart -openai.base_url = "https://.../" -openai.default_headers = {"x-foo": "true"} - -completion = openai.chat.completions.create( - model="gpt-4", +stream = await client.chat.completions.create( messages=[ { "role": "user", - "content": "How do I output all files in a directory using Python?", - }, + "content": "Say this is a test", + } ], + model="gpt-3.5-turbo", + stream=True, ) -print(completion.choices[0].message.content) +async for chat_completion in stream: + print(chat_completion) ``` -The API is the exact same as the standard client instance based API. - -This is intended to be used within REPLs or notebooks for faster iteration, **not** in application code. 
- -We recommend that you always instantiate a client (e.g., with `client = OpenAI()`) in application code because: - -- It can be difficult to reason about where client options are configured -- It's not possible to change certain client options without potentially causing race conditions -- It's harder to mock for testing purposes -- It's not possible to control cleanup of network connections - ## Using types Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like: @@ -579,48 +489,6 @@ client = OpenAI( By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting. -## Microsoft Azure OpenAI - -To use this library with [Azure OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/overview), use the `AzureOpenAI` -class instead of the `OpenAI` class. - -> [!IMPORTANT] -> The Azure API shape differs from the core API shape which means that the static types for responses / params -> won't always be correct. - -```py -from openai import AzureOpenAI - -# gets the API Key from environment variable AZURE_OPENAI_API_KEY -client = AzureOpenAI( - # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning - api_version="2023-07-01-preview", - # https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource - azure_endpoint="https://example-endpoint.openai.azure.com/", -) - -completion = client.chat.completions.create( - model="deployment-name", # e.g. gpt-35-instant - messages=[ - { - "role": "user", - "content": "How do I output all files in a directory using Python?", - }, - ], -) -print(completion.to_json()) -``` - -In addition to the options provided in the base `OpenAI` client, the following options are provided: - -- `azure_endpoint` (or the `AZURE_OPENAI_ENDPOINT` environment variable) -- `azure_deployment` -- `api_version` (or the `OPENAI_API_VERSION` environment variable) -- `azure_ad_token` (or the `AZURE_OPENAI_AD_TOKEN` environment variable) -- `azure_ad_token_provider` - -An example of using the client with Azure Active Directory can be found [here](https://github.com/openai/openai-python/blob/main/examples/azure_ad.py). 
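The linked Azure Active Directory example broadly follows a pattern like the sketch below. This is a minimal illustration only, assuming the `azure-identity` package is installed and that your Azure OpenAI resource accepts Entra ID (Azure AD) tokens; it is not a verbatim copy of that file.

```python
# Minimal sketch of Entra ID (Azure AD) auth with AzureOpenAI.
# Assumes `pip install azure-identity` and access to an Azure OpenAI resource.
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from openai import AzureOpenAI

# A callable that returns a fresh bearer token; the client invokes it on every
# request via the `azure_ad_token_provider` hook shown in `src/openai/lib/azure.py`.
token_provider = get_bearer_token_provider(
    DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
)

client = AzureOpenAI(
    api_version="2023-07-01-preview",
    azure_endpoint="https://example-endpoint.openai.azure.com",
    azure_ad_token_provider=token_provider,
)

completion = client.chat.completions.create(
    model="deployment-name",  # your Azure deployment name
    messages=[{"role": "user", "content": "Say this is a test"}],
)
print(completion.choices[0].message.content)
```

Because the provider is called per request, tokens are refreshed automatically as they expire, which is why it is preferred over passing a static `azure_ad_token`.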
- ## Versioning This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions: diff --git a/api.md b/api.md index de69f11dca..c03586c447 100644 --- a/api.md +++ b/api.md @@ -85,7 +85,6 @@ Methods: - client.files.delete(file_id) -> FileDeleted - client.files.content(file_id) -> HttpxBinaryResponseContent - client.files.retrieve_content(file_id) -> str -- client.files.wait_for_processing(\*args) -> FileObject # Images @@ -227,10 +226,6 @@ Methods: - client.beta.vector_stores.files.retrieve(file_id, \*, vector_store_id) -> VectorStoreFile - client.beta.vector_stores.files.list(vector_store_id, \*\*params) -> SyncCursorPage[VectorStoreFile] - client.beta.vector_stores.files.delete(file_id, \*, vector_store_id) -> VectorStoreFileDeleted -- client.beta.vector_stores.files.create_and_poll(\*args) -> VectorStoreFile -- client.beta.vector_stores.files.poll(\*args) -> VectorStoreFile -- client.beta.vector_stores.files.upload(\*args) -> VectorStoreFile -- client.beta.vector_stores.files.upload_and_poll(\*args) -> VectorStoreFile ### FileBatches @@ -246,9 +241,6 @@ Methods: - client.beta.vector_stores.file_batches.retrieve(batch_id, \*, vector_store_id) -> VectorStoreFileBatch - client.beta.vector_stores.file_batches.cancel(batch_id, \*, vector_store_id) -> VectorStoreFileBatch - client.beta.vector_stores.file_batches.list_files(batch_id, \*, vector_store_id, \*\*params) -> SyncCursorPage[VectorStoreFile] -- client.beta.vector_stores.file_batches.create_and_poll(\*args) -> VectorStoreFileBatch -- client.beta.vector_stores.file_batches.poll(\*args) -> VectorStoreFileBatch -- client.beta.vector_stores.file_batches.upload_and_poll(\*args) -> VectorStoreFileBatch ## Assistants @@ -301,8 +293,6 @@ Methods: - client.beta.threads.update(thread_id, \*\*params) -> Thread - client.beta.threads.delete(thread_id) -> ThreadDeleted - client.beta.threads.create_and_run(\*\*params) -> Run -- client.beta.threads.create_and_run_poll(\*args) -> Run -- client.beta.threads.create_and_run_stream(\*args) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT] ### Runs @@ -320,12 +310,6 @@ Methods: - client.beta.threads.runs.list(thread_id, \*\*params) -> SyncCursorPage[Run] - client.beta.threads.runs.cancel(run_id, \*, thread_id) -> Run - client.beta.threads.runs.submit_tool_outputs(run_id, \*, thread_id, \*\*params) -> Run -- client.beta.threads.runs.create_and_poll(\*args) -> Run -- client.beta.threads.runs.create_and_stream(\*args) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT] -- client.beta.threads.runs.poll(\*args) -> Run -- client.beta.threads.runs.stream(\*args) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT] -- client.beta.threads.runs.submit_tool_outputs_and_poll(\*args) -> Run -- client.beta.threads.runs.submit_tool_outputs_stream(\*args) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT] #### Steps diff --git a/examples/.keep b/examples/.keep new file mode 100644 index 0000000000..d8c73e937a --- /dev/null +++ b/examples/.keep @@ -0,0 +1,4 @@ +File generated from our OpenAPI spec by Stainless. + +This directory can be used to store example files demonstrating usage of this SDK. +It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. 
\ No newline at end of file diff --git a/helpers.md b/helpers.md deleted file mode 100644 index 3508b59a33..0000000000 --- a/helpers.md +++ /dev/null @@ -1,238 +0,0 @@ -# Streaming Helpers - -OpenAI supports streaming responses when interacting with the [Assistant](#assistant-streaming-api) APIs. - -## Assistant Streaming API - -OpenAI supports streaming responses from Assistants. The SDK provides convenience wrappers around the API -so you can subscribe to the types of events you are interested in as well as receive accumulated responses. - -More information can be found in the documentation: [Assistant Streaming](https://platform.openai.com/docs/assistants/overview?lang=python) - -#### An example of creating a run and subscribing to some events - -You can subscribe to events by creating an event handler class and overloading the relevant event handlers. - -```python -from typing_extensions import override -from openai import AssistantEventHandler, OpenAI -from openai.types.beta.threads import Text, TextDelta -from openai.types.beta.threads.runs import ToolCall, ToolCallDelta - -client = openai.OpenAI() - -# First, we create a EventHandler class to define -# how we want to handle the events in the response stream. - -class EventHandler(AssistantEventHandler): - @override - def on_text_created(self, text: Text) -> None: - print(f"\nassistant > ", end="", flush=True) - - @override - def on_text_delta(self, delta: TextDelta, snapshot: Text): - print(delta.value, end="", flush=True) - - @override - def on_tool_call_created(self, tool_call: ToolCall): - print(f"\nassistant > {tool_call.type}\n", flush=True) - - @override - def on_tool_call_delta(self, delta: ToolCallDelta, snapshot: ToolCall): - if delta.type == "code_interpreter" and delta.code_interpreter: - if delta.code_interpreter.input: - print(delta.code_interpreter.input, end="", flush=True) - if delta.code_interpreter.outputs: - print(f"\n\noutput >", flush=True) - for output in delta.code_interpreter.outputs: - if output.type == "logs": - print(f"\n{output.logs}", flush=True) - -# Then, we use the `stream` SDK helper -# with the `EventHandler` class to create the Run -# and stream the response. - -with client.beta.threads.runs.stream( - thread_id="thread_id", - assistant_id="assistant_id", - event_handler=EventHandler(), -) as stream: - stream.until_done() -``` - -#### An example of iterating over events - -You can also iterate over all the streamed events. - -```python -with client.beta.threads.runs.stream( - thread_id=thread.id, - assistant_id=assistant.id -) as stream: - for event in stream: - # Print the text from text delta events - if event.event == "thread.message.delta" and event.data.delta.content: - print(event.data.delta.content[0].text) -``` - -#### An example of iterating over text - -You can also iterate over just the text deltas received - -```python -with client.beta.threads.runs.stream( - thread_id=thread.id, - assistant_id=assistant.id -) as stream: - for text in stream.text_deltas: - print(text) -``` - -### Creating Streams - -There are three helper methods for creating streams: - -```python -client.beta.threads.runs.stream() -``` - -This method can be used to start and stream the response to an existing run with an associated thread -that is already populated with messages. - -```python -client.beta.threads.create_and_run_stream() -``` - -This method can be used to add a message to a thread, start a run and then stream the response. 
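As a minimal sketch of that flow (the assistant ID and message content below are placeholders, and the `thread` argument follows the same shape accepted by `create_and_run`):

```python
# Create a thread with one user message, start a run, and stream the text output.
with client.beta.threads.create_and_run_stream(
    assistant_id="assistant_id",  # placeholder assistant ID
    thread={"messages": [{"role": "user", "content": "Say this is a test"}]},
) as stream:
    for text in stream.text_deltas:
        print(text, end="", flush=True)
```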
- -```python -client.beta.threads.runs.submit_tool_outputs_stream() -``` - -This method can be used to submit a tool output to a run waiting on the output and start a stream. - -### Assistant Events - -The assistant API provides events you can subscribe to for the following events. - -```python -def on_event(self, event: AssistantStreamEvent) -``` - -This allows you to subscribe to all the possible raw events sent by the OpenAI streaming API. -In many cases it will be more convenient to subscribe to a more specific set of events for your use case. - -More information on the types of events can be found here: [Events](https://platform.openai.com/docs/api-reference/assistants-streaming/events) - -```python -def on_run_step_created(self, run_step: RunStep) -def on_run_step_delta(self, delta: RunStepDelta, snapshot: RunStep) -def on_run_step_done(self, run_step: RunStep) -``` - -These events allow you to subscribe to the creation, delta and completion of a RunStep. - -For more information on how Runs and RunSteps work see the documentation [Runs and RunSteps](https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps) - -```python -def on_message_created(self, message: Message) -def on_message_delta(self, delta: MessageDelta, snapshot: Message) -def on_message_done(self, message: Message) -``` - -This allows you to subscribe to Message creation, delta and completion events. Messages can contain -different types of content that can be sent from a model (and events are available for specific content types). -For convenience, the delta event includes both the incremental update and an accumulated snapshot of the content. - -More information on messages can be found -on in the documentation page [Message](https://platform.openai.com/docs/api-reference/messages/object). - -```python -def on_text_created(self, text: Text) -def on_text_delta(self, delta: TextDelta, snapshot: Text) -def on_text_done(self, text: Text) -``` - -These events allow you to subscribe to the creation, delta and completion of a Text content (a specific type of message). -For convenience, the delta event includes both the incremental update and an accumulated snapshot of the content. - -```python -def on_image_file_done(self, image_file: ImageFile) -``` - -Image files are not sent incrementally so an event is provided for when a image file is available. - -```python -def on_tool_call_created(self, tool_call: ToolCall) -def on_tool_call_delta(self, delta: ToolCallDelta, snapshot: ToolCall) -def on_tool_call_done(self, tool_call: ToolCall) -``` - -These events allow you to subscribe to events for the creation, delta and completion of a ToolCall. - -More information on tools can be found here [Tools](https://platform.openai.com/docs/assistants/tools) - -```python -def on_end(self) -``` - -The last event send when a stream ends. - -```python -def on_timeout(self) -``` - -This event is triggered if the request times out. - -```python -def on_exception(self, exception: Exception) -``` - -This event is triggered if an exception occurs during streaming. - -### Assistant Methods - -The assistant streaming object also provides a few methods for convenience: - -```python -def current_event() -> AssistantStreamEvent | None -def current_run() -> Run | None -def current_message_snapshot() -> Message | None -def current_run_step_snapshot() -> RunStep | None -``` - -These methods are provided to allow you to access additional context from within event handlers. 
In many cases -the handlers should include all the information you need for processing, but if additional context is required it -can be accessed. - -Note: There is not always a relevant context in certain situations (these will be `None` in those cases). - -```python -def get_final_run(self) -> Run -def get_final_run_steps(self) -> List[RunStep] -def get_final_messages(self) -> List[Message] -``` - -These methods are provided for convenience to collect information at the end of a stream. Calling these events -will trigger consumption of the stream until completion and then return the relevant accumulated objects. - -# Polling Helpers - -When interacting with the API some actions such as starting a Run and adding files to vector stores are asynchronous and take time to complete. -The SDK includes helper functions which will poll the status until it reaches a terminal state and then return the resulting object. -If an API method results in an action which could benefit from polling there will be a corresponding version of the -method ending in `_and_poll`. - -All methods also allow you to set the polling frequency, how often the API is checked for an update, via a function argument (`poll_interval_ms`). - -The polling methods are: - -```python -client.beta.threads.create_and_run_poll(...) -client.beta.threads.runs.create_and_poll(...) -client.beta.threads.runs.submit_tool_ouptputs_and_poll(...) -client.beta.vector_stores.files.upload_and_poll(...) -client.beta.vector_stores.files.create_and_poll(...) -client.beta.vector_stores.file_batches.create_and_poll(...) -client.beta.vector_stores.file_batches.upload_and_poll(...) -``` diff --git a/pyproject.toml b/pyproject.toml index a33e167244..09d794a271 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,7 @@ dependencies = [ "distro>=1.7.0, <2", "sniffio", "cached-property; python_version < '3.8'", - "tqdm > 4" + ] requires-python = ">= 3.7.1" classifiers = [ @@ -36,15 +36,13 @@ classifiers = [ "License :: OSI Approved :: Apache Software License" ] -[project.optional-dependencies] -datalib = ["numpy >= 1", "pandas >= 1.2.3", "pandas-stubs >= 1.1.0.11"] + [project.urls] Homepage = "https://github.com/openai/openai-python" Repository = "https://github.com/openai/openai-python" -[project.scripts] -openai = "openai.cli:main" + [tool.rye] managed = true @@ -60,11 +58,7 @@ dev-dependencies = [ "nox", "dirty-equals>=0.6.0", "importlib-metadata>=6.7.0", - "inline-snapshot >=0.7.0", - "azure-identity >=1.14.1", - "types-tqdm > 4", - "types-pyaudio > 0", - "trio >=0.22.2" + ] [tool.rye.scripts] diff --git a/requirements-dev.lock b/requirements-dev.lock index 6a4e12022a..b6e5d7dc7a 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -15,34 +15,13 @@ anyio==4.1.0 # via openai argcomplete==3.1.2 # via nox -asttokens==2.4.1 - # via inline-snapshot attrs==23.1.0 - # via outcome # via pytest - # via trio -azure-core==1.30.1 - # via azure-identity -azure-identity==1.15.0 -black==24.4.2 - # via inline-snapshot certifi==2023.7.22 # via httpcore # via httpx - # via requests -cffi==1.16.0 - # via cryptography -charset-normalizer==3.3.2 - # via requests -click==8.1.7 - # via black - # via inline-snapshot colorlog==6.7.0 # via nox -cryptography==42.0.7 - # via azure-identity - # via msal - # via pyjwt dirty-equals==0.6.0 distlib==0.3.7 # via virtualenv @@ -50,9 +29,6 @@ distro==1.8.0 # via openai exceptiongroup==1.1.3 # via anyio - # via trio -executing==2.0.1 - # via inline-snapshot filelock==3.12.4 # via virtualenv 
h11==0.14.0 @@ -65,113 +41,55 @@ httpx==0.25.2 idna==3.4 # via anyio # via httpx - # via requests - # via trio importlib-metadata==7.0.0 iniconfig==2.0.0 # via pytest -inline-snapshot==0.7.0 -msal==1.28.0 - # via azure-identity - # via msal-extensions -msal-extensions==1.1.0 - # via azure-identity mypy==1.7.1 mypy-extensions==1.0.0 - # via black # via mypy nodeenv==1.8.0 # via pyright nox==2023.4.22 -numpy==1.26.3 - # via openai - # via pandas - # via pandas-stubs -outcome==1.3.0.post0 - # via trio packaging==23.2 - # via black - # via msal-extensions # via nox # via pytest -pandas==2.1.4 - # via openai -pandas-stubs==2.1.4.231227 - # via openai -pathspec==0.12.1 - # via black platformdirs==3.11.0 - # via black # via virtualenv pluggy==1.3.0 # via pytest -portalocker==2.8.2 - # via msal-extensions py==1.11.0 # via pytest -pycparser==2.22 - # via cffi pydantic==2.7.1 # via openai pydantic-core==2.18.2 # via pydantic -pyjwt==2.8.0 - # via msal pyright==1.1.359 pytest==7.1.1 # via pytest-asyncio pytest-asyncio==0.21.1 python-dateutil==2.8.2 - # via pandas # via time-machine pytz==2023.3.post1 # via dirty-equals - # via pandas -requests==2.31.0 - # via azure-core - # via msal respx==0.20.2 ruff==0.1.9 setuptools==68.2.2 # via nodeenv six==1.16.0 - # via asttokens - # via azure-core # via python-dateutil sniffio==1.3.0 # via anyio # via httpx # via openai - # via trio -sortedcontainers==2.4.0 - # via trio time-machine==2.9.0 -toml==0.10.2 - # via inline-snapshot tomli==2.0.1 - # via black # via mypy # via pytest -tqdm==4.66.1 - # via openai -trio==0.22.2 -types-pyaudio==0.2.16.20240106 -types-pytz==2024.1.0.20240417 - # via pandas-stubs -types-toml==0.10.8.20240310 - # via inline-snapshot -types-tqdm==4.66.0.2 typing-extensions==4.8.0 - # via azure-core - # via black # via mypy # via openai # via pydantic # via pydantic-core -tzdata==2024.1 - # via pandas -urllib3==2.2.1 - # via requests virtualenv==20.24.5 # via nox zipp==3.17.0 diff --git a/requirements.lock b/requirements.lock index 47cf8a40e9..027d407e6f 100644 --- a/requirements.lock +++ b/requirements.lock @@ -29,35 +29,15 @@ httpx==0.25.2 idna==3.4 # via anyio # via httpx -numpy==1.26.4 - # via openai - # via pandas - # via pandas-stubs -pandas==2.2.2 - # via openai -pandas-stubs==2.2.1.240316 - # via openai pydantic==2.7.1 # via openai pydantic-core==2.18.2 # via pydantic -python-dateutil==2.9.0.post0 - # via pandas -pytz==2024.1 - # via pandas -six==1.16.0 - # via python-dateutil sniffio==1.3.0 # via anyio # via httpx # via openai -tqdm==4.66.1 - # via openai -types-pytz==2024.1.0.20240417 - # via pandas-stubs typing-extensions==4.8.0 # via openai # via pydantic # via pydantic-core -tzdata==2024.1 - # via pandas diff --git a/src/openai/__init__.py b/src/openai/__init__.py index 0e87ae9259..1ef8f659a6 100644 --- a/src/openai/__init__.py +++ b/src/openai/__init__.py @@ -1,10 +1,5 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from __future__ import annotations - -import os as _os -from typing_extensions import override - from . 
import types from ._types import NOT_GIVEN, NoneType, NotGiven, Transport, ProxiesTypes from ._utils import file_from_path @@ -72,15 +67,6 @@ "DefaultAsyncHttpxClient", ] -from .lib import azure as _azure -from .version import VERSION as VERSION -from .lib.azure import AzureOpenAI as AzureOpenAI, AsyncAzureOpenAI as AsyncAzureOpenAI -from .lib._old_api import * -from .lib.streaming import ( - AssistantEventHandler as AssistantEventHandler, - AsyncAssistantEventHandler as AsyncAssistantEventHandler, -) - _setup_logging() # Update the __module__ attribute for exported symbols so that @@ -95,263 +81,3 @@ except (TypeError, AttributeError): # Some of our exported symbols are builtins which we can't set attributes for. pass - -# ------ Module level client ------ -import typing as _t -import typing_extensions as _te - -import httpx as _httpx - -from ._base_client import DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES - -api_key: str | None = None - -organization: str | None = None - -project: str | None = None - -base_url: str | _httpx.URL | None = None - -timeout: float | Timeout | None = DEFAULT_TIMEOUT - -max_retries: int = DEFAULT_MAX_RETRIES - -default_headers: _t.Mapping[str, str] | None = None - -default_query: _t.Mapping[str, object] | None = None - -http_client: _httpx.Client | None = None - -_ApiType = _te.Literal["openai", "azure"] - -api_type: _ApiType | None = _t.cast(_ApiType, _os.environ.get("OPENAI_API_TYPE")) - -api_version: str | None = _os.environ.get("OPENAI_API_VERSION") - -azure_endpoint: str | None = _os.environ.get("AZURE_OPENAI_ENDPOINT") - -azure_ad_token: str | None = _os.environ.get("AZURE_OPENAI_AD_TOKEN") - -azure_ad_token_provider: _azure.AzureADTokenProvider | None = None - - -class _ModuleClient(OpenAI): - # Note: we have to use type: ignores here as overriding class members - # with properties is technically unsafe but it is fine for our use case - - @property # type: ignore - @override - def api_key(self) -> str | None: - return api_key - - @api_key.setter # type: ignore - def api_key(self, value: str | None) -> None: # type: ignore - global api_key - - api_key = value - - @property # type: ignore - @override - def organization(self) -> str | None: - return organization - - @organization.setter # type: ignore - def organization(self, value: str | None) -> None: # type: ignore - global organization - - organization = value - - @property # type: ignore - @override - def project(self) -> str | None: - return project - - @project.setter # type: ignore - def project(self, value: str | None) -> None: # type: ignore - global project - - project = value - - @property - @override - def base_url(http://github.com/self) -> _httpx.URL: - if base_url is not None: - return _httpx.URL(base_url) - - return super().base_url - - @base_url.setter - def base_url(http://github.com/self,%20url:%20_httpx.URL%20|%20str) -> None: - super().base_url = url # type: ignore[misc] - - @property # type: ignore - @override - def timeout(self) -> float | Timeout | None: - return timeout - - @timeout.setter # type: ignore - def timeout(self, value: float | Timeout | None) -> None: # type: ignore - global timeout - - timeout = value - - @property # type: ignore - @override - def max_retries(self) -> int: - return max_retries - - @max_retries.setter # type: ignore - def max_retries(self, value: int) -> None: # type: ignore - global max_retries - - max_retries = value - - @property # type: ignore - @override - def _custom_headers(self) -> _t.Mapping[str, str] | None: - return default_headers 
- - @_custom_headers.setter # type: ignore - def _custom_headers(self, value: _t.Mapping[str, str] | None) -> None: # type: ignore - global default_headers - - default_headers = value - - @property # type: ignore - @override - def _custom_query(self) -> _t.Mapping[str, object] | None: - return default_query - - @_custom_query.setter # type: ignore - def _custom_query(self, value: _t.Mapping[str, object] | None) -> None: # type: ignore - global default_query - - default_query = value - - @property # type: ignore - @override - def _client(self) -> _httpx.Client: - return http_client or super()._client - - @_client.setter # type: ignore - def _client(self, value: _httpx.Client) -> None: # type: ignore - global http_client - - http_client = value - - -class _AzureModuleClient(_ModuleClient, AzureOpenAI): # type: ignore - ... - - -class _AmbiguousModuleClientUsageError(OpenAIError): - def __init__(self) -> None: - super().__init__( - "Ambiguous use of module client; please set `openai.api_type` or the `OPENAI_API_TYPE` environment variable to `openai` or `azure`" - ) - - -def _has_openai_credentials() -> bool: - return _os.environ.get("OPENAI_API_KEY") is not None - - -def _has_azure_credentials() -> bool: - return azure_endpoint is not None or _os.environ.get("AZURE_OPENAI_API_KEY") is not None - - -def _has_azure_ad_credentials() -> bool: - return ( - _os.environ.get("AZURE_OPENAI_AD_TOKEN") is not None - or azure_ad_token is not None - or azure_ad_token_provider is not None - ) - - -_client: OpenAI | None = None - - -def _load_client() -> OpenAI: # type: ignore[reportUnusedFunction] - global _client - - if _client is None: - global api_type, azure_endpoint, azure_ad_token, api_version - - if azure_endpoint is None: - azure_endpoint = _os.environ.get("AZURE_OPENAI_ENDPOINT") - - if azure_ad_token is None: - azure_ad_token = _os.environ.get("AZURE_OPENAI_AD_TOKEN") - - if api_version is None: - api_version = _os.environ.get("OPENAI_API_VERSION") - - if api_type is None: - has_openai = _has_openai_credentials() - has_azure = _has_azure_credentials() - has_azure_ad = _has_azure_ad_credentials() - - if has_openai and (has_azure or has_azure_ad): - raise _AmbiguousModuleClientUsageError() - - if (azure_ad_token is not None or azure_ad_token_provider is not None) and _os.environ.get( - "AZURE_OPENAI_API_KEY" - ) is not None: - raise _AmbiguousModuleClientUsageError() - - if has_azure or has_azure_ad: - api_type = "azure" - else: - api_type = "openai" - - if api_type == "azure": - _client = _AzureModuleClient( # type: ignore - api_version=api_version, - azure_endpoint=azure_endpoint, - api_key=api_key, - azure_ad_token=azure_ad_token, - azure_ad_token_provider=azure_ad_token_provider, - organization=organization, - base_url=base_url, - timeout=timeout, - max_retries=max_retries, - default_headers=default_headers, - default_query=default_query, - http_client=http_client, - ) - return _client - - _client = _ModuleClient( - api_key=api_key, - organization=organization, - project=project, - base_url=base_url, - timeout=timeout, - max_retries=max_retries, - default_headers=default_headers, - default_query=default_query, - http_client=http_client, - ) - return _client - - return _client - - -def _reset_client() -> None: # type: ignore[reportUnusedFunction] - global _client - - _client = None - - -from ._module_client import ( - beta as beta, - chat as chat, - audio as audio, - files as files, - images as images, - models as models, - batches as batches, - embeddings as embeddings, - completions as 
completions, - fine_tuning as fine_tuning, - moderations as moderations, -) diff --git a/src/openai/__main__.py b/src/openai/__main__.py deleted file mode 100644 index 4e28416e10..0000000000 --- a/src/openai/__main__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .cli import main - -main() diff --git a/src/openai/_extras/__init__.py b/src/openai/_extras/__init__.py deleted file mode 100644 index 864dac4171..0000000000 --- a/src/openai/_extras/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .numpy_proxy import numpy as numpy, has_numpy as has_numpy -from .pandas_proxy import pandas as pandas diff --git a/src/openai/_extras/_common.py b/src/openai/_extras/_common.py deleted file mode 100644 index 6e71720e64..0000000000 --- a/src/openai/_extras/_common.py +++ /dev/null @@ -1,21 +0,0 @@ -from .._exceptions import OpenAIError - -INSTRUCTIONS = """ - -OpenAI error: - - missing `{library}` - -This feature requires additional dependencies: - - $ pip install openai[{extra}] - -""" - - -def format_instructions(*, library: str, extra: str) -> str: - return INSTRUCTIONS.format(library=library, extra=extra) - - -class MissingDependencyError(OpenAIError): - pass diff --git a/src/openai/_extras/numpy_proxy.py b/src/openai/_extras/numpy_proxy.py deleted file mode 100644 index 27880bf132..0000000000 --- a/src/openai/_extras/numpy_proxy.py +++ /dev/null @@ -1,37 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, Any -from typing_extensions import override - -from .._utils import LazyProxy -from ._common import MissingDependencyError, format_instructions - -if TYPE_CHECKING: - import numpy as numpy - - -NUMPY_INSTRUCTIONS = format_instructions(library="numpy", extra="datalib") - - -class NumpyProxy(LazyProxy[Any]): - @override - def __load__(self) -> Any: - try: - import numpy - except ImportError as err: - raise MissingDependencyError(NUMPY_INSTRUCTIONS) from err - - return numpy - - -if not TYPE_CHECKING: - numpy = NumpyProxy() - - -def has_numpy() -> bool: - try: - import numpy # noqa: F401 # pyright: ignore[reportUnusedImport] - except ImportError: - return False - - return True diff --git a/src/openai/_extras/pandas_proxy.py b/src/openai/_extras/pandas_proxy.py deleted file mode 100644 index 686377bade..0000000000 --- a/src/openai/_extras/pandas_proxy.py +++ /dev/null @@ -1,28 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, Any -from typing_extensions import override - -from .._utils import LazyProxy -from ._common import MissingDependencyError, format_instructions - -if TYPE_CHECKING: - import pandas as pandas - - -PANDAS_INSTRUCTIONS = format_instructions(library="pandas", extra="datalib") - - -class PandasProxy(LazyProxy[Any]): - @override - def __load__(self) -> Any: - try: - import pandas - except ImportError as err: - raise MissingDependencyError(PANDAS_INSTRUCTIONS) from err - - return pandas - - -if not TYPE_CHECKING: - pandas = PandasProxy() diff --git a/src/openai/_module_client.py b/src/openai/_module_client.py deleted file mode 100644 index 6f7356eb3c..0000000000 --- a/src/openai/_module_client.py +++ /dev/null @@ -1,85 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import override - -from . 
import resources, _load_client -from ._utils import LazyProxy - - -class ChatProxy(LazyProxy[resources.Chat]): - @override - def __load__(self) -> resources.Chat: - return _load_client().chat - - -class BetaProxy(LazyProxy[resources.Beta]): - @override - def __load__(self) -> resources.Beta: - return _load_client().beta - - -class FilesProxy(LazyProxy[resources.Files]): - @override - def __load__(self) -> resources.Files: - return _load_client().files - - -class AudioProxy(LazyProxy[resources.Audio]): - @override - def __load__(self) -> resources.Audio: - return _load_client().audio - - -class ImagesProxy(LazyProxy[resources.Images]): - @override - def __load__(self) -> resources.Images: - return _load_client().images - - -class ModelsProxy(LazyProxy[resources.Models]): - @override - def __load__(self) -> resources.Models: - return _load_client().models - - -class BatchesProxy(LazyProxy[resources.Batches]): - @override - def __load__(self) -> resources.Batches: - return _load_client().batches - - -class EmbeddingsProxy(LazyProxy[resources.Embeddings]): - @override - def __load__(self) -> resources.Embeddings: - return _load_client().embeddings - - -class CompletionsProxy(LazyProxy[resources.Completions]): - @override - def __load__(self) -> resources.Completions: - return _load_client().completions - - -class ModerationsProxy(LazyProxy[resources.Moderations]): - @override - def __load__(self) -> resources.Moderations: - return _load_client().moderations - - -class FineTuningProxy(LazyProxy[resources.FineTuning]): - @override - def __load__(self) -> resources.FineTuning: - return _load_client().fine_tuning - - -chat: resources.Chat = ChatProxy().__as_proxied__() -beta: resources.Beta = BetaProxy().__as_proxied__() -files: resources.Files = FilesProxy().__as_proxied__() -audio: resources.Audio = AudioProxy().__as_proxied__() -images: resources.Images = ImagesProxy().__as_proxied__() -models: resources.Models = ModelsProxy().__as_proxied__() -batches: resources.Batches = BatchesProxy().__as_proxied__() -embeddings: resources.Embeddings = EmbeddingsProxy().__as_proxied__() -completions: resources.Completions = CompletionsProxy().__as_proxied__() -moderations: resources.Moderations = ModerationsProxy().__as_proxied__() -fine_tuning: resources.FineTuning = FineTuningProxy().__as_proxied__() diff --git a/src/openai/_streaming.py b/src/openai/_streaming.py index 0fda992cff..3a5c9571a1 100644 --- a/src/openai/_streaming.py +++ b/src/openai/_streaming.py @@ -77,25 +77,6 @@ def __stream__(self) -> Iterator[_T]: yield process_data(data=data, cast_to=cast_to, response=response) - else: - data = sse.json() - - if sse.event == "error" and is_mapping(data) and data.get("error"): - message = None - error = data.get("error") - if is_mapping(error): - message = error.get("message") - if not message or not isinstance(message, str): - message = "An error occurred during streaming" - - raise APIError( - message=message, - request=self.response.request, - body=data["error"], - ) - - yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) - # Ensure the entire stream is consumed for _sse in iterator: ... 
@@ -179,25 +160,6 @@ async def __stream__(self) -> AsyncIterator[_T]: yield process_data(data=data, cast_to=cast_to, response=response) - else: - data = sse.json() - - if sse.event == "error" and is_mapping(data) and data.get("error"): - message = None - error = data.get("error") - if is_mapping(error): - message = error.get("message") - if not message or not isinstance(message, str): - message = "An error occurred during streaming" - - raise APIError( - message=message, - request=self.response.request, - body=data["error"], - ) - - yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) - # Ensure the entire stream is consumed async for _sse in iterator: ... diff --git a/src/openai/cli/__init__.py b/src/openai/cli/__init__.py deleted file mode 100644 index d453d5e179..0000000000 --- a/src/openai/cli/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from ._cli import main as main diff --git a/src/openai/cli/_api/__init__.py b/src/openai/cli/_api/__init__.py deleted file mode 100644 index 56a0260a6d..0000000000 --- a/src/openai/cli/_api/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from ._main import register_commands as register_commands diff --git a/src/openai/cli/_api/_main.py b/src/openai/cli/_api/_main.py deleted file mode 100644 index fe5a5e6fc0..0000000000 --- a/src/openai/cli/_api/_main.py +++ /dev/null @@ -1,16 +0,0 @@ -from __future__ import annotations - -from argparse import ArgumentParser - -from . import chat, audio, files, image, models, completions - - -def register_commands(parser: ArgumentParser) -> None: - subparsers = parser.add_subparsers(help="All API subcommands") - - chat.register(subparsers) - image.register(subparsers) - audio.register(subparsers) - files.register(subparsers) - models.register(subparsers) - completions.register(subparsers) diff --git a/src/openai/cli/_api/audio.py b/src/openai/cli/_api/audio.py deleted file mode 100644 index 90d21b9932..0000000000 --- a/src/openai/cli/_api/audio.py +++ /dev/null @@ -1,94 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, Any, Optional, cast -from argparse import ArgumentParser - -from .._utils import get_client, print_model -from ..._types import NOT_GIVEN -from .._models import BaseModel -from .._progress import BufferReader - -if TYPE_CHECKING: - from argparse import _SubParsersAction - - -def register(subparser: _SubParsersAction[ArgumentParser]) -> None: - # transcriptions - sub = subparser.add_parser("audio.transcriptions.create") - - # Required - sub.add_argument("-m", "--model", type=str, default="whisper-1") - sub.add_argument("-f", "--file", type=str, required=True) - # Optional - sub.add_argument("--response-format", type=str) - sub.add_argument("--language", type=str) - sub.add_argument("-t", "--temperature", type=float) - sub.add_argument("--prompt", type=str) - sub.set_defaults(func=CLIAudio.transcribe, args_model=CLITranscribeArgs) - - # translations - sub = subparser.add_parser("audio.translations.create") - - # Required - sub.add_argument("-f", "--file", type=str, required=True) - # Optional - sub.add_argument("-m", "--model", type=str, default="whisper-1") - sub.add_argument("--response-format", type=str) - # TODO: doesn't seem to be supported by the API - # sub.add_argument("--language", type=str) - sub.add_argument("-t", "--temperature", type=float) - sub.add_argument("--prompt", type=str) - sub.set_defaults(func=CLIAudio.translate, args_model=CLITranslationArgs) - - -class CLITranscribeArgs(BaseModel): - model: str - file: str - 
response_format: Optional[str] = None - language: Optional[str] = None - temperature: Optional[float] = None - prompt: Optional[str] = None - - -class CLITranslationArgs(BaseModel): - model: str - file: str - response_format: Optional[str] = None - language: Optional[str] = None - temperature: Optional[float] = None - prompt: Optional[str] = None - - -class CLIAudio: - @staticmethod - def transcribe(args: CLITranscribeArgs) -> None: - with open(args.file, "rb") as file_reader: - buffer_reader = BufferReader(file_reader.read(), desc="Upload progress") - - model = get_client().audio.transcriptions.create( - file=(args.file, buffer_reader), - model=args.model, - language=args.language or NOT_GIVEN, - temperature=args.temperature or NOT_GIVEN, - prompt=args.prompt or NOT_GIVEN, - # casts required because the API is typed for enums - # but we don't want to validate that here for forwards-compat - response_format=cast(Any, args.response_format), - ) - print_model(model) - - @staticmethod - def translate(args: CLITranslationArgs) -> None: - with open(args.file, "rb") as file_reader: - buffer_reader = BufferReader(file_reader.read(), desc="Upload progress") - - model = get_client().audio.translations.create( - file=(args.file, buffer_reader), - model=args.model, - temperature=args.temperature or NOT_GIVEN, - prompt=args.prompt or NOT_GIVEN, - # casts required because the API is typed for enums - # but we don't want to validate that here for forwards-compat - response_format=cast(Any, args.response_format), - ) - print_model(model) diff --git a/src/openai/cli/_api/chat/__init__.py b/src/openai/cli/_api/chat/__init__.py deleted file mode 100644 index 87d971630a..0000000000 --- a/src/openai/cli/_api/chat/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING -from argparse import ArgumentParser - -from . import completions - -if TYPE_CHECKING: - from argparse import _SubParsersAction - - -def register(subparser: _SubParsersAction[ArgumentParser]) -> None: - completions.register(subparser) diff --git a/src/openai/cli/_api/chat/completions.py b/src/openai/cli/_api/chat/completions.py deleted file mode 100644 index c299741fe0..0000000000 --- a/src/openai/cli/_api/chat/completions.py +++ /dev/null @@ -1,156 +0,0 @@ -from __future__ import annotations - -import sys -from typing import TYPE_CHECKING, List, Optional, cast -from argparse import ArgumentParser -from typing_extensions import Literal, NamedTuple - -from ..._utils import get_client -from ..._models import BaseModel -from ...._streaming import Stream -from ....types.chat import ( - ChatCompletionRole, - ChatCompletionChunk, - CompletionCreateParams, -) -from ....types.chat.completion_create_params import ( - CompletionCreateParamsStreaming, - CompletionCreateParamsNonStreaming, -) - -if TYPE_CHECKING: - from argparse import _SubParsersAction - - -def register(subparser: _SubParsersAction[ArgumentParser]) -> None: - sub = subparser.add_parser("chat.completions.create") - - sub._action_groups.pop() - req = sub.add_argument_group("required arguments") - opt = sub.add_argument_group("optional arguments") - - req.add_argument( - "-g", - "--message", - action="http://github.com/append", - nargs=2, - metavar=("ROLE", "CONTENT"), - help="A message in `{role} {content}` format. 
Use this argument multiple times to add multiple messages.", - required=True, - ) - req.add_argument( - "-m", - "--model", - help="The model to use.", - required=True, - ) - - opt.add_argument( - "-n", - "--n", - help="How many completions to generate for the conversation.", - type=int, - ) - opt.add_argument("-M", "--max-tokens", help="The maximum number of tokens to generate.", type=int) - opt.add_argument( - "-t", - "--temperature", - help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. - -Mutually exclusive with `top_p`.""", - type=float, - ) - opt.add_argument( - "-P", - "--top_p", - help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered. - - Mutually exclusive with `temperature`.""", - type=float, - ) - opt.add_argument( - "--stop", - help="A stop sequence at which to stop generating tokens for the message.", - ) - opt.add_argument("--stream", help="Stream messages as they're ready.", action="http://github.com/store_true") - sub.set_defaults(func=CLIChatCompletion.create, args_model=CLIChatCompletionCreateArgs) - - -class CLIMessage(NamedTuple): - role: ChatCompletionRole - content: str - - -class CLIChatCompletionCreateArgs(BaseModel): - message: List[CLIMessage] - model: str - n: Optional[int] = None - max_tokens: Optional[int] = None - temperature: Optional[float] = None - top_p: Optional[float] = None - stop: Optional[str] = None - stream: bool = False - - -class CLIChatCompletion: - @staticmethod - def create(args: CLIChatCompletionCreateArgs) -> None: - params: CompletionCreateParams = { - "model": args.model, - "messages": [ - {"role": cast(Literal["user"], message.role), "content": message.content} for message in args.message - ], - "n": args.n, - "temperature": args.temperature, - "top_p": args.top_p, - "stop": args.stop, - # type checkers are not good at inferring union types so we have to set stream afterwards - "stream": False, - } - if args.stream: - params["stream"] = args.stream # type: ignore - if args.max_tokens is not None: - params["max_tokens"] = args.max_tokens - - if args.stream: - return CLIChatCompletion._stream_create(cast(CompletionCreateParamsStreaming, params)) - - return CLIChatCompletion._create(cast(CompletionCreateParamsNonStreaming, params)) - - @staticmethod - def _create(params: CompletionCreateParamsNonStreaming) -> None: - completion = get_client().chat.completions.create(**params) - should_print_header = len(completion.choices) > 1 - for choice in completion.choices: - if should_print_header: - sys.stdout.write("===== Chat Completion {} =====\n".format(choice.index)) - - content = choice.message.content if choice.message.content is not None else "None" - sys.stdout.write(content) - - if should_print_header or not content.endswith("\n"): - sys.stdout.write("\n") - - sys.stdout.flush() - - @staticmethod - def _stream_create(params: CompletionCreateParamsStreaming) -> None: - # cast is required for mypy - stream = cast( # pyright: ignore[reportUnnecessaryCast] - Stream[ChatCompletionChunk], get_client().chat.completions.create(**params) - ) - for chunk in stream: - should_print_header = len(chunk.choices) > 1 - for choice in chunk.choices: - if should_print_header: - sys.stdout.write("===== Chat Completion {} 
=====\n".format(choice.index)) - - content = choice.delta.content or "" - sys.stdout.write(content) - - if should_print_header: - sys.stdout.write("\n") - - sys.stdout.flush() - - sys.stdout.write("\n") diff --git a/src/openai/cli/_api/completions.py b/src/openai/cli/_api/completions.py deleted file mode 100644 index cbdb35bf3a..0000000000 --- a/src/openai/cli/_api/completions.py +++ /dev/null @@ -1,173 +0,0 @@ -from __future__ import annotations - -import sys -from typing import TYPE_CHECKING, Optional, cast -from argparse import ArgumentParser -from functools import partial - -from openai.types.completion import Completion - -from .._utils import get_client -from ..._types import NOT_GIVEN, NotGivenOr -from ..._utils import is_given -from .._errors import CLIError -from .._models import BaseModel -from ..._streaming import Stream - -if TYPE_CHECKING: - from argparse import _SubParsersAction - - -def register(subparser: _SubParsersAction[ArgumentParser]) -> None: - sub = subparser.add_parser("completions.create") - - # Required - sub.add_argument( - "-m", - "--model", - help="The model to use", - required=True, - ) - - # Optional - sub.add_argument("-p", "--prompt", help="An optional prompt to complete from") - sub.add_argument("--stream", help="Stream tokens as they're ready.", action="http://github.com/store_true") - sub.add_argument("-M", "--max-tokens", help="The maximum number of tokens to generate", type=int) - sub.add_argument( - "-t", - "--temperature", - help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. - -Mutually exclusive with `top_p`.""", - type=float, - ) - sub.add_argument( - "-P", - "--top_p", - help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered. - - Mutually exclusive with `temperature`.""", - type=float, - ) - sub.add_argument( - "-n", - "--n", - help="How many sub-completions to generate for each prompt.", - type=int, - ) - sub.add_argument( - "--logprobs", - help="Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. So for example, if `logprobs` is 10, the API will return a list of the 10 most likely tokens. If `logprobs` is 0, only the chosen tokens will have logprobs returned.", - type=int, - ) - sub.add_argument( - "--best_of", - help="Generates `best_of` completions server-side and returns the 'best' (the one with the highest log probability per token). 
Results cannot be streamed.", - type=int, - ) - sub.add_argument( - "--echo", - help="Echo back the prompt in addition to the completion", - action="http://github.com/store_true", - ) - sub.add_argument( - "--frequency_penalty", - help="Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.", - type=float, - ) - sub.add_argument( - "--presence_penalty", - help="Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.", - type=float, - ) - sub.add_argument("--suffix", help="The suffix that comes after a completion of inserted text.") - sub.add_argument("--stop", help="A stop sequence at which to stop generating tokens.") - sub.add_argument( - "--user", - help="A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.", - ) - # TODO: add support for logit_bias - sub.set_defaults(func=CLICompletions.create, args_model=CLICompletionCreateArgs) - - -class CLICompletionCreateArgs(BaseModel): - model: str - stream: bool = False - - prompt: Optional[str] = None - n: NotGivenOr[int] = NOT_GIVEN - stop: NotGivenOr[str] = NOT_GIVEN - user: NotGivenOr[str] = NOT_GIVEN - echo: NotGivenOr[bool] = NOT_GIVEN - suffix: NotGivenOr[str] = NOT_GIVEN - best_of: NotGivenOr[int] = NOT_GIVEN - top_p: NotGivenOr[float] = NOT_GIVEN - logprobs: NotGivenOr[int] = NOT_GIVEN - max_tokens: NotGivenOr[int] = NOT_GIVEN - temperature: NotGivenOr[float] = NOT_GIVEN - presence_penalty: NotGivenOr[float] = NOT_GIVEN - frequency_penalty: NotGivenOr[float] = NOT_GIVEN - - -class CLICompletions: - @staticmethod - def create(args: CLICompletionCreateArgs) -> None: - if is_given(args.n) and args.n > 1 and args.stream: - raise CLIError("Can't stream completions with n>1 with the current CLI") - - make_request = partial( - get_client().completions.create, - n=args.n, - echo=args.echo, - stop=args.stop, - user=args.user, - model=args.model, - top_p=args.top_p, - prompt=args.prompt, - suffix=args.suffix, - best_of=args.best_of, - logprobs=args.logprobs, - max_tokens=args.max_tokens, - temperature=args.temperature, - presence_penalty=args.presence_penalty, - frequency_penalty=args.frequency_penalty, - ) - - if args.stream: - return CLICompletions._stream_create( - # mypy doesn't understand the `partial` function but pyright does - cast(Stream[Completion], make_request(stream=True)) # pyright: ignore[reportUnnecessaryCast] - ) - - return CLICompletions._create(make_request()) - - @staticmethod - def _create(completion: Completion) -> None: - should_print_header = len(completion.choices) > 1 - for choice in completion.choices: - if should_print_header: - sys.stdout.write("===== Completion {} =====\n".format(choice.index)) - - sys.stdout.write(choice.text) - - if should_print_header or not choice.text.endswith("\n"): - sys.stdout.write("\n") - - sys.stdout.flush() - - @staticmethod - def _stream_create(stream: Stream[Completion]) -> None: - for completion in stream: - should_print_header = len(completion.choices) > 1 - for choice in sorted(completion.choices, key=lambda c: c.index): - if should_print_header: - sys.stdout.write("===== Chat Completion {} =====\n".format(choice.index)) - - sys.stdout.write(choice.text) - - if should_print_header: - sys.stdout.write("\n") - - sys.stdout.flush() - - sys.stdout.write("\n") diff --git a/src/openai/cli/_api/files.py b/src/openai/cli/_api/files.py deleted file 
mode 100644 index 5f3631b284..0000000000 --- a/src/openai/cli/_api/files.py +++ /dev/null @@ -1,80 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, Any, cast -from argparse import ArgumentParser - -from .._utils import get_client, print_model -from .._models import BaseModel -from .._progress import BufferReader - -if TYPE_CHECKING: - from argparse import _SubParsersAction - - -def register(subparser: _SubParsersAction[ArgumentParser]) -> None: - sub = subparser.add_parser("files.create") - - sub.add_argument( - "-f", - "--file", - required=True, - help="File to upload", - ) - sub.add_argument( - "-p", - "--purpose", - help="Why are you uploading this file? (see https://platform.openai.com/docs/api-reference/ for purposes)", - required=True, - ) - sub.set_defaults(func=CLIFile.create, args_model=CLIFileCreateArgs) - - sub = subparser.add_parser("files.retrieve") - sub.add_argument("-i", "--id", required=True, help="The files ID") - sub.set_defaults(func=CLIFile.get, args_model=CLIFileCreateArgs) - - sub = subparser.add_parser("files.delete") - sub.add_argument("-i", "--id", required=True, help="The files ID") - sub.set_defaults(func=CLIFile.delete, args_model=CLIFileCreateArgs) - - sub = subparser.add_parser("files.list") - sub.set_defaults(func=CLIFile.list) - - -class CLIFileIDArgs(BaseModel): - id: str - - -class CLIFileCreateArgs(BaseModel): - file: str - purpose: str - - -class CLIFile: - @staticmethod - def create(args: CLIFileCreateArgs) -> None: - with open(args.file, "rb") as file_reader: - buffer_reader = BufferReader(file_reader.read(), desc="Upload progress") - - file = get_client().files.create( - file=(args.file, buffer_reader), - # casts required because the API is typed for enums - # but we don't want to validate that here for forwards-compat - purpose=cast(Any, args.purpose), - ) - print_model(file) - - @staticmethod - def get(args: CLIFileIDArgs) -> None: - file = get_client().files.retrieve(file_id=args.id) - print_model(file) - - @staticmethod - def delete(args: CLIFileIDArgs) -> None: - file = get_client().files.delete(file_id=args.id) - print_model(file) - - @staticmethod - def list() -> None: - files = get_client().files.list() - for file in files: - print_model(file) diff --git a/src/openai/cli/_api/image.py b/src/openai/cli/_api/image.py deleted file mode 100644 index 3e2a0a90f1..0000000000 --- a/src/openai/cli/_api/image.py +++ /dev/null @@ -1,139 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, Any, cast -from argparse import ArgumentParser - -from .._utils import get_client, print_model -from ..._types import NOT_GIVEN, NotGiven, NotGivenOr -from .._models import BaseModel -from .._progress import BufferReader - -if TYPE_CHECKING: - from argparse import _SubParsersAction - - -def register(subparser: _SubParsersAction[ArgumentParser]) -> None: - sub = subparser.add_parser("images.generate") - sub.add_argument("-m", "--model", type=str) - sub.add_argument("-p", "--prompt", type=str, required=True) - sub.add_argument("-n", "--num-images", type=int, default=1) - sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image") - sub.add_argument("--response-format", type=str, default="url") - sub.set_defaults(func=CLIImage.create, args_model=CLIImageCreateArgs) - - sub = subparser.add_parser("images.edit") - sub.add_argument("-m", "--model", type=str) - sub.add_argument("-p", "--prompt", type=str, required=True) - sub.add_argument("-n", "--num-images", type=int, default=1) 
- sub.add_argument( - "-I", - "--image", - type=str, - required=True, - help="Image to modify. Should be a local path and a PNG encoded image.", - ) - sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image") - sub.add_argument("--response-format", type=str, default="url") - sub.add_argument( - "-M", - "--mask", - type=str, - required=False, - help="Path to a mask image. It should be the same size as the image you're editing and a RGBA PNG image. The Alpha channel acts as the mask.", - ) - sub.set_defaults(func=CLIImage.edit, args_model=CLIImageEditArgs) - - sub = subparser.add_parser("images.create_variation") - sub.add_argument("-m", "--model", type=str) - sub.add_argument("-n", "--num-images", type=int, default=1) - sub.add_argument( - "-I", - "--image", - type=str, - required=True, - help="Image to modify. Should be a local path and a PNG encoded image.", - ) - sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image") - sub.add_argument("--response-format", type=str, default="url") - sub.set_defaults(func=CLIImage.create_variation, args_model=CLIImageCreateVariationArgs) - - -class CLIImageCreateArgs(BaseModel): - prompt: str - num_images: int - size: str - response_format: str - model: NotGivenOr[str] = NOT_GIVEN - - -class CLIImageCreateVariationArgs(BaseModel): - image: str - num_images: int - size: str - response_format: str - model: NotGivenOr[str] = NOT_GIVEN - - -class CLIImageEditArgs(BaseModel): - image: str - num_images: int - size: str - response_format: str - prompt: str - mask: NotGivenOr[str] = NOT_GIVEN - model: NotGivenOr[str] = NOT_GIVEN - - -class CLIImage: - @staticmethod - def create(args: CLIImageCreateArgs) -> None: - image = get_client().images.generate( - model=args.model, - prompt=args.prompt, - n=args.num_images, - # casts required because the API is typed for enums - # but we don't want to validate that here for forwards-compat - size=cast(Any, args.size), - response_format=cast(Any, args.response_format), - ) - print_model(image) - - @staticmethod - def create_variation(args: CLIImageCreateVariationArgs) -> None: - with open(args.image, "rb") as file_reader: - buffer_reader = BufferReader(file_reader.read(), desc="Upload progress") - - image = get_client().images.create_variation( - model=args.model, - image=("image", buffer_reader), - n=args.num_images, - # casts required because the API is typed for enums - # but we don't want to validate that here for forwards-compat - size=cast(Any, args.size), - response_format=cast(Any, args.response_format), - ) - print_model(image) - - @staticmethod - def edit(args: CLIImageEditArgs) -> None: - with open(args.image, "rb") as file_reader: - buffer_reader = BufferReader(file_reader.read(), desc="Image upload progress") - - if isinstance(args.mask, NotGiven): - mask: NotGivenOr[BufferReader] = NOT_GIVEN - else: - with open(args.mask, "rb") as file_reader: - mask = BufferReader(file_reader.read(), desc="Mask progress") - - image = get_client().images.edit( - model=args.model, - prompt=args.prompt, - image=("image", buffer_reader), - n=args.num_images, - mask=("mask", mask) if not isinstance(mask, NotGiven) else mask, - # casts required because the API is typed for enums - # but we don't want to validate that here for forwards-compat - size=cast(Any, args.size), - response_format=cast(Any, args.response_format), - ) - print_model(image) diff --git a/src/openai/cli/_api/models.py b/src/openai/cli/_api/models.py deleted file mode 100644 index 
017218fa6e..0000000000 --- a/src/openai/cli/_api/models.py +++ /dev/null @@ -1,45 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING -from argparse import ArgumentParser - -from .._utils import get_client, print_model -from .._models import BaseModel - -if TYPE_CHECKING: - from argparse import _SubParsersAction - - -def register(subparser: _SubParsersAction[ArgumentParser]) -> None: - sub = subparser.add_parser("models.list") - sub.set_defaults(func=CLIModels.list) - - sub = subparser.add_parser("models.retrieve") - sub.add_argument("-i", "--id", required=True, help="The model ID") - sub.set_defaults(func=CLIModels.get, args_model=CLIModelIDArgs) - - sub = subparser.add_parser("models.delete") - sub.add_argument("-i", "--id", required=True, help="The model ID") - sub.set_defaults(func=CLIModels.delete, args_model=CLIModelIDArgs) - - -class CLIModelIDArgs(BaseModel): - id: str - - -class CLIModels: - @staticmethod - def get(args: CLIModelIDArgs) -> None: - model = get_client().models.retrieve(model=args.id) - print_model(model) - - @staticmethod - def delete(args: CLIModelIDArgs) -> None: - model = get_client().models.delete(model=args.id) - print_model(model) - - @staticmethod - def list() -> None: - models = get_client().models.list() - for model in models: - print_model(model) diff --git a/src/openai/cli/_cli.py b/src/openai/cli/_cli.py deleted file mode 100644 index 72e5c923bd..0000000000 --- a/src/openai/cli/_cli.py +++ /dev/null @@ -1,234 +0,0 @@ -from __future__ import annotations - -import sys -import logging -import argparse -from typing import Any, List, Type, Optional -from typing_extensions import ClassVar - -import httpx -import pydantic - -import openai - -from . import _tools -from .. import _ApiType, __version__ -from ._api import register_commands -from ._utils import can_use_http2 -from .._types import ProxiesDict -from ._errors import CLIError, display_error -from .._compat import PYDANTIC_V2, ConfigDict, model_parse -from .._models import BaseModel -from .._exceptions import APIError - -logger = logging.getLogger() -formatter = logging.Formatter("[%(asctime)s] %(message)s") -handler = logging.StreamHandler(sys.stderr) -handler.setFormatter(formatter) -logger.addHandler(handler) - - -class Arguments(BaseModel): - if PYDANTIC_V2: - model_config: ClassVar[ConfigDict] = ConfigDict( - extra="ignore", - ) - else: - - class Config(pydantic.BaseConfig): # type: ignore - extra: Any = pydantic.Extra.ignore # type: ignore - - verbosity: int - version: Optional[str] = None - - api_key: Optional[str] - api_base: Optional[str] - organization: Optional[str] - proxy: Optional[List[str]] - api_type: Optional[_ApiType] = None - api_version: Optional[str] = None - - # azure - azure_endpoint: Optional[str] = None - azure_ad_token: Optional[str] = None - - # internal, set by subparsers to parse their specific args - args_model: Optional[Type[BaseModel]] = None - - # internal, used so that subparsers can forward unknown arguments - unknown_args: List[str] = [] - allow_unknown_args: bool = False - - -def _build_parser() -> argparse.ArgumentParser: - parser = argparse.ArgumentParser(description=None, prog="openai") - parser.add_argument( - "-v", - "--verbose", - action="http://github.com/count", - dest="verbosity", - default=0, - help="Set verbosity.", - ) - parser.add_argument("-b", "--api-base", help="What API base url to use.") - parser.add_argument("-k", "--api-key", help="What API key to use.") - parser.add_argument("-p", "--proxy", nargs="+", 
help="What proxy to use.") - parser.add_argument( - "-o", - "--organization", - help="Which organization to run as (will use your default organization if not specified)", - ) - parser.add_argument( - "-t", - "--api-type", - type=str, - choices=("openai", "azure"), - help="The backend API to call, must be `openai` or `azure`", - ) - parser.add_argument( - "--api-version", - help="The Azure API version, e.g. 'https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning'", - ) - - # azure - parser.add_argument( - "--azure-endpoint", - help="The Azure endpoint, e.g. 'https://endpoint.openai.azure.com/'", - ) - parser.add_argument( - "--azure-ad-token", - help="A token from Azure Active Directory, https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id", - ) - - # prints the package version - parser.add_argument( - "-V", - "--version", - action="http://github.com/version", - version="%(prog)s " + __version__, - ) - - def help() -> None: - parser.print_help() - - parser.set_defaults(func=help) - - subparsers = parser.add_subparsers() - sub_api = subparsers.add_parser("api", help="Direct API calls") - - register_commands(sub_api) - - sub_tools = subparsers.add_parser("tools", help="Client side tools for convenience") - _tools.register_commands(sub_tools, subparsers) - - return parser - - -def main() -> int: - try: - _main() - except (APIError, CLIError, pydantic.ValidationError) as err: - display_error(err) - return 1 - except KeyboardInterrupt: - sys.stderr.write("\n") - return 1 - return 0 - - -def _parse_args(parser: argparse.ArgumentParser) -> tuple[argparse.Namespace, Arguments, list[str]]: - # argparse by default will strip out the `--` but we want to keep it for unknown arguments - if "--" in sys.argv: - idx = sys.argv.index("--") - known_args = sys.argv[1:idx] - unknown_args = sys.argv[idx:] - else: - known_args = sys.argv[1:] - unknown_args = [] - - parsed, remaining_unknown = parser.parse_known_args(known_args) - - # append any remaining unknown arguments from the initial parsing - remaining_unknown.extend(unknown_args) - - args = model_parse(Arguments, vars(parsed)) - if not args.allow_unknown_args: - # we have to parse twice to ensure any unknown arguments - # result in an error if that behaviour is desired - parser.parse_args() - - return parsed, args, remaining_unknown - - -def _main() -> None: - parser = _build_parser() - parsed, args, unknown = _parse_args(parser) - - if args.verbosity != 0: - sys.stderr.write("Warning: --verbosity isn't supported yet\n") - - proxies: ProxiesDict = {} - if args.proxy is not None: - for proxy in args.proxy: - key = "https://" if proxy.startswith("https") else "http://" - if key in proxies: - raise CLIError(f"Multiple {key} proxies given - only the last one would be used") - - proxies[key] = proxy - - http_client = httpx.Client( - proxies=proxies or None, - http2=can_use_http2(), - ) - openai.http_client = http_client - - if args.organization: - openai.organization = args.organization - - if args.api_key: - openai.api_key = args.api_key - - if args.api_base: - openai.base_url = args.api_base - - # azure - if args.api_type is not None: - openai.api_type = args.api_type - - if args.azure_endpoint is not None: - openai.azure_endpoint = args.azure_endpoint - - if args.api_version is not None: - openai.api_version = args.api_version - - if args.azure_ad_token is not None: - openai.azure_ad_token = args.azure_ad_token - - try: - if args.args_model: - parsed.func( - 
model_parse( - args.args_model, - { - **{ - # we omit None values so that they can be defaulted to `NotGiven` - # and we'll strip it from the API request - key: value - for key, value in vars(parsed).items() - if value is not None - }, - "unknown_args": unknown, - }, - ) - ) - else: - parsed.func() - finally: - try: - http_client.close() - except Exception: - pass - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/src/openai/cli/_errors.py b/src/openai/cli/_errors.py deleted file mode 100644 index 2bf06070d6..0000000000 --- a/src/openai/cli/_errors.py +++ /dev/null @@ -1,23 +0,0 @@ -from __future__ import annotations - -import sys - -import pydantic - -from ._utils import Colors, organization_info -from .._exceptions import APIError, OpenAIError - - -class CLIError(OpenAIError): - ... - - -class SilentCLIError(CLIError): - ... - - -def display_error(err: CLIError | APIError | pydantic.ValidationError) -> None: - if isinstance(err, SilentCLIError): - return - - sys.stderr.write("{}{}Error:{} {}\n".format(organization_info(), Colors.FAIL, Colors.ENDC, err)) diff --git a/src/openai/cli/_models.py b/src/openai/cli/_models.py deleted file mode 100644 index 5583db2609..0000000000 --- a/src/openai/cli/_models.py +++ /dev/null @@ -1,17 +0,0 @@ -from typing import Any -from typing_extensions import ClassVar - -import pydantic - -from .. import _models -from .._compat import PYDANTIC_V2, ConfigDict - - -class BaseModel(_models.BaseModel): - if PYDANTIC_V2: - model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore", arbitrary_types_allowed=True) - else: - - class Config(pydantic.BaseConfig): # type: ignore - extra: Any = pydantic.Extra.ignore # type: ignore - arbitrary_types_allowed: bool = True diff --git a/src/openai/cli/_progress.py b/src/openai/cli/_progress.py deleted file mode 100644 index 8a7f2525de..0000000000 --- a/src/openai/cli/_progress.py +++ /dev/null @@ -1,59 +0,0 @@ -from __future__ import annotations - -import io -from typing import Callable -from typing_extensions import override - - -class CancelledError(Exception): - def __init__(self, msg: str) -> None: - self.msg = msg - super().__init__(msg) - - @override - def __str__(self) -> str: - return self.msg - - __repr__ = __str__ - - -class BufferReader(io.BytesIO): - def __init__(self, buf: bytes = b"", desc: str | None = None) -> None: - super().__init__(buf) - self._len = len(buf) - self._progress = 0 - self._callback = progress(len(buf), desc=desc) - - def __len__(self) -> int: - return self._len - - @override - def read(self, n: int | None = -1) -> bytes: - chunk = io.BytesIO.read(self, n) - self._progress += len(chunk) - - try: - self._callback(self._progress) - except Exception as e: # catches exception from the callback - raise CancelledError("The upload was cancelled: {}".format(e)) from e - - return chunk - - -def progress(total: float, desc: str | None) -> Callable[[float], None]: - import tqdm - - meter = tqdm.tqdm(total=total, unit_scale=True, desc=desc) - - def incr(progress: float) -> None: - meter.n = progress - if progress == total: - meter.close() - else: - meter.refresh() - - return incr - - -def MB(i: int) -> int: - return int(i // 1024**2) diff --git a/src/openai/cli/_tools/__init__.py b/src/openai/cli/_tools/__init__.py deleted file mode 100644 index 56a0260a6d..0000000000 --- a/src/openai/cli/_tools/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from ._main import register_commands as register_commands diff --git a/src/openai/cli/_tools/_main.py b/src/openai/cli/_tools/_main.py deleted file mode 
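The removed `_progress.py` pairs an in-memory byte buffer with a tqdm meter so uploads report progress as the HTTP client reads the body. A minimal sketch of the same pattern, independent of the SDK (the class name and file name here are illustrative, not part of the library):

from __future__ import annotations

import io

import tqdm


class ProgressReader(io.BytesIO):
    """BytesIO that advances a tqdm meter as it is read."""

    def __init__(self, buf: bytes, desc: str | None = None) -> None:
        super().__init__(buf)
        self._meter = tqdm.tqdm(total=len(buf), unit_scale=True, desc=desc)

    def read(self, n: int | None = -1) -> bytes:
        chunk = super().read(n)
        self._meter.update(len(chunk))
        if self.tell() >= self._meter.total:
            self._meter.close()
        return chunk


with open("image.png", "rb") as f:
    reader = ProgressReader(f.read(), desc="Upload progress")

while reader.read(64 * 1024):
    pass  # an HTTP client consuming the stream advances the meter the same way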
100644 index bd6cda408f..0000000000 --- a/src/openai/cli/_tools/_main.py +++ /dev/null @@ -1,17 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING -from argparse import ArgumentParser - -from . import migrate, fine_tunes - -if TYPE_CHECKING: - from argparse import _SubParsersAction - - -def register_commands(parser: ArgumentParser, subparser: _SubParsersAction[ArgumentParser]) -> None: - migrate.register(subparser) - - namespaced = parser.add_subparsers(title="Tools", help="Convenience client side tools") - - fine_tunes.register(namespaced) diff --git a/src/openai/cli/_tools/fine_tunes.py b/src/openai/cli/_tools/fine_tunes.py deleted file mode 100644 index 2128b88952..0000000000 --- a/src/openai/cli/_tools/fine_tunes.py +++ /dev/null @@ -1,63 +0,0 @@ -from __future__ import annotations - -import sys -from typing import TYPE_CHECKING -from argparse import ArgumentParser - -from .._models import BaseModel -from ...lib._validators import ( - get_validators, - write_out_file, - read_any_format, - apply_validators, - apply_necessary_remediation, -) - -if TYPE_CHECKING: - from argparse import _SubParsersAction - - -def register(subparser: _SubParsersAction[ArgumentParser]) -> None: - sub = subparser.add_parser("fine_tunes.prepare_data") - sub.add_argument( - "-f", - "--file", - required=True, - help="JSONL, JSON, CSV, TSV, TXT or XLSX file containing prompt-completion examples to be analyzed." - "This should be the local file path.", - ) - sub.add_argument( - "-q", - "--quiet", - required=False, - action="http://github.com/store_true", - help="Auto accepts all suggestions, without asking for user input. To be used within scripts.", - ) - sub.set_defaults(func=prepare_data, args_model=PrepareDataArgs) - - -class PrepareDataArgs(BaseModel): - file: str - - quiet: bool - - -def prepare_data(args: PrepareDataArgs) -> None: - sys.stdout.write("Analyzing...\n") - fname = args.file - auto_accept = args.quiet - df, remediation = read_any_format(fname) - apply_necessary_remediation(None, remediation) - - validators = get_validators() - - assert df is not None - - apply_validators( - df, - fname, - remediation, - validators, - auto_accept, - write_out_file_func=write_out_file, - ) diff --git a/src/openai/cli/_tools/migrate.py b/src/openai/cli/_tools/migrate.py deleted file mode 100644 index 53073b866f..0000000000 --- a/src/openai/cli/_tools/migrate.py +++ /dev/null @@ -1,181 +0,0 @@ -from __future__ import annotations - -import os -import sys -import json -import shutil -import tarfile -import platform -import subprocess -from typing import TYPE_CHECKING, List -from pathlib import Path -from argparse import ArgumentParser - -import httpx - -from .._errors import CLIError, SilentCLIError -from .._models import BaseModel - -if TYPE_CHECKING: - from argparse import _SubParsersAction - - -def register(subparser: _SubParsersAction[ArgumentParser]) -> None: - sub = subparser.add_parser("migrate") - sub.set_defaults(func=migrate, args_model=MigrateArgs, allow_unknown_args=True) - - sub = subparser.add_parser("grit") - sub.set_defaults(func=grit, args_model=GritArgs, allow_unknown_args=True) - - -class GritArgs(BaseModel): - # internal - unknown_args: List[str] = [] - - -def grit(args: GritArgs) -> None: - grit_path = install() - - try: - subprocess.check_call([grit_path, *args.unknown_args]) - except subprocess.CalledProcessError: - # stdout and stderr are forwarded by subprocess so an error will already - # have been displayed - raise SilentCLIError() from None - - -class 
MigrateArgs(BaseModel): - # internal - unknown_args: List[str] = [] - - -def migrate(args: MigrateArgs) -> None: - grit_path = install() - - try: - subprocess.check_call([grit_path, "apply", "openai", *args.unknown_args]) - except subprocess.CalledProcessError: - # stdout and stderr are forwarded by subprocess so an error will already - # have been displayed - raise SilentCLIError() from None - - -# handles downloading the Grit CLI until they provide their own PyPi package - -KEYGEN_ACCOUNT = "custodian-dev" - - -def _cache_dir() -> Path: - xdg = os.environ.get("XDG_CACHE_HOME") - if xdg is not None: - return Path(xdg) - - return Path.home() / ".cache" - - -def _debug(message: str) -> None: - if not os.environ.get("DEBUG"): - return - - sys.stdout.write(f"[DEBUG]: {message}\n") - - -def install() -> Path: - """Installs the Grit CLI and returns the location of the binary""" - if sys.platform == "win32": - raise CLIError("Windows is not supported yet in the migration CLI") - - platform = "macos" if sys.platform == "darwin" else "linux" - - dir_name = _cache_dir() / "openai-python" - install_dir = dir_name / ".install" - target_dir = install_dir / "bin" - - target_path = target_dir / "marzano" - temp_file = target_dir / "marzano.tmp" - - if target_path.exists(): - _debug(f"{target_path} already exists") - sys.stdout.flush() - return target_path - - _debug(f"Using Grit CLI path: {target_path}") - - target_dir.mkdir(parents=True, exist_ok=True) - - if temp_file.exists(): - temp_file.unlink() - - arch = _get_arch() - _debug(f"Using architecture {arch}") - - file_name = f"marzano-{platform}-{arch}" - meta_url = f"https://api.keygen.sh/v1/accounts/%7BKEYGEN_ACCOUNT%7D/artifacts/%7Bfile_name%7D" - - sys.stdout.write(f"Retrieving Grit CLI metadata from {meta_url}\n") - with httpx.Client() as client: - response = client.get(meta_url) # pyright: ignore[reportUnknownMemberType] - - data = response.json() - errors = data.get("errors") - if errors: - for error in errors: - sys.stdout.write(f"{error}\n") - - raise CLIError("Could not locate Grit CLI binary - see above errors") - - write_manifest(install_dir, data["data"]["relationships"]["release"]["data"]["id"]) - - link = data["data"]["links"]["redirect"] - _debug(f"Redirect URL {link}") - - download_response = client.get(link) # pyright: ignore[reportUnknownMemberType] - with open(temp_file, "wb") as file: - for chunk in download_response.iter_bytes(): - file.write(chunk) - - unpacked_dir = target_dir / "cli-bin" - unpacked_dir.mkdir(parents=True, exist_ok=True) - - with tarfile.open(temp_file, "r:gz") as archive: - archive.extractall(unpacked_dir, filter="data") - - for item in unpacked_dir.iterdir(): - item.rename(target_dir / item.name) - - shutil.rmtree(unpacked_dir) - os.remove(temp_file) - os.chmod(target_path, 0o755) - - sys.stdout.flush() - - return target_path - - -def _get_arch() -> str: - architecture = platform.machine().lower() - - # Map the architecture names to Node.js equivalents - arch_map = { - "x86_64": "x64", - "amd64": "x64", - "armv7l": "arm", - "aarch64": "arm64", - } - - return arch_map.get(architecture, architecture) - - -def write_manifest(install_path: Path, release: str) -> None: - manifest = { - "installPath": str(install_path), - "binaries": { - "marzano": { - "name": "marzano", - "release": release, - }, - }, - } - manifest_path = Path(install_path) / "manifests.json" - with open(manifest_path, "w") as f: - json.dump(manifest, f, indent=2) diff --git a/src/openai/cli/_utils.py b/src/openai/cli/_utils.py deleted 
file mode 100644 index 673eed613c..0000000000 --- a/src/openai/cli/_utils.py +++ /dev/null @@ -1,45 +0,0 @@ -from __future__ import annotations - -import sys - -import openai - -from .. import OpenAI, _load_client -from .._compat import model_json -from .._models import BaseModel - - -class Colors: - HEADER = "\033[95m" - OKBLUE = "\033[94m" - OKGREEN = "\033[92m" - WARNING = "\033[93m" - FAIL = "\033[91m" - ENDC = "\033[0m" - BOLD = "\033[1m" - UNDERLINE = "\033[4m" - - -def get_client() -> OpenAI: - return _load_client() - - -def organization_info() -> str: - organization = openai.organization - if organization is not None: - return "[organization={}] ".format(organization) - - return "" - - -def print_model(model: BaseModel) -> None: - sys.stdout.write(model_json(model, indent=2) + "\n") - - -def can_use_http2() -> bool: - try: - import h2 # type: ignore # noqa - except ImportError: - return False - - return True diff --git a/src/openai/lib/.keep b/src/openai/lib/.keep new file mode 100644 index 0000000000..5e2c99fdbe --- /dev/null +++ b/src/openai/lib/.keep @@ -0,0 +1,4 @@ +File generated from our OpenAPI spec by Stainless. + +This directory can be used to store custom files to expand the SDK. +It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. \ No newline at end of file diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index c37071529c..5083e59d2b 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -2,9 +2,7 @@ from __future__ import annotations -import typing_extensions from typing import Union, Iterable, Optional, overload -from functools import partial from typing_extensions import Literal import httpx @@ -20,7 +18,6 @@ ) from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ....._utils import ( - is_given, required_args, maybe_transform, async_maybe_transform, @@ -34,14 +31,6 @@ AsyncPaginator, make_request_options, ) -from .....lib.streaming import ( - AssistantEventHandler, - AssistantEventHandlerT, - AssistantStreamManager, - AsyncAssistantEventHandler, - AsyncAssistantEventHandlerT, - AsyncAssistantStreamManager, -) from .....types.beta.threads import ( run_list_params, run_create_params, @@ -782,51 +771,14 @@ def cancel( cast_to=Run, ) - def create_and_poll( + @overload + def submit_tool_outputs( self, + run_id: str, *, - assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: 
Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - poll_interval_ms: int | NotGiven = NOT_GIVEN, thread_id: str, + tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -835,159 +787,159 @@ def create_and_poll( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Run: """ - A helper to create a run an poll for a terminal state. More information on Run - lifecycles can be found here: - https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps + When a run has the `status: "requires_action"` and `required_action.type` is + `submit_tool_outputs`, this endpoint can be used to submit the outputs from the + tool calls once they're all completed. All outputs must be submitted in a single + request. + + Args: + tool_outputs: A list of tools for which the outputs are being submitted. + + stream: If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds """ - run = self.create( - thread_id=thread_id, - assistant_id=assistant_id, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - instructions=instructions, - max_completion_tokens=max_completion_tokens, - max_prompt_tokens=max_prompt_tokens, - metadata=metadata, - model=model, - response_format=response_format, - temperature=temperature, - tool_choice=tool_choice, - # We assume we are not streaming when polling - stream=False, - tools=tools, - truncation_strategy=truncation_strategy, - top_p=top_p, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - return self.poll( - run.id, - thread_id=thread_id, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - poll_interval_ms=poll_interval_ms, - timeout=timeout, - ) + ... 
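The overload above documents the non-streaming path. A minimal usage sketch, assuming an existing run that has reached status `requires_action`; the thread/run IDs and the tool output value are placeholders:

from openai import OpenAI

client = OpenAI()

run = client.beta.threads.runs.retrieve(run_id="run_abc123", thread_id="thread_abc123")

if run.status == "requires_action" and run.required_action is not None:
    tool_call = run.required_action.submit_tool_outputs.tool_calls[0]
    run = client.beta.threads.runs.submit_tool_outputs(
        run_id=run.id,
        thread_id="thread_abc123",
        tool_outputs=[{"tool_call_id": tool_call.id, "output": "42"}],
    )
    print(run.status)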
@overload - @typing_extensions.deprecated("use `stream` instead") - def create_and_stream( + def submit_tool_outputs( self, + run_id: str, *, - assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, thread_id: str, + stream: Literal[True], + tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantStreamManager[AssistantEventHandler]: - """Create a Run stream""" + ) -> Stream[AssistantStreamEvent]: + """ + When a run has the `status: "requires_action"` and `required_action.type` is + `submit_tool_outputs`, this endpoint can be used to submit the outputs from the + tool calls once they're all completed. All outputs must be submitted in a single + request. + + Args: + stream: If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. + + tool_outputs: A list of tools for which the outputs are being submitted. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ ... 
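The streaming counterpart documented above returns a raw event stream instead of a Run. A minimal sketch with placeholder IDs:

from openai import OpenAI

client = OpenAI()

stream = client.beta.threads.runs.submit_tool_outputs(
    run_id="run_abc123",
    thread_id="thread_abc123",
    tool_outputs=[{"tool_call_id": "call_abc123", "output": "42"}],
    stream=True,
)

for event in stream:
    # each event is an AssistantStreamEvent, e.g. thread.message.delta or thread.run.completed
    print(event.event)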
@overload - @typing_extensions.deprecated("use `stream` instead") - def create_and_stream( + def submit_tool_outputs( self, + run_id: str, *, - assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, thread_id: str, - event_handler: AssistantEventHandlerT, + stream: bool, + tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantStreamManager[AssistantEventHandlerT]: - """Create a Run stream""" + ) -> Run | Stream[AssistantStreamEvent]: + """ + When a run has the `status: "requires_action"` and `required_action.type` is + `submit_tool_outputs`, this endpoint can be used to submit the outputs from the + tool calls once they're all completed. All outputs must be submitted in a single + request. + + Args: + stream: If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. + + tool_outputs: A list of tools for which the outputs are being submitted. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ ... - @typing_extensions.deprecated("use `stream` instead") - def create_and_stream( + @required_args(["thread_id", "tool_outputs"], ["thread_id", "stream", "tool_outputs"]) + def submit_tool_outputs( + self, + run_id: str, + *, + thread_id: str, + tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Run | Stream[AssistantStreamEvent]: + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not run_id: + raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return self._post( + f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs", + body=maybe_transform( + { + "tool_outputs": tool_outputs, + "stream": stream, + }, + run_submit_tool_outputs_params.RunSubmitToolOutputsParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Run, + stream=stream or False, + stream_cls=Stream[AssistantStreamEvent], + ) + + +class AsyncRuns(AsyncAPIResource): + @cached_property + def steps(self) -> AsyncSteps: + return AsyncSteps(self._client) + + @cached_property + def with_raw_response(self) -> AsyncRunsWithRawResponse: + return AsyncRunsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncRunsWithStreamingResponse: + return AsyncRunsWithStreamingResponse(self) + + @overload + async def create( self, + thread_id: str, *, assistant_id: str, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -1024,956 +976,19 @@ def create_and_stream( ] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - thread_id: str, - event_handler: AssistantEventHandlerT | None = None, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]: - """Create a Run stream""" - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - - extra_headers = { - "OpenAI-Beta": "assistants=v2", - "X-Stainless-Stream-Helper": "threads.runs.create_and_stream", - "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false", - **(extra_headers or {}), - } - make_request = partial( - self._post, - f"/threads/{thread_id}/runs", - body=maybe_transform( - { - "assistant_id": assistant_id, - "additional_instructions": additional_instructions, - "additional_messages": additional_messages, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "response_format": response_format, - "temperature": temperature, - "tool_choice": tool_choice, - "stream": True, - "tools": tools, - "truncation_strategy": truncation_strategy, - "top_p": top_p, - }, - run_create_params.RunCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Run, - stream=True, - stream_cls=Stream[AssistantStreamEvent], - ) - return AssistantStreamManager(make_request, event_handler=event_handler or AssistantEventHandler()) - - def poll( - self, - run_id: str, - thread_id: str, - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - ) -> Run: - """ - A helper to poll a run status until it reaches a terminal state. 
More - information on Run lifecycles can be found here: - https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps - """ - extra_headers = {"X-Stainless-Poll-Helper": "true", **(extra_headers or {})} - - if is_given(poll_interval_ms): - extra_headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms) - - terminal_states = {"requires_action", "cancelled", "completed", "failed", "expired", "incomplete"} - while True: - response = self.with_raw_response.retrieve( - thread_id=thread_id, - run_id=run_id, - extra_headers=extra_headers, - extra_body=extra_body, - extra_query=extra_query, - timeout=timeout, - ) - - run = response.parse() - # Return if we reached a terminal state - if run.status in terminal_states: - return run - - if not is_given(poll_interval_ms): - from_header = response.headers.get("openai-poll-after-ms") - if from_header is not None: - poll_interval_ms = int(from_header) - else: - poll_interval_ms = 1000 - - self._sleep(poll_interval_ms / 1000) - - @overload - def stream( - self, - *, - assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - thread_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantStreamManager[AssistantEventHandler]: - """Create a Run stream""" - ... 
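Since this branch removes the `poll` helper shown above, callers on this branch would need to poll for a terminal state themselves. A minimal sketch of the same loop using only the plain `retrieve` call; the IDs and fixed one-second interval are placeholders (the removed helper read the `openai-poll-after-ms` response header instead):

import time

from openai import OpenAI

client = OpenAI()

TERMINAL_STATES = {"requires_action", "cancelled", "completed", "failed", "expired", "incomplete"}

run = client.beta.threads.runs.retrieve(run_id="run_abc123", thread_id="thread_abc123")
while run.status not in TERMINAL_STATES:
    time.sleep(1)
    run = client.beta.threads.runs.retrieve(run_id="run_abc123", thread_id="thread_abc123")

print(run.status)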
- - @overload - def stream( - self, - *, - assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - thread_id: str, - event_handler: AssistantEventHandlerT, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantStreamManager[AssistantEventHandlerT]: - """Create a Run stream""" - ... - - def stream( - self, - *, - assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - thread_id: str, - event_handler: AssistantEventHandlerT | None = None, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]: - """Create a Run stream""" - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - - extra_headers = { - "OpenAI-Beta": "assistants=v2", - "X-Stainless-Stream-Helper": "threads.runs.create_and_stream", - "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false", - **(extra_headers or {}), - } - make_request = partial( - self._post, - f"/threads/{thread_id}/runs", - body=maybe_transform( - { - "assistant_id": assistant_id, - "additional_instructions": additional_instructions, - "additional_messages": additional_messages, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "response_format": response_format, - "temperature": temperature, - "tool_choice": tool_choice, - "stream": True, - "tools": tools, - "truncation_strategy": truncation_strategy, - "top_p": top_p, - }, - run_create_params.RunCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Run, - stream=True, - stream_cls=Stream[AssistantStreamEvent], - ) - return AssistantStreamManager(make_request, event_handler=event_handler or AssistantEventHandler()) - - @overload - def submit_tool_outputs( - self, - run_id: str, - *, - thread_id: str, - tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Run: - """ - When a run has the `status: "requires_action"` and `required_action.type` is - `submit_tool_outputs`, this endpoint can be used to submit the outputs from the - tool calls once they're all completed. All outputs must be submitted in a single - request. - - Args: - tool_outputs: A list of tools for which the outputs are being submitted. - - stream: If `true`, returns a stream of events that happen during the Run as server-sent - events, terminating when the Run enters a terminal state with a `data: [DONE]` - message. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - def submit_tool_outputs( - self, - run_id: str, - *, - thread_id: str, - stream: Literal[True], - tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Stream[AssistantStreamEvent]: - """ - When a run has the `status: "requires_action"` and `required_action.type` is - `submit_tool_outputs`, this endpoint can be used to submit the outputs from the - tool calls once they're all completed. All outputs must be submitted in a single - request. - - Args: - stream: If `true`, returns a stream of events that happen during the Run as server-sent - events, terminating when the Run enters a terminal state with a `data: [DONE]` - message. - - tool_outputs: A list of tools for which the outputs are being submitted. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - def submit_tool_outputs( - self, - run_id: str, - *, - thread_id: str, - stream: bool, - tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Run | Stream[AssistantStreamEvent]: - """ - When a run has the `status: "requires_action"` and `required_action.type` is - `submit_tool_outputs`, this endpoint can be used to submit the outputs from the - tool calls once they're all completed. All outputs must be submitted in a single - request. - - Args: - stream: If `true`, returns a stream of events that happen during the Run as server-sent - events, terminating when the Run enters a terminal state with a `data: [DONE]` - message. - - tool_outputs: A list of tools for which the outputs are being submitted. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["thread_id", "tool_outputs"], ["thread_id", "stream", "tool_outputs"]) - def submit_tool_outputs( - self, - run_id: str, - *, - thread_id: str, - tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Run | Stream[AssistantStreamEvent]: - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} - return self._post( - f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs", - body=maybe_transform( - { - "tool_outputs": tool_outputs, - "stream": stream, - }, - run_submit_tool_outputs_params.RunSubmitToolOutputsParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Run, - stream=stream or False, - stream_cls=Stream[AssistantStreamEvent], - ) - - def submit_tool_outputs_and_poll( - self, - *, - tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - run_id: str, - thread_id: str, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Run: - """ - A helper to submit a tool output to a run and poll for a terminal run state. - More information on Run lifecycles can be found here: - https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps - """ - run = self.submit_tool_outputs( - run_id=run_id, - thread_id=thread_id, - tool_outputs=tool_outputs, - stream=False, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - return self.poll( - run_id=run.id, - thread_id=thread_id, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - poll_interval_ms=poll_interval_ms, - ) - - @overload - def submit_tool_outputs_stream( - self, - *, - tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - run_id: str, - thread_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantStreamManager[AssistantEventHandler]: - """ - Submit the tool outputs from a previous run and stream the run to a terminal - state. More information on Run lifecycles can be found here: - https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps - """ - ... - - @overload - def submit_tool_outputs_stream( - self, - *, - tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - run_id: str, - thread_id: str, - event_handler: AssistantEventHandlerT, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantStreamManager[AssistantEventHandlerT]: - """ - Submit the tool outputs from a previous run and stream the run to a terminal - state. More information on Run lifecycles can be found here: - https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps - """ - ... - - def submit_tool_outputs_stream( - self, - *, - tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - run_id: str, - thread_id: str, - event_handler: AssistantEventHandlerT | None = None, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]: - """ - Submit the tool outputs from a previous run and stream the run to a terminal - state. More information on Run lifecycles can be found here: - https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps - """ - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - - extra_headers = { - "OpenAI-Beta": "assistants=v2", - "X-Stainless-Stream-Helper": "threads.runs.submit_tool_outputs_stream", - "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false", - **(extra_headers or {}), - } - request = partial( - self._post, - f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs", - body=maybe_transform( - { - "tool_outputs": tool_outputs, - "stream": True, - }, - run_submit_tool_outputs_params.RunSubmitToolOutputsParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Run, - stream=True, - stream_cls=Stream[AssistantStreamEvent], - ) - return AssistantStreamManager(request, event_handler=event_handler or AssistantEventHandler()) - - -class AsyncRuns(AsyncAPIResource): - @cached_property - def steps(self) -> AsyncSteps: - return AsyncSteps(self._client) - - @cached_property - def with_raw_response(self) -> AsyncRunsWithRawResponse: - return AsyncRunsWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncRunsWithStreamingResponse: - return AsyncRunsWithStreamingResponse(self) - - @overload - async def create( - self, - thread_id: str, - *, - assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", 
- "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Run: - """ - Create a run. - - Args: - assistant_id: The ID of the - [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to - execute this run. - - additional_instructions: Appends additional instructions at the end of the instructions for the run. This - is useful for modifying the behavior on a per-run basis without overriding other - instructions. - - additional_messages: Adds additional messages to the thread before creating the run. - - instructions: Overrides the - [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) - of the assistant. This is useful for modifying the behavior on a per-run basis. - - max_completion_tokens: The maximum number of completion tokens that may be used over the course of the - run. The run will make a best effort to use only the number of completion tokens - specified, across multiple turns of the run. If the run exceeds the number of - completion tokens specified, the run will end with status `incomplete`. See - `incomplete_details` for more info. - - max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run. - The run will make a best effort to use only the number of prompt tokens - specified, across multiple turns of the run. If the run exceeds the number of - prompt tokens specified, the run will end with status `incomplete`. See - `incomplete_details` for more info. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 - characters long. - - model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - be used to execute this run. If a value is provided here, it will override the - model associated with the assistant. If not, the model associated with the - assistant will be used. - - response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), - and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the - message the model generates is valid JSON. 
- - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. - - stream: If `true`, returns a stream of events that happen during the Run as server-sent - events, terminating when the Run enters a terminal state with a `data: [DONE]` - message. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling one or more - tools. `required` means the model must call one or more tools before responding - to the user. Specifying a particular tool like `{"type": "file_search"}` or - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - tools: Override the tools the assistant can use for this run. This is useful for - modifying the behavior on a per-run basis. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - - truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
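For reference, a minimal sketch of the non-streaming `runs.create` call that the docstring above describes, using the async client; the assistant and thread IDs are placeholders:

import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    run = await client.beta.threads.runs.create(
        thread_id="thread_abc123",
        assistant_id="asst_abc123",
        instructions="Please address the user as Jane Doe.",
    )
    print(run.id, run.status)


asyncio.run(main())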
- - @overload - async def create( - self, - thread_id: str, - *, - assistant_id: str, - stream: Literal[True], - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncStream[AssistantStreamEvent]: - """ - Create a run. - - Args: - assistant_id: The ID of the - [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to - execute this run. - - stream: If `true`, returns a stream of events that happen during the Run as server-sent - events, terminating when the Run enters a terminal state with a `data: [DONE]` - message. - - additional_instructions: Appends additional instructions at the end of the instructions for the run. This - is useful for modifying the behavior on a per-run basis without overriding other - instructions. - - additional_messages: Adds additional messages to the thread before creating the run. - - instructions: Overrides the - [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) - of the assistant. This is useful for modifying the behavior on a per-run basis. - - max_completion_tokens: The maximum number of completion tokens that may be used over the course of the - run. The run will make a best effort to use only the number of completion tokens - specified, across multiple turns of the run. If the run exceeds the number of - completion tokens specified, the run will end with status `incomplete`. See - `incomplete_details` for more info. - - max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run. - The run will make a best effort to use only the number of prompt tokens - specified, across multiple turns of the run. If the run exceeds the number of - prompt tokens specified, the run will end with status `incomplete`. See - `incomplete_details` for more info. 
- - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 - characters long. - - model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - be used to execute this run. If a value is provided here, it will override the - model associated with the assistant. If not, the model associated with the - assistant will be used. - - response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), - and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling one or more - tools. `required` means the model must call one or more tools before responding - to the user. Specifying a particular tool like `{"type": "file_search"}` or - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - tools: Override the tools the assistant can use for this run. This is useful for - modifying the behavior on a per-run basis. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - - truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
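A rough sketch of consuming the AsyncStream returned by this `stream=True` overload; the IDs are placeholders, and the loop only prints event names rather than handling message deltas.

import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()

async def main() -> None:
    stream = await client.beta.threads.runs.create(
        thread_id="THREAD_ID",
        assistant_id="ASSISTANT_ID",
        stream=True,
    )
    async for event in stream:
        # Each item is an AssistantStreamEvent; the stream ends once the run
        # reaches a terminal state (e.g. "thread.run.completed").
        print(event.event)

asyncio.run(main())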
- - @overload - async def create( - self, - thread_id: str, - *, - assistant_id: str, - stream: bool, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Run | AsyncStream[AssistantStreamEvent]: + ) -> Run: """ Create a run. @@ -1982,10 +997,6 @@ async def create( [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. - stream: If `true`, returns a stream of events that happen during the Run as server-sent - events, terminating when the Run enters a terminal state with a `data: [DONE]` - message. - additional_instructions: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. @@ -2005,319 +1016,63 @@ async def create( max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of - prompt tokens specified, the run will end with status `incomplete`. See - `incomplete_details` for more info. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 - characters long. - - model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - be used to execute this run. If a value is provided here, it will override the - model associated with the assistant. If not, the model associated with the - assistant will be used. - - response_format: Specifies the format that the model must output. 
Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), - and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling one or more - tools. `required` means the model must call one or more tools before responding - to the user. Specifying a particular tool like `{"type": "file_search"}` or - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - tools: Override the tools the assistant can use for this run. This is useful for - modifying the behavior on a per-run basis. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - - truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
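For illustration, a sketch of forcing a particular function tool with `tool_choice`, using the dict form shown in the docstring above; the tool name `lookup_order` and its schema are assumptions made up for this example.

import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()

async def main() -> None:
    run = await client.beta.threads.runs.create(
        thread_id="THREAD_ID",
        assistant_id="ASSISTANT_ID",
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "lookup_order",  # hypothetical function tool
                    "description": "Look up an order by its ID.",
                    "parameters": {
                        "type": "object",
                        "properties": {"order_id": {"type": "string"}},
                        "required": ["order_id"],
                    },
                },
            }
        ],
        # Force the model to call the named tool instead of replying directly.
        tool_choice={"type": "function", "function": {"name": "lookup_order"}},
    )
    print(run.status)  # a forced function call typically ends in `requires_action`

asyncio.run(main())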
- - @required_args(["assistant_id"], ["assistant_id", "stream"]) - async def create( - self, - thread_id: str, - *, - assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Run | AsyncStream[AssistantStreamEvent]: - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} - return await self._post( - f"/threads/{thread_id}/runs", - body=await async_maybe_transform( - { - "assistant_id": assistant_id, - "additional_instructions": additional_instructions, - "additional_messages": additional_messages, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "response_format": response_format, - "stream": stream, - "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, - "top_p": top_p, - "truncation_strategy": truncation_strategy, - }, - run_create_params.RunCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Run, - stream=stream or False, - stream_cls=AsyncStream[AssistantStreamEvent], - ) - - async def retrieve( - self, - run_id: str, - *, - thread_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Run: - """ - Retrieves a run. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} - return await self._get( - f"/threads/{thread_id}/runs/{run_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Run, - ) - - async def update( - self, - run_id: str, - *, - thread_id: str, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Run: - """ - Modifies a run. - - Args: - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 - characters long. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} - return await self._post( - f"/threads/{thread_id}/runs/{run_id}", - body=await async_maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Run, - ) + prompt tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. - def list( - self, - thread_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncPaginator[Run, AsyncCursorPage[Run]]: - """ - Returns a list of runs belonging to a thread. 
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. + model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + be used to execute this run. If a value is provided here, it will override the + model associated with the assistant. If not, the model associated with the + assistant will be used. - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + response_format: Specifies the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + message the model generates is valid JSON. - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. - extra_headers: Send extra headers + stream: If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. - extra_query: Add additional query parameters to the request + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. - extra_body: Add additional JSON properties to the request + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tools and instead generates a message. `auto` is the default value + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. 
- timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} - return self._get_api_list( - f"/threads/{thread_id}/runs", - page=AsyncCursorPage[Run], - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "before": before, - "limit": limit, - "order": order, - }, - run_list_params.RunListParams, - ), - ), - model=Run, - ) + tools: Override the tools the assistant can use for this run. This is useful for + modifying the behavior on a per-run basis. - async def cancel( - self, - run_id: str, - *, - thread_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Run: - """ - Cancels a run that is `in_progress`. + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + + truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to + control the intial context window of the run. - Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -2326,23 +1081,15 @@ async def cancel( timeout: Override the client-level default timeout for this request, in seconds """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} - return await self._post( - f"/threads/{thread_id}/runs/{run_id}/cancel", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Run, - ) + ... - async def create_and_poll( + @overload + async def create( self, + thread_id: str, *, assistant_id: str, + stream: Literal[True], additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -2382,115 +1129,114 @@ async def create_and_poll( tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - thread_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Run: - """ - A helper to create a run an poll for a terminal state. More information on Run - lifecycles can be found here: - https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps + ) -> AsyncStream[AssistantStreamEvent]: """ - run = await self.create( - thread_id=thread_id, - assistant_id=assistant_id, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - instructions=instructions, - max_completion_tokens=max_completion_tokens, - max_prompt_tokens=max_prompt_tokens, - metadata=metadata, - model=model, - response_format=response_format, - temperature=temperature, - tool_choice=tool_choice, - # We assume we are not streaming when polling - stream=False, - tools=tools, - truncation_strategy=truncation_strategy, - top_p=top_p, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - return await self.poll( - run.id, - thread_id=thread_id, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - poll_interval_ms=poll_interval_ms, - timeout=timeout, - ) + Create a run. - @overload - @typing_extensions.deprecated("use `stream` instead") - def create_and_stream( - self, - *, - assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - thread_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]: - """Create a Run stream""" + Args: + assistant_id: The ID of the + [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + execute this run. 
+ + stream: If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. + + additional_instructions: Appends additional instructions at the end of the instructions for the run. This + is useful for modifying the behavior on a per-run basis without overriding other + instructions. + + additional_messages: Adds additional messages to the thread before creating the run. + + instructions: Overrides the + [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + of the assistant. This is useful for modifying the behavior on a per-run basis. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. If the run exceeds the number of + completion tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + + max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run. + The run will make a best effort to use only the number of prompt tokens + specified, across multiple turns of the run. If the run exceeds the number of + prompt tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. + + model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + be used to execute this run. If a value is provided here, it will override the + model associated with the assistant. If not, the model associated with the + assistant will be used. + + response_format: Specifies the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. + + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tools and instead generates a message. `auto` is the default value + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. 
Specifying a particular tool like `{"type": "file_search"}` or + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + tools: Override the tools the assistant can use for this run. This is useful for + modifying the behavior on a per-run basis. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + + truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to + control the intial context window of the run. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ ... @overload - @typing_extensions.deprecated("use `stream` instead") - def create_and_stream( + async def create( self, + thread_id: str, *, assistant_id: str, + stream: bool, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -2530,21 +1276,111 @@ def create_and_stream( tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - thread_id: str, - event_handler: AsyncAssistantEventHandlerT, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]: - """Create a Run stream""" + ) -> Run | AsyncStream[AssistantStreamEvent]: + """ + Create a run. + + Args: + assistant_id: The ID of the + [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + execute this run. + + stream: If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. + + additional_instructions: Appends additional instructions at the end of the instructions for the run. This + is useful for modifying the behavior on a per-run basis without overriding other + instructions. + + additional_messages: Adds additional messages to the thread before creating the run. + + instructions: Overrides the + [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + of the assistant. This is useful for modifying the behavior on a per-run basis. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. If the run exceeds the number of + completion tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. 
+ + max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run. + The run will make a best effort to use only the number of prompt tokens + specified, across multiple turns of the run. If the run exceeds the number of + prompt tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. + + model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + be used to execute this run. If a value is provided here, it will override the + model associated with the assistant. If not, the model associated with the + assistant will be used. + + response_format: Specifies the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. + + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tools and instead generates a message. `auto` is the default value + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + tools: Override the tools the assistant can use for this run. This is useful for + modifying the behavior on a per-run basis. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + + truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to + control the intial context window of the run. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ ... 
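To round out the parameter descriptions above, a sketch of combining the token budgets with a truncation strategy; the numeric values are illustrative, and the `last_messages` shape is an assumption about the TruncationStrategy params rather than something stated in this patch.

import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()

async def main() -> None:
    run = await client.beta.threads.runs.create(
        thread_id="THREAD_ID",
        assistant_id="ASSISTANT_ID",
        max_prompt_tokens=2000,
        max_completion_tokens=500,
        # Keep only the most recent messages in the run's context window.
        truncation_strategy={"type": "last_messages", "last_messages": 5},
    )
    # A run that exceeds either budget ends with status "incomplete"; details
    # are reported in `incomplete_details`.
    print(run.status, run.incomplete_details)

asyncio.run(main())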
- @typing_extensions.deprecated("use `stream` instead") - def create_and_stream( + @required_args(["assistant_id"], ["assistant_id", "stream"]) + async def create( self, + thread_id: str, *, assistant_id: str, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -2581,36 +1417,25 @@ def create_and_stream( ] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - thread_id: str, - event_handler: AsyncAssistantEventHandlerT | None = None, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ( - AsyncAssistantStreamManager[AsyncAssistantEventHandler] - | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT] - ): - """Create a Run stream""" + ) -> Run | AsyncStream[AssistantStreamEvent]: if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - - extra_headers = { - "OpenAI-Beta": "assistants=v2", - "X-Stainless-Stream-Helper": "threads.runs.create_and_stream", - "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false", - **(extra_headers or {}), - } - request = self._post( + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return await self._post( f"/threads/{thread_id}/runs", - body=maybe_transform( + body=await async_maybe_transform( { "assistant_id": assistant_id, "additional_instructions": additional_instructions, @@ -2621,12 +1446,12 @@ def create_and_stream( "metadata": metadata, "model": model, "response_format": response_format, + "stream": stream, "temperature": temperature, "tool_choice": tool_choice, - "stream": True, "tools": tools, - "truncation_strategy": truncation_strategy, "top_p": top_p, + "truncation_strategy": truncation_strategy, }, run_create_params.RunCreateParams, ), @@ -2634,262 +1459,194 @@ def create_and_stream( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Run, - stream=True, + stream=stream or False, stream_cls=AsyncStream[AssistantStreamEvent], ) - return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler()) - async def poll( + async def retrieve( self, run_id: str, + *, thread_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - poll_interval_ms: int | NotGiven = NOT_GIVEN, ) -> Run: """ - A helper to poll a run status until it reaches a terminal state. 
More - information on Run lifecycles can be found here: - https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps - """ - extra_headers = {"X-Stainless-Poll-Helper": "true", **(extra_headers or {})} - - if is_given(poll_interval_ms): - extra_headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms) + Retrieves a run. - terminal_states = {"requires_action", "cancelled", "completed", "failed", "expired", "incomplete"} - while True: - response = await self.with_raw_response.retrieve( - thread_id=thread_id, - run_id=run_id, - extra_headers=extra_headers, - extra_body=extra_body, - extra_query=extra_query, - timeout=timeout, - ) + Args: + extra_headers: Send extra headers - run = response.parse() - # Return if we reached a terminal state - if run.status in terminal_states: - return run + extra_query: Add additional query parameters to the request - if not is_given(poll_interval_ms): - from_header = response.headers.get("openai-poll-after-ms") - if from_header is not None: - poll_interval_ms = int(from_header) - else: - poll_interval_ms = 1000 + extra_body: Add additional JSON properties to the request - await self._sleep(poll_interval_ms / 1000) + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not run_id: + raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return await self._get( + f"/threads/{thread_id}/runs/{run_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Run, + ) - @overload - def stream( + async def update( self, + run_id: str, *, - assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, thread_id: str, + metadata: Optional[object] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]: - """Create a Run stream""" - ... - - @overload - def stream( - self, - *, - assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + ) -> Run: + """ + Modifies a run. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not run_id: + raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return await self._post( + f"/threads/{thread_id}/runs/{run_id}", + body=await async_maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Run, + ) + + def list( + self, thread_id: str, - event_handler: AsyncAssistantEventHandlerT, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]: - """Create a Run stream""" - ... + ) -> AsyncPaginator[Run, AsyncCursorPage[Run]]: + """ + Returns a list of runs belonging to a thread. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include before=obj_foo in order to + fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request - def stream( + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return self._get_api_list( + f"/threads/{thread_id}/runs", + page=AsyncCursorPage[Run], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + run_list_params.RunListParams, + ), + ), + model=Run, + ) + + async def cancel( self, + run_id: str, *, - assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, thread_id: str, - event_handler: AsyncAssistantEventHandlerT | None = None, # Use the following 
arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ( - AsyncAssistantStreamManager[AsyncAssistantEventHandler] - | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT] - ): - """Create a Run stream""" + ) -> Run: + """ + Cancels a run that is `in_progress`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - - extra_headers = { - "OpenAI-Beta": "assistants=v2", - "X-Stainless-Stream-Helper": "threads.runs.create_and_stream", - "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false", - **(extra_headers or {}), - } - request = self._post( - f"/threads/{thread_id}/runs", - body=maybe_transform( - { - "assistant_id": assistant_id, - "additional_instructions": additional_instructions, - "additional_messages": additional_messages, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "response_format": response_format, - "temperature": temperature, - "tool_choice": tool_choice, - "stream": True, - "tools": tools, - "truncation_strategy": truncation_strategy, - "top_p": top_p, - }, - run_create_params.RunCreateParams, - ), + if not run_id: + raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return await self._post( + f"/threads/{thread_id}/runs/{run_id}/cancel", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Run, - stream=True, - stream_cls=AsyncStream[AssistantStreamEvent], ) - return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler()) @overload async def submit_tool_outputs( @@ -3042,140 +1799,6 @@ async def submit_tool_outputs( stream_cls=AsyncStream[AssistantStreamEvent], ) - async def submit_tool_outputs_and_poll( - self, - *, - tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - run_id: str, - thread_id: str, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Run: - """ - A helper to submit a tool output to a run and poll for a terminal run state. 
- More information on Run lifecycles can be found here: - https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps - """ - run = await self.submit_tool_outputs( - run_id=run_id, - thread_id=thread_id, - tool_outputs=tool_outputs, - stream=False, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - return await self.poll( - run_id=run.id, - thread_id=thread_id, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - poll_interval_ms=poll_interval_ms, - ) - - @overload - def submit_tool_outputs_stream( - self, - *, - tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - run_id: str, - thread_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]: - """ - Submit the tool outputs from a previous run and stream the run to a terminal - state. More information on Run lifecycles can be found here: - https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps - """ - ... - - @overload - def submit_tool_outputs_stream( - self, - *, - tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - run_id: str, - thread_id: str, - event_handler: AsyncAssistantEventHandlerT, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]: - """ - Submit the tool outputs from a previous run and stream the run to a terminal - state. More information on Run lifecycles can be found here: - https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps - """ - ... - - def submit_tool_outputs_stream( - self, - *, - tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - run_id: str, - thread_id: str, - event_handler: AsyncAssistantEventHandlerT | None = None, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ( - AsyncAssistantStreamManager[AsyncAssistantEventHandler] - | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT] - ): - """ - Submit the tool outputs from a previous run and stream the run to a terminal - state. 
More information on Run lifecycles can be found here: - https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps - """ - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - - extra_headers = { - "OpenAI-Beta": "assistants=v2", - "X-Stainless-Stream-Helper": "threads.runs.submit_tool_outputs_stream", - "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false", - **(extra_headers or {}), - } - request = self._post( - f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs", - body=maybe_transform( - { - "tool_outputs": tool_outputs, - "stream": True, - }, - run_submit_tool_outputs_params.RunSubmitToolOutputsParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Run, - stream=True, - stream_cls=AsyncStream[AssistantStreamEvent], - ) - return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler()) - class RunsWithRawResponse: def __init__(self, runs: Runs) -> None: diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index 36cdd03f91..9637f37d0a 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -3,7 +3,6 @@ from __future__ import annotations from typing import Union, Iterable, Optional, overload -from functools import partial from typing_extensions import Literal import httpx @@ -44,14 +43,6 @@ from ...._base_client import ( make_request_options, ) -from ....lib.streaming import ( - AssistantEventHandler, - AssistantEventHandlerT, - AssistantStreamManager, - AsyncAssistantEventHandler, - AsyncAssistantEventHandlerT, - AsyncAssistantStreamManager, -) from ....types.beta.thread import Thread from ....types.beta.threads.run import Run from ....types.beta.thread_deleted import ThreadDeleted @@ -776,284 +767,6 @@ def create_and_run( stream_cls=Stream[AssistantStreamEvent], ) - def create_and_run_poll( - self, - *, - assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven 
= NOT_GIVEN, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Run: - """ - A helper to create a thread, start a run and then poll for a terminal state. - More information on Run lifecycles can be found here: - https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps - """ - run = self.create_and_run( - assistant_id=assistant_id, - instructions=instructions, - max_completion_tokens=max_completion_tokens, - max_prompt_tokens=max_prompt_tokens, - metadata=metadata, - model=model, - response_format=response_format, - temperature=temperature, - stream=False, - thread=thread, - tool_resources=tool_resources, - tool_choice=tool_choice, - truncation_strategy=truncation_strategy, - top_p=top_p, - tools=tools, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - return self.runs.poll(run.id, run.thread_id, extra_headers, extra_query, extra_body, timeout, poll_interval_ms) - - @overload - def create_and_run_stream( - self, - *, - assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantStreamManager[AssistantEventHandler]: - """Create a thread and stream the run back""" - ... 
- - @overload - def create_and_run_stream( - self, - *, - assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - event_handler: AssistantEventHandlerT, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantStreamManager[AssistantEventHandlerT]: - """Create a thread and stream the run back""" - ... 
- - def create_and_run_stream( - self, - *, - assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - event_handler: AssistantEventHandlerT | None = None, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]: - """Create a thread and stream the run back""" - extra_headers = { - "OpenAI-Beta": "assistants=v2", - "X-Stainless-Stream-Helper": "threads.create_and_run_stream", - "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false", - **(extra_headers or {}), - } - make_request = partial( - self._post, - "/threads/runs", - body=maybe_transform( - { - "assistant_id": assistant_id, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "response_format": response_format, - "temperature": temperature, - "tool_choice": tool_choice, - "stream": True, - "thread": thread, - "tools": tools, - "tool": tool_resources, - "truncation_strategy": truncation_strategy, - "top_p": top_p, - }, - thread_create_and_run_params.ThreadCreateAndRunParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Run, - stream=True, - stream_cls=Stream[AssistantStreamEvent], - ) - return AssistantStreamManager(make_request, event_handler=event_handler or AssistantEventHandler()) - class AsyncThreads(AsyncAPIResource): @cached_property @@ -1769,288 +1482,6 @@ async def create_and_run( stream_cls=AsyncStream[AssistantStreamEvent], ) - async def create_and_run_poll( - self, - *, - assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = 
NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Run: - """ - A helper to create a thread, start a run and then poll for a terminal state. - More information on Run lifecycles can be found here: - https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps - """ - run = await self.create_and_run( - assistant_id=assistant_id, - instructions=instructions, - max_completion_tokens=max_completion_tokens, - max_prompt_tokens=max_prompt_tokens, - metadata=metadata, - model=model, - response_format=response_format, - temperature=temperature, - stream=False, - thread=thread, - tool_resources=tool_resources, - tool_choice=tool_choice, - truncation_strategy=truncation_strategy, - top_p=top_p, - tools=tools, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - return await self.runs.poll( - run.id, run.thread_id, extra_headers, extra_query, extra_body, timeout, poll_interval_ms - ) - - @overload - def create_and_run_stream( - self, - *, - assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = 
NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]: - """Create a thread and stream the run back""" - ... - - @overload - def create_and_run_stream( - self, - *, - assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - event_handler: AsyncAssistantEventHandlerT, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]: - """Create a thread and stream the run back""" - ... 
- - def create_and_run_stream( - self, - *, - assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - event_handler: AsyncAssistantEventHandlerT | None = None, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ( - AsyncAssistantStreamManager[AsyncAssistantEventHandler] - | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT] - ): - """Create a thread and stream the run back""" - extra_headers = { - "OpenAI-Beta": "assistants=v2", - "X-Stainless-Stream-Helper": "threads.create_and_run_stream", - "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false", - **(extra_headers or {}), - } - request = self._post( - "/threads/runs", - body=maybe_transform( - { - "assistant_id": assistant_id, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "response_format": response_format, - "temperature": temperature, - "tool_choice": tool_choice, - "stream": True, - "thread": thread, - "tools": tools, - "tool": tool_resources, - "truncation_strategy": truncation_strategy, - "top_p": top_p, - }, - thread_create_and_run_params.ThreadCreateAndRunParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Run, - stream=True, - stream_cls=AsyncStream[AssistantStreamEvent], - ) - return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler()) - class ThreadsWithRawResponse: def __init__(self, threads: Threads) -> None: diff --git a/src/openai/resources/beta/vector_stores/file_batches.py b/src/openai/resources/beta/vector_stores/file_batches.py index f1ced51700..38a2799383 100644 --- a/src/openai/resources/beta/vector_stores/file_batches.py +++ b/src/openai/resources/beta/vector_stores/file_batches.py @@ 
-2,19 +2,14 @@ from __future__ import annotations -import asyncio -from typing import List, Iterable +from typing import List from typing_extensions import Literal -from concurrent.futures import Future, ThreadPoolExecutor, as_completed import httpx -import sniffio from .... import _legacy_response -from ....types import FileObject -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ...._utils import ( - is_given, maybe_transform, async_maybe_transform, ) @@ -158,25 +153,6 @@ def cancel( cast_to=VectorStoreFileBatch, ) - def create_and_poll( - self, - vector_store_id: str, - *, - file_ids: List[str], - poll_interval_ms: int | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileBatch: - """Create a vector store batch and poll until all files have been processed.""" - batch = self.create( - vector_store_id=vector_store_id, - file_ids=file_ids, - ) - # TODO: don't poll unless necessary?? - return self.poll( - batch.id, - vector_store_id=vector_store_id, - poll_interval_ms=poll_interval_ms, - ) - def list_files( self, batch_id: str, @@ -251,91 +227,6 @@ def list_files( model=VectorStoreFile, ) - def poll( - self, - batch_id: str, - *, - vector_store_id: str, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileBatch: - """Wait for the given file batch to be processed. - - Note: this will return even if one of the files failed to process, you need to - check batch.file_counts.failed_count to handle this case. - """ - headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"} - if is_given(poll_interval_ms): - headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms) - - while True: - response = self.with_raw_response.retrieve( - batch_id, - vector_store_id=vector_store_id, - extra_headers=headers, - ) - - batch = response.parse() - if batch.file_counts.in_progress > 0: - if not is_given(poll_interval_ms): - from_header = response.headers.get("openai-poll-after-ms") - if from_header is not None: - poll_interval_ms = int(from_header) - else: - poll_interval_ms = 1000 - - self._sleep(poll_interval_ms / 1000) - continue - - return batch - - def upload_and_poll( - self, - vector_store_id: str, - *, - files: Iterable[FileTypes], - max_concurrency: int = 5, - file_ids: List[str] = [], - poll_interval_ms: int | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileBatch: - """Uploads the given files concurrently and then creates a vector store file batch. - - If you've already uploaded certain files that you want to include in this batch - then you can pass their IDs through the `file_ids` argument. - - By default, if any file upload fails then an exception will be eagerly raised. - - The number of concurrency uploads is configurable using the `max_concurrency` - parameter. - - Note: this method only supports `asyncio` or `trio` as the backing async - runtime. 
- """ - results: list[FileObject] = [] - - with ThreadPoolExecutor(max_workers=max_concurrency) as executor: - futures: list[Future[FileObject]] = [ - executor.submit( - self._client.files.create, - file=file, - purpose="assistants", - ) - for file in files - ] - - for future in as_completed(futures): - exc = future.exception() - if exc: - raise exc - - results.append(future.result()) - - batch = self.create_and_poll( - vector_store_id=vector_store_id, - file_ids=[*file_ids, *(f.id for f in results)], - poll_interval_ms=poll_interval_ms, - ) - return batch - class AsyncFileBatches(AsyncAPIResource): @cached_property @@ -462,25 +353,6 @@ async def cancel( cast_to=VectorStoreFileBatch, ) - async def create_and_poll( - self, - vector_store_id: str, - *, - file_ids: List[str], - poll_interval_ms: int | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileBatch: - """Create a vector store batch and poll until all files have been processed.""" - batch = await self.create( - vector_store_id=vector_store_id, - file_ids=file_ids, - ) - # TODO: don't poll unless necessary?? - return await self.poll( - batch.id, - vector_store_id=vector_store_id, - poll_interval_ms=poll_interval_ms, - ) - def list_files( self, batch_id: str, @@ -555,114 +427,6 @@ def list_files( model=VectorStoreFile, ) - async def poll( - self, - batch_id: str, - *, - vector_store_id: str, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileBatch: - """Wait for the given file batch to be processed. - - Note: this will return even if one of the files failed to process, you need to - check batch.file_counts.failed_count to handle this case. - """ - headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"} - if is_given(poll_interval_ms): - headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms) - - while True: - response = await self.with_raw_response.retrieve( - batch_id, - vector_store_id=vector_store_id, - extra_headers=headers, - ) - - batch = response.parse() - if batch.file_counts.in_progress > 0: - if not is_given(poll_interval_ms): - from_header = response.headers.get("openai-poll-after-ms") - if from_header is not None: - poll_interval_ms = int(from_header) - else: - poll_interval_ms = 1000 - - await self._sleep(poll_interval_ms / 1000) - continue - - return batch - - async def upload_and_poll( - self, - vector_store_id: str, - *, - files: Iterable[FileTypes], - max_concurrency: int = 5, - file_ids: List[str] = [], - poll_interval_ms: int | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileBatch: - """Uploads the given files concurrently and then creates a vector store file batch. - - If you've already uploaded certain files that you want to include in this batch - then you can pass their IDs through the `file_ids` argument. - - By default, if any file upload fails then an exception will be eagerly raised. - - The number of concurrency uploads is configurable using the `max_concurrency` - parameter. - - Note: this method only supports `asyncio` or `trio` as the backing async - runtime. 
- """ - uploaded_files: list[FileObject] = [] - - async_library = sniffio.current_async_library() - - if async_library == "asyncio": - - async def asyncio_upload_file(semaphore: asyncio.Semaphore, file: FileTypes) -> None: - async with semaphore: - file_obj = await self._client.files.create( - file=file, - purpose="assistants", - ) - uploaded_files.append(file_obj) - - semaphore = asyncio.Semaphore(max_concurrency) - - tasks = [asyncio_upload_file(semaphore, file) for file in files] - - await asyncio.gather(*tasks) - elif async_library == "trio": - # We only import if the library is being used. - # We support Python 3.7 so are using an older version of trio that does not have type information - import trio # type: ignore # pyright: ignore[reportMissingTypeStubs] - - async def trio_upload_file(limiter: trio.CapacityLimiter, file: FileTypes) -> None: - async with limiter: - file_obj = await self._client.files.create( - file=file, - purpose="assistants", - ) - uploaded_files.append(file_obj) - - limiter = trio.CapacityLimiter(max_concurrency) - - async with trio.open_nursery() as nursery: - for file in files: - nursery.start_soon(trio_upload_file, limiter, file) # pyright: ignore [reportUnknownMemberType] - else: - raise RuntimeError( - f"Async runtime {async_library} is not supported yet. Only asyncio or trio is supported", - ) - - batch = await self.create_and_poll( - vector_store_id=vector_store_id, - file_ids=[*file_ids, *(f.id for f in uploaded_files)], - poll_interval_ms=poll_interval_ms, - ) - return batch - class FileBatchesWithRawResponse: def __init__(self, file_batches: FileBatches) -> None: diff --git a/src/openai/resources/beta/vector_stores/files.py b/src/openai/resources/beta/vector_stores/files.py index 5c3db27619..e1c788a1fd 100644 --- a/src/openai/resources/beta/vector_stores/files.py +++ b/src/openai/resources/beta/vector_stores/files.py @@ -2,15 +2,13 @@ from __future__ import annotations -from typing import TYPE_CHECKING -from typing_extensions import Literal, assert_never +from typing_extensions import Literal import httpx from .... import _legacy_response -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ...._utils import ( - is_given, maybe_transform, async_maybe_transform, ) @@ -229,92 +227,6 @@ def delete( cast_to=VectorStoreFileDeleted, ) - def create_and_poll( - self, - file_id: str, - *, - vector_store_id: str, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - ) -> VectorStoreFile: - """Attach a file to the given vector store and wait for it to be processed.""" - self.create(vector_store_id=vector_store_id, file_id=file_id) - - return self.poll( - file_id, - vector_store_id=vector_store_id, - poll_interval_ms=poll_interval_ms, - ) - - def poll( - self, - file_id: str, - *, - vector_store_id: str, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - ) -> VectorStoreFile: - """Wait for the vector store file to finish processing. 
- - Note: this will return even if the file failed to process, you need to check - file.last_error and file.status to handle these cases - """ - headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"} - if is_given(poll_interval_ms): - headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms) - - while True: - response = self.with_raw_response.retrieve( - file_id, - vector_store_id=vector_store_id, - extra_headers=headers, - ) - - file = response.parse() - if file.status == "in_progress": - if not is_given(poll_interval_ms): - from_header = response.headers.get("openai-poll-after-ms") - if from_header is not None: - poll_interval_ms = int(from_header) - else: - poll_interval_ms = 1000 - - self._sleep(poll_interval_ms / 1000) - elif file.status == "cancelled" or file.status == "completed" or file.status == "failed": - return file - else: - if TYPE_CHECKING: # type: ignore[unreachable] - assert_never(file.status) - else: - return file - - def upload( - self, - *, - vector_store_id: str, - file: FileTypes, - ) -> VectorStoreFile: - """Upload a file to the `files` API and then attach it to the given vector store. - - Note the file will be asynchronously processed (you can use the alternative - polling helper method to wait for processing to complete). - """ - file_obj = self._client.files.create(file=file, purpose="assistants") - return self.create(vector_store_id=vector_store_id, file_id=file_obj.id) - - def upload_and_poll( - self, - *, - vector_store_id: str, - file: FileTypes, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - ) -> VectorStoreFile: - """Add a file to a vector store and poll until processing is complete.""" - file_obj = self._client.files.create(file=file, purpose="assistants") - return self.create_and_poll( - vector_store_id=vector_store_id, - file_id=file_obj.id, - poll_interval_ms=poll_interval_ms, - ) - class AsyncFiles(AsyncAPIResource): @cached_property @@ -516,92 +428,6 @@ async def delete( cast_to=VectorStoreFileDeleted, ) - async def create_and_poll( - self, - file_id: str, - *, - vector_store_id: str, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - ) -> VectorStoreFile: - """Attach a file to the given vector store and wait for it to be processed.""" - await self.create(vector_store_id=vector_store_id, file_id=file_id) - - return await self.poll( - file_id, - vector_store_id=vector_store_id, - poll_interval_ms=poll_interval_ms, - ) - - async def poll( - self, - file_id: str, - *, - vector_store_id: str, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - ) -> VectorStoreFile: - """Wait for the vector store file to finish processing. 
- - Note: this will return even if the file failed to process, you need to check - file.last_error and file.status to handle these cases - """ - headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"} - if is_given(poll_interval_ms): - headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms) - - while True: - response = await self.with_raw_response.retrieve( - file_id, - vector_store_id=vector_store_id, - extra_headers=headers, - ) - - file = response.parse() - if file.status == "in_progress": - if not is_given(poll_interval_ms): - from_header = response.headers.get("openai-poll-after-ms") - if from_header is not None: - poll_interval_ms = int(from_header) - else: - poll_interval_ms = 1000 - - await self._sleep(poll_interval_ms / 1000) - elif file.status == "cancelled" or file.status == "completed" or file.status == "failed": - return file - else: - if TYPE_CHECKING: # type: ignore[unreachable] - assert_never(file.status) - else: - return file - - async def upload( - self, - *, - vector_store_id: str, - file: FileTypes, - ) -> VectorStoreFile: - """Upload a file to the `files` API and then attach it to the given vector store. - - Note the file will be asynchronously processed (you can use the alternative - polling helper method to wait for processing to complete). - """ - file_obj = await self._client.files.create(file=file, purpose="assistants") - return await self.create(vector_store_id=vector_store_id, file_id=file_obj.id) - - async def upload_and_poll( - self, - *, - vector_store_id: str, - file: FileTypes, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - ) -> VectorStoreFile: - """Add a file to a vector store and poll until processing is complete.""" - file_obj = await self._client.files.create(file=file, purpose="assistants") - return await self.create_and_poll( - vector_store_id=vector_store_id, - file_id=file_obj.id, - poll_interval_ms=poll_interval_ms, - ) - class FilesWithRawResponse: def __init__(self, files: Files) -> None: diff --git a/src/openai/resources/embeddings.py b/src/openai/resources/embeddings.py index 773b6f0968..c2719bfe8b 100644 --- a/src/openai/resources/embeddings.py +++ b/src/openai/resources/embeddings.py @@ -2,8 +2,7 @@ from __future__ import annotations -import base64 -from typing import List, Union, Iterable, cast +from typing import List, Union, Iterable from typing_extensions import Literal import httpx @@ -11,9 +10,11 @@ from .. 
import _legacy_response from ..types import embedding_create_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import is_given, maybe_transform +from .._utils import ( + maybe_transform, + async_maybe_transform, +) from .._compat import cached_property -from .._extras import numpy as np, has_numpy from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from .._base_client import ( @@ -84,42 +85,20 @@ def create( timeout: Override the client-level default timeout for this request, in seconds """ - params = { - "input": input, - "model": model, - "user": user, - "dimensions": dimensions, - "encoding_format": encoding_format, - } - if not is_given(encoding_format) and has_numpy(): - params["encoding_format"] = "base64" - - def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse: - if is_given(encoding_format): - # don't modify the response object if a user explicitly asked for a format - return obj - - for embedding in obj.data: - data = cast(object, embedding.embedding) - if not isinstance(data, str): - # numpy is not installed / base64 optimisation isn't enabled for this model yet - continue - - embedding.embedding = np.frombuffer( # type: ignore[no-untyped-call] - base64.b64decode(data), dtype="float32" - ).tolist() - - return obj - return self._post( "/embeddings", - body=maybe_transform(params, embedding_create_params.EmbeddingCreateParams), + body=maybe_transform( + { + "input": input, + "model": model, + "dimensions": dimensions, + "encoding_format": encoding_format, + "user": user, + }, + embedding_create_params.EmbeddingCreateParams, + ), options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - post_parser=parser, + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=CreateEmbeddingResponse, ) @@ -185,42 +164,20 @@ async def create( timeout: Override the client-level default timeout for this request, in seconds """ - params = { - "input": input, - "model": model, - "user": user, - "dimensions": dimensions, - "encoding_format": encoding_format, - } - if not is_given(encoding_format) and has_numpy(): - params["encoding_format"] = "base64" - - def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse: - if is_given(encoding_format): - # don't modify the response object if a user explicitly asked for a format - return obj - - for embedding in obj.data: - data = cast(object, embedding.embedding) - if not isinstance(data, str): - # numpy is not installed / base64 optimisation isn't enabled for this model yet - continue - - embedding.embedding = np.frombuffer( # type: ignore[no-untyped-call] - base64.b64decode(data), dtype="float32" - ).tolist() - - return obj - return await self._post( "/embeddings", - body=maybe_transform(params, embedding_create_params.EmbeddingCreateParams), + body=await async_maybe_transform( + { + "input": input, + "model": model, + "dimensions": dimensions, + "encoding_format": encoding_format, + "user": user, + }, + embedding_create_params.EmbeddingCreateParams, + ), options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - post_parser=parser, + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=CreateEmbeddingResponse, ) diff --git a/src/openai/resources/files.py 
b/src/openai/resources/files.py index aed0829dfe..f92e901184 100644 --- a/src/openai/resources/files.py +++ b/src/openai/resources/files.py @@ -2,7 +2,6 @@ from __future__ import annotations -import time import typing_extensions from typing import Mapping, cast from typing_extensions import Literal @@ -292,29 +291,6 @@ def retrieve_content( cast_to=str, ) - def wait_for_processing( - self, - id: str, - *, - poll_interval: float = 5.0, - max_wait_seconds: float = 30 * 60, - ) -> FileObject: - """Waits for the given file to be processed, default timeout is 30 mins.""" - TERMINAL_STATES = {"processed", "error", "deleted"} - - start = time.time() - file = self.retrieve(id) - while file.status not in TERMINAL_STATES: - self._sleep(poll_interval) - - file = self.retrieve(id) - if time.time() - start > max_wait_seconds: - raise RuntimeError( - f"Giving up on waiting for file {id} to finish processing after {max_wait_seconds} seconds." - ) - - return file - class AsyncFiles(AsyncAPIResource): @cached_property @@ -569,29 +545,6 @@ async def retrieve_content( cast_to=str, ) - async def wait_for_processing( - self, - id: str, - *, - poll_interval: float = 5.0, - max_wait_seconds: float = 30 * 60, - ) -> FileObject: - """Waits for the given file to be processed, default timeout is 30 mins.""" - TERMINAL_STATES = {"processed", "error", "deleted"} - - start = time.time() - file = await self.retrieve(id) - while file.status not in TERMINAL_STATES: - await self._sleep(poll_interval) - - file = await self.retrieve(id) - if time.time() - start > max_wait_seconds: - raise RuntimeError( - f"Giving up on waiting for file {id} to finish processing after {max_wait_seconds} seconds." - ) - - return file - class FilesWithRawResponse: def __init__(self, files: Files) -> None: diff --git a/src/openai/types/beta/chat/__init__.py b/src/openai/types/beta/chat/__init__.py deleted file mode 100644 index f8ee8b14b1..0000000000 --- a/src/openai/types/beta/chat/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations diff --git a/src/openai/version.py b/src/openai/version.py deleted file mode 100644 index 01a08ab5a9..0000000000 --- a/src/openai/version.py +++ /dev/null @@ -1,3 +0,0 @@ -from ._version import __version__ - -VERSION: str = __version__ diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py index 089dd1253e..bf4eba0689 100644 --- a/tests/api_resources/beta/threads/test_runs.py +++ b/tests/api_resources/beta/threads/test_runs.py @@ -14,8 +14,6 @@ Run, ) -# pyright: reportDeprecated=false - base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010/") diff --git a/tests/lib/test_azure.py b/tests/lib/test_azure.py deleted file mode 100644 index 9360b2925a..0000000000 --- a/tests/lib/test_azure.py +++ /dev/null @@ -1,66 +0,0 @@ -from typing import Union -from typing_extensions import Literal - -import pytest - -from openai._models import FinalRequestOptions -from openai.lib.azure import AzureOpenAI, AsyncAzureOpenAI - -Client = Union[AzureOpenAI, AsyncAzureOpenAI] - - -sync_client = AzureOpenAI( - api_version="2023-07-01", - api_key="example API key", - azure_endpoint="https://example-resource.azure.openai.com/", -) - -async_client = AsyncAzureOpenAI( - api_version="2023-07-01", - api_key="example API key", - azure_endpoint="https://example-resource.azure.openai.com/", -) - - -@pytest.mark.parametrize("client", [sync_client, async_client]) -def test_implicit_deployment_path(client: Client) -> None: - req = client._build_request( - FinalRequestOptions.construct( - method="post", - url="/chat/completions", - json_data={"model": "my-deployment-model"}, - ) - ) - assert ( - req.url - == "https://example-resource.azure.openai.com/openai/deployments/my-deployment-model/chat/completions?api-version=2023-07-01" - ) - - -@pytest.mark.parametrize( - "client,method", - [ - (sync_client, "copy"), - (sync_client, "with_options"), - (async_client, "copy"), - (async_client, "with_options"), - ], -) -def test_client_copying(client: Client, method: Literal["copy", "with_options"]) -> None: - if method == "copy": - copied = client.copy() - else: - copied = client.with_options() - - assert copied._custom_query == {"api-version": "2023-07-01"} - - -@pytest.mark.parametrize( - "client", - [sync_client, async_client], -) -def test_client_copying_override_options(client: Client) -> None: - copied = client.copy( - api_version="2022-05-01", - ) - assert copied._custom_query == {"api-version": "2022-05-01"} diff --git a/tests/lib/test_old_api.py b/tests/lib/test_old_api.py deleted file mode 100644 index 261b8acb94..0000000000 --- a/tests/lib/test_old_api.py +++ /dev/null @@ -1,17 +0,0 @@ -import pytest - -import openai -from openai.lib._old_api import APIRemovedInV1 - - -def test_basic_attribute_access_works() -> None: - for attr in dir(openai): - dir(getattr(openai, attr)) - - -def test_helpful_error_is_raised() -> None: - with pytest.raises(APIRemovedInV1): - openai.Completion.create() # type: ignore - - with pytest.raises(APIRemovedInV1): - openai.ChatCompletion.create() # type: ignore diff --git a/tests/test_module_client.py b/tests/test_module_client.py deleted file mode 100644 index 05b5f81111..0000000000 --- a/tests/test_module_client.py +++ /dev/null @@ -1,183 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os as _os - -import httpx -import pytest -from httpx import URL - -import openai -from openai import DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES - - -def reset_state() -> None: - openai._reset_client() - openai.api_key = None or "My API Key" - openai.organization = None - openai.project = None - openai.base_url = None - openai.timeout = DEFAULT_TIMEOUT - openai.max_retries = DEFAULT_MAX_RETRIES - openai.default_headers = None - openai.default_query = None - openai.http_client = None - openai.api_type = _os.environ.get("OPENAI_API_TYPE") # type: ignore - openai.api_version = None - openai.azure_endpoint = None - openai.azure_ad_token = None - openai.azure_ad_token_provider = None - - -@pytest.fixture(autouse=True) -def reset_state_fixture() -> None: - reset_state() - - -def test_base_url_option() -> None: - assert openai.base_url is None - assert openai.completions._client.base_url == URL("https://api.openai.com/v1/") - - openai.base_url = "http://foo.com/" - - assert openai.base_url == URL("http://foo.com/") - assert openai.completions._client.base_url == URL("http://foo.com/") - - -def test_timeout_option() -> None: - assert openai.timeout == openai.DEFAULT_TIMEOUT - assert openai.completions._client.timeout == openai.DEFAULT_TIMEOUT - - openai.timeout = 3 - - assert openai.timeout == 3 - assert openai.completions._client.timeout == 3 - - -def test_max_retries_option() -> None: - assert openai.max_retries == openai.DEFAULT_MAX_RETRIES - assert openai.completions._client.max_retries == openai.DEFAULT_MAX_RETRIES - - openai.max_retries = 1 - - assert openai.max_retries == 1 - assert openai.completions._client.max_retries == 1 - - -def test_default_headers_option() -> None: - assert openai.default_headers == None - - openai.default_headers = {"Foo": "Bar"} - - assert openai.default_headers["Foo"] == "Bar" - assert openai.completions._client.default_headers["Foo"] == "Bar" - - -def test_default_query_option() -> None: - assert openai.default_query is None - assert openai.completions._client._custom_query == {} - - openai.default_query = {"Foo": {"nested": 1}} - - assert openai.default_query["Foo"] == {"nested": 1} - assert openai.completions._client._custom_query["Foo"] == {"nested": 1} - - -def test_http_client_option() -> None: - assert openai.http_client is None - - original_http_client = openai.completions._client._client - assert original_http_client is not None - - new_client = httpx.Client() - openai.http_client = new_client - - assert openai.completions._client._client is new_client - - -import contextlib -from typing import Iterator - -from openai.lib.azure import AzureOpenAI - - -@contextlib.contextmanager -def fresh_env() -> Iterator[None]: - old = _os.environ.copy() - - try: - _os.environ.clear() - yield - finally: - _os.environ.update(old) - - -def test_only_api_key_results_in_openai_api() -> None: - with fresh_env(): - openai.api_type = None - openai.api_key = "example API key" - - assert type(openai.completions._client).__name__ == "_ModuleClient" - - -def test_azure_api_key_env_without_api_version() -> None: - with fresh_env(): - openai.api_type = None - _os.environ["AZURE_OPENAI_API_KEY"] = "example API key" - - with pytest.raises( - ValueError, - match=r"Must provide either the `api_version` argument or the `OPENAI_API_VERSION` environment variable", - ): - openai.completions._client # noqa: B018 - - -def test_azure_api_key_and_version_env() -> None: - with fresh_env(): - openai.api_type = None - 
_os.environ["AZURE_OPENAI_API_KEY"] = "example API key" - _os.environ["OPENAI_API_VERSION"] = "example-version" - - with pytest.raises( - ValueError, - match=r"Must provide one of the `base_url` or `azure_endpoint` arguments, or the `AZURE_OPENAI_ENDPOINT` environment variable", - ): - openai.completions._client # noqa: B018 - - -def test_azure_api_key_version_and_endpoint_env() -> None: - with fresh_env(): - openai.api_type = None - _os.environ["AZURE_OPENAI_API_KEY"] = "example API key" - _os.environ["OPENAI_API_VERSION"] = "example-version" - _os.environ["AZURE_OPENAI_ENDPOINT"] = "https://www.example/" - - openai.completions._client # noqa: B018 - - assert openai.api_type == "azure" - - -def test_azure_azure_ad_token_version_and_endpoint_env() -> None: - with fresh_env(): - openai.api_type = None - _os.environ["AZURE_OPENAI_AD_TOKEN"] = "example AD token" - _os.environ["OPENAI_API_VERSION"] = "example-version" - _os.environ["AZURE_OPENAI_ENDPOINT"] = "https://www.example/" - - client = openai.completions._client - assert isinstance(client, AzureOpenAI) - assert client._azure_ad_token == "example AD token" - - -def test_azure_azure_ad_token_provider_version_and_endpoint_env() -> None: - with fresh_env(): - openai.api_type = None - _os.environ["OPENAI_API_VERSION"] = "example-version" - _os.environ["AZURE_OPENAI_ENDPOINT"] = "https://www.example/" - openai.azure_ad_token_provider = lambda: "token" - - client = openai.completions._client - assert isinstance(client, AzureOpenAI) - assert client._azure_ad_token_provider is not None - assert client._azure_ad_token_provider() == "token" From 61fbc37001259799094b68dc38285451b62d711b Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 23 May 2024 10:49:18 +0100 Subject: [PATCH 003/192] chore(ci): update rye install location (#1436) the site is currently down due to DNS issues --- .devcontainer/Dockerfile | 2 +- .github/workflows/ci.yml | 4 ++-- .github/workflows/create-releases.yml | 2 +- .github/workflows/publish-pypi.yml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index dd93962010..e9841a168d 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -3,7 +3,7 @@ FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT} USER vscode -RUN curl -sSf https://rye-up.com/get | RYE_VERSION="0.24.0" RYE_INSTALL_OPTION="--yes" bash +RUN curl -sSf https://raw.githubusercontent.com/astral-sh/rye/main/scripts/install.sh | RYE_VERSION="0.24.0" RYE_INSTALL_OPTION="--yes" bash ENV PATH=/home/vscode/.rye/shims:$PATH RUN echo "[[ -d .venv ]] && source .venv/bin/activate" >> /home/vscode/.bashrc diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 76655ed7d6..c084831fa9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,7 +18,7 @@ jobs: - name: Install Rye run: | - curl -sSf https://rye-up.com/get | bash + curl -sSf https://raw.githubusercontent.com/astral-sh/rye/main/scripts/install.sh | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: RYE_VERSION: 0.24.0 @@ -39,7 +39,7 @@ jobs: - name: Install Rye run: | - curl -sSf https://rye-up.com/get | bash + curl -sSf https://raw.githubusercontent.com/astral-sh/rye/main/scripts/install.sh | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: RYE_VERSION: 0.24.0 diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml index a641be287b..ddc4de19ef 100644 --- 
a/.github/workflows/create-releases.yml +++ b/.github/workflows/create-releases.yml @@ -25,7 +25,7 @@ jobs: - name: Install Rye if: ${{ steps.release.outputs.releases_created }} run: | - curl -sSf https://rye-up.com/get | bash + curl -sSf https://raw.githubusercontent.com/astral-sh/rye/main/scripts/install.sh | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: RYE_VERSION: 0.24.0 diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index 2f88f86407..db855cbbd9 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -14,7 +14,7 @@ jobs: - name: Install Rye run: | - curl -sSf https://rye-up.com/get | bash + curl -sSf https://raw.githubusercontent.com/astral-sh/rye/main/scripts/install.sh | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: RYE_VERSION: 0.24.0 From 44cbaae7635e0be297872f5344161f397d14568d Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 23 May 2024 20:03:38 +0100 Subject: [PATCH 004/192] chore(ci): update rye install location (#1440) --- .devcontainer/Dockerfile | 2 +- .github/workflows/ci.yml | 4 ++-- .github/workflows/create-releases.yml | 2 +- .github/workflows/publish-pypi.yml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index e9841a168d..83bca8f716 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -3,7 +3,7 @@ FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT} USER vscode -RUN curl -sSf https://raw.githubusercontent.com/astral-sh/rye/main/scripts/install.sh | RYE_VERSION="0.24.0" RYE_INSTALL_OPTION="--yes" bash +RUN curl -sSf https://rye.astral.sh/get | RYE_VERSION="0.24.0" RYE_INSTALL_OPTION="--yes" bash ENV PATH=/home/vscode/.rye/shims:$PATH RUN echo "[[ -d .venv ]] && source .venv/bin/activate" >> /home/vscode/.bashrc diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c084831fa9..6fc5b36597 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,7 +18,7 @@ jobs: - name: Install Rye run: | - curl -sSf https://raw.githubusercontent.com/astral-sh/rye/main/scripts/install.sh | bash + curl -sSf https://rye.astral.sh/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: RYE_VERSION: 0.24.0 @@ -39,7 +39,7 @@ jobs: - name: Install Rye run: | - curl -sSf https://raw.githubusercontent.com/astral-sh/rye/main/scripts/install.sh | bash + curl -sSf https://rye.astral.sh/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: RYE_VERSION: 0.24.0 diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml index ddc4de19ef..1ac03ede3f 100644 --- a/.github/workflows/create-releases.yml +++ b/.github/workflows/create-releases.yml @@ -25,7 +25,7 @@ jobs: - name: Install Rye if: ${{ steps.release.outputs.releases_created }} run: | - curl -sSf https://raw.githubusercontent.com/astral-sh/rye/main/scripts/install.sh | bash + curl -sSf https://rye.astral.sh/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: RYE_VERSION: 0.24.0 diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index db855cbbd9..aae985b27e 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -14,7 +14,7 @@ jobs: - name: Install Rye run: | - curl -sSf https://raw.githubusercontent.com/astral-sh/rye/main/scripts/install.sh | bash + curl -sSf https://rye.astral.sh/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: RYE_VERSION: 0.24.0 From 
1c91c65a1a6e76eade18de6594d0e5327ffe91c8 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 24 May 2024 09:06:50 +0100 Subject: [PATCH 005/192] chore(internal): bump pyright (#1442) --- requirements-dev.lock | 2 +- src/openai/_utils/_utils.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index b6e5d7dc7a..802f827d44 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -63,7 +63,7 @@ pydantic==2.7.1 # via openai pydantic-core==2.18.2 # via pydantic -pyright==1.1.359 +pyright==1.1.364 pytest==7.1.1 # via pytest-asyncio pytest-asyncio==0.21.1 diff --git a/src/openai/_utils/_utils.py b/src/openai/_utils/_utils.py index 17904ce60d..34797c2905 100644 --- a/src/openai/_utils/_utils.py +++ b/src/openai/_utils/_utils.py @@ -20,7 +20,7 @@ import sniffio -from .._types import Headers, NotGiven, FileTypes, NotGivenOr, HeadersLike +from .._types import NotGiven, FileTypes, NotGivenOr, HeadersLike from .._compat import parse_date as parse_date, parse_datetime as parse_datetime _T = TypeVar("_T") @@ -370,7 +370,6 @@ def file_from_path(path: str) -> FileTypes: def get_required_header(headers: HeadersLike, header: str) -> str: lower_header = header.lower() if isinstance(headers, Mapping): - headers = cast(Headers, headers) for k, v in headers.items(): if k.lower() == lower_header and isinstance(v, str): return v From b0af4429665835c6ad3645f3a8c45ec539fefab1 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 24 May 2024 15:54:18 +0100 Subject: [PATCH 006/192] docs(contributing): update references to rye-up.com (#1445) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 354d21b2d2..0f1f31488e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ ### With Rye -We use [Rye](https://rye-up.com/) to manage dependencies so we highly recommend [installing it](https://rye-up.com/guide/installation/) as it will automatically provision a Python environment with the expected Python version. +We use [Rye](https://rye.astral.sh/) to manage dependencies so we highly recommend [installing it](https://rye.astral.sh/guide/installation/) as it will automatically provision a Python environment with the expected Python version. 
After installing Rye, you'll just have to run this command: From d0256e0d47f1e9fa2997414b44fe147427a82960 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 28 May 2024 05:22:34 -0400 Subject: [PATCH 007/192] chore: add missing __all__ definitions (#1451) --- src/openai/types/fine_tuning/fine_tuning_job_integration.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/openai/types/fine_tuning/fine_tuning_job_integration.py b/src/openai/types/fine_tuning/fine_tuning_job_integration.py index 8076313cae..4904b85c11 100644 --- a/src/openai/types/fine_tuning/fine_tuning_job_integration.py +++ b/src/openai/types/fine_tuning/fine_tuning_job_integration.py @@ -4,4 +4,6 @@ from .fine_tuning_job_wandb_integration_object import FineTuningJobWandbIntegrationObject +__all__ = ["FineTuningJobIntegration"] + FineTuningJobIntegration = FineTuningJobWandbIntegrationObject From 7dcabfc410cd883a5c0f33364cc26a4308211814 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 28 May 2024 19:57:11 +0100 Subject: [PATCH 008/192] chore(internal): update bootstrap script (#1453) --- scripts/bootstrap | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/bootstrap b/scripts/bootstrap index 29df07e77b..8c5c60eba3 100755 --- a/scripts/bootstrap +++ b/scripts/bootstrap @@ -16,4 +16,4 @@ echo "==> Installing Python dependencies…" # experimental uv support makes installations significantly faster rye config --set-bool behavior.use-uv=true -rye sync +rye sync --all-features From 6086210c7adf304e7f5d7ea0012d3230d11d1b08 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:55:27 +0100 Subject: [PATCH 009/192] feat(api): updates (#1461) --- .stats.yml | 2 +- src/openai/resources/batches.py | 18 ++++--- .../beta/vector_stores/file_batches.py | 24 ++++++++- .../resources/beta/vector_stores/files.py | 24 ++++++++- .../beta/vector_stores/vector_stores.py | 10 ++++ src/openai/resources/files.py | 24 ++++++--- src/openai/resources/fine_tuning/jobs/jobs.py | 10 ++++ src/openai/types/batch_create_params.py | 2 +- .../types/beta/assistant_create_params.py | 42 ++++++++++++++++ .../types/beta/assistant_stream_event.py | 12 +++++ src/openai/types/beta/file_search_tool.py | 20 +++++++- .../types/beta/file_search_tool_param.py | 19 ++++++- .../beta/thread_create_and_run_params.py | 43 ++++++++++++++++ src/openai/types/beta/thread_create_params.py | 42 ++++++++++++++++ .../types/beta/vector_store_create_params.py | 48 +++++++++++++++++- .../vector_stores/file_batch_create_params.py | 50 +++++++++++++++++-- .../beta/vector_stores/file_create_params.py | 49 +++++++++++++++++- .../beta/vector_stores/vector_store_file.py | 47 +++++++++++++++-- ...chat_completion_assistant_message_param.py | 2 +- .../types/chat/completion_create_params.py | 5 +- src/openai/types/file_create_params.py | 2 +- .../types/fine_tuning/job_create_params.py | 5 ++ .../types/shared/function_definition.py | 5 +- .../shared_params/function_definition.py | 5 +- tests/api_resources/beta/test_assistants.py | 2 + tests/api_resources/beta/test_threads.py | 6 +++ .../api_resources/beta/test_vector_stores.py | 2 + .../beta/vector_stores/test_file_batches.py | 18 +++++++ .../beta/vector_stores/test_files.py | 18 +++++++ 29 files changed, 515 insertions(+), 41 deletions(-) diff --git a/.stats.yml b/.stats.yml index 2e5c705a0d..11d2b0b181 100644 --- a/.stats.yml +++ b/.stats.yml @@ 
-1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-363dd904e5d6e65b3a323fc88e6b502fb23a6aa319be219273e3ee47c7530993.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0577fd0d08da6b867b002a5accd45f7116ef91c4940b41cf45dc479938c77163.yml diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py index db4c4da235..7152fac622 100644 --- a/src/openai/resources/batches.py +++ b/src/openai/resources/batches.py @@ -68,7 +68,7 @@ def create( for how to upload a file. Your input file must be formatted as a - [JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput), + [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. @@ -195,8 +195,11 @@ def cancel( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Batch: - """ - Cancels an in-progress batch. + """Cancels an in-progress batch. + + The batch will be in status `cancelling` for up to + 10 minutes, before changing to `cancelled`, where it will have partial results + (if any) available in the output file. Args: extra_headers: Send extra headers @@ -259,7 +262,7 @@ async def create( for how to upload a file. Your input file must be formatted as a - [JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput), + [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. @@ -386,8 +389,11 @@ async def cancel( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Batch: - """ - Cancels an in-progress batch. + """Cancels an in-progress batch. + + The batch will be in status `cancelling` for up to + 10 minutes, before changing to `cancelled`, where it will have partial results + (if any) available in the output file. Args: extra_headers: Send extra headers diff --git a/src/openai/resources/beta/vector_stores/file_batches.py b/src/openai/resources/beta/vector_stores/file_batches.py index 38a2799383..35772c4f9b 100644 --- a/src/openai/resources/beta/vector_stores/file_batches.py +++ b/src/openai/resources/beta/vector_stores/file_batches.py @@ -42,6 +42,7 @@ def create( vector_store_id: str, *, file_ids: List[str], + chunking_strategy: file_batch_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -57,6 +58,9 @@ def create( the vector store should use. Useful for tools like `file_search` that can access files. + chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` + strategy. 
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -70,7 +74,13 @@ def create( extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} return self._post( f"/vector_stores/{vector_store_id}/file_batches", - body=maybe_transform({"file_ids": file_ids}, file_batch_create_params.FileBatchCreateParams), + body=maybe_transform( + { + "file_ids": file_ids, + "chunking_strategy": chunking_strategy, + }, + file_batch_create_params.FileBatchCreateParams, + ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -242,6 +252,7 @@ async def create( vector_store_id: str, *, file_ids: List[str], + chunking_strategy: file_batch_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -257,6 +268,9 @@ async def create( the vector store should use. Useful for tools like `file_search` that can access files. + chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` + strategy. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -270,7 +284,13 @@ async def create( extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} return await self._post( f"/vector_stores/{vector_store_id}/file_batches", - body=await async_maybe_transform({"file_ids": file_ids}, file_batch_create_params.FileBatchCreateParams), + body=await async_maybe_transform( + { + "file_ids": file_ids, + "chunking_strategy": chunking_strategy, + }, + file_batch_create_params.FileBatchCreateParams, + ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/openai/resources/beta/vector_stores/files.py b/src/openai/resources/beta/vector_stores/files.py index e1c788a1fd..c1097baa72 100644 --- a/src/openai/resources/beta/vector_stores/files.py +++ b/src/openai/resources/beta/vector_stores/files.py @@ -41,6 +41,7 @@ def create( vector_store_id: str, *, file_id: str, + chunking_strategy: file_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -58,6 +59,9 @@ def create( vector store should use. Useful for tools like `file_search` that can access files. + chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` + strategy. 
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -71,7 +75,13 @@ def create( extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} return self._post( f"/vector_stores/{vector_store_id}/files", - body=maybe_transform({"file_id": file_id}, file_create_params.FileCreateParams), + body=maybe_transform( + { + "file_id": file_id, + "chunking_strategy": chunking_strategy, + }, + file_create_params.FileCreateParams, + ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -242,6 +252,7 @@ async def create( vector_store_id: str, *, file_id: str, + chunking_strategy: file_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -259,6 +270,9 @@ async def create( vector store should use. Useful for tools like `file_search` that can access files. + chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` + strategy. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -272,7 +286,13 @@ async def create( extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} return await self._post( f"/vector_stores/{vector_store_id}/files", - body=await async_maybe_transform({"file_id": file_id}, file_create_params.FileCreateParams), + body=await async_maybe_transform( + { + "file_id": file_id, + "chunking_strategy": chunking_strategy, + }, + file_create_params.FileCreateParams, + ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/openai/resources/beta/vector_stores/vector_stores.py b/src/openai/resources/beta/vector_stores/vector_stores.py index 8a177c2864..cbd56a0693 100644 --- a/src/openai/resources/beta/vector_stores/vector_stores.py +++ b/src/openai/resources/beta/vector_stores/vector_stores.py @@ -64,6 +64,7 @@ def with_streaming_response(self) -> VectorStoresWithStreamingResponse: def create( self, *, + chunking_strategy: vector_store_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, @@ -79,6 +80,9 @@ def create( Create a vector store. Args: + chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` + strategy. Only applicable if `file_ids` is non-empty. + expires_after: The expiration policy for a vector store. 
file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that @@ -105,6 +109,7 @@ def create( "/vector_stores", body=maybe_transform( { + "chunking_strategy": chunking_strategy, "expires_after": expires_after, "file_ids": file_ids, "metadata": metadata, @@ -326,6 +331,7 @@ def with_streaming_response(self) -> AsyncVectorStoresWithStreamingResponse: async def create( self, *, + chunking_strategy: vector_store_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, @@ -341,6 +347,9 @@ async def create( Create a vector store. Args: + chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` + strategy. Only applicable if `file_ids` is non-empty. + expires_after: The expiration policy for a vector store. file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that @@ -367,6 +376,7 @@ async def create( "/vector_stores", body=await async_maybe_transform( { + "chunking_strategy": chunking_strategy, "expires_after": expires_after, "file_ids": file_ids, "metadata": metadata, diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py index f92e901184..cf41ae6ae2 100644 --- a/src/openai/resources/files.py +++ b/src/openai/resources/files.py @@ -51,7 +51,7 @@ def create( self, *, file: FileTypes, - purpose: Literal["assistants", "batch", "fine-tune"], + purpose: Literal["assistants", "batch", "fine-tune", "vision"], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -70,9 +70,15 @@ def create( [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for details. - The Fine-tuning API only supports `.jsonl` files. + The Fine-tuning API only supports `.jsonl` files. The input also has certain + required formats for fine-tuning + [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + models. - The Batch API only supports `.jsonl` files up to 100 MB in size. + The Batch API only supports `.jsonl` files up to 100 MB in size. The input also + has a specific required + [format](https://platform.openai.com/docs/api-reference/batch/request-input). Please [contact us](https://help.openai.com/) if you need to increase these storage limits. @@ -305,7 +311,7 @@ async def create( self, *, file: FileTypes, - purpose: Literal["assistants", "batch", "fine-tune"], + purpose: Literal["assistants", "batch", "fine-tune", "vision"], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -324,9 +330,15 @@ async def create( [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for details. - The Fine-tuning API only supports `.jsonl` files. + The Fine-tuning API only supports `.jsonl` files. 
The input also has certain + required formats for fine-tuning + [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + models. - The Batch API only supports `.jsonl` files up to 100 MB in size. + The Batch API only supports `.jsonl` files up to 100 MB in size. The input also + has a specific required + [format](https://platform.openai.com/docs/api-reference/batch/request-input). Please [contact us](https://help.openai.com/) if you need to increase these storage limits. diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py index f38956e6be..14b384a88d 100644 --- a/src/openai/resources/fine_tuning/jobs/jobs.py +++ b/src/openai/resources/fine_tuning/jobs/jobs.py @@ -87,6 +87,11 @@ def create( Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + The contents of the file should differ depending on if the model uses the + [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + format. + See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. @@ -362,6 +367,11 @@ async def create( Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + The contents of the file should differ depending on if the model uses the + [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + format. + See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. diff --git a/src/openai/types/batch_create_params.py b/src/openai/types/batch_create_params.py index 140380d417..55517d285b 100644 --- a/src/openai/types/batch_create_params.py +++ b/src/openai/types/batch_create_params.py @@ -30,7 +30,7 @@ class BatchCreateParams(TypedDict, total=False): for how to upload a file. Your input file must be formatted as a - [JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput), + [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. 
""" diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index 67e7f7e78c..c9b0317831 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -14,6 +14,10 @@ "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", "ToolResourcesFileSearchVectorStore", + "ToolResourcesFileSearchVectorStoreChunkingStrategy", + "ToolResourcesFileSearchVectorStoreChunkingStrategyAuto", + "ToolResourcesFileSearchVectorStoreChunkingStrategyStatic", + "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic", ] @@ -134,7 +138,45 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False): """ +class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False): + chunk_overlap_tokens: Required[int] + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: Required[int] + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False): + static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic] + + type: Required[Literal["static"]] + """Always `static`.""" + + +ToolResourcesFileSearchVectorStoreChunkingStrategy = Union[ + ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic +] + + class ToolResourcesFileSearchVectorStore(TypedDict, total=False): + chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy + """The chunking strategy used to chunk the file(s). + + If not set, will use the `auto` strategy. + """ + file_ids: List[str] """ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to diff --git a/src/openai/types/beta/assistant_stream_event.py b/src/openai/types/beta/assistant_stream_event.py index 91925e93b3..de66888403 100644 --- a/src/openai/types/beta/assistant_stream_event.py +++ b/src/openai/types/beta/assistant_stream_event.py @@ -21,6 +21,7 @@ "ThreadRunInProgress", "ThreadRunRequiresAction", "ThreadRunCompleted", + "ThreadRunIncomplete", "ThreadRunFailed", "ThreadRunCancelling", "ThreadRunCancelled", @@ -101,6 +102,16 @@ class ThreadRunCompleted(BaseModel): event: Literal["thread.run.completed"] +class ThreadRunIncomplete(BaseModel): + data: Run + """ + Represents an execution run on a + [thread](https://platform.openai.com/docs/api-reference/threads). + """ + + event: Literal["thread.run.incomplete"] + + class ThreadRunFailed(BaseModel): data: Run """ @@ -257,6 +268,7 @@ class ErrorEvent(BaseModel): ThreadRunInProgress, ThreadRunRequiresAction, ThreadRunCompleted, + ThreadRunIncomplete, ThreadRunFailed, ThreadRunCancelling, ThreadRunCancelled, diff --git a/src/openai/types/beta/file_search_tool.py b/src/openai/types/beta/file_search_tool.py index eea55ea6ac..e2711b9b3d 100644 --- a/src/openai/types/beta/file_search_tool.py +++ b/src/openai/types/beta/file_search_tool.py @@ -1,12 +1,30 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+from typing import Optional from typing_extensions import Literal from ..._models import BaseModel -__all__ = ["FileSearchTool"] +__all__ = ["FileSearchTool", "FileSearch"] + + +class FileSearch(BaseModel): + max_num_results: Optional[int] = None + """The maximum number of results the file search tool should output. + + The default is 20 for gpt-4\\** models and 5 for gpt-3.5-turbo. This number should + be between 1 and 50 inclusive. + + Note that the file search tool may output fewer than `max_num_results` results. + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) + for more information. + """ class FileSearchTool(BaseModel): type: Literal["file_search"] """The type of tool being defined: `file_search`""" + + file_search: Optional[FileSearch] = None + """Overrides for the file search tool.""" diff --git a/src/openai/types/beta/file_search_tool_param.py b/src/openai/types/beta/file_search_tool_param.py index d33fd06da4..115f86a444 100644 --- a/src/openai/types/beta/file_search_tool_param.py +++ b/src/openai/types/beta/file_search_tool_param.py @@ -4,9 +4,26 @@ from typing_extensions import Literal, Required, TypedDict -__all__ = ["FileSearchToolParam"] +__all__ = ["FileSearchToolParam", "FileSearch"] + + +class FileSearch(TypedDict, total=False): + max_num_results: int + """The maximum number of results the file search tool should output. + + The default is 20 for gpt-4\\** models and 5 for gpt-3.5-turbo. This number should + be between 1 and 50 inclusive. + + Note that the file search tool may output fewer than `max_num_results` results. + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) + for more information. + """ class FileSearchToolParam(TypedDict, total=False): type: Required[Literal["file_search"]] """The type of tool being defined: `file_search`""" + + file_search: FileSearch + """Overrides for the file search tool.""" diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index 6efe6e7aee..436c2daddf 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -22,6 +22,10 @@ "ThreadToolResourcesCodeInterpreter", "ThreadToolResourcesFileSearch", "ThreadToolResourcesFileSearchVectorStore", + "ThreadToolResourcesFileSearchVectorStoreChunkingStrategy", + "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto", + "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic", + "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", @@ -220,7 +224,46 @@ class ThreadToolResourcesCodeInterpreter(TypedDict, total=False): """ +class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" + + +class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False): + chunk_overlap_tokens: Required[int] + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: Required[int] + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. 
+ """ + + +class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False): + static: Required[ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic] + + type: Required[Literal["static"]] + """Always `static`.""" + + +ThreadToolResourcesFileSearchVectorStoreChunkingStrategy = Union[ + ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto, + ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic, +] + + class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False): + chunking_strategy: ThreadToolResourcesFileSearchVectorStoreChunkingStrategy + """The chunking strategy used to chunk the file(s). + + If not set, will use the `auto` strategy. + """ + file_ids: List[str] """ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to diff --git a/src/openai/types/beta/thread_create_params.py b/src/openai/types/beta/thread_create_params.py index ccf50d58dc..5072ed12d9 100644 --- a/src/openai/types/beta/thread_create_params.py +++ b/src/openai/types/beta/thread_create_params.py @@ -18,6 +18,10 @@ "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", "ToolResourcesFileSearchVectorStore", + "ToolResourcesFileSearchVectorStoreChunkingStrategy", + "ToolResourcesFileSearchVectorStoreChunkingStrategyAuto", + "ToolResourcesFileSearchVectorStoreChunkingStrategyStatic", + "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic", ] @@ -90,7 +94,45 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False): """ +class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False): + chunk_overlap_tokens: Required[int] + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: Required[int] + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False): + static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic] + + type: Required[Literal["static"]] + """Always `static`.""" + + +ToolResourcesFileSearchVectorStoreChunkingStrategy = Union[ + ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic +] + + class ToolResourcesFileSearchVectorStore(TypedDict, total=False): + chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy + """The chunking strategy used to chunk the file(s). + + If not set, will use the `auto` strategy. 
+ """ + file_ids: List[str] """ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to diff --git a/src/openai/types/beta/vector_store_create_params.py b/src/openai/types/beta/vector_store_create_params.py index f1a3abcbdf..365d9923b8 100644 --- a/src/openai/types/beta/vector_store_create_params.py +++ b/src/openai/types/beta/vector_store_create_params.py @@ -2,13 +2,27 @@ from __future__ import annotations -from typing import List, Optional +from typing import List, Union, Optional from typing_extensions import Literal, Required, TypedDict -__all__ = ["VectorStoreCreateParams", "ExpiresAfter"] +__all__ = [ + "VectorStoreCreateParams", + "ChunkingStrategy", + "ChunkingStrategyAuto", + "ChunkingStrategyStatic", + "ChunkingStrategyStaticStatic", + "ExpiresAfter", +] class VectorStoreCreateParams(TypedDict, total=False): + chunking_strategy: ChunkingStrategy + """The chunking strategy used to chunk the file(s). + + If not set, will use the `auto` strategy. Only applicable if `file_ids` is + non-empty. + """ + expires_after: ExpiresAfter """The expiration policy for a vector store.""" @@ -31,6 +45,36 @@ class VectorStoreCreateParams(TypedDict, total=False): """The name of the vector store.""" +class ChunkingStrategyAuto(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" + + +class ChunkingStrategyStaticStatic(TypedDict, total=False): + chunk_overlap_tokens: Required[int] + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: Required[int] + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ + + +class ChunkingStrategyStatic(TypedDict, total=False): + static: Required[ChunkingStrategyStaticStatic] + + type: Required[Literal["static"]] + """Always `static`.""" + + +ChunkingStrategy = Union[ChunkingStrategyAuto, ChunkingStrategyStatic] + + class ExpiresAfter(TypedDict, total=False): anchor: Required[Literal["last_active_at"]] """Anchor timestamp after which the expiration policy applies. diff --git a/src/openai/types/beta/vector_stores/file_batch_create_params.py b/src/openai/types/beta/vector_stores/file_batch_create_params.py index 0882829732..9b98d0699e 100644 --- a/src/openai/types/beta/vector_stores/file_batch_create_params.py +++ b/src/openai/types/beta/vector_stores/file_batch_create_params.py @@ -2,10 +2,16 @@ from __future__ import annotations -from typing import List -from typing_extensions import Required, TypedDict +from typing import List, Union +from typing_extensions import Literal, Required, TypedDict -__all__ = ["FileBatchCreateParams"] +__all__ = [ + "FileBatchCreateParams", + "ChunkingStrategy", + "ChunkingStrategyAutoChunkingStrategyRequestParam", + "ChunkingStrategyStaticChunkingStrategyRequestParam", + "ChunkingStrategyStaticChunkingStrategyRequestParamStatic", +] class FileBatchCreateParams(TypedDict, total=False): @@ -15,3 +21,41 @@ class FileBatchCreateParams(TypedDict, total=False): the vector store should use. Useful for tools like `file_search` that can access files. """ + + chunking_strategy: ChunkingStrategy + """The chunking strategy used to chunk the file(s). + + If not set, will use the `auto` strategy. 
+ """ + + +class ChunkingStrategyAutoChunkingStrategyRequestParam(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" + + +class ChunkingStrategyStaticChunkingStrategyRequestParamStatic(TypedDict, total=False): + chunk_overlap_tokens: Required[int] + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: Required[int] + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ + + +class ChunkingStrategyStaticChunkingStrategyRequestParam(TypedDict, total=False): + static: Required[ChunkingStrategyStaticChunkingStrategyRequestParamStatic] + + type: Required[Literal["static"]] + """Always `static`.""" + + +ChunkingStrategy = Union[ + ChunkingStrategyAutoChunkingStrategyRequestParam, ChunkingStrategyStaticChunkingStrategyRequestParam +] diff --git a/src/openai/types/beta/vector_stores/file_create_params.py b/src/openai/types/beta/vector_stores/file_create_params.py index 2fee588abf..2ae63f1462 100644 --- a/src/openai/types/beta/vector_stores/file_create_params.py +++ b/src/openai/types/beta/vector_stores/file_create_params.py @@ -2,9 +2,16 @@ from __future__ import annotations -from typing_extensions import Required, TypedDict +from typing import Union +from typing_extensions import Literal, Required, TypedDict -__all__ = ["FileCreateParams"] +__all__ = [ + "FileCreateParams", + "ChunkingStrategy", + "ChunkingStrategyAutoChunkingStrategyRequestParam", + "ChunkingStrategyStaticChunkingStrategyRequestParam", + "ChunkingStrategyStaticChunkingStrategyRequestParamStatic", +] class FileCreateParams(TypedDict, total=False): @@ -14,3 +21,41 @@ class FileCreateParams(TypedDict, total=False): vector store should use. Useful for tools like `file_search` that can access files. """ + + chunking_strategy: ChunkingStrategy + """The chunking strategy used to chunk the file(s). + + If not set, will use the `auto` strategy. + """ + + +class ChunkingStrategyAutoChunkingStrategyRequestParam(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" + + +class ChunkingStrategyStaticChunkingStrategyRequestParamStatic(TypedDict, total=False): + chunk_overlap_tokens: Required[int] + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: Required[int] + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ + + +class ChunkingStrategyStaticChunkingStrategyRequestParam(TypedDict, total=False): + static: Required[ChunkingStrategyStaticChunkingStrategyRequestParamStatic] + + type: Required[Literal["static"]] + """Always `static`.""" + + +ChunkingStrategy = Union[ + ChunkingStrategyAutoChunkingStrategyRequestParam, ChunkingStrategyStaticChunkingStrategyRequestParam +] diff --git a/src/openai/types/beta/vector_stores/vector_store_file.py b/src/openai/types/beta/vector_stores/vector_store_file.py index 3fab489602..d9d7625f86 100644 --- a/src/openai/types/beta/vector_stores/vector_store_file.py +++ b/src/openai/types/beta/vector_stores/vector_store_file.py @@ -1,11 +1,19 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import Optional -from typing_extensions import Literal +from typing import Union, Optional +from typing_extensions import Literal, Annotated +from ...._utils import PropertyInfo from ...._models import BaseModel -__all__ = ["VectorStoreFile", "LastError"] +__all__ = [ + "VectorStoreFile", + "LastError", + "ChunkingStrategy", + "ChunkingStrategyStatic", + "ChunkingStrategyStaticStatic", + "ChunkingStrategyOther", +] class LastError(BaseModel): @@ -16,6 +24,36 @@ class LastError(BaseModel): """A human-readable description of the error.""" +class ChunkingStrategyStaticStatic(BaseModel): + chunk_overlap_tokens: int + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: int + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ + + +class ChunkingStrategyStatic(BaseModel): + static: ChunkingStrategyStaticStatic + + type: Literal["static"] + """Always `static`.""" + + +class ChunkingStrategyOther(BaseModel): + type: Literal["other"] + """Always `other`.""" + + +ChunkingStrategy = Annotated[Union[ChunkingStrategyStatic, ChunkingStrategyOther], PropertyInfo(discriminator="type")] + + class VectorStoreFile(BaseModel): id: str """The identifier, which can be referenced in API endpoints.""" @@ -52,3 +90,6 @@ class VectorStoreFile(BaseModel): that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. """ + + chunking_strategy: Optional[ChunkingStrategy] = None + """The strategy used to chunk the file.""" diff --git a/src/openai/types/chat/chat_completion_assistant_message_param.py b/src/openai/types/chat/chat_completion_assistant_message_param.py index e1e399486e..8f7357b96c 100644 --- a/src/openai/types/chat/chat_completion_assistant_message_param.py +++ b/src/openai/types/chat/chat_completion_assistant_message_param.py @@ -33,7 +33,7 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): Required unless `tool_calls` or `function_call` is specified. """ - function_call: FunctionCall + function_call: Optional[FunctionCall] """Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 226cf15882..a25f2fdd8f 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -219,9 +219,8 @@ class Function(TypedDict, total=False): parameters: shared_params.FunctionParameters """The parameters the functions accepts, described as a JSON Schema object. - See the - [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) - for examples, and the + See the [guide](https://platform.openai.com/docs/guides/function-calling) for + examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. 
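For context on the `parameters` docstrings above: a minimal, illustrative sketch of passing a function definition whose parameters are described as a JSON Schema object to Chat Completions. The `get_weather` function, its schema, and the model name are assumptions made for the example and are not taken from this patch.

    import openai

    client = openai.OpenAI()  # reads the API key from OPENAI_API_KEY

    completion = client.chat.completions.create(
        model="gpt-4o",  # illustrative model choice
        messages=[{"role": "user", "content": "What's the weather in Paris?"}],
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Look up the current weather for a city.",
                    # The parameters are a JSON Schema object, as the docstring describes.
                    "parameters": {
                        "type": "object",
                        "properties": {"city": {"type": "string"}},
                        "required": ["city"],
                    },
                },
            }
        ],
    )
    print(completion.choices[0].message)
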
diff --git a/src/openai/types/file_create_params.py b/src/openai/types/file_create_params.py index caa913d4d2..8b1c296f39 100644 --- a/src/openai/types/file_create_params.py +++ b/src/openai/types/file_create_params.py @@ -13,7 +13,7 @@ class FileCreateParams(TypedDict, total=False): file: Required[FileTypes] """The File object (not file name) to be uploaded.""" - purpose: Required[Literal["assistants", "batch", "fine-tune"]] + purpose: Required[Literal["assistants", "batch", "fine-tune", "vision"]] """The intended purpose of the uploaded file. Use "assistants" for diff --git a/src/openai/types/fine_tuning/job_create_params.py b/src/openai/types/fine_tuning/job_create_params.py index 1925f90d12..c5196e4406 100644 --- a/src/openai/types/fine_tuning/job_create_params.py +++ b/src/openai/types/fine_tuning/job_create_params.py @@ -25,6 +25,11 @@ class JobCreateParams(TypedDict, total=False): Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + The contents of the file should differ depending on if the model uses the + [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + format. + See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. """ diff --git a/src/openai/types/shared/function_definition.py b/src/openai/types/shared/function_definition.py index a39116d6bd..49f5e67c50 100644 --- a/src/openai/types/shared/function_definition.py +++ b/src/openai/types/shared/function_definition.py @@ -25,9 +25,8 @@ class FunctionDefinition(BaseModel): parameters: Optional[FunctionParameters] = None """The parameters the functions accepts, described as a JSON Schema object. - See the - [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) - for examples, and the + See the [guide](https://platform.openai.com/docs/guides/function-calling) for + examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. diff --git a/src/openai/types/shared_params/function_definition.py b/src/openai/types/shared_params/function_definition.py index 58d0203b4f..29ccc548d4 100644 --- a/src/openai/types/shared_params/function_definition.py +++ b/src/openai/types/shared_params/function_definition.py @@ -26,9 +26,8 @@ class FunctionDefinition(TypedDict, total=False): parameters: shared_params.FunctionParameters """The parameters the functions accepts, described as a JSON Schema object. - See the - [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) - for examples, and the + See the [guide](https://platform.openai.com/docs/guides/function-calling) for + examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. 
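Related to the fine-tuning `training_file` docstring above: a small, illustrative sketch of writing one chat-format JSONL record, uploading it with purpose `fine-tune`, and starting a job. The file name, example messages, and model name are assumptions made for the example and are not taken from this patch.

    import json

    import openai

    client = openai.OpenAI()

    # One chat-format training record; a real dataset would contain many such lines.
    record = {
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "What is 2 + 2?"},
            {"role": "assistant", "content": "4"},
        ]
    }
    with open("training.jsonl", "w") as f:
        f.write(json.dumps(record) + "\n")

    # Upload the JSONL file with purpose `fine-tune`, then reference it in the job.
    training_file = client.files.create(file=open("training.jsonl", "rb"), purpose="fine-tune")
    job = client.fine_tuning.jobs.create(training_file=training_file.id, model="gpt-3.5-turbo")
    print(job.status)
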
diff --git a/tests/api_resources/beta/test_assistants.py b/tests/api_resources/beta/test_assistants.py index a92acb2ca5..dd0ce9266e 100644 --- a/tests/api_resources/beta/test_assistants.py +++ b/tests/api_resources/beta/test_assistants.py @@ -45,6 +45,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "vector_stores": [ { "file_ids": ["string", "string", "string"], + "chunking_strategy": {"type": "auto"}, "metadata": {}, } ], @@ -276,6 +277,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "vector_stores": [ { "file_ids": ["string", "string", "string"], + "chunking_strategy": {"type": "auto"}, "metadata": {}, } ], diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py index 02c6e2586e..041562cb38 100644 --- a/tests/api_resources/beta/test_threads.py +++ b/tests/api_resources/beta/test_threads.py @@ -132,6 +132,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "vector_stores": [ { "file_ids": ["string", "string", "string"], + "chunking_strategy": {"type": "auto"}, "metadata": {}, } ], @@ -408,6 +409,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) "vector_stores": [ { "file_ids": ["string", "string", "string"], + "chunking_strategy": {"type": "auto"}, "metadata": {}, } ], @@ -576,6 +578,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) "vector_stores": [ { "file_ids": ["string", "string", "string"], + "chunking_strategy": {"type": "auto"}, "metadata": {}, } ], @@ -737,6 +740,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "vector_stores": [ { "file_ids": ["string", "string", "string"], + "chunking_strategy": {"type": "auto"}, "metadata": {}, } ], @@ -1013,6 +1017,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie "vector_stores": [ { "file_ids": ["string", "string", "string"], + "chunking_strategy": {"type": "auto"}, "metadata": {}, } ], @@ -1181,6 +1186,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie "vector_stores": [ { "file_ids": ["string", "string", "string"], + "chunking_strategy": {"type": "auto"}, "metadata": {}, } ], diff --git a/tests/api_resources/beta/test_vector_stores.py b/tests/api_resources/beta/test_vector_stores.py index e671c96a45..39fdb9d1d4 100644 --- a/tests/api_resources/beta/test_vector_stores.py +++ b/tests/api_resources/beta/test_vector_stores.py @@ -29,6 +29,7 @@ def test_method_create(self, client: OpenAI) -> None: @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: vector_store = client.beta.vector_stores.create( + chunking_strategy={"type": "auto"}, expires_after={ "anchor": "last_active_at", "days": 1, @@ -233,6 +234,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: vector_store = await async_client.beta.vector_stores.create( + chunking_strategy={"type": "auto"}, expires_after={ "anchor": "last_active_at", "days": 1, diff --git a/tests/api_resources/beta/vector_stores/test_file_batches.py b/tests/api_resources/beta/vector_stores/test_file_batches.py index 9854d1a138..631f2669ad 100644 --- a/tests/api_resources/beta/vector_stores/test_file_batches.py +++ b/tests/api_resources/beta/vector_stores/test_file_batches.py @@ -29,6 +29,15 @@ def test_method_create(self, client: OpenAI) -> None: ) 
assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"]) + @parametrize + def test_method_create_with_all_params(self, client: OpenAI) -> None: + file_batch = client.beta.vector_stores.file_batches.create( + "vs_abc123", + file_ids=["string"], + chunking_strategy={"type": "auto"}, + ) + assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"]) + @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.beta.vector_stores.file_batches.with_raw_response.create( @@ -232,6 +241,15 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: ) assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"]) + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: + file_batch = await async_client.beta.vector_stores.file_batches.create( + "vs_abc123", + file_ids=["string"], + chunking_strategy={"type": "auto"}, + ) + assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"]) + @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.vector_stores.file_batches.with_raw_response.create( diff --git a/tests/api_resources/beta/vector_stores/test_files.py b/tests/api_resources/beta/vector_stores/test_files.py index 58301e2d37..36622e699b 100644 --- a/tests/api_resources/beta/vector_stores/test_files.py +++ b/tests/api_resources/beta/vector_stores/test_files.py @@ -29,6 +29,15 @@ def test_method_create(self, client: OpenAI) -> None: ) assert_matches_type(VectorStoreFile, file, path=["response"]) + @parametrize + def test_method_create_with_all_params(self, client: OpenAI) -> None: + file = client.beta.vector_stores.files.create( + "vs_abc123", + file_id="string", + chunking_strategy={"type": "auto"}, + ) + assert_matches_type(VectorStoreFile, file, path=["response"]) + @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.beta.vector_stores.files.with_raw_response.create( @@ -221,6 +230,15 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: ) assert_matches_type(VectorStoreFile, file, path=["response"]) + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: + file = await async_client.beta.vector_stores.files.create( + "vs_abc123", + file_id="string", + chunking_strategy={"type": "auto"}, + ) + assert_matches_type(VectorStoreFile, file, path=["response"]) + @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.vector_stores.files.with_raw_response.create( From f387b1b066998518a8c8268932e1fe5cce5d940d Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 5 Jun 2024 05:33:56 -0400 Subject: [PATCH 010/192] chore(internal): minor change to tests (#1466) --- tests/api_resources/audio/test_speech.py | 16 ++++++------ tests/api_resources/test_completions.py | 32 ++++++++++++------------ 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/tests/api_resources/audio/test_speech.py b/tests/api_resources/audio/test_speech.py index 781ebeceb9..1f04a66435 100644 --- a/tests/api_resources/audio/test_speech.py +++ b/tests/api_resources/audio/test_speech.py @@ -27,7 +27,7 @@ def test_method_create(self, client: OpenAI, respx_mock: MockRouter) -> None: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = 
client.audio.speech.create( input="string", - model="string", + model="tts-1", voice="alloy", ) assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent) @@ -39,7 +39,7 @@ def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRou respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = client.audio.speech.create( input="string", - model="string", + model="tts-1", voice="alloy", response_format="mp3", speed=0.25, @@ -54,7 +54,7 @@ def test_raw_response_create(self, client: OpenAI, respx_mock: MockRouter) -> No response = client.audio.speech.with_raw_response.create( input="string", - model="string", + model="tts-1", voice="alloy", ) @@ -69,7 +69,7 @@ def test_streaming_response_create(self, client: OpenAI, respx_mock: MockRouter) respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) with client.audio.speech.with_streaming_response.create( input="string", - model="string", + model="tts-1", voice="alloy", ) as response: assert not response.is_closed @@ -90,7 +90,7 @@ async def test_method_create(self, async_client: AsyncOpenAI, respx_mock: MockRo respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = await async_client.audio.speech.create( input="string", - model="string", + model="tts-1", voice="alloy", ) assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent) @@ -102,7 +102,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, re respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = await async_client.audio.speech.create( input="string", - model="string", + model="tts-1", voice="alloy", response_format="mp3", speed=0.25, @@ -117,7 +117,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI, respx_mock: response = await async_client.audio.speech.with_raw_response.create( input="string", - model="string", + model="tts-1", voice="alloy", ) @@ -132,7 +132,7 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI, respx_ respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) async with async_client.audio.speech.with_streaming_response.create( input="string", - model="string", + model="tts-1", voice="alloy", ) as response: assert not response.is_closed diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py index 69d914200f..fa7ae52131 100644 --- a/tests/api_resources/test_completions.py +++ b/tests/api_resources/test_completions.py @@ -20,7 +20,7 @@ class TestCompletions: @parametrize def test_method_create_overload_1(self, client: OpenAI) -> None: completion = client.completions.create( - model="string", + model="gpt-3.5-turbo-instruct", prompt="This is a test.", ) assert_matches_type(Completion, completion, path=["response"]) @@ -28,7 +28,7 @@ def test_method_create_overload_1(self, client: OpenAI) -> None: @parametrize def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: completion = client.completions.create( - model="string", + model="gpt-3.5-turbo-instruct", prompt="This is a test.", best_of=0, echo=True, @@ -52,7 +52,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: @parametrize def test_raw_response_create_overload_1(self, client: OpenAI) -> None: response = client.completions.with_raw_response.create( - model="string", + model="gpt-3.5-turbo-instruct", 
prompt="This is a test.", ) @@ -64,7 +64,7 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create_overload_1(self, client: OpenAI) -> None: with client.completions.with_streaming_response.create( - model="string", + model="gpt-3.5-turbo-instruct", prompt="This is a test.", ) as response: assert not response.is_closed @@ -78,7 +78,7 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None: @parametrize def test_method_create_overload_2(self, client: OpenAI) -> None: completion_stream = client.completions.create( - model="string", + model="gpt-3.5-turbo-instruct", prompt="This is a test.", stream=True, ) @@ -87,7 +87,7 @@ def test_method_create_overload_2(self, client: OpenAI) -> None: @parametrize def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: completion_stream = client.completions.create( - model="string", + model="gpt-3.5-turbo-instruct", prompt="This is a test.", stream=True, best_of=0, @@ -111,7 +111,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: @parametrize def test_raw_response_create_overload_2(self, client: OpenAI) -> None: response = client.completions.with_raw_response.create( - model="string", + model="gpt-3.5-turbo-instruct", prompt="This is a test.", stream=True, ) @@ -123,7 +123,7 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create_overload_2(self, client: OpenAI) -> None: with client.completions.with_streaming_response.create( - model="string", + model="gpt-3.5-turbo-instruct", prompt="This is a test.", stream=True, ) as response: @@ -142,7 +142,7 @@ class TestAsyncCompletions: @parametrize async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None: completion = await async_client.completions.create( - model="string", + model="gpt-3.5-turbo-instruct", prompt="This is a test.", ) assert_matches_type(Completion, completion, path=["response"]) @@ -150,7 +150,7 @@ async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None @parametrize async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None: completion = await async_client.completions.create( - model="string", + model="gpt-3.5-turbo-instruct", prompt="This is a test.", best_of=0, echo=True, @@ -174,7 +174,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn @parametrize async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None: response = await async_client.completions.with_raw_response.create( - model="string", + model="gpt-3.5-turbo-instruct", prompt="This is a test.", ) @@ -186,7 +186,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) - @parametrize async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None: async with async_client.completions.with_streaming_response.create( - model="string", + model="gpt-3.5-turbo-instruct", prompt="This is a test.", ) as response: assert not response.is_closed @@ -200,7 +200,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpe @parametrize async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None: completion_stream = await async_client.completions.create( - model="string", + model="gpt-3.5-turbo-instruct", prompt="This is a test.", stream=True, ) @@ -209,7 +209,7 @@ async def 
test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None @parametrize async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None: completion_stream = await async_client.completions.create( - model="string", + model="gpt-3.5-turbo-instruct", prompt="This is a test.", stream=True, best_of=0, @@ -233,7 +233,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn @parametrize async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: response = await async_client.completions.with_raw_response.create( - model="string", + model="gpt-3.5-turbo-instruct", prompt="This is a test.", stream=True, ) @@ -245,7 +245,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) - @parametrize async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: async with async_client.completions.with_streaming_response.create( - model="string", + model="gpt-3.5-turbo-instruct", prompt="This is a test.", stream=True, ) as response: From 2951f876d6d009d6b4fdb891d3e16e40483c4e4a Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 6 Jun 2024 03:39:46 -0400 Subject: [PATCH 011/192] chore(internal): minor refactor of tests (#1471) --- tests/api_resources/audio/test_speech.py | 16 ++++++------ tests/api_resources/test_completions.py | 32 ++++++++++++------------ 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/tests/api_resources/audio/test_speech.py b/tests/api_resources/audio/test_speech.py index 1f04a66435..781ebeceb9 100644 --- a/tests/api_resources/audio/test_speech.py +++ b/tests/api_resources/audio/test_speech.py @@ -27,7 +27,7 @@ def test_method_create(self, client: OpenAI, respx_mock: MockRouter) -> None: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = client.audio.speech.create( input="string", - model="tts-1", + model="string", voice="alloy", ) assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent) @@ -39,7 +39,7 @@ def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRou respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = client.audio.speech.create( input="string", - model="tts-1", + model="string", voice="alloy", response_format="mp3", speed=0.25, @@ -54,7 +54,7 @@ def test_raw_response_create(self, client: OpenAI, respx_mock: MockRouter) -> No response = client.audio.speech.with_raw_response.create( input="string", - model="tts-1", + model="string", voice="alloy", ) @@ -69,7 +69,7 @@ def test_streaming_response_create(self, client: OpenAI, respx_mock: MockRouter) respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) with client.audio.speech.with_streaming_response.create( input="string", - model="tts-1", + model="string", voice="alloy", ) as response: assert not response.is_closed @@ -90,7 +90,7 @@ async def test_method_create(self, async_client: AsyncOpenAI, respx_mock: MockRo respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = await async_client.audio.speech.create( input="string", - model="tts-1", + model="string", voice="alloy", ) assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent) @@ -102,7 +102,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, re 
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = await async_client.audio.speech.create( input="string", - model="tts-1", + model="string", voice="alloy", response_format="mp3", speed=0.25, @@ -117,7 +117,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI, respx_mock: response = await async_client.audio.speech.with_raw_response.create( input="string", - model="tts-1", + model="string", voice="alloy", ) @@ -132,7 +132,7 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI, respx_ respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) async with async_client.audio.speech.with_streaming_response.create( input="string", - model="tts-1", + model="string", voice="alloy", ) as response: assert not response.is_closed diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py index fa7ae52131..69d914200f 100644 --- a/tests/api_resources/test_completions.py +++ b/tests/api_resources/test_completions.py @@ -20,7 +20,7 @@ class TestCompletions: @parametrize def test_method_create_overload_1(self, client: OpenAI) -> None: completion = client.completions.create( - model="gpt-3.5-turbo-instruct", + model="string", prompt="This is a test.", ) assert_matches_type(Completion, completion, path=["response"]) @@ -28,7 +28,7 @@ def test_method_create_overload_1(self, client: OpenAI) -> None: @parametrize def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: completion = client.completions.create( - model="gpt-3.5-turbo-instruct", + model="string", prompt="This is a test.", best_of=0, echo=True, @@ -52,7 +52,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: @parametrize def test_raw_response_create_overload_1(self, client: OpenAI) -> None: response = client.completions.with_raw_response.create( - model="gpt-3.5-turbo-instruct", + model="string", prompt="This is a test.", ) @@ -64,7 +64,7 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create_overload_1(self, client: OpenAI) -> None: with client.completions.with_streaming_response.create( - model="gpt-3.5-turbo-instruct", + model="string", prompt="This is a test.", ) as response: assert not response.is_closed @@ -78,7 +78,7 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None: @parametrize def test_method_create_overload_2(self, client: OpenAI) -> None: completion_stream = client.completions.create( - model="gpt-3.5-turbo-instruct", + model="string", prompt="This is a test.", stream=True, ) @@ -87,7 +87,7 @@ def test_method_create_overload_2(self, client: OpenAI) -> None: @parametrize def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: completion_stream = client.completions.create( - model="gpt-3.5-turbo-instruct", + model="string", prompt="This is a test.", stream=True, best_of=0, @@ -111,7 +111,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: @parametrize def test_raw_response_create_overload_2(self, client: OpenAI) -> None: response = client.completions.with_raw_response.create( - model="gpt-3.5-turbo-instruct", + model="string", prompt="This is a test.", stream=True, ) @@ -123,7 +123,7 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create_overload_2(self, client: OpenAI) -> None: with 
client.completions.with_streaming_response.create( - model="gpt-3.5-turbo-instruct", + model="string", prompt="This is a test.", stream=True, ) as response: @@ -142,7 +142,7 @@ class TestAsyncCompletions: @parametrize async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None: completion = await async_client.completions.create( - model="gpt-3.5-turbo-instruct", + model="string", prompt="This is a test.", ) assert_matches_type(Completion, completion, path=["response"]) @@ -150,7 +150,7 @@ async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None @parametrize async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None: completion = await async_client.completions.create( - model="gpt-3.5-turbo-instruct", + model="string", prompt="This is a test.", best_of=0, echo=True, @@ -174,7 +174,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn @parametrize async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None: response = await async_client.completions.with_raw_response.create( - model="gpt-3.5-turbo-instruct", + model="string", prompt="This is a test.", ) @@ -186,7 +186,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) - @parametrize async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None: async with async_client.completions.with_streaming_response.create( - model="gpt-3.5-turbo-instruct", + model="string", prompt="This is a test.", ) as response: assert not response.is_closed @@ -200,7 +200,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpe @parametrize async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None: completion_stream = await async_client.completions.create( - model="gpt-3.5-turbo-instruct", + model="string", prompt="This is a test.", stream=True, ) @@ -209,7 +209,7 @@ async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None @parametrize async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None: completion_stream = await async_client.completions.create( - model="gpt-3.5-turbo-instruct", + model="string", prompt="This is a test.", stream=True, best_of=0, @@ -233,7 +233,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn @parametrize async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: response = await async_client.completions.with_raw_response.create( - model="gpt-3.5-turbo-instruct", + model="string", prompt="This is a test.", stream=True, ) @@ -245,7 +245,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) - @parametrize async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: async with async_client.completions.with_streaming_response.create( - model="gpt-3.5-turbo-instruct", + model="string", prompt="This is a test.", stream=True, ) as response: From 3cde0873c2b71e46e5d591b06f0a5d9064907251 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 6 Jun 2024 14:55:43 -0400 Subject: [PATCH 012/192] feat(api): updates (#1474) --- .stats.yml | 2 +- .../resources/beta/threads/runs/runs.py | 34 +++++++++++++++++++ src/openai/resources/beta/threads/threads.py | 34 +++++++++++++++++++ src/openai/resources/chat/completions.py | 34 +++++++++++++++++++ 
.../beta/thread_create_and_run_params.py | 7 ++++ src/openai/types/beta/threads/run.py | 7 ++++ .../types/beta/threads/run_create_params.py | 7 ++++ .../types/chat/completion_create_params.py | 7 ++++ tests/api_resources/beta/test_threads.py | 4 +++ tests/api_resources/beta/threads/test_runs.py | 4 +++ tests/api_resources/chat/test_completions.py | 4 +++ 11 files changed, 143 insertions(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 11d2b0b181..eb81a249f1 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0577fd0d08da6b867b002a5accd45f7116ef91c4940b41cf45dc479938c77163.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-ff436357b12348b7c1c930469332a79cd23ac6ec537e645c411893c42de42e57.yml diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index 5083e59d2b..e5dab3a5cd 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -98,6 +98,7 @@ def create( None, ] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -152,6 +153,10 @@ def create( model associated with the assistant. If not, the model associated with the assistant will be used. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), @@ -246,6 +251,7 @@ def create( None, ] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, @@ -303,6 +309,10 @@ def create( model associated with the assistant. If not, the model associated with the assistant will be used. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), @@ -393,6 +403,7 @@ def create( None, ] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, @@ -450,6 +461,10 @@ def create( model associated with the assistant. If not, the model associated with the assistant will be used. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), @@ -539,6 +554,7 @@ def create( None, ] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -568,6 +584,7 @@ def create( "max_prompt_tokens": max_prompt_tokens, "metadata": metadata, "model": model, + "parallel_tool_calls": parallel_tool_calls, "response_format": response_format, "stream": stream, "temperature": temperature, @@ -975,6 +992,7 @@ async def create( None, ] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1029,6 +1047,10 @@ async def create( model associated with the assistant. If not, the model associated with the assistant will be used. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), @@ -1123,6 +1145,7 @@ async def create( None, ] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, @@ -1180,6 +1203,10 @@ async def create( model associated with the assistant. If not, the model associated with the assistant will be used. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), @@ -1270,6 +1297,7 @@ async def create( None, ] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, @@ -1327,6 +1355,10 @@ async def create( model associated with the assistant. If not, the model associated with the assistant will be used. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), @@ -1416,6 +1448,7 @@ async def create( None, ] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1445,6 +1478,7 @@ async def create( "max_prompt_tokens": max_prompt_tokens, "metadata": metadata, "model": model, + "parallel_tool_calls": parallel_tool_calls, "response_format": response_format, "stream": stream, "temperature": temperature, diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index 9637f37d0a..c3d0131146 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -282,6 +282,7 @@ def create_and_run( None, ] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -331,6 +332,10 @@ def create_and_run( model associated with the assistant. If not, the model associated with the assistant will be used. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), @@ -429,6 +434,7 @@ def create_and_run( None, ] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, @@ -481,6 +487,10 @@ def create_and_run( model associated with the assistant. If not, the model associated with the assistant will be used. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), @@ -575,6 +585,7 @@ def create_and_run( None, ] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, @@ -627,6 +638,10 @@ def create_and_run( model associated with the assistant. If not, the model associated with the assistant will be used. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), @@ -720,6 +735,7 @@ def create_and_run( None, ] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -747,6 +763,7 @@ def create_and_run( "max_prompt_tokens": max_prompt_tokens, "metadata": metadata, "model": model, + "parallel_tool_calls": parallel_tool_calls, "response_format": response_format, "stream": stream, "temperature": temperature, @@ -997,6 +1014,7 @@ async def create_and_run( None, ] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1046,6 +1064,10 @@ async def create_and_run( model associated with the assistant. If not, the model associated with the assistant will be used. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), @@ -1144,6 +1166,7 @@ async def create_and_run( None, ] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, @@ -1196,6 +1219,10 @@ async def create_and_run( model associated with the assistant. If not, the model associated with the assistant will be used. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), @@ -1290,6 +1317,7 @@ async def create_and_run( None, ] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, @@ -1342,6 +1370,10 @@ async def create_and_run( model associated with the assistant. If not, the model associated with the assistant will be used. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), @@ -1435,6 +1467,7 @@ async def create_and_run( None, ] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1462,6 +1495,7 @@ async def create_and_run( "max_prompt_tokens": max_prompt_tokens, "metadata": metadata, "model": model, + "parallel_tool_calls": parallel_tool_calls, "response_format": response_format, "stream": stream, "temperature": temperature, diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index aa25bc1858..ab35b03335 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -55,6 +55,7 @@ def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, @@ -131,6 +132,10 @@ def create( you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. @@ -227,6 +232,7 @@ def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, @@ -309,6 +315,10 @@ def create( you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. @@ -398,6 +408,7 @@ def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, @@ -480,6 +491,10 @@ def create( you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + presence_penalty: Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. @@ -568,6 +583,7 @@ def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, @@ -600,6 +616,7 @@ def create( "logprobs": logprobs, "max_tokens": max_tokens, "n": n, + "parallel_tool_calls": parallel_tool_calls, "presence_penalty": presence_penalty, "response_format": response_format, "seed": seed, @@ -646,6 +663,7 @@ async def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, @@ -722,6 +740,10 @@ async def create( you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. @@ -818,6 +840,7 @@ async def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, @@ -900,6 +923,10 @@ async def create( you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. @@ -989,6 +1016,7 @@ async def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, @@ -1071,6 +1099,10 @@ async def create( you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + parallel_tool_calls: Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
@@ -1159,6 +1191,7 @@ async def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, @@ -1191,6 +1224,7 @@ async def create( "logprobs": logprobs, "max_tokens": max_tokens, "n": n, + "parallel_tool_calls": parallel_tool_calls, "presence_penalty": presence_penalty, "response_format": response_format, "seed": seed, diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index 436c2daddf..b8c69eb7ac 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -109,6 +109,13 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): assistant will be used. """ + parallel_tool_calls: bool + """ + Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + """ + response_format: Optional[AssistantResponseFormatOptionParam] """Specifies the format that the model must output. diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py index 8244ffd598..ea84f1e97c 100644 --- a/src/openai/types/beta/threads/run.py +++ b/src/openai/types/beta/threads/run.py @@ -151,6 +151,13 @@ class Run(BaseModel): object: Literal["thread.run"] """The object type, which is always `thread.run`.""" + parallel_tool_calls: bool + """ + Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + """ + required_action: Optional[RequiredAction] = None """Details on the action required to continue the run. diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index 90c9708596..a7aa799e00 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -107,6 +107,13 @@ class RunCreateParamsBase(TypedDict, total=False): assistant will be used. """ + parallel_tool_calls: bool + """ + Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + """ + response_format: Optional[AssistantResponseFormatOptionParam] """Specifies the format that the model must output. diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index a25f2fdd8f..47c2a5e24e 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -102,6 +102,13 @@ class CompletionCreateParamsBase(TypedDict, total=False): of the choices. Keep `n` as `1` to minimize costs. """ + parallel_tool_calls: bool + """ + Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + during tool use. + """ + presence_penalty: Optional[float] """Number between -2.0 and 2.0. 
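For context, a minimal, hypothetical sketch of how a caller might exercise the new `parallel_tool_calls` parameter threaded through the signatures above, assuming OPENAI_API_KEY is set in the environment; the model name, prompt, and `get_weather` tool schema are placeholders for illustration, not part of the change.

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    completion = client.chat.completions.create(
        model="gpt-4o",  # placeholder model choice
        messages=[{"role": "user", "content": "What's the weather in Paris and in Tokyo?"}],
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "get_weather",  # hypothetical tool for this sketch
                    "description": "Look up the current weather for a city.",
                    "parameters": {
                        "type": "object",
                        "properties": {"city": {"type": "string"}},
                        "required": ["city"],
                    },
                },
            }
        ],
        # New in this patch: opt out of parallel function calling so the model
        # emits at most one tool call per assistant turn.
        parallel_tool_calls=False,
    )

    print(completion.choices[0].message.tool_calls)

Leaving the argument unset keeps the server-side behavior (parallel calls enabled), which is why the signatures above default it to `NOT_GIVEN` so that omission defers to the API rather than sending an explicit value.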
diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py index 041562cb38..9e06b597ef 100644 --- a/tests/api_resources/beta/test_threads.py +++ b/tests/api_resources/beta/test_threads.py @@ -303,6 +303,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) max_prompt_tokens=256, metadata={}, model="gpt-4-turbo", + parallel_tool_calls=True, response_format="none", stream=False, temperature=1, @@ -473,6 +474,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) max_prompt_tokens=256, metadata={}, model="gpt-4-turbo", + parallel_tool_calls=True, response_format="none", temperature=1, thread={ @@ -911,6 +913,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie max_prompt_tokens=256, metadata={}, model="gpt-4-turbo", + parallel_tool_calls=True, response_format="none", stream=False, temperature=1, @@ -1081,6 +1084,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie max_prompt_tokens=256, metadata={}, model="gpt-4-turbo", + parallel_tool_calls=True, response_format="none", temperature=1, thread={ diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py index bf4eba0689..ffadc1df88 100644 --- a/tests/api_resources/beta/threads/test_runs.py +++ b/tests/api_resources/beta/threads/test_runs.py @@ -134,6 +134,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: max_prompt_tokens=256, metadata={}, model="gpt-4-turbo", + parallel_tool_calls=True, response_format="none", stream=False, temperature=1, @@ -297,6 +298,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: max_prompt_tokens=256, metadata={}, model="gpt-4-turbo", + parallel_tool_calls=True, response_format="none", temperature=1, tool_choice="none", @@ -798,6 +800,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn max_prompt_tokens=256, metadata={}, model="gpt-4-turbo", + parallel_tool_calls=True, response_format="none", stream=False, temperature=1, @@ -961,6 +964,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn max_prompt_tokens=256, metadata={}, model="gpt-4-turbo", + parallel_tool_calls=True, response_format="none", temperature=1, tool_choice="none", diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 1c195c4001..3099e16815 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -56,6 +56,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: logprobs=True, max_tokens=0, n=1, + parallel_tool_calls=True, presence_penalty=-2, response_format={"type": "json_object"}, seed=-9223372036854776000, @@ -171,6 +172,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: logprobs=True, max_tokens=0, n=1, + parallel_tool_calls=True, presence_penalty=-2, response_format={"type": "json_object"}, seed=-9223372036854776000, @@ -288,6 +290,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn logprobs=True, max_tokens=0, n=1, + parallel_tool_calls=True, presence_penalty=-2, response_format={"type": "json_object"}, seed=-9223372036854776000, @@ -403,6 +406,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn logprobs=True, max_tokens=0, n=1, + parallel_tool_calls=True, 
presence_penalty=-2, response_format={"type": "json_object"}, seed=-9223372036854776000, From 89c58404aba34f152c42864b157112f3dd1b7f7b Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 7 Jun 2024 15:40:54 -0400 Subject: [PATCH 013/192] fix: remove erroneous thread create argument (#1476) --- .stats.yml | 2 +- src/openai/resources/beta/threads/runs/runs.py | 12 ++++++------ src/openai/resources/beta/threads/threads.py | 12 ++++++------ src/openai/resources/chat/completions.py | 12 ++++++------ .../types/beta/thread_create_and_run_params.py | 10 ++++++++-- src/openai/types/beta/thread_create_params.py | 9 +++++++-- src/openai/types/beta/threads/message.py | 17 ++++++++++++++--- .../types/beta/threads/message_create_params.py | 10 +++++++--- src/openai/types/beta/threads/run.py | 2 +- .../types/beta/threads/run_create_params.py | 11 ++++++++--- .../types/chat/completion_create_params.py | 2 +- 11 files changed, 65 insertions(+), 34 deletions(-) diff --git a/.stats.yml b/.stats.yml index eb81a249f1..a6c08f499b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-ff436357b12348b7c1c930469332a79cd23ac6ec537e645c411893c42de42e57.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-c085faf70d6ff059fbe11b7b6b98123a612524cb9b8a6f649c99526e5b0b1bdb.yml diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index e5dab3a5cd..a59acce667 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -154,7 +154,7 @@ def create( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with @@ -310,7 +310,7 @@ def create( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with @@ -462,7 +462,7 @@ def create( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with @@ -1048,7 +1048,7 @@ async def create( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with @@ -1204,7 +1204,7 @@ async def create( assistant will be used. 
parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with @@ -1356,7 +1356,7 @@ async def create( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index c3d0131146..36715859b5 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -333,7 +333,7 @@ def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with @@ -488,7 +488,7 @@ def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with @@ -639,7 +639,7 @@ def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with @@ -1065,7 +1065,7 @@ async def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with @@ -1220,7 +1220,7 @@ async def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with @@ -1371,7 +1371,7 @@ async def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. 
Compatible with diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index ab35b03335..ed8e9373b0 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -133,7 +133,7 @@ def create( choices. Keep `n` as `1` to minimize costs. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on @@ -316,7 +316,7 @@ def create( choices. Keep `n` as `1` to minimize costs. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on @@ -492,7 +492,7 @@ def create( choices. Keep `n` as `1` to minimize costs. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on @@ -741,7 +741,7 @@ async def create( choices. Keep `n` as `1` to minimize costs. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on @@ -924,7 +924,7 @@ async def create( choices. Keep `n` as `1` to minimize costs. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on @@ -1100,7 +1100,7 @@ async def create( choices. Keep `n` as `1` to minimize costs. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. presence_penalty: Number between -2.0 and 2.0. 
Positive values penalize new tokens based on diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index b8c69eb7ac..dbbff415ec 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -18,6 +18,7 @@ "ThreadMessage", "ThreadMessageAttachment", "ThreadMessageAttachmentTool", + "ThreadMessageAttachmentToolFileSearch", "ThreadToolResources", "ThreadToolResourcesCodeInterpreter", "ThreadToolResourcesFileSearch", @@ -112,7 +113,7 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): parallel_tool_calls: bool """ Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. """ @@ -186,7 +187,12 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): """ -ThreadMessageAttachmentTool = Union[CodeInterpreterToolParam, FileSearchToolParam] +class ThreadMessageAttachmentToolFileSearch(TypedDict, total=False): + type: Required[Literal["file_search"]] + """The type of tool being defined: `file_search`""" + + +ThreadMessageAttachmentTool = Union[CodeInterpreterToolParam, ThreadMessageAttachmentToolFileSearch] class ThreadMessageAttachment(TypedDict, total=False): diff --git a/src/openai/types/beta/thread_create_params.py b/src/openai/types/beta/thread_create_params.py index 5072ed12d9..e5ea14a94d 100644 --- a/src/openai/types/beta/thread_create_params.py +++ b/src/openai/types/beta/thread_create_params.py @@ -5,7 +5,6 @@ from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict -from .file_search_tool_param import FileSearchToolParam from .code_interpreter_tool_param import CodeInterpreterToolParam from .threads.message_content_part_param import MessageContentPartParam @@ -14,6 +13,7 @@ "Message", "MessageAttachment", "MessageAttachmentTool", + "MessageAttachmentToolFileSearch", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", @@ -49,7 +49,12 @@ class ThreadCreateParams(TypedDict, total=False): """ -MessageAttachmentTool = Union[CodeInterpreterToolParam, FileSearchToolParam] +class MessageAttachmentToolFileSearch(TypedDict, total=False): + type: Required[Literal["file_search"]] + """The type of tool being defined: `file_search`""" + + +MessageAttachmentTool = Union[CodeInterpreterToolParam, MessageAttachmentToolFileSearch] class MessageAttachment(TypedDict, total=False): diff --git a/src/openai/types/beta/threads/message.py b/src/openai/types/beta/threads/message.py index ebaabdb0f5..90f083683d 100644 --- a/src/openai/types/beta/threads/message.py +++ b/src/openai/types/beta/threads/message.py @@ -5,12 +5,23 @@ from ...._models import BaseModel from .message_content import MessageContent -from ..file_search_tool import FileSearchTool from ..code_interpreter_tool import CodeInterpreterTool -__all__ = ["Message", "Attachment", "AttachmentTool", "IncompleteDetails"] +__all__ = [ + "Message", + "Attachment", + "AttachmentTool", + "AttachmentToolAssistantToolsFileSearchTypeOnly", + "IncompleteDetails", +] -AttachmentTool = Union[CodeInterpreterTool, FileSearchTool] + +class AttachmentToolAssistantToolsFileSearchTypeOnly(BaseModel): + type: Literal["file_search"] + """The type of tool being defined: `file_search`""" + + +AttachmentTool = Union[CodeInterpreterTool, 
AttachmentToolAssistantToolsFileSearchTypeOnly] class Attachment(BaseModel): diff --git a/src/openai/types/beta/threads/message_create_params.py b/src/openai/types/beta/threads/message_create_params.py index 3668df950d..b1b12293b7 100644 --- a/src/openai/types/beta/threads/message_create_params.py +++ b/src/openai/types/beta/threads/message_create_params.py @@ -5,11 +5,10 @@ from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict -from ..file_search_tool_param import FileSearchToolParam from .message_content_part_param import MessageContentPartParam from ..code_interpreter_tool_param import CodeInterpreterToolParam -__all__ = ["MessageCreateParams", "Attachment", "AttachmentTool"] +__all__ = ["MessageCreateParams", "Attachment", "AttachmentTool", "AttachmentToolFileSearch"] class MessageCreateParams(TypedDict, total=False): @@ -37,7 +36,12 @@ class MessageCreateParams(TypedDict, total=False): """ -AttachmentTool = Union[CodeInterpreterToolParam, FileSearchToolParam] +class AttachmentToolFileSearch(TypedDict, total=False): + type: Required[Literal["file_search"]] + """The type of tool being defined: `file_search`""" + + +AttachmentTool = Union[CodeInterpreterToolParam, AttachmentToolFileSearch] class Attachment(TypedDict, total=False): diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py index ea84f1e97c..81d10d4a56 100644 --- a/src/openai/types/beta/threads/run.py +++ b/src/openai/types/beta/threads/run.py @@ -154,7 +154,7 @@ class Run(BaseModel): parallel_tool_calls: bool """ Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. """ diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index a7aa799e00..89da241965 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -6,7 +6,6 @@ from typing_extensions import Literal, Required, TypedDict from ..assistant_tool_param import AssistantToolParam -from ..file_search_tool_param import FileSearchToolParam from .message_content_part_param import MessageContentPartParam from ..code_interpreter_tool_param import CodeInterpreterToolParam from ..assistant_tool_choice_option_param import AssistantToolChoiceOptionParam @@ -17,6 +16,7 @@ "AdditionalMessage", "AdditionalMessageAttachment", "AdditionalMessageAttachmentTool", + "AdditionalMessageAttachmentToolFileSearch", "TruncationStrategy", "RunCreateParamsNonStreaming", "RunCreateParamsStreaming", @@ -110,7 +110,7 @@ class RunCreateParamsBase(TypedDict, total=False): parallel_tool_calls: bool """ Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. 
""" @@ -173,7 +173,12 @@ class RunCreateParamsBase(TypedDict, total=False): """ -AdditionalMessageAttachmentTool = Union[CodeInterpreterToolParam, FileSearchToolParam] +class AdditionalMessageAttachmentToolFileSearch(TypedDict, total=False): + type: Required[Literal["file_search"]] + """The type of tool being defined: `file_search`""" + + +AdditionalMessageAttachmentTool = Union[CodeInterpreterToolParam, AdditionalMessageAttachmentToolFileSearch] class AdditionalMessageAttachment(TypedDict, total=False): diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 47c2a5e24e..7dd7067f66 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -105,7 +105,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): parallel_tool_calls: bool """ Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. """ From 98ece5783a061011ceede7dc2f94fa081666fdcf Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 12 Jun 2024 14:45:02 -0400 Subject: [PATCH 014/192] feat(api): updates (#1481) --- .stats.yml | 2 +- src/openai/types/beta/threads/file_citation_annotation.py | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/.stats.yml b/.stats.yml index a6c08f499b..c5ada3b5df 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-c085faf70d6ff059fbe11b7b6b98123a612524cb9b8a6f649c99526e5b0b1bdb.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-5cb1810135c35c5024698f3365626471a04796e26e393aefe1aa0ba3c0891919.yml diff --git a/src/openai/types/beta/threads/file_citation_annotation.py b/src/openai/types/beta/threads/file_citation_annotation.py index 68571cd477..c3085aed9b 100644 --- a/src/openai/types/beta/threads/file_citation_annotation.py +++ b/src/openai/types/beta/threads/file_citation_annotation.py @@ -11,9 +11,6 @@ class FileCitation(BaseModel): file_id: str """The ID of the specific File the citation is from.""" - quote: str - """The specific quote in the file.""" - class FileCitationAnnotation(BaseModel): end_index: int From 8b1a4ae569c74dda084468b9c34d1d191061d419 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Jun 2024 19:53:21 +0000 Subject: [PATCH 015/192] feat(api): add service tier argument for chat completions (#1486) --- .stats.yml | 2 +- src/openai/_base_client.py | 8 ++- src/openai/resources/chat/completions.py | 70 +++++++++++++++++++ src/openai/types/chat/chat_completion.py | 7 ++ .../types/chat/chat_completion_chunk.py | 7 ++ .../types/chat/completion_create_params.py | 13 ++++ tests/api_resources/chat/test_completions.py | 4 ++ 7 files changed, 109 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index c5ada3b5df..aa7e8427b0 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-5cb1810135c35c5024698f3365626471a04796e26e393aefe1aa0ba3c0891919.yml +openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8fe357c6b5a425d810d731e4102a052d8e38c5e2d66950e6de1025415160bf88.yml diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 5d5d25fca9..1c9a1a03f2 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -457,7 +457,7 @@ def _build_request( raise RuntimeError(f"Unexpected JSON data type, {type(json_data)}, cannot merge with `extra_body`") headers = self._build_headers(options) - params = _merge_mappings(self._custom_query, options.params) + params = _merge_mappings(self.default_query, options.params) content_type = headers.get("Content-Type") # If the given Content-Type header is multipart/form-data then it @@ -593,6 +593,12 @@ def default_headers(self) -> dict[str, str | Omit]: **self._custom_headers, } + @property + def default_query(self) -> dict[str, object]: + return { + **self._custom_query, + } + def _validate_headers( self, headers: Headers, # noqa: ARG002 diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index ed8e9373b0..d50bce0757 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -59,6 +59,7 @@ def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, @@ -163,6 +164,16 @@ def create( should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + service_tier: Specifies the latency tier to use for processing the request. This parameter is + relevant for customers subscribed to the scale tier service: + + - If set to 'auto', the system will utilize scale tier credits until they are + exhausted. + - If set to 'default', the request will be processed in the shared cluster. + + When this parameter is set, the response body will include the `service_tier` + utilized. + stop: Up to 4 sequences where the API will stop generating further tokens. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be @@ -236,6 +247,7 @@ def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -346,6 +358,16 @@ def create( should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + service_tier: Specifies the latency tier to use for processing the request. This parameter is + relevant for customers subscribed to the scale tier service: + + - If set to 'auto', the system will utilize scale tier credits until they are + exhausted. + - If set to 'default', the request will be processed in the shared cluster. + + When this parameter is set, the response body will include the `service_tier` + utilized. + stop: Up to 4 sequences where the API will stop generating further tokens. 
stream_options: Options for streaming response. Only set this when you set `stream: true`. @@ -412,6 +434,7 @@ def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -522,6 +545,16 @@ def create( should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + service_tier: Specifies the latency tier to use for processing the request. This parameter is + relevant for customers subscribed to the scale tier service: + + - If set to 'auto', the system will utilize scale tier credits until they are + exhausted. + - If set to 'default', the request will be processed in the shared cluster. + + When this parameter is set, the response body will include the `service_tier` + utilized. + stop: Up to 4 sequences where the API will stop generating further tokens. stream_options: Options for streaming response. Only set this when you set `stream: true`. @@ -587,6 +620,7 @@ def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, @@ -620,6 +654,7 @@ def create( "presence_penalty": presence_penalty, "response_format": response_format, "seed": seed, + "service_tier": service_tier, "stop": stop, "stream": stream, "stream_options": stream_options, @@ -667,6 +702,7 @@ async def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, @@ -771,6 +807,16 @@ async def create( should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + service_tier: Specifies the latency tier to use for processing the request. This parameter is + relevant for customers subscribed to the scale tier service: + + - If set to 'auto', the system will utilize scale tier credits until they are + exhausted. + - If set to 'default', the request will be processed in the shared cluster. + + When this parameter is set, the response body will include the `service_tier` + utilized. + stop: Up to 4 sequences where the API will stop generating further tokens. stream: If set, partial message deltas will be sent, like in ChatGPT. 
Tokens will be @@ -844,6 +890,7 @@ async def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -954,6 +1001,16 @@ async def create( should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + service_tier: Specifies the latency tier to use for processing the request. This parameter is + relevant for customers subscribed to the scale tier service: + + - If set to 'auto', the system will utilize scale tier credits until they are + exhausted. + - If set to 'default', the request will be processed in the shared cluster. + + When this parameter is set, the response body will include the `service_tier` + utilized. + stop: Up to 4 sequences where the API will stop generating further tokens. stream_options: Options for streaming response. Only set this when you set `stream: true`. @@ -1020,6 +1077,7 @@ async def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1130,6 +1188,16 @@ async def create( should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + service_tier: Specifies the latency tier to use for processing the request. This parameter is + relevant for customers subscribed to the scale tier service: + + - If set to 'auto', the system will utilize scale tier credits until they are + exhausted. + - If set to 'default', the request will be processed in the shared cluster. + + When this parameter is set, the response body will include the `service_tier` + utilized. + stop: Up to 4 sequences where the API will stop generating further tokens. stream_options: Options for streaming response. Only set this when you set `stream: true`. 
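# Illustrative aside, not part of this patch: a minimal sketch of how the new
# `service_tier` argument might be called from application code. The client
# construction and the model name are assumptions for the example, not taken
# from the diff.
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

completion = client.chat.completions.create(
    model="gpt-4o",  # assumed model name
    messages=[{"role": "user", "content": "Say hello"}],
    service_tier="auto",  # the new request parameter accepts "auto" or "default"
)

# The response reports the tier that actually served the request ("scale" or "default"),
# or None when the parameter was not set.
print(completion.service_tier)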
@@ -1195,6 +1263,7 @@ async def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, @@ -1228,6 +1297,7 @@ async def create( "presence_penalty": presence_penalty, "response_format": response_format, "seed": seed, + "service_tier": service_tier, "stop": stop, "stream": stream, "stream_options": stream_options, diff --git a/src/openai/types/chat/chat_completion.py b/src/openai/types/chat/chat_completion.py index 61a94a258e..5f4eaf3366 100644 --- a/src/openai/types/chat/chat_completion.py +++ b/src/openai/types/chat/chat_completion.py @@ -56,6 +56,13 @@ class ChatCompletion(BaseModel): object: Literal["chat.completion"] """The object type, which is always `chat.completion`.""" + service_tier: Optional[Literal["scale", "default"]] = None + """The service tier used for processing the request. + + This field is only included if the `service_tier` parameter is specified in the + request. + """ + system_fingerprint: Optional[str] = None """This fingerprint represents the backend configuration that the model runs with. diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py index 084a5fcc07..65643c7e60 100644 --- a/src/openai/types/chat/chat_completion_chunk.py +++ b/src/openai/types/chat/chat_completion_chunk.py @@ -122,6 +122,13 @@ class ChatCompletionChunk(BaseModel): object: Literal["chat.completion.chunk"] """The object type, which is always `chat.completion.chunk`.""" + service_tier: Optional[Literal["scale", "default"]] = None + """The service tier used for processing the request. + + This field is only included if the `service_tier` parameter is specified in the + request. + """ + system_fingerprint: Optional[str] = None """ This fingerprint represents the backend configuration that the model runs with. diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 7dd7067f66..21187f3741 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -146,6 +146,19 @@ class CompletionCreateParamsBase(TypedDict, total=False): in the backend. """ + service_tier: Optional[Literal["auto", "default"]] + """Specifies the latency tier to use for processing the request. + + This parameter is relevant for customers subscribed to the scale tier service: + + - If set to 'auto', the system will utilize scale tier credits until they are + exhausted. + - If set to 'default', the request will be processed in the shared cluster. + + When this parameter is set, the response body will include the `service_tier` + utilized. 
+ """ + stop: Union[Optional[str], List[str]] """Up to 4 sequences where the API will stop generating further tokens.""" diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 3099e16815..87df11d1ee 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -60,6 +60,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: presence_penalty=-2, response_format={"type": "json_object"}, seed=-9223372036854776000, + service_tier="auto", stop="string", stream=False, stream_options={"include_usage": True}, @@ -176,6 +177,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: presence_penalty=-2, response_format={"type": "json_object"}, seed=-9223372036854776000, + service_tier="auto", stop="string", stream_options={"include_usage": True}, temperature=1, @@ -294,6 +296,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn presence_penalty=-2, response_format={"type": "json_object"}, seed=-9223372036854776000, + service_tier="auto", stop="string", stream=False, stream_options={"include_usage": True}, @@ -410,6 +413,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn presence_penalty=-2, response_format={"type": "json_object"}, seed=-9223372036854776000, + service_tier="auto", stop="string", stream_options={"include_usage": True}, temperature=1, From e7ae618d45f59210c9c26c53dfcae8496e3e01dd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 19 Jun 2024 11:18:26 +0000 Subject: [PATCH 016/192] fix(client/async): avoid blocking io call for platform headers (#1488) --- src/openai/_base_client.py | 17 +++++++++++++---- src/openai/_utils/__init__.py | 1 + src/openai/_utils/_reflection.py | 8 ++++++++ src/openai/_utils/_sync.py | 19 ++++++++++++++++++- 4 files changed, 40 insertions(+), 5 deletions(-) create mode 100644 src/openai/_utils/_reflection.py diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 1c9a1a03f2..84004ebba5 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -60,7 +60,7 @@ RequestOptions, ModelBuilderProtocol, ) -from ._utils import is_dict, is_list, is_given, lru_cache, is_mapping +from ._utils import is_dict, is_list, asyncify, is_given, lru_cache, is_mapping from ._compat import model_copy, model_dump from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type from ._response import ( @@ -359,6 +359,7 @@ def __init__( self._custom_query = custom_query or {} self._strict_response_validation = _strict_response_validation self._idempotency_header = None + self._platform: Platform | None = None if max_retries is None: # pyright: ignore[reportUnnecessaryComparison] raise TypeError( @@ -623,7 +624,10 @@ def base_url(http://github.com/self,%20url:%20URL%20|%20str) -> None: self._base_url = self._enforce_trailing_slash(url if isinstance(url, URL) else URL(url)) def platform_headers(self) -> Dict[str, str]: - return platform_headers(self._version) + # the actual implementation is in a separate `lru_cache` decorated + # function because adding `lru_cache` to methods will leak memory + # https://github.com/python/cpython/issues/88476 + return platform_headers(self._version, platform=self._platform) def _parse_retry_after_header(self, response_headers: Optional[httpx.Headers] = None) -> float | None: """Returns a 
float of the number of seconds (not milliseconds) to wait after retrying, or None if unspecified. @@ -1513,6 +1517,11 @@ async def _request( stream_cls: type[_AsyncStreamT] | None, remaining_retries: int | None, ) -> ResponseT | _AsyncStreamT: + if self._platform is None: + # `get_platform` can make blocking IO calls so we + # execute it earlier while we are in an async context + self._platform = await asyncify(get_platform)() + cast_to = self._maybe_override_cast_to(cast_to, options) await self._prepare_options(options) @@ -1949,11 +1958,11 @@ def get_platform() -> Platform: @lru_cache(maxsize=None) -def platform_headers(version: str) -> Dict[str, str]: +def platform_headers(version: str, *, platform: Platform | None) -> Dict[str, str]: return { "X-Stainless-Lang": "python", "X-Stainless-Package-Version": version, - "X-Stainless-OS": str(get_platform()), + "X-Stainless-OS": str(platform or get_platform()), "X-Stainless-Arch": str(get_architecture()), "X-Stainless-Runtime": get_python_runtime(), "X-Stainless-Runtime-Version": get_python_version(), diff --git a/src/openai/_utils/__init__.py b/src/openai/_utils/__init__.py index 31b5b22799..667e2473f6 100644 --- a/src/openai/_utils/__init__.py +++ b/src/openai/_utils/__init__.py @@ -49,3 +49,4 @@ maybe_transform as maybe_transform, async_maybe_transform as async_maybe_transform, ) +from ._reflection import function_has_argument as function_has_argument diff --git a/src/openai/_utils/_reflection.py b/src/openai/_utils/_reflection.py new file mode 100644 index 0000000000..e134f58e08 --- /dev/null +++ b/src/openai/_utils/_reflection.py @@ -0,0 +1,8 @@ +import inspect +from typing import Any, Callable + + +def function_has_argument(func: Callable[..., Any], arg_name: str) -> bool: + """Returns whether or not the given function has a specific parameter""" + sig = inspect.signature(func) + return arg_name in sig.parameters diff --git a/src/openai/_utils/_sync.py b/src/openai/_utils/_sync.py index 595924e5b1..d0d810337e 100644 --- a/src/openai/_utils/_sync.py +++ b/src/openai/_utils/_sync.py @@ -7,6 +7,8 @@ import anyio import anyio.to_thread +from ._reflection import function_has_argument + T_Retval = TypeVar("T_Retval") T_ParamSpec = ParamSpec("T_ParamSpec") @@ -59,6 +61,21 @@ def do_work(arg1, arg2, kwarg1="", kwarg2="") -> str: async def wrapper(*args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs) -> T_Retval: partial_f = functools.partial(function, *args, **kwargs) - return await anyio.to_thread.run_sync(partial_f, cancellable=cancellable, limiter=limiter) + + # In `v4.1.0` anyio added the `abandon_on_cancel` argument and deprecated the old + # `cancellable` argument, so we need to use the new `abandon_on_cancel` to avoid + # surfacing deprecation warnings. 
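# Illustrative aside, not part of this patch: a standalone sketch of the pattern the
# fix relies on -- wrapping a blocking callable with `asyncify` so it runs in a worker
# thread instead of blocking the event loop. `openai._utils` is an internal module and
# `blocking_platform_lookup` is a hypothetical stand-in for the real `get_platform`.
import platform

import anyio

from openai._utils import asyncify


def blocking_platform_lookup() -> str:
    # a potentially slow, blocking call similar to what platform-header detection does
    return platform.platform()


async def main() -> None:
    # run the blocking function in a thread and await its result
    result = await asyncify(blocking_platform_lookup)()
    print(result)


anyio.run(main)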
+ if function_has_argument(anyio.to_thread.run_sync, "abandon_on_cancel"): + return await anyio.to_thread.run_sync( + partial_f, + abandon_on_cancel=cancellable, + limiter=limiter, + ) + + return await anyio.to_thread.run_sync( + partial_f, + cancellable=cancellable, + limiter=limiter, + ) return wrapper From 278dd0a3b6386f18ac5e8482b91a2492ead29c01 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 21 Jun 2024 10:16:40 +0000 Subject: [PATCH 017/192] chore(doc): clarify service tier default value (#1496) --- .stats.yml | 2 +- src/openai/resources/chat/completions.py | 18 ++++++++++++------ .../types/chat/completion_create_params.py | 3 ++- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/.stats.yml b/.stats.yml index aa7e8427b0..04682ea0a6 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8fe357c6b5a425d810d731e4102a052d8e38c5e2d66950e6de1025415160bf88.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-3a69e1cc9e1efda3fb82d0fb35961749f886a87594dae9d8d2aa5c60f157f5d2.yml diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index d50bce0757..d73ece2109 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -169,7 +169,8 @@ def create( - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - - If set to 'default', the request will be processed in the shared cluster. + - If set to 'default', the request will be processed using the default service + tier with a lower uptime SLA and no latency guarentee. When this parameter is set, the response body will include the `service_tier` utilized. @@ -363,7 +364,8 @@ def create( - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - - If set to 'default', the request will be processed in the shared cluster. + - If set to 'default', the request will be processed using the default service + tier with a lower uptime SLA and no latency guarentee. When this parameter is set, the response body will include the `service_tier` utilized. @@ -550,7 +552,8 @@ def create( - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - - If set to 'default', the request will be processed in the shared cluster. + - If set to 'default', the request will be processed using the default service + tier with a lower uptime SLA and no latency guarentee. When this parameter is set, the response body will include the `service_tier` utilized. @@ -812,7 +815,8 @@ async def create( - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - - If set to 'default', the request will be processed in the shared cluster. + - If set to 'default', the request will be processed using the default service + tier with a lower uptime SLA and no latency guarentee. When this parameter is set, the response body will include the `service_tier` utilized. @@ -1006,7 +1010,8 @@ async def create( - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - - If set to 'default', the request will be processed in the shared cluster. + - If set to 'default', the request will be processed using the default service + tier with a lower uptime SLA and no latency guarentee. 
When this parameter is set, the response body will include the `service_tier` utilized. @@ -1193,7 +1198,8 @@ async def create( - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - - If set to 'default', the request will be processed in the shared cluster. + - If set to 'default', the request will be processed using the default service + tier with a lower uptime SLA and no latency guarentee. When this parameter is set, the response body will include the `service_tier` utilized. diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 21187f3741..85157653f2 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -153,7 +153,8 @@ class CompletionCreateParamsBase(TypedDict, total=False): - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - - If set to 'default', the request will be processed in the shared cluster. + - If set to 'default', the request will be processed using the default service + tier with a lower uptime SLA and no latency guarentee. When this parameter is set, the response body will include the `service_tier` utilized. From ef749bae85dd097d4a0c1c32f878b1259ed6b1a2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 25 Jun 2024 13:56:05 +0000 Subject: [PATCH 018/192] fix(docs): fix link to advanced python httpx docs (#1499) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8aad7fcd69..06f63081fb 100644 --- a/README.md +++ b/README.md @@ -470,7 +470,7 @@ You can directly override the [httpx client](https://www.python-httpx.org/api/#c - Support for proxies - Custom transports -- Additional [advanced](https://www.python-httpx.org/advanced/#client-instances) functionality +- Additional [advanced](https://www.python-httpx.org/advanced/clients/) functionality ```python from openai import OpenAI, DefaultHttpxClient From 416a5f76d3e00915f210143c44511a279aa0a549 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 26 Jun 2024 01:12:25 +0000 Subject: [PATCH 019/192] fix: temporarily patch upstream version to fix broken release flow (#1500) --- bin/publish-pypi | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bin/publish-pypi b/bin/publish-pypi index 826054e924..05bfccbb71 100644 --- a/bin/publish-pypi +++ b/bin/publish-pypi @@ -3,4 +3,7 @@ set -eux mkdir -p dist rye build --clean +# Patching importlib-metadata version until upstream library version is updated +# https://github.com/pypa/twine/issues/977#issuecomment-2189800841 +"$HOME/.rye/self/bin/python3" -m pip install 'importlib-metadata==7.2.1' rye publish --yes --token=$PYPI_TOKEN From 0d855259aea3d5b1065d2e42257e58095b5b48f9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Jun 2024 16:32:04 +0000 Subject: [PATCH 020/192] fix(build): include more files in sdist builds (#1504) --- pyproject.toml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 09d794a271..892ae1d182 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -99,6 +99,21 @@ include = [ [tool.hatch.build.targets.wheel] packages = ["src/openai"] +[tool.hatch.build.targets.sdist] +# Basically everything except hidden files/directories (such as .github, 
.devcontainers, .python-version, etc) +include = [ + "/*.toml", + "/*.json", + "/*.lock", + "/*.md", + "/mypy.ini", + "/noxfile.py", + "bin/*", + "examples/*", + "src/*", + "tests/*", +] + [tool.hatch.metadata.hooks.fancy-pypi-readme] content-type = "text/markdown" From 5f1bf6cf2dc149a0ed553dbf249d9327bc1e2697 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Jun 2024 19:10:32 +0000 Subject: [PATCH 021/192] chore(deps): bump anyio to v4.4.0 (#1506) --- requirements-dev.lock | 3 ++- requirements.lock | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index 802f827d44..04b7a2ce4f 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -10,7 +10,7 @@ -e file:. annotated-types==0.6.0 # via pydantic -anyio==4.1.0 +anyio==4.4.0 # via httpx # via openai argcomplete==3.1.2 @@ -86,6 +86,7 @@ tomli==2.0.1 # via mypy # via pytest typing-extensions==4.8.0 + # via anyio # via mypy # via openai # via pydantic diff --git a/requirements.lock b/requirements.lock index 027d407e6f..67fb5dddae 100644 --- a/requirements.lock +++ b/requirements.lock @@ -10,7 +10,7 @@ -e file:. annotated-types==0.6.0 # via pydantic -anyio==4.1.0 +anyio==4.4.0 # via httpx # via openai certifi==2023.7.22 @@ -38,6 +38,7 @@ sniffio==1.3.0 # via httpx # via openai typing-extensions==4.8.0 + # via anyio # via openai # via pydantic # via pydantic-core From 22403d63a591b540b74c25d7c1c2bde268c2f585 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 28 Jun 2024 12:41:56 +0000 Subject: [PATCH 022/192] chore(internal): add reflection helper function (#1508) --- src/openai/_utils/__init__.py | 5 ++++- src/openai/_utils/_reflection.py | 34 ++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/src/openai/_utils/__init__.py b/src/openai/_utils/__init__.py index 667e2473f6..3efe66c8e8 100644 --- a/src/openai/_utils/__init__.py +++ b/src/openai/_utils/__init__.py @@ -49,4 +49,7 @@ maybe_transform as maybe_transform, async_maybe_transform as async_maybe_transform, ) -from ._reflection import function_has_argument as function_has_argument +from ._reflection import ( + function_has_argument as function_has_argument, + assert_signatures_in_sync as assert_signatures_in_sync, +) diff --git a/src/openai/_utils/_reflection.py b/src/openai/_utils/_reflection.py index e134f58e08..9a53c7bd21 100644 --- a/src/openai/_utils/_reflection.py +++ b/src/openai/_utils/_reflection.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import inspect from typing import Any, Callable @@ -6,3 +8,35 @@ def function_has_argument(func: Callable[..., Any], arg_name: str) -> bool: """Returns whether or not the given function has a specific parameter""" sig = inspect.signature(func) return arg_name in sig.parameters + + +def assert_signatures_in_sync( + source_func: Callable[..., Any], + check_func: Callable[..., Any], + *, + exclude_params: set[str] = set(), +) -> None: + """Ensure that the signature of the second function matches the first.""" + + check_sig = inspect.signature(check_func) + source_sig = inspect.signature(source_func) + + errors: list[str] = [] + + for name, source_param in source_sig.parameters.items(): + if name in exclude_params: + continue + + custom_param = check_sig.parameters.get(name) + if not custom_param: + errors.append(f"the `{name}` param is missing") + continue + + if 
custom_param.annotation != source_param.annotation: + errors.append( + f"types for the `{name}` param are do not match; source={repr(source_param.annotation)} checking={repr(source_param.annotation)}" + ) + continue + + if errors: + raise AssertionError(f"{len(errors)} errors encountered when comparing signatures:\n\n" + "\n\n".join(errors)) From 4cea16dc6c9958dfef968a65091effc694e9d9d5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 28 Jun 2024 14:11:37 +0000 Subject: [PATCH 023/192] chore: gitignore test server logs (#1509) --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 0f9a66a976..8779740800 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +.prism.log .vscode _dev From ce84165ef0610cc4cba3ccec27e1c2fe6d4b147b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 13:36:44 +0000 Subject: [PATCH 024/192] chore(internal): add rich as a dev dependency (#1514) it's often very helpful when writing demo scripts --- pyproject.toml | 1 + requirements-dev.lock | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 892ae1d182..37968f39ee 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,6 +58,7 @@ dev-dependencies = [ "nox", "dirty-equals>=0.6.0", "importlib-metadata>=6.7.0", + "rich>=13.7.1", ] diff --git a/requirements-dev.lock b/requirements-dev.lock index 04b7a2ce4f..82e64392f1 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -44,6 +44,10 @@ idna==3.4 importlib-metadata==7.0.0 iniconfig==2.0.0 # via pytest +markdown-it-py==3.0.0 + # via rich +mdurl==0.1.2 + # via markdown-it-py mypy==1.7.1 mypy-extensions==1.0.0 # via mypy @@ -63,6 +67,8 @@ pydantic==2.7.1 # via openai pydantic-core==2.18.2 # via pydantic +pygments==2.18.0 + # via rich pyright==1.1.364 pytest==7.1.1 # via pytest-asyncio @@ -72,6 +78,7 @@ python-dateutil==2.8.2 pytz==2023.3.post1 # via dirty-equals respx==0.20.2 +rich==13.7.1 ruff==0.1.9 setuptools==68.2.2 # via nodeenv From 59e2b8b2e6b867dee4c40951c1af5783596b0d28 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 2 Jul 2024 11:20:52 +0000 Subject: [PATCH 025/192] chore(internal): add helper method for constructing `BaseModel`s (#1517) --- src/openai/_models.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/src/openai/_models.py b/src/openai/_models.py index 75c68cc730..5d95bb4b2b 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -10,6 +10,7 @@ ClassVar, Protocol, Required, + ParamSpec, TypedDict, TypeGuard, final, @@ -67,6 +68,9 @@ __all__ = ["BaseModel", "GenericModel"] _T = TypeVar("_T") +_BaseModelT = TypeVar("_BaseModelT", bound="BaseModel") + +P = ParamSpec("P") @runtime_checkable @@ -379,6 +383,29 @@ def is_basemodel_type(type_: type) -> TypeGuard[type[BaseModel] | type[GenericMo return issubclass(origin, BaseModel) or issubclass(origin, GenericModel) +def build( + base_model_cls: Callable[P, _BaseModelT], + *args: P.args, + **kwargs: P.kwargs, +) -> _BaseModelT: + """Construct a BaseModel class without validation. + + This is useful for cases where you need to instantiate a `BaseModel` + from an API response as this provides type-safe params which isn't supported + by helpers like `construct_type()`. 
+ + ```py + build(MyModel, my_field_a="foo", my_field_b=123) + ``` + """ + if args: + raise TypeError( + "Received positional arguments which are not supported; Keyword arguments must be used instead", + ) + + return cast(_BaseModelT, construct_type(type_=base_model_cls, value=kwargs)) + + def construct_type(*, value: object, type_: object) -> object: """Loose coercion to the expected type with construction of nested values. From 8496604527d4114909e19436753ee178e2a267b2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 2 Jul 2024 19:13:30 +0000 Subject: [PATCH 026/192] fix(client): always respect content-type multipart/form-data if provided (#1519) --- src/openai/_base_client.py | 20 +++++++++-- src/openai/resources/audio/transcriptions.py | 18 +++++----- src/openai/resources/audio/translations.py | 18 +++++----- src/openai/resources/files.py | 18 +++++----- src/openai/resources/images.py | 36 +++++++++----------- 5 files changed, 58 insertions(+), 52 deletions(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 84004ebba5..2f4b0c7fbd 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -58,6 +58,7 @@ HttpxSendArgs, AsyncTransport, RequestOptions, + HttpxRequestFiles, ModelBuilderProtocol, ) from ._utils import is_dict, is_list, asyncify, is_given, lru_cache, is_mapping @@ -460,6 +461,7 @@ def _build_request( headers = self._build_headers(options) params = _merge_mappings(self.default_query, options.params) content_type = headers.get("Content-Type") + files = options.files # If the given Content-Type header is multipart/form-data then it # has to be removed so that httpx can generate the header with @@ -473,7 +475,7 @@ def _build_request( headers.pop("Content-Type") # As we are now sending multipart/form-data instead of application/json - # we need to tell httpx to use it, https://www.python-httpx.org/advanced/#multipart-file-encoding + # we need to tell httpx to use it, https://www.python-httpx.org/advanced/clients/#multipart-file-encoding if json_data: if not is_dict(json_data): raise TypeError( @@ -481,6 +483,15 @@ def _build_request( ) kwargs["data"] = self._serialize_multipartform(json_data) + # httpx determines whether or not to send a "multipart/form-data" + # request based on the truthiness of the "files" argument. + # This gets around that issue by generating a dict value that + # evaluates to true. 
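# Illustrative aside, not part of this patch: why the truthiness of `files` matters.
# httpx only encodes the request body as multipart/form-data when the `files` argument
# is truthy; with an empty dict it silently falls back to URL-encoded form data. A
# hypothetical standalone reproduction (no request is actually sent):
import httpx

with httpx.Client() as demo_client:
    plain = demo_client.build_request("POST", "https://example.com", data={"a": "1"}, files={})
    forced = demo_client.build_request(
        "POST", "https://example.com", data={"a": "1"}, files={"b": ("b.txt", b"hi")}
    )

# application/x-www-form-urlencoded vs. multipart/form-data; boundary=...
print(plain.headers["content-type"])
print(forced.headers["content-type"])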
+ # + # https://github.com/encode/httpx/discussions/2399#discussioncomment-3814186 + if not files: + files = cast(HttpxRequestFiles, ForceMultipartDict()) + # TODO: report this error to httpx return self._client.build_request( # pyright: ignore[reportUnknownMemberType] headers=headers, @@ -493,7 +504,7 @@ def _build_request( # https://github.com/microsoft/pyright/issues/3526#event-6715453066 params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None, json=json_data, - files=options.files, + files=files, **kwargs, ) @@ -1891,6 +1902,11 @@ def make_request_options( return options +class ForceMultipartDict(Dict[str, None]): + def __bool__(self) -> bool: + return True + + class OtherPlatform: def __init__(self, name: str) -> None: self.name = name diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py index 995680186b..c03137dbfd 100644 --- a/src/openai/resources/audio/transcriptions.py +++ b/src/openai/resources/audio/transcriptions.py @@ -108,11 +108,10 @@ def create( } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - if files: - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return self._post( "/audio/transcriptions", body=maybe_transform(body, transcription_create_params.TranscriptionCreateParams), @@ -205,11 +204,10 @@ async def create( } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - if files: - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return await self._post( "/audio/transcriptions", body=await async_maybe_transform(body, transcription_create_params.TranscriptionCreateParams), diff --git a/src/openai/resources/audio/translations.py b/src/openai/resources/audio/translations.py index d711ee2fbd..485e1a33df 100644 --- a/src/openai/resources/audio/translations.py +++ b/src/openai/resources/audio/translations.py @@ -93,11 +93,10 @@ def create( } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - if files: - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. 
+ # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return self._post( "/audio/translations", body=maybe_transform(body, translation_create_params.TranslationCreateParams), @@ -175,11 +174,10 @@ async def create( } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - if files: - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return await self._post( "/audio/translations", body=await async_maybe_transform(body, translation_create_params.TranslationCreateParams), diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py index cf41ae6ae2..f35acc23ea 100644 --- a/src/openai/resources/files.py +++ b/src/openai/resources/files.py @@ -110,11 +110,10 @@ def create( } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - if files: - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return self._post( "/files", body=maybe_transform(body, file_create_params.FileCreateParams), @@ -370,11 +369,10 @@ async def create( } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - if files: - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return await self._post( "/files", body=await async_maybe_transform(body, file_create_params.FileCreateParams), diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py index 74b2a46a3f..3728392f93 100644 --- a/src/openai/resources/images.py +++ b/src/openai/resources/images.py @@ -95,11 +95,10 @@ def create_variation( } ) files = extract_files(cast(Mapping[str, object], body), paths=[["image"]]) - if files: - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. 
+ # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return self._post( "/images/variations", body=maybe_transform(body, image_create_variation_params.ImageCreateVariationParams), @@ -179,11 +178,10 @@ def edit( } ) files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]]) - if files: - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return self._post( "/images/edits", body=maybe_transform(body, image_edit_params.ImageEditParams), @@ -343,11 +341,10 @@ async def create_variation( } ) files = extract_files(cast(Mapping[str, object], body), paths=[["image"]]) - if files: - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return await self._post( "/images/variations", body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams), @@ -427,11 +424,10 @@ async def edit( } ) files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]]) - if files: - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. 
+ # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return await self._post( "/images/edits", body=await async_maybe_transform(body, image_edit_params.ImageEditParams), From f0493e3ee22fe4dc35361356613204fdcf66e055 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 2 Jul 2024 19:50:24 +0000 Subject: [PATCH 027/192] chore: minor change to tests (#1521) --- .stats.yml | 2 +- tests/api_resources/chat/test_completions.py | 8 ++++---- tests/api_resources/test_completions.py | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.stats.yml b/.stats.yml index 04682ea0a6..57f5afaffe 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-3a69e1cc9e1efda3fb82d0fb35961749f886a87594dae9d8d2aa5c60f157f5d2.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-27d8d6da893c1cdd53b491ec05153df22b1e113965f253a1d6eb8d75b628173f.yml diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 87df11d1ee..5cb2a8c717 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -59,7 +59,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: parallel_tool_calls=True, presence_penalty=-2, response_format={"type": "json_object"}, - seed=-9223372036854776000, + seed=-9007199254740991, service_tier="auto", stop="string", stream=False, @@ -176,7 +176,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: parallel_tool_calls=True, presence_penalty=-2, response_format={"type": "json_object"}, - seed=-9223372036854776000, + seed=-9007199254740991, service_tier="auto", stop="string", stream_options={"include_usage": True}, @@ -295,7 +295,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn parallel_tool_calls=True, presence_penalty=-2, response_format={"type": "json_object"}, - seed=-9223372036854776000, + seed=-9007199254740991, service_tier="auto", stop="string", stream=False, @@ -412,7 +412,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn parallel_tool_calls=True, presence_penalty=-2, response_format={"type": "json_object"}, - seed=-9223372036854776000, + seed=-9007199254740991, service_tier="auto", stop="string", stream_options={"include_usage": True}, diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py index 69d914200f..ad2679cabe 100644 --- a/tests/api_resources/test_completions.py +++ b/tests/api_resources/test_completions.py @@ -38,7 +38,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: max_tokens=16, n=1, presence_penalty=-2, - seed=-9223372036854776000, + seed=-9007199254740991, stop="\n", stream=False, stream_options={"include_usage": True}, @@ -98,7 +98,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: max_tokens=16, n=1, presence_penalty=-2, - seed=-9223372036854776000, + seed=-9007199254740991, stop="\n", stream_options={"include_usage": True}, suffix="test.", @@ -160,7 +160,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn max_tokens=16, n=1, presence_penalty=-2, - seed=-9223372036854776000, + seed=-9007199254740991, 
stop="\n", stream=False, stream_options={"include_usage": True}, @@ -220,7 +220,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn max_tokens=16, n=1, presence_penalty=-2, - seed=-9223372036854776000, + seed=-9007199254740991, stop="\n", stream_options={"include_usage": True}, suffix="test.", From 5aee2a1ca5f08d616a9f2cd70aec384c243f253b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 3 Jul 2024 17:37:28 +0000 Subject: [PATCH 028/192] chore(ci): update rye to v0.35.0 (#1523) --- .devcontainer/Dockerfile | 2 +- .github/workflows/ci.yml | 4 ++-- .github/workflows/create-releases.yml | 4 ++-- .github/workflows/publish-pypi.yml | 4 ++-- requirements-dev.lock | 1 + requirements.lock | 1 + 6 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 83bca8f716..ac9a2e7521 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -3,7 +3,7 @@ FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT} USER vscode -RUN curl -sSf https://rye.astral.sh/get | RYE_VERSION="0.24.0" RYE_INSTALL_OPTION="--yes" bash +RUN curl -sSf https://rye.astral.sh/get | RYE_VERSION="0.35.0" RYE_INSTALL_OPTION="--yes" bash ENV PATH=/home/vscode/.rye/shims:$PATH RUN echo "[[ -d .venv ]] && source .venv/bin/activate" >> /home/vscode/.bashrc diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6fc5b36597..7e58412065 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,7 +21,7 @@ jobs: curl -sSf https://rye.astral.sh/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: - RYE_VERSION: 0.24.0 + RYE_VERSION: '0.35.0' RYE_INSTALL_OPTION: '--yes' - name: Install dependencies @@ -42,7 +42,7 @@ jobs: curl -sSf https://rye.astral.sh/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: - RYE_VERSION: 0.24.0 + RYE_VERSION: '0.35.0' RYE_INSTALL_OPTION: '--yes' - name: Bootstrap diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml index 1ac03ede3f..2a97049033 100644 --- a/.github/workflows/create-releases.yml +++ b/.github/workflows/create-releases.yml @@ -28,8 +28,8 @@ jobs: curl -sSf https://rye.astral.sh/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: - RYE_VERSION: 0.24.0 - RYE_INSTALL_OPTION: "--yes" + RYE_VERSION: '0.35.0' + RYE_INSTALL_OPTION: '--yes' - name: Publish to PyPI if: ${{ steps.release.outputs.releases_created }} diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index aae985b27e..44027a3c4c 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -17,8 +17,8 @@ jobs: curl -sSf https://rye.astral.sh/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: - RYE_VERSION: 0.24.0 - RYE_INSTALL_OPTION: "--yes" + RYE_VERSION: '0.35.0' + RYE_INSTALL_OPTION: '--yes' - name: Publish to PyPI run: | diff --git a/requirements-dev.lock b/requirements-dev.lock index 82e64392f1..574736c02f 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -6,6 +6,7 @@ # features: [] # all-features: true # with-sources: false +# generate-hashes: false -e file:. annotated-types==0.6.0 diff --git a/requirements.lock b/requirements.lock index 67fb5dddae..61e8fb1983 100644 --- a/requirements.lock +++ b/requirements.lock @@ -6,6 +6,7 @@ # features: [] # all-features: true # with-sources: false +# generate-hashes: false -e file:. 
annotated-types==0.6.0 From fba11ca8e3e364cb86a9403ebb49d3a9f1520480 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 9 Jul 2024 15:52:07 +0000 Subject: [PATCH 029/192] chore(internal): minor request options handling changes (#1534) --- src/openai/_base_client.py | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 2f4b0c7fbd..7ab2a56169 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -956,6 +956,11 @@ def _request( stream: bool, stream_cls: type[_StreamT] | None, ) -> ResponseT | _StreamT: + # create a copy of the options we were given so that if the + # options are mutated later & we then retry, the retries are + # given the original options + input_options = model_copy(options) + cast_to = self._maybe_override_cast_to(cast_to, options) self._prepare_options(options) @@ -980,7 +985,7 @@ def _request( if retries > 0: return self._retry_request( - options, + input_options, cast_to, retries, stream=stream, @@ -995,7 +1000,7 @@ def _request( if retries > 0: return self._retry_request( - options, + input_options, cast_to, retries, stream=stream, @@ -1024,7 +1029,7 @@ def _request( if retries > 0 and self._should_retry(err.response): err.response.close() return self._retry_request( - options, + input_options, cast_to, retries, err.response.headers, @@ -1533,6 +1538,11 @@ async def _request( # execute it earlier while we are in an async context self._platform = await asyncify(get_platform)() + # create a copy of the options we were given so that if the + # options are mutated later & we then retry, the retries are + # given the original options + input_options = model_copy(options) + cast_to = self._maybe_override_cast_to(cast_to, options) await self._prepare_options(options) @@ -1555,7 +1565,7 @@ async def _request( if retries > 0: return await self._retry_request( - options, + input_options, cast_to, retries, stream=stream, @@ -1570,7 +1580,7 @@ async def _request( if retries > 0: return await self._retry_request( - options, + input_options, cast_to, retries, stream=stream, @@ -1593,7 +1603,7 @@ async def _request( if retries > 0 and self._should_retry(err.response): await err.response.aclose() return await self._retry_request( - options, + input_options, cast_to, retries, err.response.headers, From 78393b410970849412a0a8d57e05dd2843ee46fd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 10 Jul 2024 11:38:16 +0000 Subject: [PATCH 030/192] chore(internal): add helper function (#1538) --- src/openai/_models.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/openai/_models.py b/src/openai/_models.py index 5d95bb4b2b..eb7ce3bde9 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -643,6 +643,14 @@ def validate_type(*, type_: type[_T], value: object) -> _T: return cast(_T, _validate_non_model_type(type_=type_, value=value)) +def set_pydantic_config(typ: Any, config: pydantic.ConfigDict) -> None: + """Add a pydantic config for the given type. + + Note: this is a no-op on Pydantic v1. 
+ """ + setattr(typ, "__pydantic_config__", config) # noqa: B010 + + # our use of subclasssing here causes weirdness for type checkers, # so we just pretend that we don't subclass if TYPE_CHECKING: From 55c1f07f9e2755f58b26bce586fbfa761a4710bc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 10 Jul 2024 15:22:28 +0000 Subject: [PATCH 031/192] chore(internal): update mypy (#1539) --- requirements-dev.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index 574736c02f..6941447b96 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -49,7 +49,7 @@ markdown-it-py==3.0.0 # via rich mdurl==0.1.2 # via markdown-it-py -mypy==1.7.1 +mypy==1.10.1 mypy-extensions==1.0.0 # via mypy nodeenv==1.8.0 From ae65e6ff6ffdae3abaf8ee45cdc59f8f0d6ae230 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 11 Jul 2024 10:23:13 +0000 Subject: [PATCH 032/192] chore(ci): also run workflows for PRs targeting `next` (#1541) --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7e58412065..c390431e79 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,6 +6,7 @@ on: pull_request: branches: - main + - next jobs: lint: From d9b5e487e6388f069b1b2316a009989f5efddb47 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 11 Jul 2024 12:09:31 +0000 Subject: [PATCH 033/192] chore(internal): minor import restructuring (#1542) --- src/openai/resources/audio/speech.py | 4 +--- src/openai/resources/audio/transcriptions.py | 4 +--- src/openai/resources/audio/translations.py | 4 +--- src/openai/resources/batches.py | 5 +---- src/openai/resources/beta/assistants.py | 5 +---- src/openai/resources/beta/threads/messages.py | 5 +---- src/openai/resources/beta/threads/runs/runs.py | 5 +---- src/openai/resources/beta/threads/runs/steps.py | 5 +---- src/openai/resources/beta/threads/threads.py | 4 +--- src/openai/resources/beta/vector_stores/file_batches.py | 5 +---- src/openai/resources/beta/vector_stores/files.py | 5 +---- src/openai/resources/beta/vector_stores/vector_stores.py | 5 +---- src/openai/resources/chat/completions.py | 4 +--- src/openai/resources/completions.py | 4 +--- src/openai/resources/embeddings.py | 4 +--- src/openai/resources/files.py | 5 +---- src/openai/resources/fine_tuning/jobs/checkpoints.py | 5 +---- src/openai/resources/fine_tuning/jobs/jobs.py | 5 +---- src/openai/resources/images.py | 4 +--- src/openai/resources/models.py | 5 +---- src/openai/resources/moderations.py | 4 +--- 21 files changed, 21 insertions(+), 75 deletions(-) diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py index e26c58051e..c9e6a70b62 100644 --- a/src/openai/resources/audio/speech.py +++ b/src/openai/resources/audio/speech.py @@ -22,9 +22,7 @@ async_to_custom_streamed_response_wrapper, ) from ...types.audio import speech_create_params -from ..._base_client import ( - make_request_options, -) +from ..._base_client import make_request_options __all__ = ["Speech", "AsyncSpeech"] diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py index c03137dbfd..f190e00227 100644 --- a/src/openai/resources/audio/transcriptions.py +++ b/src/openai/resources/audio/transcriptions.py @@ -19,9 
+19,7 @@ from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ...types.audio import transcription_create_params -from ..._base_client import ( - make_request_options, -) +from ..._base_client import make_request_options from ...types.audio.transcription import Transcription __all__ = ["Transcriptions", "AsyncTranscriptions"] diff --git a/src/openai/resources/audio/translations.py b/src/openai/resources/audio/translations.py index 485e1a33df..6f84153ba9 100644 --- a/src/openai/resources/audio/translations.py +++ b/src/openai/resources/audio/translations.py @@ -19,9 +19,7 @@ from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ...types.audio import translation_create_params -from ..._base_client import ( - make_request_options, -) +from ..._base_client import make_request_options from ...types.audio.translation import Translation __all__ = ["Translations", "AsyncTranslations"] diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py index 7152fac622..4e345dd505 100644 --- a/src/openai/resources/batches.py +++ b/src/openai/resources/batches.py @@ -19,10 +19,7 @@ from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ..pagination import SyncCursorPage, AsyncCursorPage from ..types.batch import Batch -from .._base_client import ( - AsyncPaginator, - make_request_options, -) +from .._base_client import AsyncPaginator, make_request_options __all__ = ["Batches", "AsyncBatches"] diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index 5912aff77a..204f6c87f4 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -22,10 +22,7 @@ assistant_create_params, assistant_update_params, ) -from ..._base_client import ( - AsyncPaginator, - make_request_options, -) +from ..._base_client import AsyncPaginator, make_request_options from ...types.beta.assistant import Assistant from ...types.beta.assistant_deleted import AssistantDeleted from ...types.beta.assistant_tool_param import AssistantToolParam diff --git a/src/openai/resources/beta/threads/messages.py b/src/openai/resources/beta/threads/messages.py index f0832515ce..5b4f1f2955 100644 --- a/src/openai/resources/beta/threads/messages.py +++ b/src/openai/resources/beta/threads/messages.py @@ -17,10 +17,7 @@ from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ....pagination import SyncCursorPage, AsyncCursorPage -from ...._base_client import ( - AsyncPaginator, - make_request_options, -) +from ...._base_client import AsyncPaginator, make_request_options from ....types.beta.threads import message_list_params, message_create_params, message_update_params from ....types.beta.threads.message import Message from ....types.beta.threads.message_deleted import MessageDeleted diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index a59acce667..0f57dd69f5 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -27,10 +27,7 @@ from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ....._streaming import Stream, AsyncStream from .....pagination import 
SyncCursorPage, AsyncCursorPage -from ....._base_client import ( - AsyncPaginator, - make_request_options, -) +from ....._base_client import AsyncPaginator, make_request_options from .....types.beta.threads import ( run_list_params, run_create_params, diff --git a/src/openai/resources/beta/threads/runs/steps.py b/src/openai/resources/beta/threads/runs/steps.py index 512008939c..96b16dfa0a 100644 --- a/src/openai/resources/beta/threads/runs/steps.py +++ b/src/openai/resources/beta/threads/runs/steps.py @@ -13,10 +13,7 @@ from ....._resource import SyncAPIResource, AsyncAPIResource from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from .....pagination import SyncCursorPage, AsyncCursorPage -from ....._base_client import ( - AsyncPaginator, - make_request_options, -) +from ....._base_client import AsyncPaginator, make_request_options from .....types.beta.threads.runs import step_list_params from .....types.beta.threads.runs.run_step import RunStep diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index 36715859b5..25dd1ac09e 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -40,9 +40,7 @@ thread_update_params, thread_create_and_run_params, ) -from ...._base_client import ( - make_request_options, -) +from ...._base_client import make_request_options from ....types.beta.thread import Thread from ....types.beta.threads.run import Run from ....types.beta.thread_deleted import ThreadDeleted diff --git a/src/openai/resources/beta/vector_stores/file_batches.py b/src/openai/resources/beta/vector_stores/file_batches.py index 35772c4f9b..a3ddf84b1d 100644 --- a/src/openai/resources/beta/vector_stores/file_batches.py +++ b/src/openai/resources/beta/vector_stores/file_batches.py @@ -17,10 +17,7 @@ from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ....pagination import SyncCursorPage, AsyncCursorPage -from ...._base_client import ( - AsyncPaginator, - make_request_options, -) +from ...._base_client import AsyncPaginator, make_request_options from ....types.beta.vector_stores import file_batch_create_params, file_batch_list_files_params from ....types.beta.vector_stores.vector_store_file import VectorStoreFile from ....types.beta.vector_stores.vector_store_file_batch import VectorStoreFileBatch diff --git a/src/openai/resources/beta/vector_stores/files.py b/src/openai/resources/beta/vector_stores/files.py index c1097baa72..16bfd2d66f 100644 --- a/src/openai/resources/beta/vector_stores/files.py +++ b/src/openai/resources/beta/vector_stores/files.py @@ -16,10 +16,7 @@ from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ....pagination import SyncCursorPage, AsyncCursorPage -from ...._base_client import ( - AsyncPaginator, - make_request_options, -) +from ...._base_client import AsyncPaginator, make_request_options from ....types.beta.vector_stores import file_list_params, file_create_params from ....types.beta.vector_stores.vector_store_file import VectorStoreFile from ....types.beta.vector_stores.vector_store_file_deleted import VectorStoreFileDeleted diff --git a/src/openai/resources/beta/vector_stores/vector_stores.py b/src/openai/resources/beta/vector_stores/vector_stores.py index cbd56a0693..58374a9572 100644 --- 
a/src/openai/resources/beta/vector_stores/vector_stores.py +++ b/src/openai/resources/beta/vector_stores/vector_stores.py @@ -34,10 +34,7 @@ ) from ....pagination import SyncCursorPage, AsyncCursorPage from ....types.beta import vector_store_list_params, vector_store_create_params, vector_store_update_params -from ...._base_client import ( - AsyncPaginator, - make_request_options, -) +from ...._base_client import AsyncPaginator, make_request_options from ....types.beta.vector_store import VectorStore from ....types.beta.vector_store_deleted import VectorStoreDeleted diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index d73ece2109..e7dbe34585 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -19,9 +19,7 @@ from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ..._streaming import Stream, AsyncStream from ...types.chat import completion_create_params -from ..._base_client import ( - make_request_options, -) +from ..._base_client import make_request_options from ...types.chat_model import ChatModel from ...types.chat.chat_completion import ChatCompletion from ...types.chat.chat_completion_chunk import ChatCompletionChunk diff --git a/src/openai/resources/completions.py b/src/openai/resources/completions.py index 0812000f78..d33862b405 100644 --- a/src/openai/resources/completions.py +++ b/src/openai/resources/completions.py @@ -19,9 +19,7 @@ from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from .._streaming import Stream, AsyncStream -from .._base_client import ( - make_request_options, -) +from .._base_client import make_request_options from ..types.completion import Completion from ..types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam diff --git a/src/openai/resources/embeddings.py b/src/openai/resources/embeddings.py index c2719bfe8b..3b06eea37e 100644 --- a/src/openai/resources/embeddings.py +++ b/src/openai/resources/embeddings.py @@ -17,9 +17,7 @@ from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper -from .._base_client import ( - make_request_options, -) +from .._base_client import make_request_options from ..types.create_embedding_response import CreateEmbeddingResponse __all__ = ["Embeddings", "AsyncEmbeddings"] diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py index f35acc23ea..f9db4f9ff9 100644 --- a/src/openai/resources/files.py +++ b/src/openai/resources/files.py @@ -28,10 +28,7 @@ async_to_custom_streamed_response_wrapper, ) from ..pagination import SyncPage, AsyncPage -from .._base_client import ( - AsyncPaginator, - make_request_options, -) +from .._base_client import AsyncPaginator, make_request_options from ..types.file_object import FileObject from ..types.file_deleted import FileDeleted diff --git a/src/openai/resources/fine_tuning/jobs/checkpoints.py b/src/openai/resources/fine_tuning/jobs/checkpoints.py index 67f5739a02..5b5a1043d7 100644 --- a/src/openai/resources/fine_tuning/jobs/checkpoints.py +++ b/src/openai/resources/fine_tuning/jobs/checkpoints.py @@ -11,10 +11,7 @@ from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ....pagination 
import SyncCursorPage, AsyncCursorPage -from ...._base_client import ( - AsyncPaginator, - make_request_options, -) +from ...._base_client import AsyncPaginator, make_request_options from ....types.fine_tuning.jobs import checkpoint_list_params from ....types.fine_tuning.jobs.fine_tuning_job_checkpoint import FineTuningJobCheckpoint diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py index 14b384a88d..61bd3bfbe5 100644 --- a/src/openai/resources/fine_tuning/jobs/jobs.py +++ b/src/openai/resources/fine_tuning/jobs/jobs.py @@ -25,10 +25,7 @@ from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ....pagination import SyncCursorPage, AsyncCursorPage -from ...._base_client import ( - AsyncPaginator, - make_request_options, -) +from ...._base_client import AsyncPaginator, make_request_options from ....types.fine_tuning import job_list_params, job_create_params, job_list_events_params from ....types.fine_tuning.fine_tuning_job import FineTuningJob from ....types.fine_tuning.fine_tuning_job_event import FineTuningJobEvent diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py index 3728392f93..c5e1acd15b 100644 --- a/src/openai/resources/images.py +++ b/src/openai/resources/images.py @@ -19,9 +19,7 @@ from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper -from .._base_client import ( - make_request_options, -) +from .._base_client import make_request_options from ..types.images_response import ImagesResponse __all__ = ["Images", "AsyncImages"] diff --git a/src/openai/resources/models.py b/src/openai/resources/models.py index e76c496ffa..5d0eb6f602 100644 --- a/src/openai/resources/models.py +++ b/src/openai/resources/models.py @@ -11,10 +11,7 @@ from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ..pagination import SyncPage, AsyncPage from ..types.model import Model -from .._base_client import ( - AsyncPaginator, - make_request_options, -) +from .._base_client import AsyncPaginator, make_request_options from ..types.model_deleted import ModelDeleted __all__ = ["Models", "AsyncModels"] diff --git a/src/openai/resources/moderations.py b/src/openai/resources/moderations.py index 9386e50dae..e5259643e7 100644 --- a/src/openai/resources/moderations.py +++ b/src/openai/resources/moderations.py @@ -17,9 +17,7 @@ from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper -from .._base_client import ( - make_request_options, -) +from .._base_client import make_request_options from ..types.moderation_create_response import ModerationCreateResponse __all__ = ["Moderations", "AsyncModerations"] From d1691a17f2d6ead080de843e2cf9868a61b77c66 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 11 Jul 2024 14:29:10 +0000 Subject: [PATCH 034/192] docs(examples): use named params more (#1543) --- tests/api_resources/audio/test_speech.py | 16 +- .../audio/test_transcriptions.py | 8 +- .../api_resources/audio/test_translations.py | 8 +- tests/api_resources/beta/test_assistants.py | 80 ++-- tests/api_resources/beta/test_threads.py | 192 ++++---- 
.../api_resources/beta/test_vector_stores.py | 60 +-- .../beta/threads/runs/test_steps.py | 116 ++--- .../beta/threads/test_messages.py | 180 ++++---- tests/api_resources/beta/threads/test_runs.py | 416 +++++++++--------- .../beta/vector_stores/test_file_batches.py | 128 +++--- .../beta/vector_stores/test_files.py | 124 +++--- tests/api_resources/chat/test_completions.py | 104 ++--- .../fine_tuning/jobs/test_checkpoints.py | 24 +- tests/api_resources/fine_tuning/test_jobs.py | 52 +-- tests/api_resources/test_batches.py | 44 +- tests/api_resources/test_files.py | 64 +-- 16 files changed, 808 insertions(+), 808 deletions(-) diff --git a/tests/api_resources/audio/test_speech.py b/tests/api_resources/audio/test_speech.py index 781ebeceb9..5b5dc24156 100644 --- a/tests/api_resources/audio/test_speech.py +++ b/tests/api_resources/audio/test_speech.py @@ -26,7 +26,7 @@ class TestSpeech: def test_method_create(self, client: OpenAI, respx_mock: MockRouter) -> None: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = client.audio.speech.create( - input="string", + input="input", model="string", voice="alloy", ) @@ -38,7 +38,7 @@ def test_method_create(self, client: OpenAI, respx_mock: MockRouter) -> None: def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRouter) -> None: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = client.audio.speech.create( - input="string", + input="input", model="string", voice="alloy", response_format="mp3", @@ -53,7 +53,7 @@ def test_raw_response_create(self, client: OpenAI, respx_mock: MockRouter) -> No respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) response = client.audio.speech.with_raw_response.create( - input="string", + input="input", model="string", voice="alloy", ) @@ -68,7 +68,7 @@ def test_raw_response_create(self, client: OpenAI, respx_mock: MockRouter) -> No def test_streaming_response_create(self, client: OpenAI, respx_mock: MockRouter) -> None: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) with client.audio.speech.with_streaming_response.create( - input="string", + input="input", model="string", voice="alloy", ) as response: @@ -89,7 +89,7 @@ class TestAsyncSpeech: async def test_method_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = await async_client.audio.speech.create( - input="string", + input="input", model="string", voice="alloy", ) @@ -101,7 +101,7 @@ async def test_method_create(self, async_client: AsyncOpenAI, respx_mock: MockRo async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = await async_client.audio.speech.create( - input="string", + input="input", model="string", voice="alloy", response_format="mp3", @@ -116,7 +116,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI, respx_mock: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) response = await async_client.audio.speech.with_raw_response.create( - input="string", + input="input", model="string", voice="alloy", ) @@ -131,7 +131,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI, respx_mock: async def 
test_streaming_response_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) async with async_client.audio.speech.with_streaming_response.create( - input="string", + input="input", model="string", voice="alloy", ) as response: diff --git a/tests/api_resources/audio/test_transcriptions.py b/tests/api_resources/audio/test_transcriptions.py index ba8e9e4099..a459a34c68 100644 --- a/tests/api_resources/audio/test_transcriptions.py +++ b/tests/api_resources/audio/test_transcriptions.py @@ -30,8 +30,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: transcription = client.audio.transcriptions.create( file=b"raw file contents", model="whisper-1", - language="string", - prompt="string", + language="language", + prompt="prompt", response_format="json", temperature=0, timestamp_granularities=["word", "segment"], @@ -81,8 +81,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> transcription = await async_client.audio.transcriptions.create( file=b"raw file contents", model="whisper-1", - language="string", - prompt="string", + language="language", + prompt="prompt", response_format="json", temperature=0, timestamp_granularities=["word", "segment"], diff --git a/tests/api_resources/audio/test_translations.py b/tests/api_resources/audio/test_translations.py index f5c6c68f0b..c6c87c2fef 100644 --- a/tests/api_resources/audio/test_translations.py +++ b/tests/api_resources/audio/test_translations.py @@ -30,8 +30,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: translation = client.audio.translations.create( file=b"raw file contents", model="whisper-1", - prompt="string", - response_format="string", + prompt="prompt", + response_format="response_format", temperature=0, ) assert_matches_type(Translation, translation, path=["response"]) @@ -79,8 +79,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> translation = await async_client.audio.translations.create( file=b"raw file contents", model="whisper-1", - prompt="string", - response_format="string", + prompt="prompt", + response_format="response_format", temperature=0, ) assert_matches_type(Translation, translation, path=["response"]) diff --git a/tests/api_resources/beta/test_assistants.py b/tests/api_resources/beta/test_assistants.py index dd0ce9266e..14f279bbb5 100644 --- a/tests/api_resources/beta/test_assistants.py +++ b/tests/api_resources/beta/test_assistants.py @@ -32,10 +32,10 @@ def test_method_create(self, client: OpenAI) -> None: def test_method_create_with_all_params(self, client: OpenAI) -> None: assistant = client.beta.assistants.create( model="gpt-4-turbo", - description="string", - instructions="string", + description="description", + instructions="instructions", metadata={}, - name="string", + name="name", response_format="none", temperature=1, tool_resources={ @@ -83,14 +83,14 @@ def test_streaming_response_create(self, client: OpenAI) -> None: @parametrize def test_method_retrieve(self, client: OpenAI) -> None: assistant = client.beta.assistants.retrieve( - "string", + "assistant_id", ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.beta.assistants.with_raw_response.retrieve( - "string", + "assistant_id", ) assert response.is_closed is True @@ -101,7 +101,7 @@ def test_raw_response_retrieve(self, client: 
OpenAI) -> None: @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.beta.assistants.with_streaming_response.retrieve( - "string", + "assistant_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -121,19 +121,19 @@ def test_path_params_retrieve(self, client: OpenAI) -> None: @parametrize def test_method_update(self, client: OpenAI) -> None: assistant = client.beta.assistants.update( - "string", + assistant_id="assistant_id", ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize def test_method_update_with_all_params(self, client: OpenAI) -> None: assistant = client.beta.assistants.update( - "string", - description="string", - instructions="string", + assistant_id="assistant_id", + description="description", + instructions="instructions", metadata={}, - model="string", - name="string", + model="model", + name="name", response_format="none", temperature=1, tool_resources={ @@ -148,7 +148,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_update(self, client: OpenAI) -> None: response = client.beta.assistants.with_raw_response.update( - "string", + assistant_id="assistant_id", ) assert response.is_closed is True @@ -159,7 +159,7 @@ def test_raw_response_update(self, client: OpenAI) -> None: @parametrize def test_streaming_response_update(self, client: OpenAI) -> None: with client.beta.assistants.with_streaming_response.update( - "string", + assistant_id="assistant_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -173,7 +173,7 @@ def test_streaming_response_update(self, client: OpenAI) -> None: def test_path_params_update(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): client.beta.assistants.with_raw_response.update( - "", + assistant_id="", ) @parametrize @@ -184,8 +184,8 @@ def test_method_list(self, client: OpenAI) -> None: @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: assistant = client.beta.assistants.list( - after="string", - before="string", + after="after", + before="before", limit=0, order="asc", ) @@ -214,14 +214,14 @@ def test_streaming_response_list(self, client: OpenAI) -> None: @parametrize def test_method_delete(self, client: OpenAI) -> None: assistant = client.beta.assistants.delete( - "string", + "assistant_id", ) assert_matches_type(AssistantDeleted, assistant, path=["response"]) @parametrize def test_raw_response_delete(self, client: OpenAI) -> None: response = client.beta.assistants.with_raw_response.delete( - "string", + "assistant_id", ) assert response.is_closed is True @@ -232,7 +232,7 @@ def test_raw_response_delete(self, client: OpenAI) -> None: @parametrize def test_streaming_response_delete(self, client: OpenAI) -> None: with client.beta.assistants.with_streaming_response.delete( - "string", + "assistant_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -264,10 +264,10 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.create( model="gpt-4-turbo", - description="string", - instructions="string", + description="description", + 
instructions="instructions", metadata={}, - name="string", + name="name", response_format="none", temperature=1, tool_resources={ @@ -315,14 +315,14 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> Non @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.retrieve( - "string", + "assistant_id", ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.assistants.with_raw_response.retrieve( - "string", + "assistant_id", ) assert response.is_closed is True @@ -333,7 +333,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.assistants.with_streaming_response.retrieve( - "string", + "assistant_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -353,19 +353,19 @@ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_update(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.update( - "string", + assistant_id="assistant_id", ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.update( - "string", - description="string", - instructions="string", + assistant_id="assistant_id", + description="description", + instructions="instructions", metadata={}, - model="string", - name="string", + model="model", + name="name", response_format="none", temperature=1, tool_resources={ @@ -380,7 +380,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> @parametrize async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.assistants.with_raw_response.update( - "string", + assistant_id="assistant_id", ) assert response.is_closed is True @@ -391,7 +391,7 @@ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.assistants.with_streaming_response.update( - "string", + assistant_id="assistant_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -405,7 +405,7 @@ async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> Non async def test_path_params_update(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): await async_client.beta.assistants.with_raw_response.update( - "", + assistant_id="", ) @parametrize @@ -416,8 +416,8 @@ async def test_method_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.list( - after="string", - before="string", + after="after", + before="before", limit=0, order="asc", ) @@ -446,14 +446,14 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_delete(self, 
async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.delete( - "string", + "assistant_id", ) assert_matches_type(AssistantDeleted, assistant, path=["response"]) @parametrize async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.assistants.with_raw_response.delete( - "string", + "assistant_id", ) assert response.is_closed is True @@ -464,7 +464,7 @@ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.assistants.with_streaming_response.delete( - "string", + "assistant_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py index 9e06b597ef..d45a1a18d1 100644 --- a/tests/api_resources/beta/test_threads.py +++ b/tests/api_resources/beta/test_threads.py @@ -35,7 +35,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -43,7 +43,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -51,7 +51,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -66,7 +66,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -74,7 +74,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -82,7 +82,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -97,7 +97,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -105,7 +105,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -113,7 +113,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -164,14 +164,14 @@ def test_streaming_response_create(self, client: OpenAI) -> None: @parametrize def test_method_retrieve(self, client: OpenAI) -> None: thread = client.beta.threads.retrieve( - "string", + "thread_id", ) assert_matches_type(Thread, thread, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.beta.threads.with_raw_response.retrieve( - "string", + "thread_id", ) assert response.is_closed 
is True @@ -182,7 +182,7 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None: @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.beta.threads.with_streaming_response.retrieve( - "string", + "thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -202,14 +202,14 @@ def test_path_params_retrieve(self, client: OpenAI) -> None: @parametrize def test_method_update(self, client: OpenAI) -> None: thread = client.beta.threads.update( - "string", + thread_id="thread_id", ) assert_matches_type(Thread, thread, path=["response"]) @parametrize def test_method_update_with_all_params(self, client: OpenAI) -> None: thread = client.beta.threads.update( - "string", + thread_id="thread_id", metadata={}, tool_resources={ "code_interpreter": {"file_ids": ["string", "string", "string"]}, @@ -221,7 +221,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_update(self, client: OpenAI) -> None: response = client.beta.threads.with_raw_response.update( - "string", + thread_id="thread_id", ) assert response.is_closed is True @@ -232,7 +232,7 @@ def test_raw_response_update(self, client: OpenAI) -> None: @parametrize def test_streaming_response_update(self, client: OpenAI) -> None: with client.beta.threads.with_streaming_response.update( - "string", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -246,20 +246,20 @@ def test_streaming_response_update(self, client: OpenAI) -> None: def test_path_params_update(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.with_raw_response.update( - "", + thread_id="", ) @parametrize def test_method_delete(self, client: OpenAI) -> None: thread = client.beta.threads.delete( - "string", + "thread_id", ) assert_matches_type(ThreadDeleted, thread, path=["response"]) @parametrize def test_raw_response_delete(self, client: OpenAI) -> None: response = client.beta.threads.with_raw_response.delete( - "string", + "thread_id", ) assert response.is_closed is True @@ -270,7 +270,7 @@ def test_raw_response_delete(self, client: OpenAI) -> None: @parametrize def test_streaming_response_delete(self, client: OpenAI) -> None: with client.beta.threads.with_streaming_response.delete( - "string", + "thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -290,15 +290,15 @@ def test_path_params_delete(self, client: OpenAI) -> None: @parametrize def test_method_create_and_run_overload_1(self, client: OpenAI) -> None: thread = client.beta.threads.create_and_run( - assistant_id="string", + assistant_id="assistant_id", ) assert_matches_type(Run, thread, path=["response"]) @parametrize def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) -> None: thread = client.beta.threads.create_and_run( - assistant_id="string", - instructions="string", + assistant_id="assistant_id", + instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, metadata={}, @@ -314,7 +314,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -322,7 
+322,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -330,7 +330,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -345,7 +345,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -353,7 +353,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -361,7 +361,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -376,7 +376,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -384,7 +384,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -392,7 +392,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -435,7 +435,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) @parametrize def test_raw_response_create_and_run_overload_1(self, client: OpenAI) -> None: response = client.beta.threads.with_raw_response.create_and_run( - assistant_id="string", + assistant_id="assistant_id", ) assert response.is_closed is True @@ -446,7 +446,7 @@ def test_raw_response_create_and_run_overload_1(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create_and_run_overload_1(self, client: OpenAI) -> None: with client.beta.threads.with_streaming_response.create_and_run( - assistant_id="string", + assistant_id="assistant_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -459,7 +459,7 @@ def test_streaming_response_create_and_run_overload_1(self, client: OpenAI) -> N @parametrize def test_method_create_and_run_overload_2(self, client: OpenAI) -> None: thread_stream = client.beta.threads.create_and_run( - assistant_id="string", + assistant_id="assistant_id", stream=True, ) thread_stream.response.close() @@ -467,9 +467,9 @@ def test_method_create_and_run_overload_2(self, client: OpenAI) -> None: @parametrize def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) -> None: thread_stream = client.beta.threads.create_and_run( - assistant_id="string", + assistant_id="assistant_id", stream=True, - instructions="string", + instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, metadata={}, @@ -484,7 +484,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) "content": "string", 
"attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -492,7 +492,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -500,7 +500,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -515,7 +515,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -523,7 +523,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -531,7 +531,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -546,7 +546,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -554,7 +554,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -562,7 +562,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -605,7 +605,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) @parametrize def test_raw_response_create_and_run_overload_2(self, client: OpenAI) -> None: response = client.beta.threads.with_raw_response.create_and_run( - assistant_id="string", + assistant_id="assistant_id", stream=True, ) @@ -616,7 +616,7 @@ def test_raw_response_create_and_run_overload_2(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create_and_run_overload_2(self, client: OpenAI) -> None: with client.beta.threads.with_streaming_response.create_and_run( - assistant_id="string", + assistant_id="assistant_id", stream=True, ) as response: assert not response.is_closed @@ -645,7 +645,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -653,7 +653,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -661,7 +661,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -676,7 +676,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "content": "string", 
"attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -684,7 +684,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -692,7 +692,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -707,7 +707,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -715,7 +715,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -723,7 +723,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -774,14 +774,14 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> Non @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: thread = await async_client.beta.threads.retrieve( - "string", + "thread_id", ) assert_matches_type(Thread, thread, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.with_raw_response.retrieve( - "string", + "thread_id", ) assert response.is_closed is True @@ -792,7 +792,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.with_streaming_response.retrieve( - "string", + "thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -812,14 +812,14 @@ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_update(self, async_client: AsyncOpenAI) -> None: thread = await async_client.beta.threads.update( - "string", + thread_id="thread_id", ) assert_matches_type(Thread, thread, path=["response"]) @parametrize async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None: thread = await async_client.beta.threads.update( - "string", + thread_id="thread_id", metadata={}, tool_resources={ "code_interpreter": {"file_ids": ["string", "string", "string"]}, @@ -831,7 +831,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> @parametrize async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.with_raw_response.update( - "string", + thread_id="thread_id", ) assert response.is_closed is True @@ -842,7 +842,7 @@ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.with_streaming_response.update( - "string", + thread_id="thread_id", ) as response: assert not response.is_closed 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -856,20 +856,20 @@ async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> Non async def test_path_params_update(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.with_raw_response.update( - "", + thread_id="", ) @parametrize async def test_method_delete(self, async_client: AsyncOpenAI) -> None: thread = await async_client.beta.threads.delete( - "string", + "thread_id", ) assert_matches_type(ThreadDeleted, thread, path=["response"]) @parametrize async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.with_raw_response.delete( - "string", + "thread_id", ) assert response.is_closed is True @@ -880,7 +880,7 @@ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.with_streaming_response.delete( - "string", + "thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -900,15 +900,15 @@ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_create_and_run_overload_1(self, async_client: AsyncOpenAI) -> None: thread = await async_client.beta.threads.create_and_run( - assistant_id="string", + assistant_id="assistant_id", ) assert_matches_type(Run, thread, path=["response"]) @parametrize async def test_method_create_and_run_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None: thread = await async_client.beta.threads.create_and_run( - assistant_id="string", - instructions="string", + assistant_id="assistant_id", + instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, metadata={}, @@ -924,7 +924,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -932,7 +932,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -940,7 +940,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -955,7 +955,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -963,7 +963,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -971,7 +971,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -986,7 +986,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie "content": "string", 
"attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -994,7 +994,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -1002,7 +1002,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -1045,7 +1045,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie @parametrize async def test_raw_response_create_and_run_overload_1(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.with_raw_response.create_and_run( - assistant_id="string", + assistant_id="assistant_id", ) assert response.is_closed is True @@ -1056,7 +1056,7 @@ async def test_raw_response_create_and_run_overload_1(self, async_client: AsyncO @parametrize async def test_streaming_response_create_and_run_overload_1(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.with_streaming_response.create_and_run( - assistant_id="string", + assistant_id="assistant_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -1069,7 +1069,7 @@ async def test_streaming_response_create_and_run_overload_1(self, async_client: @parametrize async def test_method_create_and_run_overload_2(self, async_client: AsyncOpenAI) -> None: thread_stream = await async_client.beta.threads.create_and_run( - assistant_id="string", + assistant_id="assistant_id", stream=True, ) await thread_stream.response.aclose() @@ -1077,9 +1077,9 @@ async def test_method_create_and_run_overload_2(self, async_client: AsyncOpenAI) @parametrize async def test_method_create_and_run_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None: thread_stream = await async_client.beta.threads.create_and_run( - assistant_id="string", + assistant_id="assistant_id", stream=True, - instructions="string", + instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, metadata={}, @@ -1094,7 +1094,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -1102,7 +1102,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -1110,7 +1110,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -1125,7 +1125,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -1133,7 +1133,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -1141,7 +1141,7 @@ async def 
test_method_create_and_run_with_all_params_overload_2(self, async_clie ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -1156,7 +1156,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -1164,7 +1164,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -1172,7 +1172,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -1215,7 +1215,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie @parametrize async def test_raw_response_create_and_run_overload_2(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.with_raw_response.create_and_run( - assistant_id="string", + assistant_id="assistant_id", stream=True, ) @@ -1226,7 +1226,7 @@ async def test_raw_response_create_and_run_overload_2(self, async_client: AsyncO @parametrize async def test_streaming_response_create_and_run_overload_2(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.with_streaming_response.create_and_run( - assistant_id="string", + assistant_id="assistant_id", stream=True, ) as response: assert not response.is_closed diff --git a/tests/api_resources/beta/test_vector_stores.py b/tests/api_resources/beta/test_vector_stores.py index 39fdb9d1d4..6f0c4d2144 100644 --- a/tests/api_resources/beta/test_vector_stores.py +++ b/tests/api_resources/beta/test_vector_stores.py @@ -36,7 +36,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: }, file_ids=["string", "string", "string"], metadata={}, - name="string", + name="name", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @@ -63,14 +63,14 @@ def test_streaming_response_create(self, client: OpenAI) -> None: @parametrize def test_method_retrieve(self, client: OpenAI) -> None: vector_store = client.beta.vector_stores.retrieve( - "string", + "vector_store_id", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.beta.vector_stores.with_raw_response.retrieve( - "string", + "vector_store_id", ) assert response.is_closed is True @@ -81,7 +81,7 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None: @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.beta.vector_stores.with_streaming_response.retrieve( - "string", + "vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -101,27 +101,27 @@ def test_path_params_retrieve(self, client: OpenAI) -> None: @parametrize def test_method_update(self, client: OpenAI) -> None: vector_store = client.beta.vector_stores.update( - "string", + vector_store_id="vector_store_id", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @parametrize def test_method_update_with_all_params(self, client: OpenAI) -> None: vector_store = client.beta.vector_stores.update( - "string", + 
vector_store_id="vector_store_id", expires_after={ "anchor": "last_active_at", "days": 1, }, metadata={}, - name="string", + name="name", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @parametrize def test_raw_response_update(self, client: OpenAI) -> None: response = client.beta.vector_stores.with_raw_response.update( - "string", + vector_store_id="vector_store_id", ) assert response.is_closed is True @@ -132,7 +132,7 @@ def test_raw_response_update(self, client: OpenAI) -> None: @parametrize def test_streaming_response_update(self, client: OpenAI) -> None: with client.beta.vector_stores.with_streaming_response.update( - "string", + vector_store_id="vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -146,7 +146,7 @@ def test_streaming_response_update(self, client: OpenAI) -> None: def test_path_params_update(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): client.beta.vector_stores.with_raw_response.update( - "", + vector_store_id="", ) @parametrize @@ -157,8 +157,8 @@ def test_method_list(self, client: OpenAI) -> None: @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: vector_store = client.beta.vector_stores.list( - after="string", - before="string", + after="after", + before="before", limit=0, order="asc", ) @@ -187,14 +187,14 @@ def test_streaming_response_list(self, client: OpenAI) -> None: @parametrize def test_method_delete(self, client: OpenAI) -> None: vector_store = client.beta.vector_stores.delete( - "string", + "vector_store_id", ) assert_matches_type(VectorStoreDeleted, vector_store, path=["response"]) @parametrize def test_raw_response_delete(self, client: OpenAI) -> None: response = client.beta.vector_stores.with_raw_response.delete( - "string", + "vector_store_id", ) assert response.is_closed is True @@ -205,7 +205,7 @@ def test_raw_response_delete(self, client: OpenAI) -> None: @parametrize def test_streaming_response_delete(self, client: OpenAI) -> None: with client.beta.vector_stores.with_streaming_response.delete( - "string", + "vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -241,7 +241,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> }, file_ids=["string", "string", "string"], metadata={}, - name="string", + name="name", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @@ -268,14 +268,14 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> Non @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: vector_store = await async_client.beta.vector_stores.retrieve( - "string", + "vector_store_id", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.vector_stores.with_raw_response.retrieve( - "string", + "vector_store_id", ) assert response.is_closed is True @@ -286,7 +286,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.vector_stores.with_streaming_response.retrieve( - "string", + "vector_store_id", ) as response: assert not response.is_closed 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -306,27 +306,27 @@ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_update(self, async_client: AsyncOpenAI) -> None: vector_store = await async_client.beta.vector_stores.update( - "string", + vector_store_id="vector_store_id", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @parametrize async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None: vector_store = await async_client.beta.vector_stores.update( - "string", + vector_store_id="vector_store_id", expires_after={ "anchor": "last_active_at", "days": 1, }, metadata={}, - name="string", + name="name", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @parametrize async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.vector_stores.with_raw_response.update( - "string", + vector_store_id="vector_store_id", ) assert response.is_closed is True @@ -337,7 +337,7 @@ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.vector_stores.with_streaming_response.update( - "string", + vector_store_id="vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -351,7 +351,7 @@ async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> Non async def test_path_params_update(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): await async_client.beta.vector_stores.with_raw_response.update( - "", + vector_store_id="", ) @parametrize @@ -362,8 +362,8 @@ async def test_method_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: vector_store = await async_client.beta.vector_stores.list( - after="string", - before="string", + after="after", + before="before", limit=0, order="asc", ) @@ -392,14 +392,14 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_delete(self, async_client: AsyncOpenAI) -> None: vector_store = await async_client.beta.vector_stores.delete( - "string", + "vector_store_id", ) assert_matches_type(VectorStoreDeleted, vector_store, path=["response"]) @parametrize async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.vector_stores.with_raw_response.delete( - "string", + "vector_store_id", ) assert response.is_closed is True @@ -410,7 +410,7 @@ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.vector_stores.with_streaming_response.delete( - "string", + "vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/beta/threads/runs/test_steps.py b/tests/api_resources/beta/threads/runs/test_steps.py index e6108d8dad..d5edeb823e 100644 --- a/tests/api_resources/beta/threads/runs/test_steps.py +++ b/tests/api_resources/beta/threads/runs/test_steps.py @@ -21,18 +21,18 @@ class TestSteps: 
    @parametrize
    def test_method_retrieve(self, client: OpenAI) -> None:
        step = client.beta.threads.runs.steps.retrieve(
-            "string",
-            thread_id="string",
-            run_id="string",
+            step_id="step_id",
+            thread_id="thread_id",
+            run_id="run_id",
        )
        assert_matches_type(RunStep, step, path=["response"])

    @parametrize
    def test_raw_response_retrieve(self, client: OpenAI) -> None:
        response = client.beta.threads.runs.steps.with_raw_response.retrieve(
-            "string",
-            thread_id="string",
-            run_id="string",
+            step_id="step_id",
+            thread_id="thread_id",
+            run_id="run_id",
        )

        assert response.is_closed is True
@@ -43,9 +43,9 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None:
    @parametrize
    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
        with client.beta.threads.runs.steps.with_streaming_response.retrieve(
-            "string",
-            thread_id="string",
-            run_id="string",
+            step_id="step_id",
+            thread_id="thread_id",
+            run_id="run_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -59,40 +59,40 @@ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
    def test_path_params_retrieve(self, client: OpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
            client.beta.threads.runs.steps.with_raw_response.retrieve(
-                "string",
+                step_id="step_id",
                thread_id="",
-                run_id="string",
+                run_id="run_id",
            )

        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
            client.beta.threads.runs.steps.with_raw_response.retrieve(
-                "string",
-                thread_id="string",
+                step_id="step_id",
+                thread_id="thread_id",
                run_id="",
            )

        with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"):
            client.beta.threads.runs.steps.with_raw_response.retrieve(
-                "",
-                thread_id="string",
-                run_id="string",
+                step_id="",
+                thread_id="thread_id",
+                run_id="run_id",
            )

    @parametrize
    def test_method_list(self, client: OpenAI) -> None:
        step = client.beta.threads.runs.steps.list(
-            "string",
-            thread_id="string",
+            run_id="run_id",
+            thread_id="thread_id",
        )
        assert_matches_type(SyncCursorPage[RunStep], step, path=["response"])

    @parametrize
    def test_method_list_with_all_params(self, client: OpenAI) -> None:
        step = client.beta.threads.runs.steps.list(
-            "string",
-            thread_id="string",
-            after="string",
-            before="string",
+            run_id="run_id",
+            thread_id="thread_id",
+            after="after",
+            before="before",
            limit=0,
            order="asc",
        )
@@ -101,8 +101,8 @@ def test_method_list_with_all_params(self, client: OpenAI) -> None:
    @parametrize
    def test_raw_response_list(self, client: OpenAI) -> None:
        response = client.beta.threads.runs.steps.with_raw_response.list(
-            "string",
-            thread_id="string",
+            run_id="run_id",
+            thread_id="thread_id",
        )

        assert response.is_closed is True
@@ -113,8 +113,8 @@ def test_raw_response_list(self, client: OpenAI) -> None:
    @parametrize
    def test_streaming_response_list(self, client: OpenAI) -> None:
        with client.beta.threads.runs.steps.with_streaming_response.list(
-            "string",
-            thread_id="string",
+            run_id="run_id",
+            thread_id="thread_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -128,14 +128,14 @@ def test_streaming_response_list(self, client: OpenAI) -> None:
    def test_path_params_list(self, client: OpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
client.beta.threads.runs.steps.with_raw_response.list( - "string", + run_id="run_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): client.beta.threads.runs.steps.with_raw_response.list( - "", - thread_id="string", + run_id="", + thread_id="thread_id", ) @@ -145,18 +145,18 @@ class TestAsyncSteps: @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: step = await async_client.beta.threads.runs.steps.retrieve( - "string", - thread_id="string", - run_id="string", + step_id="step_id", + thread_id="thread_id", + run_id="run_id", ) assert_matches_type(RunStep, step, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.steps.with_raw_response.retrieve( - "string", - thread_id="string", - run_id="string", + step_id="step_id", + thread_id="thread_id", + run_id="run_id", ) assert response.is_closed is True @@ -167,9 +167,9 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.runs.steps.with_streaming_response.retrieve( - "string", - thread_id="string", - run_id="string", + step_id="step_id", + thread_id="thread_id", + run_id="run_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -183,40 +183,40 @@ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> N async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.steps.with_raw_response.retrieve( - "string", + step_id="step_id", thread_id="", - run_id="string", + run_id="run_id", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.steps.with_raw_response.retrieve( - "string", - thread_id="string", + step_id="step_id", + thread_id="thread_id", run_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"): await async_client.beta.threads.runs.steps.with_raw_response.retrieve( - "", - thread_id="string", - run_id="string", + step_id="", + thread_id="thread_id", + run_id="run_id", ) @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: step = await async_client.beta.threads.runs.steps.list( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) assert_matches_type(AsyncCursorPage[RunStep], step, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: step = await async_client.beta.threads.runs.steps.list( - "string", - thread_id="string", - after="string", - before="string", + run_id="run_id", + thread_id="thread_id", + after="after", + before="before", limit=0, order="asc", ) @@ -225,8 +225,8 @@ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> N @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.steps.with_raw_response.list( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) assert response.is_closed is True @@ -237,8 +237,8 @@ async def 
test_raw_response_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.runs.steps.with_streaming_response.list( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -252,12 +252,12 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async def test_path_params_list(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.steps.with_raw_response.list( - "string", + run_id="run_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.steps.with_raw_response.list( - "", - thread_id="string", + run_id="", + thread_id="thread_id", ) diff --git a/tests/api_resources/beta/threads/test_messages.py b/tests/api_resources/beta/threads/test_messages.py index b5be32a421..edd5f77a32 100644 --- a/tests/api_resources/beta/threads/test_messages.py +++ b/tests/api_resources/beta/threads/test_messages.py @@ -24,7 +24,7 @@ class TestMessages: @parametrize def test_method_create(self, client: OpenAI) -> None: message = client.beta.threads.messages.create( - "string", + thread_id="thread_id", content="string", role="user", ) @@ -33,20 +33,20 @@ def test_method_create(self, client: OpenAI) -> None: @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: message = client.beta.threads.messages.create( - "string", + thread_id="thread_id", content="string", role="user", attachments=[ { - "file_id": "string", + "file_id": "file_id", "tools": [{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], }, { - "file_id": "string", + "file_id": "file_id", "tools": [{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], }, { - "file_id": "string", + "file_id": "file_id", "tools": [{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], }, ], @@ -57,7 +57,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.beta.threads.messages.with_raw_response.create( - "string", + thread_id="thread_id", content="string", role="user", ) @@ -70,7 +70,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.beta.threads.messages.with_streaming_response.create( - "string", + thread_id="thread_id", content="string", role="user", ) as response: @@ -86,7 +86,7 @@ def test_streaming_response_create(self, client: OpenAI) -> None: def test_path_params_create(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.messages.with_raw_response.create( - "", + thread_id="", content="string", role="user", ) @@ -94,16 +94,16 @@ def test_path_params_create(self, client: OpenAI) -> None: @parametrize def test_method_retrieve(self, client: OpenAI) -> None: message = client.beta.threads.messages.retrieve( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) 
assert_matches_type(Message, message, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.beta.threads.messages.with_raw_response.retrieve( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) assert response.is_closed is True @@ -114,8 +114,8 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None: @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.beta.threads.messages.with_streaming_response.retrieve( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -129,29 +129,29 @@ def test_streaming_response_retrieve(self, client: OpenAI) -> None: def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.messages.with_raw_response.retrieve( - "string", + message_id="message_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): client.beta.threads.messages.with_raw_response.retrieve( - "", - thread_id="string", + message_id="", + thread_id="thread_id", ) @parametrize def test_method_update(self, client: OpenAI) -> None: message = client.beta.threads.messages.update( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) assert_matches_type(Message, message, path=["response"]) @parametrize def test_method_update_with_all_params(self, client: OpenAI) -> None: message = client.beta.threads.messages.update( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", metadata={}, ) assert_matches_type(Message, message, path=["response"]) @@ -159,8 +159,8 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_update(self, client: OpenAI) -> None: response = client.beta.threads.messages.with_raw_response.update( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) assert response.is_closed is True @@ -171,8 +171,8 @@ def test_raw_response_update(self, client: OpenAI) -> None: @parametrize def test_streaming_response_update(self, client: OpenAI) -> None: with client.beta.threads.messages.with_streaming_response.update( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -186,39 +186,39 @@ def test_streaming_response_update(self, client: OpenAI) -> None: def test_path_params_update(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.messages.with_raw_response.update( - "string", + message_id="message_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): client.beta.threads.messages.with_raw_response.update( - "", - thread_id="string", + message_id="", + thread_id="thread_id", ) @parametrize def test_method_list(self, client: OpenAI) -> None: message = client.beta.threads.messages.list( - "string", + thread_id="thread_id", ) assert_matches_type(SyncCursorPage[Message], message, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: 
OpenAI) -> None: message = client.beta.threads.messages.list( - "string", - after="string", - before="string", + thread_id="thread_id", + after="after", + before="before", limit=0, order="asc", - run_id="string", + run_id="run_id", ) assert_matches_type(SyncCursorPage[Message], message, path=["response"]) @parametrize def test_raw_response_list(self, client: OpenAI) -> None: response = client.beta.threads.messages.with_raw_response.list( - "string", + thread_id="thread_id", ) assert response.is_closed is True @@ -229,7 +229,7 @@ def test_raw_response_list(self, client: OpenAI) -> None: @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: with client.beta.threads.messages.with_streaming_response.list( - "string", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -243,22 +243,22 @@ def test_streaming_response_list(self, client: OpenAI) -> None: def test_path_params_list(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.messages.with_raw_response.list( - "", + thread_id="", ) @parametrize def test_method_delete(self, client: OpenAI) -> None: message = client.beta.threads.messages.delete( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) assert_matches_type(MessageDeleted, message, path=["response"]) @parametrize def test_raw_response_delete(self, client: OpenAI) -> None: response = client.beta.threads.messages.with_raw_response.delete( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) assert response.is_closed is True @@ -269,8 +269,8 @@ def test_raw_response_delete(self, client: OpenAI) -> None: @parametrize def test_streaming_response_delete(self, client: OpenAI) -> None: with client.beta.threads.messages.with_streaming_response.delete( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -284,14 +284,14 @@ def test_streaming_response_delete(self, client: OpenAI) -> None: def test_path_params_delete(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.messages.with_raw_response.delete( - "string", + message_id="message_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): client.beta.threads.messages.with_raw_response.delete( - "", - thread_id="string", + message_id="", + thread_id="thread_id", ) @@ -301,7 +301,7 @@ class TestAsyncMessages: @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: message = await async_client.beta.threads.messages.create( - "string", + thread_id="thread_id", content="string", role="user", ) @@ -310,20 +310,20 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: message = await async_client.beta.threads.messages.create( - "string", + thread_id="thread_id", content="string", role="user", attachments=[ { - "file_id": "string", + "file_id": "file_id", "tools": [{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], }, { - "file_id": "string", + "file_id": "file_id", 
"tools": [{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], }, { - "file_id": "string", + "file_id": "file_id", "tools": [{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], }, ], @@ -334,7 +334,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.messages.with_raw_response.create( - "string", + thread_id="thread_id", content="string", role="user", ) @@ -347,7 +347,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.messages.with_streaming_response.create( - "string", + thread_id="thread_id", content="string", role="user", ) as response: @@ -363,7 +363,7 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> Non async def test_path_params_create(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.messages.with_raw_response.create( - "", + thread_id="", content="string", role="user", ) @@ -371,16 +371,16 @@ async def test_path_params_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: message = await async_client.beta.threads.messages.retrieve( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) assert_matches_type(Message, message, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.messages.with_raw_response.retrieve( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) assert response.is_closed is True @@ -391,8 +391,8 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.messages.with_streaming_response.retrieve( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -406,29 +406,29 @@ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> N async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.messages.with_raw_response.retrieve( - "string", + message_id="message_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): await async_client.beta.threads.messages.with_raw_response.retrieve( - "", - thread_id="string", + message_id="", + thread_id="thread_id", ) @parametrize async def test_method_update(self, async_client: AsyncOpenAI) -> None: message = await async_client.beta.threads.messages.update( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) assert_matches_type(Message, message, path=["response"]) @parametrize async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None: 
message = await async_client.beta.threads.messages.update( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", metadata={}, ) assert_matches_type(Message, message, path=["response"]) @@ -436,8 +436,8 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> @parametrize async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.messages.with_raw_response.update( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) assert response.is_closed is True @@ -448,8 +448,8 @@ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.messages.with_streaming_response.update( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -463,39 +463,39 @@ async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> Non async def test_path_params_update(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.messages.with_raw_response.update( - "string", + message_id="message_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): await async_client.beta.threads.messages.with_raw_response.update( - "", - thread_id="string", + message_id="", + thread_id="thread_id", ) @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: message = await async_client.beta.threads.messages.list( - "string", + thread_id="thread_id", ) assert_matches_type(AsyncCursorPage[Message], message, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: message = await async_client.beta.threads.messages.list( - "string", - after="string", - before="string", + thread_id="thread_id", + after="after", + before="before", limit=0, order="asc", - run_id="string", + run_id="run_id", ) assert_matches_type(AsyncCursorPage[Message], message, path=["response"]) @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.messages.with_raw_response.list( - "string", + thread_id="thread_id", ) assert response.is_closed is True @@ -506,7 +506,7 @@ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.messages.with_streaming_response.list( - "string", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -520,22 +520,22 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async def test_path_params_list(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.messages.with_raw_response.list( - "", + thread_id="", ) @parametrize async def test_method_delete(self, async_client: AsyncOpenAI) -> None: message = await async_client.beta.threads.messages.delete( - 
"string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) assert_matches_type(MessageDeleted, message, path=["response"]) @parametrize async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.messages.with_raw_response.delete( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) assert response.is_closed is True @@ -546,8 +546,8 @@ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.messages.with_streaming_response.delete( - "string", - thread_id="string", + message_id="message_id", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -561,12 +561,12 @@ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> Non async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.messages.with_raw_response.delete( - "string", + message_id="message_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): await async_client.beta.threads.messages.with_raw_response.delete( - "", - thread_id="string", + message_id="", + thread_id="thread_id", ) diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py index ffadc1df88..ff242126b2 100644 --- a/tests/api_resources/beta/threads/test_runs.py +++ b/tests/api_resources/beta/threads/test_runs.py @@ -23,24 +23,24 @@ class TestRuns: @parametrize def test_method_create_overload_1(self, client: OpenAI) -> None: run = client.beta.threads.runs.create( - "string", - assistant_id="string", + thread_id="thread_id", + assistant_id="assistant_id", ) assert_matches_type(Run, run, path=["response"]) @parametrize def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: run = client.beta.threads.runs.create( - "string", - assistant_id="string", - additional_instructions="string", + thread_id="thread_id", + assistant_id="assistant_id", + additional_instructions="additional_instructions", additional_messages=[ { "role": "user", "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -48,7 +48,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -56,7 +56,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -71,7 +71,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -79,7 +79,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ 
-87,7 +87,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -102,7 +102,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -110,7 +110,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -118,7 +118,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -129,7 +129,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "metadata": {}, }, ], - instructions="string", + instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, metadata={}, @@ -151,8 +151,8 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: @parametrize def test_raw_response_create_overload_1(self, client: OpenAI) -> None: response = client.beta.threads.runs.with_raw_response.create( - "string", - assistant_id="string", + thread_id="thread_id", + assistant_id="assistant_id", ) assert response.is_closed is True @@ -163,8 +163,8 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create_overload_1(self, client: OpenAI) -> None: with client.beta.threads.runs.with_streaming_response.create( - "string", - assistant_id="string", + thread_id="thread_id", + assistant_id="assistant_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -178,15 +178,15 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None: def test_path_params_create_overload_1(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.runs.with_raw_response.create( - "", - assistant_id="string", + thread_id="", + assistant_id="assistant_id", ) @parametrize def test_method_create_overload_2(self, client: OpenAI) -> None: run_stream = client.beta.threads.runs.create( - "string", - assistant_id="string", + thread_id="thread_id", + assistant_id="assistant_id", stream=True, ) run_stream.response.close() @@ -194,17 +194,17 @@ def test_method_create_overload_2(self, client: OpenAI) -> None: @parametrize def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: run_stream = client.beta.threads.runs.create( - "string", - assistant_id="string", + thread_id="thread_id", + assistant_id="assistant_id", stream=True, - additional_instructions="string", + additional_instructions="additional_instructions", additional_messages=[ { "role": "user", "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -212,7 +212,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -220,7 +220,7 @@ def 
test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -235,7 +235,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -243,7 +243,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -251,7 +251,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -266,7 +266,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -274,7 +274,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -282,7 +282,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -293,7 +293,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "metadata": {}, }, ], - instructions="string", + instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, metadata={}, @@ -314,8 +314,8 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: @parametrize def test_raw_response_create_overload_2(self, client: OpenAI) -> None: response = client.beta.threads.runs.with_raw_response.create( - "string", - assistant_id="string", + thread_id="thread_id", + assistant_id="assistant_id", stream=True, ) @@ -326,8 +326,8 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create_overload_2(self, client: OpenAI) -> None: with client.beta.threads.runs.with_streaming_response.create( - "string", - assistant_id="string", + thread_id="thread_id", + assistant_id="assistant_id", stream=True, ) as response: assert not response.is_closed @@ -342,24 +342,24 @@ def test_streaming_response_create_overload_2(self, client: OpenAI) -> None: def test_path_params_create_overload_2(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.runs.with_raw_response.create( - "", - assistant_id="string", + thread_id="", + assistant_id="assistant_id", stream=True, ) @parametrize def test_method_retrieve(self, client: OpenAI) -> None: run = client.beta.threads.runs.retrieve( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) assert_matches_type(Run, run, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.beta.threads.runs.with_raw_response.retrieve( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) assert response.is_closed is True @@ -370,8 +370,8 @@ def 
test_raw_response_retrieve(self, client: OpenAI) -> None: @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.beta.threads.runs.with_streaming_response.retrieve( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -385,29 +385,29 @@ def test_streaming_response_retrieve(self, client: OpenAI) -> None: def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.runs.with_raw_response.retrieve( - "string", + run_id="run_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): client.beta.threads.runs.with_raw_response.retrieve( - "", - thread_id="string", + run_id="", + thread_id="thread_id", ) @parametrize def test_method_update(self, client: OpenAI) -> None: run = client.beta.threads.runs.update( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) assert_matches_type(Run, run, path=["response"]) @parametrize def test_method_update_with_all_params(self, client: OpenAI) -> None: run = client.beta.threads.runs.update( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", metadata={}, ) assert_matches_type(Run, run, path=["response"]) @@ -415,8 +415,8 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_update(self, client: OpenAI) -> None: response = client.beta.threads.runs.with_raw_response.update( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) assert response.is_closed is True @@ -427,8 +427,8 @@ def test_raw_response_update(self, client: OpenAI) -> None: @parametrize def test_streaming_response_update(self, client: OpenAI) -> None: with client.beta.threads.runs.with_streaming_response.update( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -442,29 +442,29 @@ def test_streaming_response_update(self, client: OpenAI) -> None: def test_path_params_update(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.runs.with_raw_response.update( - "string", + run_id="run_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): client.beta.threads.runs.with_raw_response.update( - "", - thread_id="string", + run_id="", + thread_id="thread_id", ) @parametrize def test_method_list(self, client: OpenAI) -> None: run = client.beta.threads.runs.list( - "string", + thread_id="thread_id", ) assert_matches_type(SyncCursorPage[Run], run, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: run = client.beta.threads.runs.list( - "string", - after="string", - before="string", + thread_id="thread_id", + after="after", + before="before", limit=0, order="asc", ) @@ -473,7 +473,7 @@ def test_method_list_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_list(self, client: OpenAI) -> None: response = client.beta.threads.runs.with_raw_response.list( - "string", + thread_id="thread_id", ) assert response.is_closed is True @@ -484,7 +484,7 
@@ def test_raw_response_list(self, client: OpenAI) -> None: @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: with client.beta.threads.runs.with_streaming_response.list( - "string", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -498,22 +498,22 @@ def test_streaming_response_list(self, client: OpenAI) -> None: def test_path_params_list(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.runs.with_raw_response.list( - "", + thread_id="", ) @parametrize def test_method_cancel(self, client: OpenAI) -> None: run = client.beta.threads.runs.cancel( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) assert_matches_type(Run, run, path=["response"]) @parametrize def test_raw_response_cancel(self, client: OpenAI) -> None: response = client.beta.threads.runs.with_raw_response.cancel( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) assert response.is_closed is True @@ -524,8 +524,8 @@ def test_raw_response_cancel(self, client: OpenAI) -> None: @parametrize def test_streaming_response_cancel(self, client: OpenAI) -> None: with client.beta.threads.runs.with_streaming_response.cancel( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -539,21 +539,21 @@ def test_streaming_response_cancel(self, client: OpenAI) -> None: def test_path_params_cancel(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.runs.with_raw_response.cancel( - "string", + run_id="run_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): client.beta.threads.runs.with_raw_response.cancel( - "", - thread_id="string", + run_id="", + thread_id="thread_id", ) @parametrize def test_method_submit_tool_outputs_overload_1(self, client: OpenAI) -> None: run = client.beta.threads.runs.submit_tool_outputs( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", tool_outputs=[{}, {}, {}], ) assert_matches_type(Run, run, path=["response"]) @@ -561,20 +561,20 @@ def test_method_submit_tool_outputs_overload_1(self, client: OpenAI) -> None: @parametrize def test_method_submit_tool_outputs_with_all_params_overload_1(self, client: OpenAI) -> None: run = client.beta.threads.runs.submit_tool_outputs( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", tool_outputs=[ { - "tool_call_id": "string", - "output": "string", + "tool_call_id": "tool_call_id", + "output": "output", }, { - "tool_call_id": "string", - "output": "string", + "tool_call_id": "tool_call_id", + "output": "output", }, { - "tool_call_id": "string", - "output": "string", + "tool_call_id": "tool_call_id", + "output": "output", }, ], stream=False, @@ -584,8 +584,8 @@ def test_method_submit_tool_outputs_with_all_params_overload_1(self, client: Ope @parametrize def test_raw_response_submit_tool_outputs_overload_1(self, client: OpenAI) -> None: response = client.beta.threads.runs.with_raw_response.submit_tool_outputs( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", tool_outputs=[{}, {}, {}], ) @@ -597,8 +597,8 @@ def 
test_raw_response_submit_tool_outputs_overload_1(self, client: OpenAI) -> No @parametrize def test_streaming_response_submit_tool_outputs_overload_1(self, client: OpenAI) -> None: with client.beta.threads.runs.with_streaming_response.submit_tool_outputs( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", tool_outputs=[{}, {}, {}], ) as response: assert not response.is_closed @@ -613,23 +613,23 @@ def test_streaming_response_submit_tool_outputs_overload_1(self, client: OpenAI) def test_path_params_submit_tool_outputs_overload_1(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.runs.with_raw_response.submit_tool_outputs( - "string", + run_id="run_id", thread_id="", tool_outputs=[{}, {}, {}], ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): client.beta.threads.runs.with_raw_response.submit_tool_outputs( - "", - thread_id="string", + run_id="", + thread_id="thread_id", tool_outputs=[{}, {}, {}], ) @parametrize def test_method_submit_tool_outputs_overload_2(self, client: OpenAI) -> None: run_stream = client.beta.threads.runs.submit_tool_outputs( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", stream=True, tool_outputs=[{}, {}, {}], ) @@ -638,8 +638,8 @@ def test_method_submit_tool_outputs_overload_2(self, client: OpenAI) -> None: @parametrize def test_raw_response_submit_tool_outputs_overload_2(self, client: OpenAI) -> None: response = client.beta.threads.runs.with_raw_response.submit_tool_outputs( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", stream=True, tool_outputs=[{}, {}, {}], ) @@ -651,8 +651,8 @@ def test_raw_response_submit_tool_outputs_overload_2(self, client: OpenAI) -> No @parametrize def test_streaming_response_submit_tool_outputs_overload_2(self, client: OpenAI) -> None: with client.beta.threads.runs.with_streaming_response.submit_tool_outputs( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", stream=True, tool_outputs=[{}, {}, {}], ) as response: @@ -668,7 +668,7 @@ def test_streaming_response_submit_tool_outputs_overload_2(self, client: OpenAI) def test_path_params_submit_tool_outputs_overload_2(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.runs.with_raw_response.submit_tool_outputs( - "string", + run_id="run_id", thread_id="", stream=True, tool_outputs=[{}, {}, {}], @@ -676,8 +676,8 @@ def test_path_params_submit_tool_outputs_overload_2(self, client: OpenAI) -> Non with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): client.beta.threads.runs.with_raw_response.submit_tool_outputs( - "", - thread_id="string", + run_id="", + thread_id="thread_id", stream=True, tool_outputs=[{}, {}, {}], ) @@ -689,24 +689,24 @@ class TestAsyncRuns: @parametrize async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.create( - "string", - assistant_id="string", + thread_id="thread_id", + assistant_id="assistant_id", ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.create( - "string", - assistant_id="string", - additional_instructions="string", + 
thread_id="thread_id", + assistant_id="assistant_id", + additional_instructions="additional_instructions", additional_messages=[ { "role": "user", "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -714,7 +714,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -722,7 +722,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -737,7 +737,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -745,7 +745,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -753,7 +753,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -768,7 +768,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -776,7 +776,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -784,7 +784,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -795,7 +795,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "metadata": {}, }, ], - instructions="string", + instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, metadata={}, @@ -817,8 +817,8 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn @parametrize async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.with_raw_response.create( - "string", - assistant_id="string", + thread_id="thread_id", + assistant_id="assistant_id", ) assert response.is_closed is True @@ -829,8 +829,8 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) - @parametrize async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.runs.with_streaming_response.create( - "string", - assistant_id="string", + thread_id="thread_id", + assistant_id="assistant_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -844,15 +844,15 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpe async def test_path_params_create_overload_1(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, 
match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.create( - "", - assistant_id="string", + thread_id="", + assistant_id="assistant_id", ) @parametrize async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None: run_stream = await async_client.beta.threads.runs.create( - "string", - assistant_id="string", + thread_id="thread_id", + assistant_id="assistant_id", stream=True, ) await run_stream.response.aclose() @@ -860,17 +860,17 @@ async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None @parametrize async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None: run_stream = await async_client.beta.threads.runs.create( - "string", - assistant_id="string", + thread_id="thread_id", + assistant_id="assistant_id", stream=True, - additional_instructions="string", + additional_instructions="additional_instructions", additional_messages=[ { "role": "user", "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -878,7 +878,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -886,7 +886,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -901,7 +901,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -909,7 +909,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -917,7 +917,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -932,7 +932,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "content": "string", "attachments": [ { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -940,7 +940,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -948,7 +948,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn ], }, { - "file_id": "string", + "file_id": "file_id", "tools": [ {"type": "code_interpreter"}, {"type": "code_interpreter"}, @@ -959,7 +959,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "metadata": {}, }, ], - instructions="string", + instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, metadata={}, @@ -980,8 +980,8 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn @parametrize async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: response = await 
async_client.beta.threads.runs.with_raw_response.create( - "string", - assistant_id="string", + thread_id="thread_id", + assistant_id="assistant_id", stream=True, ) @@ -992,8 +992,8 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) - @parametrize async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.runs.with_streaming_response.create( - "string", - assistant_id="string", + thread_id="thread_id", + assistant_id="assistant_id", stream=True, ) as response: assert not response.is_closed @@ -1008,24 +1008,24 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncOpe async def test_path_params_create_overload_2(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.create( - "", - assistant_id="string", + thread_id="", + assistant_id="assistant_id", stream=True, ) @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.retrieve( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.with_raw_response.retrieve( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) assert response.is_closed is True @@ -1036,8 +1036,8 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.runs.with_streaming_response.retrieve( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -1051,29 +1051,29 @@ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> N async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.retrieve( - "string", + run_id="run_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.retrieve( - "", - thread_id="string", + run_id="", + thread_id="thread_id", ) @parametrize async def test_method_update(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.update( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.update( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", metadata={}, ) assert_matches_type(Run, run, path=["response"]) @@ -1081,8 +1081,8 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> @parametrize async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.with_raw_response.update( - "string", - thread_id="string", + 
run_id="run_id", + thread_id="thread_id", ) assert response.is_closed is True @@ -1093,8 +1093,8 @@ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.runs.with_streaming_response.update( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -1108,29 +1108,29 @@ async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> Non async def test_path_params_update(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.update( - "string", + run_id="run_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.update( - "", - thread_id="string", + run_id="", + thread_id="thread_id", ) @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.list( - "string", + thread_id="thread_id", ) assert_matches_type(AsyncCursorPage[Run], run, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.list( - "string", - after="string", - before="string", + thread_id="thread_id", + after="after", + before="before", limit=0, order="asc", ) @@ -1139,7 +1139,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> N @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.with_raw_response.list( - "string", + thread_id="thread_id", ) assert response.is_closed is True @@ -1150,7 +1150,7 @@ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.runs.with_streaming_response.list( - "string", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -1164,22 +1164,22 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async def test_path_params_list(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.list( - "", + thread_id="", ) @parametrize async def test_method_cancel(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.cancel( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.with_raw_response.cancel( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) assert response.is_closed is True @@ -1190,8 +1190,8 @@ async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None: async with 
async_client.beta.threads.runs.with_streaming_response.cancel( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -1205,21 +1205,21 @@ async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> Non async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.cancel( - "string", + run_id="run_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.cancel( - "", - thread_id="string", + run_id="", + thread_id="thread_id", ) @parametrize async def test_method_submit_tool_outputs_overload_1(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.submit_tool_outputs( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", tool_outputs=[{}, {}, {}], ) assert_matches_type(Run, run, path=["response"]) @@ -1227,20 +1227,20 @@ async def test_method_submit_tool_outputs_overload_1(self, async_client: AsyncOp @parametrize async def test_method_submit_tool_outputs_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.submit_tool_outputs( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", tool_outputs=[ { - "tool_call_id": "string", - "output": "string", + "tool_call_id": "tool_call_id", + "output": "output", }, { - "tool_call_id": "string", - "output": "string", + "tool_call_id": "tool_call_id", + "output": "output", }, { - "tool_call_id": "string", - "output": "string", + "tool_call_id": "tool_call_id", + "output": "output", }, ], stream=False, @@ -1250,8 +1250,8 @@ async def test_method_submit_tool_outputs_with_all_params_overload_1(self, async @parametrize async def test_raw_response_submit_tool_outputs_overload_1(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", tool_outputs=[{}, {}, {}], ) @@ -1263,8 +1263,8 @@ async def test_raw_response_submit_tool_outputs_overload_1(self, async_client: A @parametrize async def test_streaming_response_submit_tool_outputs_overload_1(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.runs.with_streaming_response.submit_tool_outputs( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", tool_outputs=[{}, {}, {}], ) as response: assert not response.is_closed @@ -1279,23 +1279,23 @@ async def test_streaming_response_submit_tool_outputs_overload_1(self, async_cli async def test_path_params_submit_tool_outputs_overload_1(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( - "string", + run_id="run_id", thread_id="", tool_outputs=[{}, {}, {}], ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( - "", - thread_id="string", + run_id="", + thread_id="thread_id", tool_outputs=[{}, {}, {}], ) @parametrize 
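For context, the keyword-only calling style these updated run tests exercise looks like this in application code. This is a minimal sketch, not part of the patch: the assistant ID is a placeholder and it assumes OPENAI_API_KEY is set in the environment.

    import asyncio

    from openai import AsyncOpenAI

    client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment


    async def main() -> None:
        thread = await client.beta.threads.create()

        # Path parameters are passed as keywords (thread_id=..., run_id=...),
        # mirroring the updated tests; "asst_abc123" is a placeholder.
        run = await client.beta.threads.runs.create(
            thread_id=thread.id,
            assistant_id="asst_abc123",
        )
        run = await client.beta.threads.runs.retrieve(
            run_id=run.id,
            thread_id=thread.id,
        )
        print(run.status)


    asyncio.run(main())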
async def test_method_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None: run_stream = await async_client.beta.threads.runs.submit_tool_outputs( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", stream=True, tool_outputs=[{}, {}, {}], ) @@ -1304,8 +1304,8 @@ async def test_method_submit_tool_outputs_overload_2(self, async_client: AsyncOp @parametrize async def test_raw_response_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", stream=True, tool_outputs=[{}, {}, {}], ) @@ -1317,8 +1317,8 @@ async def test_raw_response_submit_tool_outputs_overload_2(self, async_client: A @parametrize async def test_streaming_response_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.runs.with_streaming_response.submit_tool_outputs( - "string", - thread_id="string", + run_id="run_id", + thread_id="thread_id", stream=True, tool_outputs=[{}, {}, {}], ) as response: @@ -1334,7 +1334,7 @@ async def test_streaming_response_submit_tool_outputs_overload_2(self, async_cli async def test_path_params_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( - "string", + run_id="run_id", thread_id="", stream=True, tool_outputs=[{}, {}, {}], @@ -1342,8 +1342,8 @@ async def test_path_params_submit_tool_outputs_overload_2(self, async_client: As with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( - "", - thread_id="string", + run_id="", + thread_id="thread_id", stream=True, tool_outputs=[{}, {}, {}], ) diff --git a/tests/api_resources/beta/vector_stores/test_file_batches.py b/tests/api_resources/beta/vector_stores/test_file_batches.py index 631f2669ad..3281622695 100644 --- a/tests/api_resources/beta/vector_stores/test_file_batches.py +++ b/tests/api_resources/beta/vector_stores/test_file_batches.py @@ -24,7 +24,7 @@ class TestFileBatches: @parametrize def test_method_create(self, client: OpenAI) -> None: file_batch = client.beta.vector_stores.file_batches.create( - "vs_abc123", + vector_store_id="vs_abc123", file_ids=["string"], ) assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"]) @@ -32,7 +32,7 @@ def test_method_create(self, client: OpenAI) -> None: @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: file_batch = client.beta.vector_stores.file_batches.create( - "vs_abc123", + vector_store_id="vs_abc123", file_ids=["string"], chunking_strategy={"type": "auto"}, ) @@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.beta.vector_stores.file_batches.with_raw_response.create( - "vs_abc123", + vector_store_id="vs_abc123", file_ids=["string"], ) @@ -53,7 +53,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.beta.vector_stores.file_batches.with_streaming_response.create( - "vs_abc123", + vector_store_id="vs_abc123", file_ids=["string"], ) as 
response: assert not response.is_closed @@ -68,14 +68,14 @@ def test_streaming_response_create(self, client: OpenAI) -> None: def test_path_params_create(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): client.beta.vector_stores.file_batches.with_raw_response.create( - "", + vector_store_id="", file_ids=["string"], ) @parametrize def test_method_retrieve(self, client: OpenAI) -> None: file_batch = client.beta.vector_stores.file_batches.retrieve( - "vsfb_abc123", + batch_id="vsfb_abc123", vector_store_id="vs_abc123", ) assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"]) @@ -83,7 +83,7 @@ def test_method_retrieve(self, client: OpenAI) -> None: @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.beta.vector_stores.file_batches.with_raw_response.retrieve( - "vsfb_abc123", + batch_id="vsfb_abc123", vector_store_id="vs_abc123", ) @@ -95,7 +95,7 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None: @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.beta.vector_stores.file_batches.with_streaming_response.retrieve( - "vsfb_abc123", + batch_id="vsfb_abc123", vector_store_id="vs_abc123", ) as response: assert not response.is_closed @@ -110,29 +110,29 @@ def test_streaming_response_retrieve(self, client: OpenAI) -> None: def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): client.beta.vector_stores.file_batches.with_raw_response.retrieve( - "vsfb_abc123", + batch_id="vsfb_abc123", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): client.beta.vector_stores.file_batches.with_raw_response.retrieve( - "", + batch_id="", vector_store_id="vs_abc123", ) @parametrize def test_method_cancel(self, client: OpenAI) -> None: file_batch = client.beta.vector_stores.file_batches.cancel( - "string", - vector_store_id="string", + batch_id="batch_id", + vector_store_id="vector_store_id", ) assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"]) @parametrize def test_raw_response_cancel(self, client: OpenAI) -> None: response = client.beta.vector_stores.file_batches.with_raw_response.cancel( - "string", - vector_store_id="string", + batch_id="batch_id", + vector_store_id="vector_store_id", ) assert response.is_closed is True @@ -143,8 +143,8 @@ def test_raw_response_cancel(self, client: OpenAI) -> None: @parametrize def test_streaming_response_cancel(self, client: OpenAI) -> None: with client.beta.vector_stores.file_batches.with_streaming_response.cancel( - "string", - vector_store_id="string", + batch_id="batch_id", + vector_store_id="vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -158,31 +158,31 @@ def test_streaming_response_cancel(self, client: OpenAI) -> None: def test_path_params_cancel(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): client.beta.vector_stores.file_batches.with_raw_response.cancel( - "string", + batch_id="batch_id", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): client.beta.vector_stores.file_batches.with_raw_response.cancel( - "", - 
vector_store_id="string", + batch_id="", + vector_store_id="vector_store_id", ) @parametrize def test_method_list_files(self, client: OpenAI) -> None: file_batch = client.beta.vector_stores.file_batches.list_files( - "string", - vector_store_id="string", + batch_id="batch_id", + vector_store_id="vector_store_id", ) assert_matches_type(SyncCursorPage[VectorStoreFile], file_batch, path=["response"]) @parametrize def test_method_list_files_with_all_params(self, client: OpenAI) -> None: file_batch = client.beta.vector_stores.file_batches.list_files( - "string", - vector_store_id="string", - after="string", - before="string", + batch_id="batch_id", + vector_store_id="vector_store_id", + after="after", + before="before", filter="in_progress", limit=0, order="asc", @@ -192,8 +192,8 @@ def test_method_list_files_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_list_files(self, client: OpenAI) -> None: response = client.beta.vector_stores.file_batches.with_raw_response.list_files( - "string", - vector_store_id="string", + batch_id="batch_id", + vector_store_id="vector_store_id", ) assert response.is_closed is True @@ -204,8 +204,8 @@ def test_raw_response_list_files(self, client: OpenAI) -> None: @parametrize def test_streaming_response_list_files(self, client: OpenAI) -> None: with client.beta.vector_stores.file_batches.with_streaming_response.list_files( - "string", - vector_store_id="string", + batch_id="batch_id", + vector_store_id="vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -219,14 +219,14 @@ def test_streaming_response_list_files(self, client: OpenAI) -> None: def test_path_params_list_files(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): client.beta.vector_stores.file_batches.with_raw_response.list_files( - "string", + batch_id="batch_id", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): client.beta.vector_stores.file_batches.with_raw_response.list_files( - "", - vector_store_id="string", + batch_id="", + vector_store_id="vector_store_id", ) @@ -236,7 +236,7 @@ class TestAsyncFileBatches: @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: file_batch = await async_client.beta.vector_stores.file_batches.create( - "vs_abc123", + vector_store_id="vs_abc123", file_ids=["string"], ) assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"]) @@ -244,7 +244,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: file_batch = await async_client.beta.vector_stores.file_batches.create( - "vs_abc123", + vector_store_id="vs_abc123", file_ids=["string"], chunking_strategy={"type": "auto"}, ) @@ -253,7 +253,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.vector_stores.file_batches.with_raw_response.create( - "vs_abc123", + vector_store_id="vs_abc123", file_ids=["string"], ) @@ -265,7 +265,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with 
async_client.beta.vector_stores.file_batches.with_streaming_response.create( - "vs_abc123", + vector_store_id="vs_abc123", file_ids=["string"], ) as response: assert not response.is_closed @@ -280,14 +280,14 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> Non async def test_path_params_create(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): await async_client.beta.vector_stores.file_batches.with_raw_response.create( - "", + vector_store_id="", file_ids=["string"], ) @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: file_batch = await async_client.beta.vector_stores.file_batches.retrieve( - "vsfb_abc123", + batch_id="vsfb_abc123", vector_store_id="vs_abc123", ) assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"]) @@ -295,7 +295,7 @@ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.vector_stores.file_batches.with_raw_response.retrieve( - "vsfb_abc123", + batch_id="vsfb_abc123", vector_store_id="vs_abc123", ) @@ -307,7 +307,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.vector_stores.file_batches.with_streaming_response.retrieve( - "vsfb_abc123", + batch_id="vsfb_abc123", vector_store_id="vs_abc123", ) as response: assert not response.is_closed @@ -322,29 +322,29 @@ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> N async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): await async_client.beta.vector_stores.file_batches.with_raw_response.retrieve( - "vsfb_abc123", + batch_id="vsfb_abc123", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): await async_client.beta.vector_stores.file_batches.with_raw_response.retrieve( - "", + batch_id="", vector_store_id="vs_abc123", ) @parametrize async def test_method_cancel(self, async_client: AsyncOpenAI) -> None: file_batch = await async_client.beta.vector_stores.file_batches.cancel( - "string", - vector_store_id="string", + batch_id="batch_id", + vector_store_id="vector_store_id", ) assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"]) @parametrize async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.vector_stores.file_batches.with_raw_response.cancel( - "string", - vector_store_id="string", + batch_id="batch_id", + vector_store_id="vector_store_id", ) assert response.is_closed is True @@ -355,8 +355,8 @@ async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.vector_stores.file_batches.with_streaming_response.cancel( - "string", - vector_store_id="string", + batch_id="batch_id", + vector_store_id="vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -370,31 +370,31 @@ async def test_streaming_response_cancel(self, async_client: 
AsyncOpenAI) -> Non async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): await async_client.beta.vector_stores.file_batches.with_raw_response.cancel( - "string", + batch_id="batch_id", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): await async_client.beta.vector_stores.file_batches.with_raw_response.cancel( - "", - vector_store_id="string", + batch_id="", + vector_store_id="vector_store_id", ) @parametrize async def test_method_list_files(self, async_client: AsyncOpenAI) -> None: file_batch = await async_client.beta.vector_stores.file_batches.list_files( - "string", - vector_store_id="string", + batch_id="batch_id", + vector_store_id="vector_store_id", ) assert_matches_type(AsyncCursorPage[VectorStoreFile], file_batch, path=["response"]) @parametrize async def test_method_list_files_with_all_params(self, async_client: AsyncOpenAI) -> None: file_batch = await async_client.beta.vector_stores.file_batches.list_files( - "string", - vector_store_id="string", - after="string", - before="string", + batch_id="batch_id", + vector_store_id="vector_store_id", + after="after", + before="before", filter="in_progress", limit=0, order="asc", @@ -404,8 +404,8 @@ async def test_method_list_files_with_all_params(self, async_client: AsyncOpenAI @parametrize async def test_raw_response_list_files(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.vector_stores.file_batches.with_raw_response.list_files( - "string", - vector_store_id="string", + batch_id="batch_id", + vector_store_id="vector_store_id", ) assert response.is_closed is True @@ -416,8 +416,8 @@ async def test_raw_response_list_files(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_list_files(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.vector_stores.file_batches.with_streaming_response.list_files( - "string", - vector_store_id="string", + batch_id="batch_id", + vector_store_id="vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -431,12 +431,12 @@ async def test_streaming_response_list_files(self, async_client: AsyncOpenAI) -> async def test_path_params_list_files(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): await async_client.beta.vector_stores.file_batches.with_raw_response.list_files( - "string", + batch_id="batch_id", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): await async_client.beta.vector_stores.file_batches.with_raw_response.list_files( - "", - vector_store_id="string", + batch_id="", + vector_store_id="vector_store_id", ) diff --git a/tests/api_resources/beta/vector_stores/test_files.py b/tests/api_resources/beta/vector_stores/test_files.py index 36622e699b..29fc28f39d 100644 --- a/tests/api_resources/beta/vector_stores/test_files.py +++ b/tests/api_resources/beta/vector_stores/test_files.py @@ -24,16 +24,16 @@ class TestFiles: @parametrize def test_method_create(self, client: OpenAI) -> None: file = client.beta.vector_stores.files.create( - "vs_abc123", - file_id="string", + vector_store_id="vs_abc123", + file_id="file_id", ) assert_matches_type(VectorStoreFile, file, 
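The vector store tests above follow the same pattern. A minimal sketch of the keyword-style calls, not part of the patch; both IDs are placeholders:

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # vector_store_id, file_ids and batch_id are passed as keywords,
    # as in the updated tests; the IDs here are placeholders.
    file_batch = client.beta.vector_stores.file_batches.create(
        vector_store_id="vs_abc123",
        file_ids=["file-abc123"],
    )
    file_batch = client.beta.vector_stores.file_batches.retrieve(
        batch_id=file_batch.id,
        vector_store_id="vs_abc123",
    )
    print(file_batch.status)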
path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: file = client.beta.vector_stores.files.create( - "vs_abc123", - file_id="string", + vector_store_id="vs_abc123", + file_id="file_id", chunking_strategy={"type": "auto"}, ) assert_matches_type(VectorStoreFile, file, path=["response"]) @@ -41,8 +41,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.beta.vector_stores.files.with_raw_response.create( - "vs_abc123", - file_id="string", + vector_store_id="vs_abc123", + file_id="file_id", ) assert response.is_closed is True @@ -53,8 +53,8 @@ def test_raw_response_create(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.beta.vector_stores.files.with_streaming_response.create( - "vs_abc123", - file_id="string", + vector_store_id="vs_abc123", + file_id="file_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -68,14 +68,14 @@ def test_streaming_response_create(self, client: OpenAI) -> None: def test_path_params_create(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): client.beta.vector_stores.files.with_raw_response.create( - "", - file_id="string", + vector_store_id="", + file_id="file_id", ) @parametrize def test_method_retrieve(self, client: OpenAI) -> None: file = client.beta.vector_stores.files.retrieve( - "file-abc123", + file_id="file-abc123", vector_store_id="vs_abc123", ) assert_matches_type(VectorStoreFile, file, path=["response"]) @@ -83,7 +83,7 @@ def test_method_retrieve(self, client: OpenAI) -> None: @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.beta.vector_stores.files.with_raw_response.retrieve( - "file-abc123", + file_id="file-abc123", vector_store_id="vs_abc123", ) @@ -95,7 +95,7 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None: @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.beta.vector_stores.files.with_streaming_response.retrieve( - "file-abc123", + file_id="file-abc123", vector_store_id="vs_abc123", ) as response: assert not response.is_closed @@ -110,29 +110,29 @@ def test_streaming_response_retrieve(self, client: OpenAI) -> None: def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): client.beta.vector_stores.files.with_raw_response.retrieve( - "file-abc123", + file_id="file-abc123", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): client.beta.vector_stores.files.with_raw_response.retrieve( - "", + file_id="", vector_store_id="vs_abc123", ) @parametrize def test_method_list(self, client: OpenAI) -> None: file = client.beta.vector_stores.files.list( - "string", + vector_store_id="vector_store_id", ) assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: file = client.beta.vector_stores.files.list( - "string", - after="string", - before="string", + vector_store_id="vector_store_id", + after="after", + before="before", filter="in_progress", limit=0, order="asc", @@ -142,7 +142,7 @@ def 
test_method_list_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_list(self, client: OpenAI) -> None: response = client.beta.vector_stores.files.with_raw_response.list( - "string", + vector_store_id="vector_store_id", ) assert response.is_closed is True @@ -153,7 +153,7 @@ def test_raw_response_list(self, client: OpenAI) -> None: @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: with client.beta.vector_stores.files.with_streaming_response.list( - "string", + vector_store_id="vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -167,22 +167,22 @@ def test_streaming_response_list(self, client: OpenAI) -> None: def test_path_params_list(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): client.beta.vector_stores.files.with_raw_response.list( - "", + vector_store_id="", ) @parametrize def test_method_delete(self, client: OpenAI) -> None: file = client.beta.vector_stores.files.delete( - "string", - vector_store_id="string", + file_id="file_id", + vector_store_id="vector_store_id", ) assert_matches_type(VectorStoreFileDeleted, file, path=["response"]) @parametrize def test_raw_response_delete(self, client: OpenAI) -> None: response = client.beta.vector_stores.files.with_raw_response.delete( - "string", - vector_store_id="string", + file_id="file_id", + vector_store_id="vector_store_id", ) assert response.is_closed is True @@ -193,8 +193,8 @@ def test_raw_response_delete(self, client: OpenAI) -> None: @parametrize def test_streaming_response_delete(self, client: OpenAI) -> None: with client.beta.vector_stores.files.with_streaming_response.delete( - "string", - vector_store_id="string", + file_id="file_id", + vector_store_id="vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -208,14 +208,14 @@ def test_streaming_response_delete(self, client: OpenAI) -> None: def test_path_params_delete(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): client.beta.vector_stores.files.with_raw_response.delete( - "string", + file_id="file_id", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): client.beta.vector_stores.files.with_raw_response.delete( - "", - vector_store_id="string", + file_id="", + vector_store_id="vector_store_id", ) @@ -225,16 +225,16 @@ class TestAsyncFiles: @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: file = await async_client.beta.vector_stores.files.create( - "vs_abc123", - file_id="string", + vector_store_id="vs_abc123", + file_id="file_id", ) assert_matches_type(VectorStoreFile, file, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: file = await async_client.beta.vector_stores.files.create( - "vs_abc123", - file_id="string", + vector_store_id="vs_abc123", + file_id="file_id", chunking_strategy={"type": "auto"}, ) assert_matches_type(VectorStoreFile, file, path=["response"]) @@ -242,8 +242,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await 
async_client.beta.vector_stores.files.with_raw_response.create( - "vs_abc123", - file_id="string", + vector_store_id="vs_abc123", + file_id="file_id", ) assert response.is_closed is True @@ -254,8 +254,8 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.vector_stores.files.with_streaming_response.create( - "vs_abc123", - file_id="string", + vector_store_id="vs_abc123", + file_id="file_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -269,14 +269,14 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> Non async def test_path_params_create(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): await async_client.beta.vector_stores.files.with_raw_response.create( - "", - file_id="string", + vector_store_id="", + file_id="file_id", ) @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: file = await async_client.beta.vector_stores.files.retrieve( - "file-abc123", + file_id="file-abc123", vector_store_id="vs_abc123", ) assert_matches_type(VectorStoreFile, file, path=["response"]) @@ -284,7 +284,7 @@ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.vector_stores.files.with_raw_response.retrieve( - "file-abc123", + file_id="file-abc123", vector_store_id="vs_abc123", ) @@ -296,7 +296,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.vector_stores.files.with_streaming_response.retrieve( - "file-abc123", + file_id="file-abc123", vector_store_id="vs_abc123", ) as response: assert not response.is_closed @@ -311,29 +311,29 @@ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> N async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): await async_client.beta.vector_stores.files.with_raw_response.retrieve( - "file-abc123", + file_id="file-abc123", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): await async_client.beta.vector_stores.files.with_raw_response.retrieve( - "", + file_id="", vector_store_id="vs_abc123", ) @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: file = await async_client.beta.vector_stores.files.list( - "string", + vector_store_id="vector_store_id", ) assert_matches_type(AsyncCursorPage[VectorStoreFile], file, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: file = await async_client.beta.vector_stores.files.list( - "string", - after="string", - before="string", + vector_store_id="vector_store_id", + after="after", + before="before", filter="in_progress", limit=0, order="asc", @@ -343,7 +343,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> N @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: response = await 
async_client.beta.vector_stores.files.with_raw_response.list( - "string", + vector_store_id="vector_store_id", ) assert response.is_closed is True @@ -354,7 +354,7 @@ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.vector_stores.files.with_streaming_response.list( - "string", + vector_store_id="vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -368,22 +368,22 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async def test_path_params_list(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): await async_client.beta.vector_stores.files.with_raw_response.list( - "", + vector_store_id="", ) @parametrize async def test_method_delete(self, async_client: AsyncOpenAI) -> None: file = await async_client.beta.vector_stores.files.delete( - "string", - vector_store_id="string", + file_id="file_id", + vector_store_id="vector_store_id", ) assert_matches_type(VectorStoreFileDeleted, file, path=["response"]) @parametrize async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.vector_stores.files.with_raw_response.delete( - "string", - vector_store_id="string", + file_id="file_id", + vector_store_id="vector_store_id", ) assert response.is_closed is True @@ -394,8 +394,8 @@ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.vector_stores.files.with_streaming_response.delete( - "string", - vector_store_id="string", + file_id="file_id", + vector_store_id="vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -409,12 +409,12 @@ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> Non async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): await async_client.beta.vector_stores.files.with_raw_response.delete( - "string", + file_id="file_id", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): await async_client.beta.vector_stores.files.with_raw_response.delete( - "", - vector_store_id="string", + file_id="", + vector_store_id="vector_store_id", ) diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 5cb2a8c717..ca5cada7f3 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -24,7 +24,7 @@ def test_method_create_overload_1(self, client: OpenAI) -> None: completion = client.chat.completions.create( messages=[ { - "content": "string", + "content": "content", "role": "system", } ], @@ -37,9 +37,9 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: completion = client.chat.completions.create( messages=[ { - "content": "string", + "content": "content", "role": "system", - "name": "string", + "name": "name", } ], model="gpt-4-turbo", @@ -47,8 +47,8 @@ def test_method_create_with_all_params_overload_1(self, 
client: OpenAI) -> None: function_call="none", functions=[ { - "description": "string", - "name": "string", + "description": "description", + "name": "name", "parameters": {"foo": "bar"}, } ], @@ -70,24 +70,24 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: { "type": "function", "function": { - "description": "string", - "name": "string", + "description": "description", + "name": "name", "parameters": {"foo": "bar"}, }, }, { "type": "function", "function": { - "description": "string", - "name": "string", + "description": "description", + "name": "name", "parameters": {"foo": "bar"}, }, }, { "type": "function", "function": { - "description": "string", - "name": "string", + "description": "description", + "name": "name", "parameters": {"foo": "bar"}, }, }, @@ -103,7 +103,7 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None: response = client.chat.completions.with_raw_response.create( messages=[ { - "content": "string", + "content": "content", "role": "system", } ], @@ -120,7 +120,7 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None: with client.chat.completions.with_streaming_response.create( messages=[ { - "content": "string", + "content": "content", "role": "system", } ], @@ -139,7 +139,7 @@ def test_method_create_overload_2(self, client: OpenAI) -> None: completion_stream = client.chat.completions.create( messages=[ { - "content": "string", + "content": "content", "role": "system", } ], @@ -153,9 +153,9 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: completion_stream = client.chat.completions.create( messages=[ { - "content": "string", + "content": "content", "role": "system", - "name": "string", + "name": "name", } ], model="gpt-4-turbo", @@ -164,8 +164,8 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: function_call="none", functions=[ { - "description": "string", - "name": "string", + "description": "description", + "name": "name", "parameters": {"foo": "bar"}, } ], @@ -186,24 +186,24 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: { "type": "function", "function": { - "description": "string", - "name": "string", + "description": "description", + "name": "name", "parameters": {"foo": "bar"}, }, }, { "type": "function", "function": { - "description": "string", - "name": "string", + "description": "description", + "name": "name", "parameters": {"foo": "bar"}, }, }, { "type": "function", "function": { - "description": "string", - "name": "string", + "description": "description", + "name": "name", "parameters": {"foo": "bar"}, }, }, @@ -219,7 +219,7 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None: response = client.chat.completions.with_raw_response.create( messages=[ { - "content": "string", + "content": "content", "role": "system", } ], @@ -236,7 +236,7 @@ def test_streaming_response_create_overload_2(self, client: OpenAI) -> None: with client.chat.completions.with_streaming_response.create( messages=[ { - "content": "string", + "content": "content", "role": "system", } ], @@ -260,7 +260,7 @@ async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None completion = await async_client.chat.completions.create( messages=[ { - "content": "string", + "content": "content", "role": "system", } ], @@ -273,9 +273,9 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn completion = await async_client.chat.completions.create( 
messages=[ { - "content": "string", + "content": "content", "role": "system", - "name": "string", + "name": "name", } ], model="gpt-4-turbo", @@ -283,8 +283,8 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn function_call="none", functions=[ { - "description": "string", - "name": "string", + "description": "description", + "name": "name", "parameters": {"foo": "bar"}, } ], @@ -306,24 +306,24 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn { "type": "function", "function": { - "description": "string", - "name": "string", + "description": "description", + "name": "name", "parameters": {"foo": "bar"}, }, }, { "type": "function", "function": { - "description": "string", - "name": "string", + "description": "description", + "name": "name", "parameters": {"foo": "bar"}, }, }, { "type": "function", "function": { - "description": "string", - "name": "string", + "description": "description", + "name": "name", "parameters": {"foo": "bar"}, }, }, @@ -339,7 +339,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) - response = await async_client.chat.completions.with_raw_response.create( messages=[ { - "content": "string", + "content": "content", "role": "system", } ], @@ -356,7 +356,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpe async with async_client.chat.completions.with_streaming_response.create( messages=[ { - "content": "string", + "content": "content", "role": "system", } ], @@ -375,7 +375,7 @@ async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None completion_stream = await async_client.chat.completions.create( messages=[ { - "content": "string", + "content": "content", "role": "system", } ], @@ -389,9 +389,9 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn completion_stream = await async_client.chat.completions.create( messages=[ { - "content": "string", + "content": "content", "role": "system", - "name": "string", + "name": "name", } ], model="gpt-4-turbo", @@ -400,8 +400,8 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn function_call="none", functions=[ { - "description": "string", - "name": "string", + "description": "description", + "name": "name", "parameters": {"foo": "bar"}, } ], @@ -422,24 +422,24 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn { "type": "function", "function": { - "description": "string", - "name": "string", + "description": "description", + "name": "name", "parameters": {"foo": "bar"}, }, }, { "type": "function", "function": { - "description": "string", - "name": "string", + "description": "description", + "name": "name", "parameters": {"foo": "bar"}, }, }, { "type": "function", "function": { - "description": "string", - "name": "string", + "description": "description", + "name": "name", "parameters": {"foo": "bar"}, }, }, @@ -455,7 +455,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) - response = await async_client.chat.completions.with_raw_response.create( messages=[ { - "content": "string", + "content": "content", "role": "system", } ], @@ -472,7 +472,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncOpe async with async_client.chat.completions.with_streaming_response.create( messages=[ { - "content": "string", + "content": "content", "role": "system", } ], diff --git a/tests/api_resources/fine_tuning/jobs/test_checkpoints.py 
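The chat completion fixtures above only rename the placeholder values; the request shape itself is unchanged. For reference, a minimal sketch of the same call in application code (the model name matches the one used in the tests; the prompt text is illustrative only):

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # "content" and the optional "name" field correspond to the message
    # fixtures in the tests above.
    completion = client.chat.completions.create(
        model="gpt-4-turbo",
        messages=[
            {"role": "system", "content": "You are a terse assistant.", "name": "name"},
            {"role": "user", "content": "Say hello."},
        ],
    )
    print(completion.choices[0].message.content)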
b/tests/api_resources/fine_tuning/jobs/test_checkpoints.py index 915d5c6f63..e65f84c818 100644 --- a/tests/api_resources/fine_tuning/jobs/test_checkpoints.py +++ b/tests/api_resources/fine_tuning/jobs/test_checkpoints.py @@ -21,15 +21,15 @@ class TestCheckpoints: @parametrize def test_method_list(self, client: OpenAI) -> None: checkpoint = client.fine_tuning.jobs.checkpoints.list( - "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert_matches_type(SyncCursorPage[FineTuningJobCheckpoint], checkpoint, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: checkpoint = client.fine_tuning.jobs.checkpoints.list( - "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - after="string", + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + after="after", limit=0, ) assert_matches_type(SyncCursorPage[FineTuningJobCheckpoint], checkpoint, path=["response"]) @@ -37,7 +37,7 @@ def test_method_list_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_list(self, client: OpenAI) -> None: response = client.fine_tuning.jobs.checkpoints.with_raw_response.list( - "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert response.is_closed is True @@ -48,7 +48,7 @@ def test_raw_response_list(self, client: OpenAI) -> None: @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: with client.fine_tuning.jobs.checkpoints.with_streaming_response.list( - "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -62,7 +62,7 @@ def test_streaming_response_list(self, client: OpenAI) -> None: def test_path_params_list(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): client.fine_tuning.jobs.checkpoints.with_raw_response.list( - "", + fine_tuning_job_id="", ) @@ -72,15 +72,15 @@ class TestAsyncCheckpoints: @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: checkpoint = await async_client.fine_tuning.jobs.checkpoints.list( - "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert_matches_type(AsyncCursorPage[FineTuningJobCheckpoint], checkpoint, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: checkpoint = await async_client.fine_tuning.jobs.checkpoints.list( - "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - after="string", + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + after="after", limit=0, ) assert_matches_type(AsyncCursorPage[FineTuningJobCheckpoint], checkpoint, path=["response"]) @@ -88,7 +88,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> N @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: response = await async_client.fine_tuning.jobs.checkpoints.with_raw_response.list( - "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert response.is_closed is True @@ -99,7 +99,7 @@ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async with async_client.fine_tuning.jobs.checkpoints.with_streaming_response.list( - "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) 
as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -113,5 +113,5 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async def test_path_params_list(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): await async_client.fine_tuning.jobs.checkpoints.with_raw_response.list( - "", + fine_tuning_job_id="", ) diff --git a/tests/api_resources/fine_tuning/test_jobs.py b/tests/api_resources/fine_tuning/test_jobs.py index 1ff6d63b31..3353547ad7 100644 --- a/tests/api_resources/fine_tuning/test_jobs.py +++ b/tests/api_resources/fine_tuning/test_jobs.py @@ -44,8 +44,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "type": "wandb", "wandb": { "project": "my-wandb-project", - "name": "string", - "entity": "string", + "name": "name", + "entity": "entity", "tags": ["custom-tag", "custom-tag", "custom-tag"], }, }, @@ -53,8 +53,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "type": "wandb", "wandb": { "project": "my-wandb-project", - "name": "string", - "entity": "string", + "name": "name", + "entity": "entity", "tags": ["custom-tag", "custom-tag", "custom-tag"], }, }, @@ -62,8 +62,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "type": "wandb", "wandb": { "project": "my-wandb-project", - "name": "string", - "entity": "string", + "name": "name", + "entity": "entity", "tags": ["custom-tag", "custom-tag", "custom-tag"], }, }, @@ -146,7 +146,7 @@ def test_method_list(self, client: OpenAI) -> None: @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: job = client.fine_tuning.jobs.list( - after="string", + after="after", limit=0, ) assert_matches_type(SyncCursorPage[FineTuningJob], job, path=["response"]) @@ -212,15 +212,15 @@ def test_path_params_cancel(self, client: OpenAI) -> None: @parametrize def test_method_list_events(self, client: OpenAI) -> None: job = client.fine_tuning.jobs.list_events( - "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert_matches_type(SyncCursorPage[FineTuningJobEvent], job, path=["response"]) @parametrize def test_method_list_events_with_all_params(self, client: OpenAI) -> None: job = client.fine_tuning.jobs.list_events( - "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - after="string", + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + after="after", limit=0, ) assert_matches_type(SyncCursorPage[FineTuningJobEvent], job, path=["response"]) @@ -228,7 +228,7 @@ def test_method_list_events_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_list_events(self, client: OpenAI) -> None: response = client.fine_tuning.jobs.with_raw_response.list_events( - "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert response.is_closed is True @@ -239,7 +239,7 @@ def test_raw_response_list_events(self, client: OpenAI) -> None: @parametrize def test_streaming_response_list_events(self, client: OpenAI) -> None: with client.fine_tuning.jobs.with_streaming_response.list_events( - "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -253,7 +253,7 @@ def test_streaming_response_list_events(self, client: OpenAI) -> None: def test_path_params_list_events(self, 
client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): client.fine_tuning.jobs.with_raw_response.list_events( - "", + fine_tuning_job_id="", ) @@ -283,8 +283,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "type": "wandb", "wandb": { "project": "my-wandb-project", - "name": "string", - "entity": "string", + "name": "name", + "entity": "entity", "tags": ["custom-tag", "custom-tag", "custom-tag"], }, }, @@ -292,8 +292,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "type": "wandb", "wandb": { "project": "my-wandb-project", - "name": "string", - "entity": "string", + "name": "name", + "entity": "entity", "tags": ["custom-tag", "custom-tag", "custom-tag"], }, }, @@ -301,8 +301,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "type": "wandb", "wandb": { "project": "my-wandb-project", - "name": "string", - "entity": "string", + "name": "name", + "entity": "entity", "tags": ["custom-tag", "custom-tag", "custom-tag"], }, }, @@ -385,7 +385,7 @@ async def test_method_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: job = await async_client.fine_tuning.jobs.list( - after="string", + after="after", limit=0, ) assert_matches_type(AsyncCursorPage[FineTuningJob], job, path=["response"]) @@ -451,15 +451,15 @@ async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_list_events(self, async_client: AsyncOpenAI) -> None: job = await async_client.fine_tuning.jobs.list_events( - "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert_matches_type(AsyncCursorPage[FineTuningJobEvent], job, path=["response"]) @parametrize async def test_method_list_events_with_all_params(self, async_client: AsyncOpenAI) -> None: job = await async_client.fine_tuning.jobs.list_events( - "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - after="string", + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + after="after", limit=0, ) assert_matches_type(AsyncCursorPage[FineTuningJobEvent], job, path=["response"]) @@ -467,7 +467,7 @@ async def test_method_list_events_with_all_params(self, async_client: AsyncOpenA @parametrize async def test_raw_response_list_events(self, async_client: AsyncOpenAI) -> None: response = await async_client.fine_tuning.jobs.with_raw_response.list_events( - "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert response.is_closed is True @@ -478,7 +478,7 @@ async def test_raw_response_list_events(self, async_client: AsyncOpenAI) -> None @parametrize async def test_streaming_response_list_events(self, async_client: AsyncOpenAI) -> None: async with async_client.fine_tuning.jobs.with_streaming_response.list_events( - "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -492,5 +492,5 @@ async def test_streaming_response_list_events(self, async_client: AsyncOpenAI) - async def test_path_params_list_events(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): await async_client.fine_tuning.jobs.with_raw_response.list_events( - "", + fine_tuning_job_id="", ) diff --git 
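As with the other resources, fine_tuning_job_id is now spelled out as a keyword in these tests. A short illustrative sketch, not part of the patch; the job ID is the same placeholder the tests use:

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # List recent events and checkpoints for a fine-tuning job.
    events = client.fine_tuning.jobs.list_events(
        fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
        limit=10,
    )
    for event in events:
        print(event.message)

    checkpoints = client.fine_tuning.jobs.checkpoints.list(
        fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
    )
    for checkpoint in checkpoints:
        print(checkpoint.fine_tuned_model_checkpoint)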
a/tests/api_resources/test_batches.py b/tests/api_resources/test_batches.py index 6f9b598e61..047b8bae12 100644 --- a/tests/api_resources/test_batches.py +++ b/tests/api_resources/test_batches.py @@ -23,7 +23,7 @@ def test_method_create(self, client: OpenAI) -> None: batch = client.batches.create( completion_window="24h", endpoint="/v1/chat/completions", - input_file_id="string", + input_file_id="input_file_id", ) assert_matches_type(Batch, batch, path=["response"]) @@ -32,7 +32,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: batch = client.batches.create( completion_window="24h", endpoint="/v1/chat/completions", - input_file_id="string", + input_file_id="input_file_id", metadata={"foo": "string"}, ) assert_matches_type(Batch, batch, path=["response"]) @@ -42,7 +42,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: response = client.batches.with_raw_response.create( completion_window="24h", endpoint="/v1/chat/completions", - input_file_id="string", + input_file_id="input_file_id", ) assert response.is_closed is True @@ -55,7 +55,7 @@ def test_streaming_response_create(self, client: OpenAI) -> None: with client.batches.with_streaming_response.create( completion_window="24h", endpoint="/v1/chat/completions", - input_file_id="string", + input_file_id="input_file_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -68,14 +68,14 @@ def test_streaming_response_create(self, client: OpenAI) -> None: @parametrize def test_method_retrieve(self, client: OpenAI) -> None: batch = client.batches.retrieve( - "string", + "batch_id", ) assert_matches_type(Batch, batch, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.batches.with_raw_response.retrieve( - "string", + "batch_id", ) assert response.is_closed is True @@ -86,7 +86,7 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None: @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.batches.with_streaming_response.retrieve( - "string", + "batch_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -111,7 +111,7 @@ def test_method_list(self, client: OpenAI) -> None: @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: batch = client.batches.list( - after="string", + after="after", limit=0, ) assert_matches_type(SyncCursorPage[Batch], batch, path=["response"]) @@ -139,14 +139,14 @@ def test_streaming_response_list(self, client: OpenAI) -> None: @parametrize def test_method_cancel(self, client: OpenAI) -> None: batch = client.batches.cancel( - "string", + "batch_id", ) assert_matches_type(Batch, batch, path=["response"]) @parametrize def test_raw_response_cancel(self, client: OpenAI) -> None: response = client.batches.with_raw_response.cancel( - "string", + "batch_id", ) assert response.is_closed is True @@ -157,7 +157,7 @@ def test_raw_response_cancel(self, client: OpenAI) -> None: @parametrize def test_streaming_response_cancel(self, client: OpenAI) -> None: with client.batches.with_streaming_response.cancel( - "string", + "batch_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -183,7 +183,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: batch = await async_client.batches.create( completion_window="24h", 
endpoint="/v1/chat/completions", - input_file_id="string", + input_file_id="input_file_id", ) assert_matches_type(Batch, batch, path=["response"]) @@ -192,7 +192,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> batch = await async_client.batches.create( completion_window="24h", endpoint="/v1/chat/completions", - input_file_id="string", + input_file_id="input_file_id", metadata={"foo": "string"}, ) assert_matches_type(Batch, batch, path=["response"]) @@ -202,7 +202,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.batches.with_raw_response.create( completion_window="24h", endpoint="/v1/chat/completions", - input_file_id="string", + input_file_id="input_file_id", ) assert response.is_closed is True @@ -215,7 +215,7 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> Non async with async_client.batches.with_streaming_response.create( completion_window="24h", endpoint="/v1/chat/completions", - input_file_id="string", + input_file_id="input_file_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -228,14 +228,14 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> Non @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: batch = await async_client.batches.retrieve( - "string", + "batch_id", ) assert_matches_type(Batch, batch, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.batches.with_raw_response.retrieve( - "string", + "batch_id", ) assert response.is_closed is True @@ -246,7 +246,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.batches.with_streaming_response.retrieve( - "string", + "batch_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -271,7 +271,7 @@ async def test_method_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: batch = await async_client.batches.list( - after="string", + after="after", limit=0, ) assert_matches_type(AsyncCursorPage[Batch], batch, path=["response"]) @@ -299,14 +299,14 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_cancel(self, async_client: AsyncOpenAI) -> None: batch = await async_client.batches.cancel( - "string", + "batch_id", ) assert_matches_type(Batch, batch, path=["response"]) @parametrize async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None: response = await async_client.batches.with_raw_response.cancel( - "string", + "batch_id", ) assert response.is_closed is True @@ -317,7 +317,7 @@ async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None: async with async_client.batches.with_streaming_response.cancel( - "string", + "batch_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/test_files.py b/tests/api_resources/test_files.py index 882f0ddbe7..725e55c193 100644 --- 
a/tests/api_resources/test_files.py +++ b/tests/api_resources/test_files.py @@ -60,14 +60,14 @@ def test_streaming_response_create(self, client: OpenAI) -> None: @parametrize def test_method_retrieve(self, client: OpenAI) -> None: file = client.files.retrieve( - "string", + "file_id", ) assert_matches_type(FileObject, file, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.files.with_raw_response.retrieve( - "string", + "file_id", ) assert response.is_closed is True @@ -78,7 +78,7 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None: @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.files.with_streaming_response.retrieve( - "string", + "file_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -103,7 +103,7 @@ def test_method_list(self, client: OpenAI) -> None: @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: file = client.files.list( - purpose="string", + purpose="purpose", ) assert_matches_type(SyncPage[FileObject], file, path=["response"]) @@ -130,14 +130,14 @@ def test_streaming_response_list(self, client: OpenAI) -> None: @parametrize def test_method_delete(self, client: OpenAI) -> None: file = client.files.delete( - "string", + "file_id", ) assert_matches_type(FileDeleted, file, path=["response"]) @parametrize def test_raw_response_delete(self, client: OpenAI) -> None: response = client.files.with_raw_response.delete( - "string", + "file_id", ) assert response.is_closed is True @@ -148,7 +148,7 @@ def test_raw_response_delete(self, client: OpenAI) -> None: @parametrize def test_streaming_response_delete(self, client: OpenAI) -> None: with client.files.with_streaming_response.delete( - "string", + "file_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -168,9 +168,9 @@ def test_path_params_delete(self, client: OpenAI) -> None: @parametrize @pytest.mark.respx(base_url=base_url) def test_method_content(self, client: OpenAI, respx_mock: MockRouter) -> None: - respx_mock.get("/files/string/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + respx_mock.get("/files/file_id/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) file = client.files.content( - "string", + "file_id", ) assert isinstance(file, _legacy_response.HttpxBinaryResponseContent) assert file.json() == {"foo": "bar"} @@ -178,10 +178,10 @@ def test_method_content(self, client: OpenAI, respx_mock: MockRouter) -> None: @parametrize @pytest.mark.respx(base_url=base_url) def test_raw_response_content(self, client: OpenAI, respx_mock: MockRouter) -> None: - respx_mock.get("/files/string/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + respx_mock.get("/files/file_id/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) response = client.files.with_raw_response.content( - "string", + "file_id", ) assert response.is_closed is True @@ -192,9 +192,9 @@ def test_raw_response_content(self, client: OpenAI, respx_mock: MockRouter) -> N @parametrize @pytest.mark.respx(base_url=base_url) def test_streaming_response_content(self, client: OpenAI, respx_mock: MockRouter) -> None: - respx_mock.get("/files/string/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + respx_mock.get("/files/file_id/content").mock(return_value=httpx.Response(200, json={"foo": 
"bar"})) with client.files.with_streaming_response.content( - "string", + "file_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -216,7 +216,7 @@ def test_path_params_content(self, client: OpenAI) -> None: def test_method_retrieve_content(self, client: OpenAI) -> None: with pytest.warns(DeprecationWarning): file = client.files.retrieve_content( - "string", + "file_id", ) assert_matches_type(str, file, path=["response"]) @@ -225,7 +225,7 @@ def test_method_retrieve_content(self, client: OpenAI) -> None: def test_raw_response_retrieve_content(self, client: OpenAI) -> None: with pytest.warns(DeprecationWarning): response = client.files.with_raw_response.retrieve_content( - "string", + "file_id", ) assert response.is_closed is True @@ -237,7 +237,7 @@ def test_raw_response_retrieve_content(self, client: OpenAI) -> None: def test_streaming_response_retrieve_content(self, client: OpenAI) -> None: with pytest.warns(DeprecationWarning): with client.files.with_streaming_response.retrieve_content( - "string", + "file_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -296,14 +296,14 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> Non @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: file = await async_client.files.retrieve( - "string", + "file_id", ) assert_matches_type(FileObject, file, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.files.with_raw_response.retrieve( - "string", + "file_id", ) assert response.is_closed is True @@ -314,7 +314,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.files.with_streaming_response.retrieve( - "string", + "file_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -339,7 +339,7 @@ async def test_method_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: file = await async_client.files.list( - purpose="string", + purpose="purpose", ) assert_matches_type(AsyncPage[FileObject], file, path=["response"]) @@ -366,14 +366,14 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_delete(self, async_client: AsyncOpenAI) -> None: file = await async_client.files.delete( - "string", + "file_id", ) assert_matches_type(FileDeleted, file, path=["response"]) @parametrize async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: response = await async_client.files.with_raw_response.delete( - "string", + "file_id", ) assert response.is_closed is True @@ -384,7 +384,7 @@ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: async with async_client.files.with_streaming_response.delete( - "string", + "file_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -404,9 +404,9 @@ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: @parametrize 
@pytest.mark.respx(base_url=base_url) async def test_method_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: - respx_mock.get("/files/string/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + respx_mock.get("/files/file_id/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) file = await async_client.files.content( - "string", + "file_id", ) assert isinstance(file, _legacy_response.HttpxBinaryResponseContent) assert file.json() == {"foo": "bar"} @@ -414,10 +414,10 @@ async def test_method_content(self, async_client: AsyncOpenAI, respx_mock: MockR @parametrize @pytest.mark.respx(base_url=base_url) async def test_raw_response_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: - respx_mock.get("/files/string/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + respx_mock.get("/files/file_id/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) response = await async_client.files.with_raw_response.content( - "string", + "file_id", ) assert response.is_closed is True @@ -428,9 +428,9 @@ async def test_raw_response_content(self, async_client: AsyncOpenAI, respx_mock: @parametrize @pytest.mark.respx(base_url=base_url) async def test_streaming_response_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: - respx_mock.get("/files/string/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + respx_mock.get("/files/file_id/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) async with async_client.files.with_streaming_response.content( - "string", + "file_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -452,7 +452,7 @@ async def test_path_params_content(self, async_client: AsyncOpenAI) -> None: async def test_method_retrieve_content(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): file = await async_client.files.retrieve_content( - "string", + "file_id", ) assert_matches_type(str, file, path=["response"]) @@ -461,7 +461,7 @@ async def test_method_retrieve_content(self, async_client: AsyncOpenAI) -> None: async def test_raw_response_retrieve_content(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): response = await async_client.files.with_raw_response.retrieve_content( - "string", + "file_id", ) assert response.is_closed is True @@ -473,7 +473,7 @@ async def test_raw_response_retrieve_content(self, async_client: AsyncOpenAI) -> async def test_streaming_response_retrieve_content(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): async with async_client.files.with_streaming_response.retrieve_content( - "string", + "file_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" From 97ac03d02667a62f0ba82fcff55eab66ed2d2b51 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 10:09:58 +0000 Subject: [PATCH 035/192] chore(internal): minor options / compat functions updates (#1549) --- src/openai/_base_client.py | 12 ++++++------ src/openai/_compat.py | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 7ab2a56169..4b93ab298c 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -880,9 +880,9 @@ def __exit__( def 
_prepare_options( self, options: FinalRequestOptions, # noqa: ARG002 - ) -> None: + ) -> FinalRequestOptions: """Hook for mutating the given options""" - return None + return options def _prepare_request( self, @@ -962,7 +962,7 @@ def _request( input_options = model_copy(options) cast_to = self._maybe_override_cast_to(cast_to, options) - self._prepare_options(options) + options = self._prepare_options(options) retries = self._remaining_retries(remaining_retries, options) request = self._build_request(options) @@ -1457,9 +1457,9 @@ async def __aexit__( async def _prepare_options( self, options: FinalRequestOptions, # noqa: ARG002 - ) -> None: + ) -> FinalRequestOptions: """Hook for mutating the given options""" - return None + return options async def _prepare_request( self, @@ -1544,7 +1544,7 @@ async def _request( input_options = model_copy(options) cast_to = self._maybe_override_cast_to(cast_to, options) - await self._prepare_options(options) + options = await self._prepare_options(options) retries = self._remaining_retries(remaining_retries, options) request = self._build_request(options) diff --git a/src/openai/_compat.py b/src/openai/_compat.py index 74c7639b4c..c919b5adb3 100644 --- a/src/openai/_compat.py +++ b/src/openai/_compat.py @@ -118,10 +118,10 @@ def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]: return model.__fields__ # type: ignore -def model_copy(model: _ModelT) -> _ModelT: +def model_copy(model: _ModelT, *, deep: bool = False) -> _ModelT: if PYDANTIC_V2: - return model.model_copy() - return model.copy() # type: ignore + return model.model_copy(deep=deep) + return model.copy(deep=deep) # type: ignore def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str: From 01654e29091fe984a69a4fec08710b71b7af05de Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 18:20:46 +0000 Subject: [PATCH 036/192] chore(docs): minor update to formatting of API link in README (#1550) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 06f63081fb..7edc346272 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ and offers both synchronous and asynchronous clients powered by [httpx](https:// ## Documentation -The REST API documentation can be found [on platform.openai.com](https://platform.openai.com/docs). The full API of this library can be found in [api.md](api.md). +The REST API documentation can be found on [platform.openai.com](https://platform.openai.com/docs). The full API of this library can be found in [api.md](api.md). 
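A hypothetical sketch (not part of these patches) of what the reworked `_prepare_options` hook from #1549 above enables: a client subclass can now return a fresh copy of the per-request options instead of mutating the caller's object, and the `model_copy` compat helper gains a `deep` flag. The header name below is illustrative only.

```python
from openai import OpenAI
from openai._compat import model_copy
from openai._models import FinalRequestOptions


class MyClient(OpenAI):
    def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
        # Work on a copy so the caller's options object is left untouched;
        # the compat helper now also accepts a `deep=` flag if needed.
        options = model_copy(options)
        # Hypothetical per-request mutation: attach an extra header.
        options.headers = {**(options.headers or {}), "x-request-source": "my-app"}
        return options
```

Because `_request` now threads the returned value back into the request (`options = self._prepare_options(options)`), returning a modified copy like this is enough; the hook no longer has to mutate in place.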
## Installation From 91449fbd4a1dacdc7e65966cf70949ec13e8027b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 16 Jul 2024 14:56:36 +0000 Subject: [PATCH 037/192] chore(internal): update formatting (#1552) --- src/openai/types/audio/transcription.py | 1 - src/openai/types/audio/translation.py | 1 - src/openai/types/batch_request_counts.py | 1 - src/openai/types/beta/assistant_tool_choice_function.py | 1 - src/openai/types/completion_usage.py | 1 - src/openai/types/fine_tuning/fine_tuning_job_integration.py | 1 - src/openai/types/model_deleted.py | 1 - 7 files changed, 7 deletions(-) diff --git a/src/openai/types/audio/transcription.py b/src/openai/types/audio/transcription.py index 0b6ab39e78..edb5f227fc 100644 --- a/src/openai/types/audio/transcription.py +++ b/src/openai/types/audio/transcription.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - from ..._models import BaseModel __all__ = ["Transcription"] diff --git a/src/openai/types/audio/translation.py b/src/openai/types/audio/translation.py index 3d9ede2939..7c0e905189 100644 --- a/src/openai/types/audio/translation.py +++ b/src/openai/types/audio/translation.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - from ..._models import BaseModel __all__ = ["Translation"] diff --git a/src/openai/types/batch_request_counts.py b/src/openai/types/batch_request_counts.py index ef6c84a0a1..7e1d49fb88 100644 --- a/src/openai/types/batch_request_counts.py +++ b/src/openai/types/batch_request_counts.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - from .._models import BaseModel __all__ = ["BatchRequestCounts"] diff --git a/src/openai/types/beta/assistant_tool_choice_function.py b/src/openai/types/beta/assistant_tool_choice_function.py index d0d4255357..0c896d8087 100644 --- a/src/openai/types/beta/assistant_tool_choice_function.py +++ b/src/openai/types/beta/assistant_tool_choice_function.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - from ..._models import BaseModel __all__ = ["AssistantToolChoiceFunction"] diff --git a/src/openai/types/completion_usage.py b/src/openai/types/completion_usage.py index 0d57b96595..ac09afd479 100644 --- a/src/openai/types/completion_usage.py +++ b/src/openai/types/completion_usage.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - from .._models import BaseModel __all__ = ["CompletionUsage"] diff --git a/src/openai/types/fine_tuning/fine_tuning_job_integration.py b/src/openai/types/fine_tuning/fine_tuning_job_integration.py index 4904b85c11..8ac55a0b44 100644 --- a/src/openai/types/fine_tuning/fine_tuning_job_integration.py +++ b/src/openai/types/fine_tuning/fine_tuning_job_integration.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - from .fine_tuning_job_wandb_integration_object import FineTuningJobWandbIntegrationObject __all__ = ["FineTuningJobIntegration"] diff --git a/src/openai/types/model_deleted.py b/src/openai/types/model_deleted.py index d9a48bb1b5..7f81e1b380 100644 --- a/src/openai/types/model_deleted.py +++ b/src/openai/types/model_deleted.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- from .._models import BaseModel __all__ = ["ModelDeleted"] From 429762dffcaba1ab5e42f0d2720fd60995e7b4a1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 16 Jul 2024 22:33:58 +0000 Subject: [PATCH 038/192] chore(internal): update formatting (#1553) --- src/openai/types/audio/transcription.py | 1 + src/openai/types/audio/translation.py | 1 + src/openai/types/batch_request_counts.py | 1 + src/openai/types/beta/assistant_tool_choice_function.py | 1 + src/openai/types/completion_usage.py | 1 + src/openai/types/fine_tuning/fine_tuning_job_integration.py | 1 + src/openai/types/model_deleted.py | 1 + 7 files changed, 7 insertions(+) diff --git a/src/openai/types/audio/transcription.py b/src/openai/types/audio/transcription.py index edb5f227fc..0b6ab39e78 100644 --- a/src/openai/types/audio/transcription.py +++ b/src/openai/types/audio/transcription.py @@ -1,6 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + from ..._models import BaseModel __all__ = ["Transcription"] diff --git a/src/openai/types/audio/translation.py b/src/openai/types/audio/translation.py index 7c0e905189..3d9ede2939 100644 --- a/src/openai/types/audio/translation.py +++ b/src/openai/types/audio/translation.py @@ -1,6 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + from ..._models import BaseModel __all__ = ["Translation"] diff --git a/src/openai/types/batch_request_counts.py b/src/openai/types/batch_request_counts.py index 7e1d49fb88..ef6c84a0a1 100644 --- a/src/openai/types/batch_request_counts.py +++ b/src/openai/types/batch_request_counts.py @@ -1,6 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + from .._models import BaseModel __all__ = ["BatchRequestCounts"] diff --git a/src/openai/types/beta/assistant_tool_choice_function.py b/src/openai/types/beta/assistant_tool_choice_function.py index 0c896d8087..d0d4255357 100644 --- a/src/openai/types/beta/assistant_tool_choice_function.py +++ b/src/openai/types/beta/assistant_tool_choice_function.py @@ -1,6 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + from ..._models import BaseModel __all__ = ["AssistantToolChoiceFunction"] diff --git a/src/openai/types/completion_usage.py b/src/openai/types/completion_usage.py index ac09afd479..0d57b96595 100644 --- a/src/openai/types/completion_usage.py +++ b/src/openai/types/completion_usage.py @@ -1,6 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + from .._models import BaseModel __all__ = ["CompletionUsage"] diff --git a/src/openai/types/fine_tuning/fine_tuning_job_integration.py b/src/openai/types/fine_tuning/fine_tuning_job_integration.py index 8ac55a0b44..4904b85c11 100644 --- a/src/openai/types/fine_tuning/fine_tuning_job_integration.py +++ b/src/openai/types/fine_tuning/fine_tuning_job_integration.py @@ -1,6 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + from .fine_tuning_job_wandb_integration_object import FineTuningJobWandbIntegrationObject __all__ = ["FineTuningJobIntegration"] diff --git a/src/openai/types/model_deleted.py b/src/openai/types/model_deleted.py index 7f81e1b380..d9a48bb1b5 100644 --- a/src/openai/types/model_deleted.py +++ b/src/openai/types/model_deleted.py @@ -1,6 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ from .._models import BaseModel __all__ = ["ModelDeleted"] From 6b53042f4dfeaf0f804ced14c5991e1cf2bfc238 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 18 Jul 2024 16:53:49 +0000 Subject: [PATCH 039/192] chore(docs): document how to do per-request http client customization (#1560) --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 7edc346272..8102ea9673 100644 --- a/README.md +++ b/README.md @@ -485,6 +485,12 @@ client = OpenAI( ) ``` +You can also customize the client on a per-request basis by using `with_options()`: + +```python +client.with_options(http_client=DefaultHttpxClient(...)) +``` + ### Managing HTTP resources By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting. From 1939a765e95fdff0490f666d90753a38d4129375 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 19 Jul 2024 14:54:42 +0000 Subject: [PATCH 040/192] feat(api): add new gpt-4o-mini models (#1561) --- .stats.yml | 2 +- src/openai/resources/beta/assistants.py | 4 ++++ src/openai/resources/beta/threads/runs/runs.py | 16 ++++++++++++++++ src/openai/resources/beta/threads/threads.py | 16 ++++++++++++++++ src/openai/types/beta/assistant_create_params.py | 2 ++ .../types/beta/thread_create_and_run_params.py | 2 ++ .../types/beta/threads/run_create_params.py | 2 ++ src/openai/types/chat_model.py | 2 ++ 8 files changed, 45 insertions(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 57f5afaffe..27e2ce5ede 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-27d8d6da893c1cdd53b491ec05153df22b1e113965f253a1d6eb8d75b628173f.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-518ca6c60061d3e8bc0971facf40d752f2aea62e3522cc168ad29a1f29cab3dd.yml diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index 204f6c87f4..531302c126 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -48,6 +48,8 @@ def create( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -437,6 +439,8 @@ async def create( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index 0f57dd69f5..6e562cb0e5 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -73,6 +73,8 @@ def create( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -226,6 +228,8 @@ def create( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -378,6 +382,8 @@ def create( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", 
"gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -529,6 +535,8 @@ def create( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -967,6 +975,8 @@ async def create( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -1120,6 +1130,8 @@ async def create( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -1272,6 +1284,8 @@ async def create( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -1423,6 +1437,8 @@ async def create( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index 25dd1ac09e..1587813210 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -258,6 +258,8 @@ def create_and_run( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -410,6 +412,8 @@ def create_and_run( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -561,6 +565,8 @@ def create_and_run( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -711,6 +717,8 @@ def create_and_run( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -990,6 +998,8 @@ async def create_and_run( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -1142,6 +1152,8 @@ async def create_and_run( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -1293,6 +1305,8 @@ async def create_and_run( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -1443,6 +1457,8 @@ async def create_and_run( Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index c9b0317831..754752ae65 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -28,6 +28,8 @@ class AssistantCreateParams(TypedDict, total=False): Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index dbbff415ec..9421a894d9 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -82,6 +82,8 @@ class 
ThreadCreateAndRunParamsBase(TypedDict, total=False): Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index 89da241965..81cd85188b 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -79,6 +79,8 @@ class RunCreateParamsBase(TypedDict, total=False): Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py index 0d2937ea32..87b2acb90a 100644 --- a/src/openai/types/chat_model.py +++ b/src/openai/types/chat_model.py @@ -7,6 +7,8 @@ ChatModel = Literal[ "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", From a733b3b8a8efb49b0a3b9f86b623fbd31b853adf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 22 Jul 2024 11:10:52 +0000 Subject: [PATCH 041/192] feat(api): add uploads endpoints (#1568) --- .stats.yml | 4 +- api.md | 26 + src/openai/_client.py | 8 + src/openai/resources/__init__.py | 14 + src/openai/resources/chat/completions.py | 6 + src/openai/resources/uploads/__init__.py | 33 ++ src/openai/resources/uploads/parts.py | 188 +++++++ src/openai/resources/uploads/uploads.py | 473 ++++++++++++++++++ src/openai/types/__init__.py | 3 + .../types/chat/completion_create_params.py | 1 + src/openai/types/upload.py | 42 ++ src/openai/types/upload_complete_params.py | 19 + src/openai/types/upload_create_params.py | 29 ++ src/openai/types/uploads/__init__.py | 6 + .../types/uploads/part_create_params.py | 14 + src/openai/types/uploads/upload_part.py | 21 + tests/api_resources/test_uploads.py | 280 +++++++++++ tests/api_resources/uploads/__init__.py | 1 + tests/api_resources/uploads/test_parts.py | 106 ++++ 19 files changed, 1272 insertions(+), 2 deletions(-) create mode 100644 src/openai/resources/uploads/__init__.py create mode 100644 src/openai/resources/uploads/parts.py create mode 100644 src/openai/resources/uploads/uploads.py create mode 100644 src/openai/types/upload.py create mode 100644 src/openai/types/upload_complete_params.py create mode 100644 src/openai/types/upload_create_params.py create mode 100644 src/openai/types/uploads/__init__.py create mode 100644 src/openai/types/uploads/part_create_params.py create mode 100644 src/openai/types/uploads/upload_part.py create mode 100644 tests/api_resources/test_uploads.py create mode 100644 tests/api_resources/uploads/__init__.py create mode 100644 tests/api_resources/uploads/test_parts.py diff --git a/.stats.yml b/.stats.yml index 27e2ce5ede..4e4cb5509c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-518ca6c60061d3e8bc0971facf40d752f2aea62e3522cc168ad29a1f29cab3dd.yml +configured_endpoints: 68 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-77cfff37114bc9f141c7e6107eb5f1b38d8cc99bc3d4ce03a066db2b6b649c69.yml diff --git a/api.md b/api.md index c03586c447..933095786a 100644 --- a/api.md +++ b/api.md @@ -399,3 +399,29 @@ Methods: - client.batches.retrieve(batch_id) -> Batch - 
client.batches.list(\*\*params) -> SyncCursorPage[Batch] - client.batches.cancel(batch_id) -> Batch + +# Uploads + +Types: + +```python +from openai.types import Upload +``` + +Methods: + +- client.uploads.create(\*\*params) -> Upload +- client.uploads.cancel(upload_id) -> Upload +- client.uploads.complete(upload_id, \*\*params) -> Upload + +## Parts + +Types: + +```python +from openai.types.uploads import UploadPart +``` + +Methods: + +- client.uploads.parts.create(upload_id, \*\*params) -> UploadPart diff --git a/src/openai/_client.py b/src/openai/_client.py index 8f3060c6f6..8b404e234d 100644 --- a/src/openai/_client.py +++ b/src/openai/_client.py @@ -58,6 +58,7 @@ class OpenAI(SyncAPIClient): fine_tuning: resources.FineTuning beta: resources.Beta batches: resources.Batches + uploads: resources.Uploads with_raw_response: OpenAIWithRawResponse with_streaming_response: OpenAIWithStreamedResponse @@ -143,6 +144,7 @@ def __init__( self.fine_tuning = resources.FineTuning(self) self.beta = resources.Beta(self) self.batches = resources.Batches(self) + self.uploads = resources.Uploads(self) self.with_raw_response = OpenAIWithRawResponse(self) self.with_streaming_response = OpenAIWithStreamedResponse(self) @@ -270,6 +272,7 @@ class AsyncOpenAI(AsyncAPIClient): fine_tuning: resources.AsyncFineTuning beta: resources.AsyncBeta batches: resources.AsyncBatches + uploads: resources.AsyncUploads with_raw_response: AsyncOpenAIWithRawResponse with_streaming_response: AsyncOpenAIWithStreamedResponse @@ -355,6 +358,7 @@ def __init__( self.fine_tuning = resources.AsyncFineTuning(self) self.beta = resources.AsyncBeta(self) self.batches = resources.AsyncBatches(self) + self.uploads = resources.AsyncUploads(self) self.with_raw_response = AsyncOpenAIWithRawResponse(self) self.with_streaming_response = AsyncOpenAIWithStreamedResponse(self) @@ -483,6 +487,7 @@ def __init__(self, client: OpenAI) -> None: self.fine_tuning = resources.FineTuningWithRawResponse(client.fine_tuning) self.beta = resources.BetaWithRawResponse(client.beta) self.batches = resources.BatchesWithRawResponse(client.batches) + self.uploads = resources.UploadsWithRawResponse(client.uploads) class AsyncOpenAIWithRawResponse: @@ -498,6 +503,7 @@ def __init__(self, client: AsyncOpenAI) -> None: self.fine_tuning = resources.AsyncFineTuningWithRawResponse(client.fine_tuning) self.beta = resources.AsyncBetaWithRawResponse(client.beta) self.batches = resources.AsyncBatchesWithRawResponse(client.batches) + self.uploads = resources.AsyncUploadsWithRawResponse(client.uploads) class OpenAIWithStreamedResponse: @@ -513,6 +519,7 @@ def __init__(self, client: OpenAI) -> None: self.fine_tuning = resources.FineTuningWithStreamingResponse(client.fine_tuning) self.beta = resources.BetaWithStreamingResponse(client.beta) self.batches = resources.BatchesWithStreamingResponse(client.batches) + self.uploads = resources.UploadsWithStreamingResponse(client.uploads) class AsyncOpenAIWithStreamedResponse: @@ -528,6 +535,7 @@ def __init__(self, client: AsyncOpenAI) -> None: self.fine_tuning = resources.AsyncFineTuningWithStreamingResponse(client.fine_tuning) self.beta = resources.AsyncBetaWithStreamingResponse(client.beta) self.batches = resources.AsyncBatchesWithStreamingResponse(client.batches) + self.uploads = resources.AsyncUploadsWithStreamingResponse(client.uploads) Client = OpenAI diff --git a/src/openai/resources/__init__.py b/src/openai/resources/__init__.py index ecae4243fc..e2cc1c4b0c 100644 --- a/src/openai/resources/__init__.py +++ 
b/src/openai/resources/__init__.py @@ -56,6 +56,14 @@ BatchesWithStreamingResponse, AsyncBatchesWithStreamingResponse, ) +from .uploads import ( + Uploads, + AsyncUploads, + UploadsWithRawResponse, + AsyncUploadsWithRawResponse, + UploadsWithStreamingResponse, + AsyncUploadsWithStreamingResponse, +) from .embeddings import ( Embeddings, AsyncEmbeddings, @@ -156,4 +164,10 @@ "AsyncBatchesWithRawResponse", "BatchesWithStreamingResponse", "AsyncBatchesWithStreamingResponse", + "Uploads", + "AsyncUploads", + "UploadsWithRawResponse", + "AsyncUploadsWithRawResponse", + "UploadsWithStreamingResponse", + "AsyncUploadsWithStreamingResponse", ] diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index e7dbe34585..07a35f577b 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -169,6 +169,7 @@ def create( exhausted. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. @@ -364,6 +365,7 @@ def create( exhausted. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. @@ -552,6 +554,7 @@ def create( exhausted. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. @@ -815,6 +818,7 @@ async def create( exhausted. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. @@ -1010,6 +1014,7 @@ async def create( exhausted. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. @@ -1198,6 +1203,7 @@ async def create( exhausted. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. diff --git a/src/openai/resources/uploads/__init__.py b/src/openai/resources/uploads/__init__.py new file mode 100644 index 0000000000..12d1056f9e --- /dev/null +++ b/src/openai/resources/uploads/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .parts import ( + Parts, + AsyncParts, + PartsWithRawResponse, + AsyncPartsWithRawResponse, + PartsWithStreamingResponse, + AsyncPartsWithStreamingResponse, +) +from .uploads import ( + Uploads, + AsyncUploads, + UploadsWithRawResponse, + AsyncUploadsWithRawResponse, + UploadsWithStreamingResponse, + AsyncUploadsWithStreamingResponse, +) + +__all__ = [ + "Parts", + "AsyncParts", + "PartsWithRawResponse", + "AsyncPartsWithRawResponse", + "PartsWithStreamingResponse", + "AsyncPartsWithStreamingResponse", + "Uploads", + "AsyncUploads", + "UploadsWithRawResponse", + "AsyncUploadsWithRawResponse", + "UploadsWithStreamingResponse", + "AsyncUploadsWithStreamingResponse", +] diff --git a/src/openai/resources/uploads/parts.py b/src/openai/resources/uploads/parts.py new file mode 100644 index 0000000000..3ec2592b1e --- /dev/null +++ b/src/openai/resources/uploads/parts.py @@ -0,0 +1,188 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Mapping, cast + +import httpx + +from ... import _legacy_response +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes +from ..._utils import ( + extract_files, + maybe_transform, + deepcopy_minimal, + async_maybe_transform, +) +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ..._base_client import make_request_options +from ...types.uploads import part_create_params +from ...types.uploads.upload_part import UploadPart + +__all__ = ["Parts", "AsyncParts"] + + +class Parts(SyncAPIResource): + @cached_property + def with_raw_response(self) -> PartsWithRawResponse: + return PartsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> PartsWithStreamingResponse: + return PartsWithStreamingResponse(self) + + def create( + self, + upload_id: str, + *, + data: FileTypes, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UploadPart: + """ + Adds a + [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an + [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object. + A Part represents a chunk of bytes from the file you are trying to upload. + + Each Part can be at most 64 MB, and you can add Parts until you hit the Upload + maximum of 8 GB. + + It is possible to add multiple Parts in parallel. You can decide the intended + order of the Parts when you + [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete). + + Args: + data: The chunk of bytes for this Part. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not upload_id: + raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") + body = deepcopy_minimal({"data": data}) + files = extract_files(cast(Mapping[str, object], body), paths=[["data"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return self._post( + f"/uploads/{upload_id}/parts", + body=maybe_transform(body, part_create_params.PartCreateParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=UploadPart, + ) + + +class AsyncParts(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncPartsWithRawResponse: + return AsyncPartsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncPartsWithStreamingResponse: + return AsyncPartsWithStreamingResponse(self) + + async def create( + self, + upload_id: str, + *, + data: FileTypes, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UploadPart: + """ + Adds a + [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an + [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object. + A Part represents a chunk of bytes from the file you are trying to upload. + + Each Part can be at most 64 MB, and you can add Parts until you hit the Upload + maximum of 8 GB. + + It is possible to add multiple Parts in parallel. You can decide the intended + order of the Parts when you + [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete). + + Args: + data: The chunk of bytes for this Part. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not upload_id: + raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") + body = deepcopy_minimal({"data": data}) + files = extract_files(cast(Mapping[str, object], body), paths=[["data"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. 
+ # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return await self._post( + f"/uploads/{upload_id}/parts", + body=await async_maybe_transform(body, part_create_params.PartCreateParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=UploadPart, + ) + + +class PartsWithRawResponse: + def __init__(self, parts: Parts) -> None: + self._parts = parts + + self.create = _legacy_response.to_raw_response_wrapper( + parts.create, + ) + + +class AsyncPartsWithRawResponse: + def __init__(self, parts: AsyncParts) -> None: + self._parts = parts + + self.create = _legacy_response.async_to_raw_response_wrapper( + parts.create, + ) + + +class PartsWithStreamingResponse: + def __init__(self, parts: Parts) -> None: + self._parts = parts + + self.create = to_streamed_response_wrapper( + parts.create, + ) + + +class AsyncPartsWithStreamingResponse: + def __init__(self, parts: AsyncParts) -> None: + self._parts = parts + + self.create = async_to_streamed_response_wrapper( + parts.create, + ) diff --git a/src/openai/resources/uploads/uploads.py b/src/openai/resources/uploads/uploads.py new file mode 100644 index 0000000000..4100423d3e --- /dev/null +++ b/src/openai/resources/uploads/uploads.py @@ -0,0 +1,473 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal + +import httpx + +from ... import _legacy_response +from .parts import ( + Parts, + AsyncParts, + PartsWithRawResponse, + AsyncPartsWithRawResponse, + PartsWithStreamingResponse, + AsyncPartsWithStreamingResponse, +) +from ...types import upload_create_params, upload_complete_params +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import ( + maybe_transform, + async_maybe_transform, +) +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ..._base_client import make_request_options +from ...types.upload import Upload + +__all__ = ["Uploads", "AsyncUploads"] + + +class Uploads(SyncAPIResource): + @cached_property + def parts(self) -> Parts: + return Parts(self._client) + + @cached_property + def with_raw_response(self) -> UploadsWithRawResponse: + return UploadsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> UploadsWithStreamingResponse: + return UploadsWithStreamingResponse(self) + + def create( + self, + *, + bytes: int, + filename: str, + mime_type: str, + purpose: Literal["assistants", "batch", "fine-tune", "vision"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Upload: + """ + Creates an intermediate + [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object + that you can add + [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to. 
+ Currently, an Upload can accept at most 8 GB in total and expires after an hour + after you create it. + + Once you complete the Upload, we will create a + [File](https://platform.openai.com/docs/api-reference/files/object) object that + contains all the parts you uploaded. This File is usable in the rest of our + platform as a regular File object. + + For certain `purpose`s, the correct `mime_type` must be specified. Please refer + to documentation for the supported MIME types for your use case: + + - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files) + + For guidance on the proper filename extensions for each purpose, please follow + the documentation on + [creating a File](https://platform.openai.com/docs/api-reference/files/create). + + Args: + bytes: The number of bytes in the file you are uploading. + + filename: The name of the file to upload. + + mime_type: The MIME type of the file. + + This must fall within the supported MIME types for your file purpose. See the + supported MIME types for assistants and vision. + + purpose: The intended purpose of the uploaded file. + + See the + [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/uploads", + body=maybe_transform( + { + "bytes": bytes, + "filename": filename, + "mime_type": mime_type, + "purpose": purpose, + }, + upload_create_params.UploadCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Upload, + ) + + def cancel( + self, + upload_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Upload: + """Cancels the Upload. + + No Parts may be added after an Upload is cancelled. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not upload_id: + raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") + return self._post( + f"/uploads/{upload_id}/cancel", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Upload, + ) + + def complete( + self, + upload_id: str, + *, + part_ids: List[str], + md5: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Upload: + """ + Completes the + [Upload](https://platform.openai.com/docs/api-reference/uploads/object). + + Within the returned Upload object, there is a nested + [File](https://platform.openai.com/docs/api-reference/files/object) object that + is ready to use in the rest of the platform. + + You can specify the order of the Parts by passing in an ordered list of the Part + IDs. + + The number of bytes uploaded upon completion must match the number of bytes + initially specified when creating the Upload object. No Parts may be added after + an Upload is completed. + + Args: + part_ids: The ordered list of Part IDs. + + md5: The optional md5 checksum for the file contents to verify if the bytes uploaded + matches what you expect. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not upload_id: + raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") + return self._post( + f"/uploads/{upload_id}/complete", + body=maybe_transform( + { + "part_ids": part_ids, + "md5": md5, + }, + upload_complete_params.UploadCompleteParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Upload, + ) + + +class AsyncUploads(AsyncAPIResource): + @cached_property + def parts(self) -> AsyncParts: + return AsyncParts(self._client) + + @cached_property + def with_raw_response(self) -> AsyncUploadsWithRawResponse: + return AsyncUploadsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncUploadsWithStreamingResponse: + return AsyncUploadsWithStreamingResponse(self) + + async def create( + self, + *, + bytes: int, + filename: str, + mime_type: str, + purpose: Literal["assistants", "batch", "fine-tune", "vision"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Upload: + """ + Creates an intermediate + [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object + that you can add + [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to. + Currently, an Upload can accept at most 8 GB in total and expires after an hour + after you create it. + + Once you complete the Upload, we will create a + [File](https://platform.openai.com/docs/api-reference/files/object) object that + contains all the parts you uploaded. This File is usable in the rest of our + platform as a regular File object. + + For certain `purpose`s, the correct `mime_type` must be specified. 
Please refer + to documentation for the supported MIME types for your use case: + + - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files) + + For guidance on the proper filename extensions for each purpose, please follow + the documentation on + [creating a File](https://platform.openai.com/docs/api-reference/files/create). + + Args: + bytes: The number of bytes in the file you are uploading. + + filename: The name of the file to upload. + + mime_type: The MIME type of the file. + + This must fall within the supported MIME types for your file purpose. See the + supported MIME types for assistants and vision. + + purpose: The intended purpose of the uploaded file. + + See the + [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/uploads", + body=await async_maybe_transform( + { + "bytes": bytes, + "filename": filename, + "mime_type": mime_type, + "purpose": purpose, + }, + upload_create_params.UploadCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Upload, + ) + + async def cancel( + self, + upload_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Upload: + """Cancels the Upload. + + No Parts may be added after an Upload is cancelled. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not upload_id: + raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") + return await self._post( + f"/uploads/{upload_id}/cancel", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Upload, + ) + + async def complete( + self, + upload_id: str, + *, + part_ids: List[str], + md5: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Upload: + """ + Completes the + [Upload](https://platform.openai.com/docs/api-reference/uploads/object). + + Within the returned Upload object, there is a nested + [File](https://platform.openai.com/docs/api-reference/files/object) object that + is ready to use in the rest of the platform. + + You can specify the order of the Parts by passing in an ordered list of the Part + IDs. 
+ + The number of bytes uploaded upon completion must match the number of bytes + initially specified when creating the Upload object. No Parts may be added after + an Upload is completed. + + Args: + part_ids: The ordered list of Part IDs. + + md5: The optional md5 checksum for the file contents to verify if the bytes uploaded + matches what you expect. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not upload_id: + raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") + return await self._post( + f"/uploads/{upload_id}/complete", + body=await async_maybe_transform( + { + "part_ids": part_ids, + "md5": md5, + }, + upload_complete_params.UploadCompleteParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Upload, + ) + + +class UploadsWithRawResponse: + def __init__(self, uploads: Uploads) -> None: + self._uploads = uploads + + self.create = _legacy_response.to_raw_response_wrapper( + uploads.create, + ) + self.cancel = _legacy_response.to_raw_response_wrapper( + uploads.cancel, + ) + self.complete = _legacy_response.to_raw_response_wrapper( + uploads.complete, + ) + + @cached_property + def parts(self) -> PartsWithRawResponse: + return PartsWithRawResponse(self._uploads.parts) + + +class AsyncUploadsWithRawResponse: + def __init__(self, uploads: AsyncUploads) -> None: + self._uploads = uploads + + self.create = _legacy_response.async_to_raw_response_wrapper( + uploads.create, + ) + self.cancel = _legacy_response.async_to_raw_response_wrapper( + uploads.cancel, + ) + self.complete = _legacy_response.async_to_raw_response_wrapper( + uploads.complete, + ) + + @cached_property + def parts(self) -> AsyncPartsWithRawResponse: + return AsyncPartsWithRawResponse(self._uploads.parts) + + +class UploadsWithStreamingResponse: + def __init__(self, uploads: Uploads) -> None: + self._uploads = uploads + + self.create = to_streamed_response_wrapper( + uploads.create, + ) + self.cancel = to_streamed_response_wrapper( + uploads.cancel, + ) + self.complete = to_streamed_response_wrapper( + uploads.complete, + ) + + @cached_property + def parts(self) -> PartsWithStreamingResponse: + return PartsWithStreamingResponse(self._uploads.parts) + + +class AsyncUploadsWithStreamingResponse: + def __init__(self, uploads: AsyncUploads) -> None: + self._uploads = uploads + + self.create = async_to_streamed_response_wrapper( + uploads.create, + ) + self.cancel = async_to_streamed_response_wrapper( + uploads.cancel, + ) + self.complete = async_to_streamed_response_wrapper( + uploads.complete, + ) + + @cached_property + def parts(self) -> AsyncPartsWithStreamingResponse: + return AsyncPartsWithStreamingResponse(self._uploads.parts) diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index 7873efb34f..71f4a59b9e 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -10,6 +10,7 @@ FunctionDefinition as FunctionDefinition, FunctionParameters as FunctionParameters, ) +from .upload import Upload as Upload from .embedding import Embedding as Embedding from .chat_model import ChatModel as ChatModel from .completion import Completion as Completion @@ -28,7 +29,9 @@ from .file_create_params import FileCreateParams as FileCreateParams from 
.batch_create_params import BatchCreateParams as BatchCreateParams from .batch_request_counts import BatchRequestCounts as BatchRequestCounts +from .upload_create_params import UploadCreateParams as UploadCreateParams from .image_generate_params import ImageGenerateParams as ImageGenerateParams +from .upload_complete_params import UploadCompleteParams as UploadCompleteParams from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .moderation_create_params import ModerationCreateParams as ModerationCreateParams diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 85157653f2..783922539f 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -155,6 +155,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): exhausted. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. diff --git a/src/openai/types/upload.py b/src/openai/types/upload.py new file mode 100644 index 0000000000..1cf8ee97f8 --- /dev/null +++ b/src/openai/types/upload.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from .._models import BaseModel +from .file_object import FileObject + +__all__ = ["Upload"] + + +class Upload(BaseModel): + id: str + """The Upload unique identifier, which can be referenced in API endpoints.""" + + bytes: int + """The intended number of bytes to be uploaded.""" + + created_at: int + """The Unix timestamp (in seconds) for when the Upload was created.""" + + expires_at: int + """The Unix timestamp (in seconds) for when the Upload was created.""" + + filename: str + """The name of the file to be uploaded.""" + + object: Literal["upload"] + """The object type, which is always "upload".""" + + purpose: str + """The intended purpose of the file. + + [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose) + for acceptable values. + """ + + status: Literal["pending", "completed", "cancelled", "expired"] + """The status of the Upload.""" + + file: Optional[FileObject] = None + """The ready File object after the Upload is completed.""" diff --git a/src/openai/types/upload_complete_params.py b/src/openai/types/upload_complete_params.py new file mode 100644 index 0000000000..cce568d5c6 --- /dev/null +++ b/src/openai/types/upload_complete_params.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Required, TypedDict + +__all__ = ["UploadCompleteParams"] + + +class UploadCompleteParams(TypedDict, total=False): + part_ids: Required[List[str]] + """The ordered list of Part IDs.""" + + md5: str + """ + The optional md5 checksum for the file contents to verify if the bytes uploaded + matches what you expect. 
+ """ diff --git a/src/openai/types/upload_create_params.py b/src/openai/types/upload_create_params.py new file mode 100644 index 0000000000..3165ebcc7a --- /dev/null +++ b/src/openai/types/upload_create_params.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["UploadCreateParams"] + + +class UploadCreateParams(TypedDict, total=False): + bytes: Required[int] + """The number of bytes in the file you are uploading.""" + + filename: Required[str] + """The name of the file to upload.""" + + mime_type: Required[str] + """The MIME type of the file. + + This must fall within the supported MIME types for your file purpose. See the + supported MIME types for assistants and vision. + """ + + purpose: Required[Literal["assistants", "batch", "fine-tune", "vision"]] + """The intended purpose of the uploaded file. + + See the + [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + """ diff --git a/src/openai/types/uploads/__init__.py b/src/openai/types/uploads/__init__.py new file mode 100644 index 0000000000..41deb0ab4b --- /dev/null +++ b/src/openai/types/uploads/__init__.py @@ -0,0 +1,6 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .upload_part import UploadPart as UploadPart +from .part_create_params import PartCreateParams as PartCreateParams diff --git a/src/openai/types/uploads/part_create_params.py b/src/openai/types/uploads/part_create_params.py new file mode 100644 index 0000000000..9851ca41e9 --- /dev/null +++ b/src/openai/types/uploads/part_create_params.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +from ..._types import FileTypes + +__all__ = ["PartCreateParams"] + + +class PartCreateParams(TypedDict, total=False): + data: Required[FileTypes] + """The chunk of bytes for this Part.""" diff --git a/src/openai/types/uploads/upload_part.py b/src/openai/types/uploads/upload_part.py new file mode 100644 index 0000000000..e09621d8f9 --- /dev/null +++ b/src/openai/types/uploads/upload_part.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["UploadPart"] + + +class UploadPart(BaseModel): + id: str + """The upload Part unique identifier, which can be referenced in API endpoints.""" + + created_at: int + """The Unix timestamp (in seconds) for when the Part was created.""" + + object: Literal["upload.part"] + """The object type, which is always `upload.part`.""" + + upload_id: str + """The ID of the Upload object that this Part was added to.""" diff --git a/tests/api_resources/test_uploads.py b/tests/api_resources/test_uploads.py new file mode 100644 index 0000000000..cb62df6b51 --- /dev/null +++ b/tests/api_resources/test_uploads.py @@ -0,0 +1,280 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
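Taken together, the resource methods and parameter types added above suggest a straightforward calling pattern. The following is a rough sketch only; the filename, MIME type, purpose, and payload are illustrative placeholders, not values taken from this patch:

```python
from openai import OpenAI

client = OpenAI()

data = b'{"prompt": "hello", "completion": "world"}\n'

# Reserve an Upload for the total byte count, add a single Part, then
# complete it with the ordered list of Part IDs.
upload = client.uploads.create(
    bytes=len(data),
    filename="training.jsonl",      # placeholder name
    mime_type="application/jsonl",  # placeholder MIME type
    purpose="fine-tune",
)
part = client.uploads.parts.create(upload_id=upload.id, data=data)
completed = client.uploads.complete(upload_id=upload.id, part_ids=[part.id])

# Once completed, the nested File object is ready to use elsewhere.
print(completed.status, completed.file.id if completed.file else None)
```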
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from openai import OpenAI, AsyncOpenAI +from tests.utils import assert_matches_type +from openai.types import Upload + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010/") + + +class TestUploads: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + def test_method_create(self, client: OpenAI) -> None: + upload = client.uploads.create( + bytes=0, + filename="filename", + mime_type="mime_type", + purpose="assistants", + ) + assert_matches_type(Upload, upload, path=["response"]) + + @parametrize + def test_raw_response_create(self, client: OpenAI) -> None: + response = client.uploads.with_raw_response.create( + bytes=0, + filename="filename", + mime_type="mime_type", + purpose="assistants", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + upload = response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + @parametrize + def test_streaming_response_create(self, client: OpenAI) -> None: + with client.uploads.with_streaming_response.create( + bytes=0, + filename="filename", + mime_type="mime_type", + purpose="assistants", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + upload = response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_method_cancel(self, client: OpenAI) -> None: + upload = client.uploads.cancel( + "upload_abc123", + ) + assert_matches_type(Upload, upload, path=["response"]) + + @parametrize + def test_raw_response_cancel(self, client: OpenAI) -> None: + response = client.uploads.with_raw_response.cancel( + "upload_abc123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + upload = response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + @parametrize + def test_streaming_response_cancel(self, client: OpenAI) -> None: + with client.uploads.with_streaming_response.cancel( + "upload_abc123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + upload = response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_cancel(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"): + client.uploads.with_raw_response.cancel( + "", + ) + + @parametrize + def test_method_complete(self, client: OpenAI) -> None: + upload = client.uploads.complete( + upload_id="upload_abc123", + part_ids=["string", "string", "string"], + ) + assert_matches_type(Upload, upload, path=["response"]) + + @parametrize + def test_method_complete_with_all_params(self, client: OpenAI) -> None: + upload = client.uploads.complete( + upload_id="upload_abc123", + part_ids=["string", "string", "string"], + md5="md5", + ) + assert_matches_type(Upload, upload, path=["response"]) + + @parametrize + def test_raw_response_complete(self, client: OpenAI) -> None: + response = client.uploads.with_raw_response.complete( + upload_id="upload_abc123", + part_ids=["string", "string", "string"], + ) + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + upload = response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + @parametrize + def test_streaming_response_complete(self, client: OpenAI) -> None: + with client.uploads.with_streaming_response.complete( + upload_id="upload_abc123", + part_ids=["string", "string", "string"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + upload = response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_complete(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"): + client.uploads.with_raw_response.complete( + upload_id="", + part_ids=["string", "string", "string"], + ) + + +class TestAsyncUploads: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + async def test_method_create(self, async_client: AsyncOpenAI) -> None: + upload = await async_client.uploads.create( + bytes=0, + filename="filename", + mime_type="mime_type", + purpose="assistants", + ) + assert_matches_type(Upload, upload, path=["response"]) + + @parametrize + async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: + response = await async_client.uploads.with_raw_response.create( + bytes=0, + filename="filename", + mime_type="mime_type", + purpose="assistants", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + upload = response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + @parametrize + async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: + async with async_client.uploads.with_streaming_response.create( + bytes=0, + filename="filename", + mime_type="mime_type", + purpose="assistants", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + upload = await response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_method_cancel(self, async_client: AsyncOpenAI) -> None: + upload = await async_client.uploads.cancel( + "upload_abc123", + ) + assert_matches_type(Upload, upload, path=["response"]) + + @parametrize + async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None: + response = await async_client.uploads.with_raw_response.cancel( + "upload_abc123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + upload = response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + @parametrize + async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None: + async with async_client.uploads.with_streaming_response.cancel( + "upload_abc123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + upload = await response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"): + 
await async_client.uploads.with_raw_response.cancel( + "", + ) + + @parametrize + async def test_method_complete(self, async_client: AsyncOpenAI) -> None: + upload = await async_client.uploads.complete( + upload_id="upload_abc123", + part_ids=["string", "string", "string"], + ) + assert_matches_type(Upload, upload, path=["response"]) + + @parametrize + async def test_method_complete_with_all_params(self, async_client: AsyncOpenAI) -> None: + upload = await async_client.uploads.complete( + upload_id="upload_abc123", + part_ids=["string", "string", "string"], + md5="md5", + ) + assert_matches_type(Upload, upload, path=["response"]) + + @parametrize + async def test_raw_response_complete(self, async_client: AsyncOpenAI) -> None: + response = await async_client.uploads.with_raw_response.complete( + upload_id="upload_abc123", + part_ids=["string", "string", "string"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + upload = response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + @parametrize + async def test_streaming_response_complete(self, async_client: AsyncOpenAI) -> None: + async with async_client.uploads.with_streaming_response.complete( + upload_id="upload_abc123", + part_ids=["string", "string", "string"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + upload = await response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_complete(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"): + await async_client.uploads.with_raw_response.complete( + upload_id="", + part_ids=["string", "string", "string"], + ) diff --git a/tests/api_resources/uploads/__init__.py b/tests/api_resources/uploads/__init__.py new file mode 100644 index 0000000000..fd8019a9a1 --- /dev/null +++ b/tests/api_resources/uploads/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/uploads/test_parts.py b/tests/api_resources/uploads/test_parts.py new file mode 100644 index 0000000000..2bba241a6d --- /dev/null +++ b/tests/api_resources/uploads/test_parts.py @@ -0,0 +1,106 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
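For larger payloads, the Parts endpoint exercised below is meant to be called repeatedly. A hypothetical chunking helper might look like the sketch that follows; the chunk size is arbitrary and not a limit stated in this patch:

```python
from openai import OpenAI
from openai.types import Upload


def upload_in_parts(client: OpenAI, data: bytes, filename: str, mime_type: str) -> Upload:
    # Arbitrary illustrative chunk size; the API enforces its own per-Part limits.
    chunk_size = 8 * 1024 * 1024

    upload = client.uploads.create(
        bytes=len(data), filename=filename, mime_type=mime_type, purpose="assistants"
    )
    part_ids = [
        client.uploads.parts.create(upload_id=upload.id, data=data[i : i + chunk_size]).id
        for i in range(0, len(data), chunk_size)
    ]
    # Part order is significant: the resulting File is assembled from part_ids as given.
    return client.uploads.complete(upload_id=upload.id, part_ids=part_ids)
```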
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from openai import OpenAI, AsyncOpenAI +from tests.utils import assert_matches_type +from openai.types.uploads import UploadPart + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010/") + + +class TestParts: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + def test_method_create(self, client: OpenAI) -> None: + part = client.uploads.parts.create( + upload_id="upload_abc123", + data=b"raw file contents", + ) + assert_matches_type(UploadPart, part, path=["response"]) + + @parametrize + def test_raw_response_create(self, client: OpenAI) -> None: + response = client.uploads.parts.with_raw_response.create( + upload_id="upload_abc123", + data=b"raw file contents", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + part = response.parse() + assert_matches_type(UploadPart, part, path=["response"]) + + @parametrize + def test_streaming_response_create(self, client: OpenAI) -> None: + with client.uploads.parts.with_streaming_response.create( + upload_id="upload_abc123", + data=b"raw file contents", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + part = response.parse() + assert_matches_type(UploadPart, part, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_create(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"): + client.uploads.parts.with_raw_response.create( + upload_id="", + data=b"raw file contents", + ) + + +class TestAsyncParts: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + async def test_method_create(self, async_client: AsyncOpenAI) -> None: + part = await async_client.uploads.parts.create( + upload_id="upload_abc123", + data=b"raw file contents", + ) + assert_matches_type(UploadPart, part, path=["response"]) + + @parametrize + async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: + response = await async_client.uploads.parts.with_raw_response.create( + upload_id="upload_abc123", + data=b"raw file contents", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + part = response.parse() + assert_matches_type(UploadPart, part, path=["response"]) + + @parametrize + async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: + async with async_client.uploads.parts.with_streaming_response.create( + upload_id="upload_abc123", + data=b"raw file contents", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + part = await response.parse() + assert_matches_type(UploadPart, part, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_create(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"): + await async_client.uploads.parts.with_raw_response.create( + upload_id="", + data=b"raw file contents", + ) From 9d974f082ca4b6c6f23085062c8d8fde22dd1627 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 22 Jul 2024 11:36:03 +0000 Subject: [PATCH 042/192] docs(readme): fix example snippet imports (#1569) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8102ea9673..c5a78cb585 100644 --- a/README.md +++ b/README.md @@ -138,7 +138,7 @@ List methods in the OpenAI API are paginated. This library provides auto-paginating iterators with each list response, so you do not have to request successive pages manually: ```python -import openai +from openai import OpenAI client = OpenAI() @@ -156,7 +156,7 @@ Or, asynchronously: ```python import asyncio -import openai +from openai import AsyncOpenAI client = AsyncOpenAI() From d3d26bb0c0fe25e73ba2fd5b3a91e74b3b711a67 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 23 Jul 2024 10:34:47 +0000 Subject: [PATCH 043/192] chore(tests): update prism version (#1572) --- scripts/mock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/mock b/scripts/mock index fe89a1d084..f586157699 100755 --- a/scripts/mock +++ b/scripts/mock @@ -21,7 +21,7 @@ echo "==> Starting mock server with URL ${URL}" # Run prism mock on the given spec if [ "$1" == "--daemon" ]; then - npm exec --package=@stoplight/prism-cli@~5.8 -- prism mock "$URL" &> .prism.log & + npm exec --package=@stainless-api/prism-cli@5.8.4 -- prism mock "$URL" &> .prism.log & # Wait for server to come online echo -n "Waiting for server" @@ -37,5 +37,5 @@ if [ "$1" == "--daemon" ]; then echo else - npm exec --package=@stoplight/prism-cli@~5.8 -- prism mock "$URL" + npm exec --package=@stainless-api/prism-cli@5.8.4 -- prism mock "$URL" fi From b31a7f318a42b906353a89a576bd2f56feb1a852 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 29 Jul 2024 14:56:54 +0000 Subject: [PATCH 044/192] chore(internal): add type construction helper (#1584) --- src/openai/_models.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/openai/_models.py b/src/openai/_models.py index eb7ce3bde9..5148d5a7b3 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -406,6 +406,15 @@ def build( return cast(_BaseModelT, construct_type(type_=base_model_cls, value=kwargs)) +def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T: + """Loose coercion to the expected type with construction of nested values. + + Note: the returned value from this function is not guaranteed to match the + given type. + """ + return cast(_T, construct_type(value=value, type_=type_)) + + def construct_type(*, value: object, type_: object) -> object: """Loose coercion to the expected type with construction of nested values. 
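The helper introduced in the patch above is an internal utility. Assuming the module path shown in the diff (`src/openai/_models.py`), its loose, unvalidated construction behaviour could be exercised roughly like this, with a deliberately partial payload:

```python
from openai._models import construct_type_unchecked
from openai.types import Upload

# Partial, illustrative payload: loose coercion builds nested values but does
# not guarantee the result actually satisfies the target type.
raw = {"id": "upload_abc123", "object": "upload", "status": "pending"}

upload = construct_type_unchecked(value=raw, type_=Upload)
print(upload.id, upload.status)
```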
From 1de0e82d0439c4eae04e15fdb9ebc45f68aa68b8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 2 Aug 2024 02:47:32 +0000 Subject: [PATCH 045/192] feat: extract out `ImageModel`, `AudioModel`, `SpeechModel` (#1586) --- api.md | 16 +- src/openai/resources/audio/speech.py | 5 +- src/openai/resources/audio/transcriptions.py | 5 +- src/openai/resources/audio/translations.py | 6 +- src/openai/resources/beta/assistants.py | 57 +---- .../resources/beta/threads/runs/runs.py | 241 +----------------- src/openai/resources/beta/threads/threads.py | 241 +----------------- src/openai/resources/images.py | 13 +- src/openai/resources/moderations.py | 6 +- src/openai/types/__init__.py | 3 + src/openai/types/audio/__init__.py | 1 + .../types/audio/speech_create_params.py | 4 +- src/openai/types/audio/speech_model.py | 7 + .../audio/transcription_create_params.py | 3 +- .../types/audio/translation_create_params.py | 5 +- src/openai/types/audio_model.py | 7 + .../types/beta/assistant_create_params.py | 31 +-- .../beta/thread_create_and_run_params.py | 30 +-- .../types/beta/threads/run_create_params.py | 30 +-- .../types/image_create_variation_params.py | 3 +- src/openai/types/image_edit_params.py | 3 +- src/openai/types/image_generate_params.py | 4 +- src/openai/types/image_model.py | 7 + src/openai/types/moderation_create_params.py | 6 +- src/openai/types/moderation_model.py | 7 + 25 files changed, 111 insertions(+), 630 deletions(-) create mode 100644 src/openai/types/audio/speech_model.py create mode 100644 src/openai/types/audio_model.py create mode 100644 src/openai/types/image_model.py create mode 100644 src/openai/types/moderation_model.py diff --git a/api.md b/api.md index 933095786a..31be4e06a7 100644 --- a/api.md +++ b/api.md @@ -91,7 +91,7 @@ Methods: Types: ```python -from openai.types import Image, ImagesResponse +from openai.types import Image, ImageModel, ImagesResponse ``` Methods: @@ -102,6 +102,12 @@ Methods: # Audio +Types: + +```python +from openai.types import AudioModel +``` + ## Transcriptions Types: @@ -128,6 +134,12 @@ Methods: ## Speech +Types: + +```python +from openai.types.audio import SpeechModel +``` + Methods: - client.audio.speech.create(\*\*params) -> HttpxBinaryResponseContent @@ -137,7 +149,7 @@ Methods: Types: ```python -from openai.types import Moderation, ModerationCreateResponse +from openai.types import Moderation, ModerationModel, ModerationCreateResponse ``` Methods: diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py index c9e6a70b62..a0df9ec487 100644 --- a/src/openai/resources/audio/speech.py +++ b/src/openai/resources/audio/speech.py @@ -23,6 +23,7 @@ ) from ...types.audio import speech_create_params from ..._base_client import make_request_options +from ...types.audio.speech_model import SpeechModel __all__ = ["Speech", "AsyncSpeech"] @@ -40,7 +41,7 @@ def create( self, *, input: str, - model: Union[str, Literal["tts-1", "tts-1-hd"]], + model: Union[str, SpeechModel], voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, @@ -113,7 +114,7 @@ async def create( self, *, input: str, - model: Union[str, Literal["tts-1", "tts-1-hd"]], + model: Union[str, SpeechModel], voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = 
NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py index f190e00227..1ee962411c 100644 --- a/src/openai/resources/audio/transcriptions.py +++ b/src/openai/resources/audio/transcriptions.py @@ -20,6 +20,7 @@ from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ...types.audio import transcription_create_params from ..._base_client import make_request_options +from ...types.audio_model import AudioModel from ...types.audio.transcription import Transcription __all__ = ["Transcriptions", "AsyncTranscriptions"] @@ -38,7 +39,7 @@ def create( self, *, file: FileTypes, - model: Union[str, Literal["whisper-1"]], + model: Union[str, AudioModel], language: str | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, @@ -134,7 +135,7 @@ async def create( self, *, file: FileTypes, - model: Union[str, Literal["whisper-1"]], + model: Union[str, AudioModel], language: str | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, diff --git a/src/openai/resources/audio/translations.py b/src/openai/resources/audio/translations.py index 6f84153ba9..ed97ccf840 100644 --- a/src/openai/resources/audio/translations.py +++ b/src/openai/resources/audio/translations.py @@ -3,7 +3,6 @@ from __future__ import annotations from typing import Union, Mapping, cast -from typing_extensions import Literal import httpx @@ -20,6 +19,7 @@ from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ...types.audio import translation_create_params from ..._base_client import make_request_options +from ...types.audio_model import AudioModel from ...types.audio.translation import Translation __all__ = ["Translations", "AsyncTranslations"] @@ -38,7 +38,7 @@ def create( self, *, file: FileTypes, - model: Union[str, Literal["whisper-1"]], + model: Union[str, AudioModel], prompt: str | NotGiven = NOT_GIVEN, response_format: str | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, @@ -119,7 +119,7 @@ async def create( self, *, file: FileTypes, - model: Union[str, Literal["whisper-1"]], + model: Union[str, AudioModel], prompt: str | NotGiven = NOT_GIVEN, response_format: str | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index 531302c126..b4dc3cfdd6 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -23,6 +23,7 @@ assistant_update_params, ) from ..._base_client import AsyncPaginator, make_request_options +from ...types.chat_model import ChatModel from ...types.beta.assistant import Assistant from ...types.beta.assistant_deleted import AssistantDeleted from ...types.beta.assistant_tool_param import AssistantToolParam @@ -43,33 +44,7 @@ def with_streaming_response(self) -> AssistantsWithStreamingResponse: def create( self, *, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - 
"gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - ], + model: Union[str, ChatModel], description: Optional[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, @@ -434,33 +409,7 @@ def with_streaming_response(self) -> AsyncAssistantsWithStreamingResponse: async def create( self, *, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - ], + model: Union[str, ChatModel], description: Optional[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index 6e562cb0e5..92e6c2e1c8 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -28,6 +28,7 @@ from ....._streaming import Stream, AsyncStream from .....pagination import SyncCursorPage, AsyncCursorPage from ....._base_client import AsyncPaginator, make_request_options +from .....types.chat_model import ChatModel from .....types.beta.threads import ( run_list_params, run_create_params, @@ -68,35 +69,7 @@ def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, + model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, @@ -223,35 +196,7 @@ def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, + model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, 
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -377,35 +322,7 @@ def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, + model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -530,35 +447,7 @@ def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, + model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, @@ -970,35 +859,7 @@ async def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, + model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, @@ -1125,35 +986,7 @@ async def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - 
"gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, + model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1279,35 +1112,7 @@ async def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, + model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1432,35 +1237,7 @@ async def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, + model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index 1587813210..daecb74b7c 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -41,6 +41,7 @@ thread_create_and_run_params, ) from ...._base_client import make_request_options +from ....types.chat_model import ChatModel from ....types.beta.thread import Thread from ....types.beta.threads.run import Run from ....types.beta.thread_deleted import ThreadDeleted @@ -253,35 +254,7 @@ def create_and_run( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - 
"gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, + model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, @@ -407,35 +380,7 @@ def create_and_run( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, + model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -560,35 +505,7 @@ def create_and_run( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, + model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -712,35 +629,7 @@ def create_and_run( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, + model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = 
NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, @@ -993,35 +882,7 @@ async def create_and_run( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, + model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, @@ -1147,35 +1008,7 @@ async def create_and_run( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, + model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1300,35 +1133,7 @@ async def create_and_run( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, + model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1452,35 +1257,7 @@ async def create_and_run( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - 
"gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, + model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py index c5e1acd15b..0913b572cb 100644 --- a/src/openai/resources/images.py +++ b/src/openai/resources/images.py @@ -20,6 +20,7 @@ from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from .._base_client import make_request_options +from ..types.image_model import ImageModel from ..types.images_response import ImagesResponse __all__ = ["Images", "AsyncImages"] @@ -38,7 +39,7 @@ def create_variation( self, *, image: FileTypes, - model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, @@ -113,7 +114,7 @@ def edit( image: FileTypes, prompt: str, mask: FileTypes | NotGiven = NOT_GIVEN, - model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, @@ -194,7 +195,7 @@ def generate( self, *, prompt: str, - model: Union[str, Literal["dall-e-2", "dall-e-3"], None] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, @@ -284,7 +285,7 @@ async def create_variation( self, *, image: FileTypes, - model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, @@ -359,7 +360,7 @@ async def edit( image: FileTypes, prompt: str, mask: FileTypes | NotGiven = NOT_GIVEN, - model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, @@ -440,7 +441,7 @@ async def generate( self, *, prompt: str, - model: Union[str, Literal["dall-e-2", "dall-e-3"], None] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", 
"b64_json"]] | NotGiven = NOT_GIVEN, diff --git a/src/openai/resources/moderations.py b/src/openai/resources/moderations.py index e5259643e7..b9ad9972f0 100644 --- a/src/openai/resources/moderations.py +++ b/src/openai/resources/moderations.py @@ -3,7 +3,6 @@ from __future__ import annotations from typing import List, Union -from typing_extensions import Literal import httpx @@ -18,6 +17,7 @@ from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from .._base_client import make_request_options +from ..types.moderation_model import ModerationModel from ..types.moderation_create_response import ModerationCreateResponse __all__ = ["Moderations", "AsyncModerations"] @@ -36,7 +36,7 @@ def create( self, *, input: Union[str, List[str]], - model: Union[str, Literal["text-moderation-latest", "text-moderation-stable"]] | NotGiven = NOT_GIVEN, + model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -96,7 +96,7 @@ async def create( self, *, input: Union[str, List[str]], - model: Union[str, Literal["text-moderation-latest", "text-moderation-stable"]] | NotGiven = NOT_GIVEN, + model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index 71f4a59b9e..84916962cc 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -15,14 +15,17 @@ from .chat_model import ChatModel as ChatModel from .completion import Completion as Completion from .moderation import Moderation as Moderation +from .audio_model import AudioModel as AudioModel from .batch_error import BatchError as BatchError from .file_object import FileObject as FileObject +from .image_model import ImageModel as ImageModel from .file_content import FileContent as FileContent from .file_deleted import FileDeleted as FileDeleted from .model_deleted import ModelDeleted as ModelDeleted from .images_response import ImagesResponse as ImagesResponse from .completion_usage import CompletionUsage as CompletionUsage from .file_list_params import FileListParams as FileListParams +from .moderation_model import ModerationModel as ModerationModel from .batch_list_params import BatchListParams as BatchListParams from .completion_choice import CompletionChoice as CompletionChoice from .image_edit_params import ImageEditParams as ImageEditParams diff --git a/src/openai/types/audio/__init__.py b/src/openai/types/audio/__init__.py index 8d2c44c86a..1de5c0ff82 100644 --- a/src/openai/types/audio/__init__.py +++ b/src/openai/types/audio/__init__.py @@ -3,6 +3,7 @@ from __future__ import annotations from .translation import Translation as Translation +from .speech_model import SpeechModel as SpeechModel from .transcription import Transcription as Transcription from .speech_create_params import SpeechCreateParams as SpeechCreateParams from .translation_create_params import TranslationCreateParams as TranslationCreateParams diff --git 
a/src/openai/types/audio/speech_create_params.py b/src/openai/types/audio/speech_create_params.py index 8d75ec4ccc..dff66e49c7 100644 --- a/src/openai/types/audio/speech_create_params.py +++ b/src/openai/types/audio/speech_create_params.py @@ -5,6 +5,8 @@ from typing import Union from typing_extensions import Literal, Required, TypedDict +from .speech_model import SpeechModel + __all__ = ["SpeechCreateParams"] @@ -12,7 +14,7 @@ class SpeechCreateParams(TypedDict, total=False): input: Required[str] """The text to generate audio for. The maximum length is 4096 characters.""" - model: Required[Union[str, Literal["tts-1", "tts-1-hd"]]] + model: Required[Union[str, SpeechModel]] """ One of the available [TTS models](https://platform.openai.com/docs/models/tts): `tts-1` or `tts-1-hd` diff --git a/src/openai/types/audio/speech_model.py b/src/openai/types/audio/speech_model.py new file mode 100644 index 0000000000..e92b898b99 --- /dev/null +++ b/src/openai/types/audio/speech_model.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +__all__ = ["SpeechModel"] + +SpeechModel = Literal["tts-1", "tts-1-hd"] diff --git a/src/openai/types/audio/transcription_create_params.py b/src/openai/types/audio/transcription_create_params.py index 6b2d5bae79..a825fefecb 100644 --- a/src/openai/types/audio/transcription_create_params.py +++ b/src/openai/types/audio/transcription_create_params.py @@ -6,6 +6,7 @@ from typing_extensions import Literal, Required, TypedDict from ..._types import FileTypes +from ..audio_model import AudioModel __all__ = ["TranscriptionCreateParams"] @@ -17,7 +18,7 @@ class TranscriptionCreateParams(TypedDict, total=False): flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. """ - model: Required[Union[str, Literal["whisper-1"]]] + model: Required[Union[str, AudioModel]] """ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is diff --git a/src/openai/types/audio/translation_create_params.py b/src/openai/types/audio/translation_create_params.py index f23a41ed5c..054996a134 100644 --- a/src/openai/types/audio/translation_create_params.py +++ b/src/openai/types/audio/translation_create_params.py @@ -3,9 +3,10 @@ from __future__ import annotations from typing import Union -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Required, TypedDict from ..._types import FileTypes +from ..audio_model import AudioModel __all__ = ["TranslationCreateParams"] @@ -17,7 +18,7 @@ class TranslationCreateParams(TypedDict, total=False): mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. """ - model: Required[Union[str, Literal["whisper-1"]]] + model: Required[Union[str, AudioModel]] """ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is diff --git a/src/openai/types/audio_model.py b/src/openai/types/audio_model.py new file mode 100644 index 0000000000..d48e1c06d3 --- /dev/null +++ b/src/openai/types/audio_model.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +__all__ = ["AudioModel"] + +AudioModel = Literal["whisper-1"] diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index 754752ae65..42a42ae04e 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -5,6 +5,7 @@ from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict +from ..chat_model import ChatModel from .assistant_tool_param import AssistantToolParam from .assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -22,35 +23,7 @@ class AssistantCreateParams(TypedDict, total=False): - model: Required[ - Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - ] - ] + model: Required[Union[str, ChatModel]] """ID of the model to use. You can use the diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index 9421a894d9..c3edf34813 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -5,6 +5,7 @@ from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict +from ..chat_model import ChatModel from .function_tool_param import FunctionToolParam from .file_search_tool_param import FileSearchToolParam from .code_interpreter_tool_param import CodeInterpreterToolParam @@ -77,34 +78,7 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): a maxium of 512 characters long. """ - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] + model: Union[str, ChatModel, None] """ The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index 81cd85188b..dca757ab5f 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -5,6 +5,7 @@ from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict +from ...chat_model import ChatModel from ..assistant_tool_param import AssistantToolParam from .message_content_part_param import MessageContentPartParam from ..code_interpreter_tool_param import CodeInterpreterToolParam @@ -74,34 +75,7 @@ class RunCreateParamsBase(TypedDict, total=False): a maxium of 512 characters long. 
""" - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] + model: Union[str, ChatModel, None] """ The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the diff --git a/src/openai/types/image_create_variation_params.py b/src/openai/types/image_create_variation_params.py index 2549307372..d6ecf0f1ae 100644 --- a/src/openai/types/image_create_variation_params.py +++ b/src/openai/types/image_create_variation_params.py @@ -6,6 +6,7 @@ from typing_extensions import Literal, Required, TypedDict from .._types import FileTypes +from .image_model import ImageModel __all__ = ["ImageCreateVariationParams"] @@ -17,7 +18,7 @@ class ImageCreateVariationParams(TypedDict, total=False): Must be a valid PNG file, less than 4MB, and square. """ - model: Union[str, Literal["dall-e-2"], None] + model: Union[str, ImageModel, None] """The model to use for image generation. Only `dall-e-2` is supported at this time. diff --git a/src/openai/types/image_edit_params.py b/src/openai/types/image_edit_params.py index 073456e349..a596a8692b 100644 --- a/src/openai/types/image_edit_params.py +++ b/src/openai/types/image_edit_params.py @@ -6,6 +6,7 @@ from typing_extensions import Literal, Required, TypedDict from .._types import FileTypes +from .image_model import ImageModel __all__ = ["ImageEditParams"] @@ -31,7 +32,7 @@ class ImageEditParams(TypedDict, total=False): PNG file, less than 4MB, and have the same dimensions as `image`. """ - model: Union[str, Literal["dall-e-2"], None] + model: Union[str, ImageModel, None] """The model to use for image generation. Only `dall-e-2` is supported at this time. diff --git a/src/openai/types/image_generate_params.py b/src/openai/types/image_generate_params.py index 18c56f8ed6..307adeb3da 100644 --- a/src/openai/types/image_generate_params.py +++ b/src/openai/types/image_generate_params.py @@ -5,6 +5,8 @@ from typing import Union, Optional from typing_extensions import Literal, Required, TypedDict +from .image_model import ImageModel + __all__ = ["ImageGenerateParams"] @@ -16,7 +18,7 @@ class ImageGenerateParams(TypedDict, total=False): `dall-e-3`. """ - model: Union[str, Literal["dall-e-2", "dall-e-3"], None] + model: Union[str, ImageModel, None] """The model to use for image generation.""" n: Optional[int] diff --git a/src/openai/types/image_model.py b/src/openai/types/image_model.py new file mode 100644 index 0000000000..ce6535ff2c --- /dev/null +++ b/src/openai/types/image_model.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +__all__ = ["ImageModel"] + +ImageModel = Literal["dall-e-2", "dall-e-3"] diff --git a/src/openai/types/moderation_create_params.py b/src/openai/types/moderation_create_params.py index d4608def54..337682194d 100644 --- a/src/openai/types/moderation_create_params.py +++ b/src/openai/types/moderation_create_params.py @@ -3,7 +3,9 @@ from __future__ import annotations from typing import List, Union -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Required, TypedDict + +from .moderation_model import ModerationModel __all__ = ["ModerationCreateParams"] @@ -12,7 +14,7 @@ class ModerationCreateParams(TypedDict, total=False): input: Required[Union[str, List[str]]] """The input text to classify""" - model: Union[str, Literal["text-moderation-latest", "text-moderation-stable"]] + model: Union[str, ModerationModel] """ Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. diff --git a/src/openai/types/moderation_model.py b/src/openai/types/moderation_model.py new file mode 100644 index 0000000000..73362596f3 --- /dev/null +++ b/src/openai/types/moderation_model.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +__all__ = ["ModerationModel"] + +ModerationModel = Literal["text-moderation-latest", "text-moderation-stable"] From efc6f8d7ea382fcfc70cf4f50a332aa710c4bdd2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 2 Aug 2024 04:04:44 +0000 Subject: [PATCH 046/192] feat: make enums not nominal (#1588) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 4e4cb5509c..6cc7757636 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-77cfff37114bc9f141c7e6107eb5f1b38d8cc99bc3d4ce03a066db2b6b649c69.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b04761ffd2adad3cc19a6dc6fc696ac445878219972f891881a967340fa9a6b0.yml From 748b5f7c77273c2d85c4185077246e3308566f54 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2024 11:02:33 +0000 Subject: [PATCH 047/192] chore(internal): use `TypeAlias` marker for type assignments (#1597) --- src/openai/types/audio/speech_model.py | 4 ++-- src/openai/types/audio_model.py | 4 ++-- src/openai/types/beta/assistant_create_params.py | 4 ++-- src/openai/types/beta/assistant_response_format_option.py | 4 ++-- .../types/beta/assistant_response_format_option_param.py | 4 ++-- src/openai/types/beta/assistant_stream_event.py | 4 ++-- src/openai/types/beta/assistant_tool.py | 6 ++++-- src/openai/types/beta/assistant_tool_choice_option.py | 4 ++-- .../types/beta/assistant_tool_choice_option_param.py | 4 ++-- src/openai/types/beta/assistant_tool_param.py | 3 ++- src/openai/types/beta/thread_create_and_run_params.py | 8 ++++---- src/openai/types/beta/thread_create_params.py | 6 +++--- src/openai/types/beta/threads/annotation.py | 4 ++-- src/openai/types/beta/threads/annotation_delta.py | 4 ++-- src/openai/types/beta/threads/message.py | 4 ++-- src/openai/types/beta/threads/message_content.py | 4 ++-- src/openai/types/beta/threads/message_content_delta.py | 4 ++-- .../types/beta/threads/message_content_part_param.py 
| 3 ++- src/openai/types/beta/threads/message_create_params.py | 4 ++-- src/openai/types/beta/threads/run_create_params.py | 4 ++-- src/openai/types/beta/threads/run_status.py | 4 ++-- .../types/beta/threads/runs/code_interpreter_tool_call.py | 4 ++-- .../beta/threads/runs/code_interpreter_tool_call_delta.py | 4 ++-- src/openai/types/beta/threads/runs/run_step.py | 6 ++++-- src/openai/types/beta/threads/runs/run_step_delta.py | 6 ++++-- src/openai/types/beta/threads/runs/tool_call.py | 4 ++-- src/openai/types/beta/threads/runs/tool_call_delta.py | 4 ++-- src/openai/types/beta/vector_store_create_params.py | 4 ++-- .../types/beta/vector_stores/file_batch_create_params.py | 4 ++-- src/openai/types/beta/vector_stores/file_create_params.py | 4 ++-- src/openai/types/beta/vector_stores/vector_store_file.py | 6 ++++-- .../types/chat/chat_completion_content_part_param.py | 5 ++++- src/openai/types/chat/chat_completion_message_param.py | 3 ++- src/openai/types/chat/chat_completion_role.py | 4 ++-- .../chat/chat_completion_tool_choice_option_param.py | 6 ++++-- src/openai/types/chat/completion_create_params.py | 4 ++-- src/openai/types/chat_model.py | 4 ++-- src/openai/types/file_content.py | 3 ++- src/openai/types/image_model.py | 4 ++-- src/openai/types/moderation_model.py | 4 ++-- src/openai/types/shared/function_parameters.py | 3 ++- src/openai/types/shared_params/function_parameters.py | 3 ++- 42 files changed, 99 insertions(+), 80 deletions(-) diff --git a/src/openai/types/audio/speech_model.py b/src/openai/types/audio/speech_model.py index e92b898b99..bd685ab34d 100644 --- a/src/openai/types/audio/speech_model.py +++ b/src/openai/types/audio/speech_model.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing_extensions import Literal +from typing_extensions import Literal, TypeAlias __all__ = ["SpeechModel"] -SpeechModel = Literal["tts-1", "tts-1-hd"] +SpeechModel: TypeAlias = Literal["tts-1", "tts-1-hd"] diff --git a/src/openai/types/audio_model.py b/src/openai/types/audio_model.py index d48e1c06d3..94ae84c015 100644 --- a/src/openai/types/audio_model.py +++ b/src/openai/types/audio_model.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
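For context on the `TypeAlias` markers this commit adds: a PEP 613 annotation tells a type checker that the assignment defines a type alias rather than an ordinary module-level variable, so the name can be used in annotations and forward references without ambiguity. A minimal sketch, assuming `typing_extensions` (needed on Python versions before 3.10):

    from typing_extensions import Literal, TypeAlias

    # The explicit marker makes the intent unambiguous to type checkers;
    # at runtime the value is the same plain Literal object either way.
    SpeechModel: TypeAlias = Literal["tts-1", "tts-1-hd"]

    def synthesize(model: SpeechModel) -> None:
        print(f"would synthesize speech with {model}")

    synthesize("tts-1-hd")  # accepted: one of the alias's literal members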
-from typing_extensions import Literal +from typing_extensions import Literal, TypeAlias __all__ = ["AudioModel"] -AudioModel = Literal["whisper-1"] +AudioModel: TypeAlias = Literal["whisper-1"] diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index 42a42ae04e..c10f7f57ad 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -3,7 +3,7 @@ from __future__ import annotations from typing import List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..chat_model import ChatModel from .assistant_tool_param import AssistantToolParam @@ -140,7 +140,7 @@ class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total= """Always `static`.""" -ToolResourcesFileSearchVectorStoreChunkingStrategy = Union[ +ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic ] diff --git a/src/openai/types/beta/assistant_response_format_option.py b/src/openai/types/beta/assistant_response_format_option.py index d4e05e0ea9..6ce390f6d6 100644 --- a/src/openai/types/beta/assistant_response_format_option.py +++ b/src/openai/types/beta/assistant_response_format_option.py @@ -1,10 +1,10 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import Union -from typing_extensions import Literal +from typing_extensions import Literal, TypeAlias from .assistant_response_format import AssistantResponseFormat __all__ = ["AssistantResponseFormatOption"] -AssistantResponseFormatOption = Union[Literal["none", "auto"], AssistantResponseFormat] +AssistantResponseFormatOption: TypeAlias = Union[Literal["none", "auto"], AssistantResponseFormat] diff --git a/src/openai/types/beta/assistant_response_format_option_param.py b/src/openai/types/beta/assistant_response_format_option_param.py index 46e04125d1..8100088723 100644 --- a/src/openai/types/beta/assistant_response_format_option_param.py +++ b/src/openai/types/beta/assistant_response_format_option_param.py @@ -3,10 +3,10 @@ from __future__ import annotations from typing import Union -from typing_extensions import Literal +from typing_extensions import Literal, TypeAlias from .assistant_response_format_param import AssistantResponseFormatParam __all__ = ["AssistantResponseFormatOptionParam"] -AssistantResponseFormatOptionParam = Union[Literal["none", "auto"], AssistantResponseFormatParam] +AssistantResponseFormatOptionParam: TypeAlias = Union[Literal["none", "auto"], AssistantResponseFormatParam] diff --git a/src/openai/types/beta/assistant_stream_event.py b/src/openai/types/beta/assistant_stream_event.py index de66888403..f1d8898ff2 100644 --- a/src/openai/types/beta/assistant_stream_event.py +++ b/src/openai/types/beta/assistant_stream_event.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from typing import Union -from typing_extensions import Literal, Annotated +from typing_extensions import Literal, Annotated, TypeAlias from .thread import Thread from ..._utils import PropertyInfo @@ -260,7 +260,7 @@ class ErrorEvent(BaseModel): event: Literal["error"] -AssistantStreamEvent = Annotated[ +AssistantStreamEvent: TypeAlias = Annotated[ Union[ ThreadCreated, ThreadRunCreated, diff --git a/src/openai/types/beta/assistant_tool.py b/src/openai/types/beta/assistant_tool.py index 7832da48cc..1bde6858b1 100644 --- a/src/openai/types/beta/assistant_tool.py +++ b/src/openai/types/beta/assistant_tool.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import Union -from typing_extensions import Annotated +from typing_extensions import Annotated, TypeAlias from ..._utils import PropertyInfo from .function_tool import FunctionTool @@ -10,4 +10,6 @@ __all__ = ["AssistantTool"] -AssistantTool = Annotated[Union[CodeInterpreterTool, FileSearchTool, FunctionTool], PropertyInfo(discriminator="type")] +AssistantTool: TypeAlias = Annotated[ + Union[CodeInterpreterTool, FileSearchTool, FunctionTool], PropertyInfo(discriminator="type") +] diff --git a/src/openai/types/beta/assistant_tool_choice_option.py b/src/openai/types/beta/assistant_tool_choice_option.py index 8958bc8fb0..e57c3278fb 100644 --- a/src/openai/types/beta/assistant_tool_choice_option.py +++ b/src/openai/types/beta/assistant_tool_choice_option.py @@ -1,10 +1,10 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import Union -from typing_extensions import Literal +from typing_extensions import Literal, TypeAlias from .assistant_tool_choice import AssistantToolChoice __all__ = ["AssistantToolChoiceOption"] -AssistantToolChoiceOption = Union[Literal["none", "auto", "required"], AssistantToolChoice] +AssistantToolChoiceOption: TypeAlias = Union[Literal["none", "auto", "required"], AssistantToolChoice] diff --git a/src/openai/types/beta/assistant_tool_choice_option_param.py b/src/openai/types/beta/assistant_tool_choice_option_param.py index 81b7f15136..cc0053d37e 100644 --- a/src/openai/types/beta/assistant_tool_choice_option_param.py +++ b/src/openai/types/beta/assistant_tool_choice_option_param.py @@ -3,10 +3,10 @@ from __future__ import annotations from typing import Union -from typing_extensions import Literal +from typing_extensions import Literal, TypeAlias from .assistant_tool_choice_param import AssistantToolChoiceParam __all__ = ["AssistantToolChoiceOptionParam"] -AssistantToolChoiceOptionParam = Union[Literal["none", "auto", "required"], AssistantToolChoiceParam] +AssistantToolChoiceOptionParam: TypeAlias = Union[Literal["none", "auto", "required"], AssistantToolChoiceParam] diff --git a/src/openai/types/beta/assistant_tool_param.py b/src/openai/types/beta/assistant_tool_param.py index 5b1d30ba2f..321c4b1ddb 100644 --- a/src/openai/types/beta/assistant_tool_param.py +++ b/src/openai/types/beta/assistant_tool_param.py @@ -3,6 +3,7 @@ from __future__ import annotations from typing import Union +from typing_extensions import TypeAlias from .function_tool_param import FunctionToolParam from .file_search_tool_param import FileSearchToolParam @@ -10,4 +11,4 @@ __all__ = ["AssistantToolParam"] -AssistantToolParam = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam] +AssistantToolParam: TypeAlias = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam] diff --git 
a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index c3edf34813..62cff921e2 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -3,7 +3,7 @@ from __future__ import annotations from typing import List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..chat_model import ChatModel from .function_tool_param import FunctionToolParam @@ -168,7 +168,7 @@ class ThreadMessageAttachmentToolFileSearch(TypedDict, total=False): """The type of tool being defined: `file_search`""" -ThreadMessageAttachmentTool = Union[CodeInterpreterToolParam, ThreadMessageAttachmentToolFileSearch] +ThreadMessageAttachmentTool: TypeAlias = Union[CodeInterpreterToolParam, ThreadMessageAttachmentToolFileSearch] class ThreadMessageAttachment(TypedDict, total=False): @@ -240,7 +240,7 @@ class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, """Always `static`.""" -ThreadToolResourcesFileSearchVectorStoreChunkingStrategy = Union[ +ThreadToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic, ] @@ -342,7 +342,7 @@ class ToolResources(TypedDict, total=False): file_search: ToolResourcesFileSearch -Tool = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam] +Tool: TypeAlias = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam] class TruncationStrategy(TypedDict, total=False): diff --git a/src/openai/types/beta/thread_create_params.py b/src/openai/types/beta/thread_create_params.py index e5ea14a94d..f9561aa48c 100644 --- a/src/openai/types/beta/thread_create_params.py +++ b/src/openai/types/beta/thread_create_params.py @@ -3,7 +3,7 @@ from __future__ import annotations from typing import List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Literal, Required, TypeAlias, TypedDict from .code_interpreter_tool_param import CodeInterpreterToolParam from .threads.message_content_part_param import MessageContentPartParam @@ -54,7 +54,7 @@ class MessageAttachmentToolFileSearch(TypedDict, total=False): """The type of tool being defined: `file_search`""" -MessageAttachmentTool = Union[CodeInterpreterToolParam, MessageAttachmentToolFileSearch] +MessageAttachmentTool: TypeAlias = Union[CodeInterpreterToolParam, MessageAttachmentToolFileSearch] class MessageAttachment(TypedDict, total=False): @@ -126,7 +126,7 @@ class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total= """Always `static`.""" -ToolResourcesFileSearchVectorStoreChunkingStrategy = Union[ +ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic ] diff --git a/src/openai/types/beta/threads/annotation.py b/src/openai/types/beta/threads/annotation.py index 31e228c831..13c10abf4d 100644 --- a/src/openai/types/beta/threads/annotation.py +++ b/src/openai/types/beta/threads/annotation.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from typing import Union -from typing_extensions import Annotated +from typing_extensions import Annotated, TypeAlias from ...._utils import PropertyInfo from .file_path_annotation import FilePathAnnotation @@ -9,4 +9,4 @@ __all__ = ["Annotation"] -Annotation = Annotated[Union[FileCitationAnnotation, FilePathAnnotation], PropertyInfo(discriminator="type")] +Annotation: TypeAlias = Annotated[Union[FileCitationAnnotation, FilePathAnnotation], PropertyInfo(discriminator="type")] diff --git a/src/openai/types/beta/threads/annotation_delta.py b/src/openai/types/beta/threads/annotation_delta.py index 912429672f..c7c6c89837 100644 --- a/src/openai/types/beta/threads/annotation_delta.py +++ b/src/openai/types/beta/threads/annotation_delta.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import Union -from typing_extensions import Annotated +from typing_extensions import Annotated, TypeAlias from ...._utils import PropertyInfo from .file_path_delta_annotation import FilePathDeltaAnnotation @@ -9,6 +9,6 @@ __all__ = ["AnnotationDelta"] -AnnotationDelta = Annotated[ +AnnotationDelta: TypeAlias = Annotated[ Union[FileCitationDeltaAnnotation, FilePathDeltaAnnotation], PropertyInfo(discriminator="type") ] diff --git a/src/openai/types/beta/threads/message.py b/src/openai/types/beta/threads/message.py index 90f083683d..298a1d4273 100644 --- a/src/openai/types/beta/threads/message.py +++ b/src/openai/types/beta/threads/message.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import List, Union, Optional -from typing_extensions import Literal +from typing_extensions import Literal, TypeAlias from ...._models import BaseModel from .message_content import MessageContent @@ -21,7 +21,7 @@ class AttachmentToolAssistantToolsFileSearchTypeOnly(BaseModel): """The type of tool being defined: `file_search`""" -AttachmentTool = Union[CodeInterpreterTool, AttachmentToolAssistantToolsFileSearchTypeOnly] +AttachmentTool: TypeAlias = Union[CodeInterpreterTool, AttachmentToolAssistantToolsFileSearchTypeOnly] class Attachment(BaseModel): diff --git a/src/openai/types/beta/threads/message_content.py b/src/openai/types/beta/threads/message_content.py index 4f17d14786..7b718c3ca9 100644 --- a/src/openai/types/beta/threads/message_content.py +++ b/src/openai/types/beta/threads/message_content.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import Union -from typing_extensions import Annotated +from typing_extensions import Annotated, TypeAlias from ...._utils import PropertyInfo from .text_content_block import TextContentBlock @@ -10,6 +10,6 @@ __all__ = ["MessageContent"] -MessageContent = Annotated[ +MessageContent: TypeAlias = Annotated[ Union[ImageFileContentBlock, ImageURLContentBlock, TextContentBlock], PropertyInfo(discriminator="type") ] diff --git a/src/openai/types/beta/threads/message_content_delta.py b/src/openai/types/beta/threads/message_content_delta.py index 6c5f732b12..667172c08f 100644 --- a/src/openai/types/beta/threads/message_content_delta.py +++ b/src/openai/types/beta/threads/message_content_delta.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from typing import Union -from typing_extensions import Annotated +from typing_extensions import Annotated, TypeAlias from ...._utils import PropertyInfo from .text_delta_block import TextDeltaBlock @@ -10,6 +10,6 @@ __all__ = ["MessageContentDelta"] -MessageContentDelta = Annotated[ +MessageContentDelta: TypeAlias = Annotated[ Union[ImageFileDeltaBlock, TextDeltaBlock, ImageURLDeltaBlock], PropertyInfo(discriminator="type") ] diff --git a/src/openai/types/beta/threads/message_content_part_param.py b/src/openai/types/beta/threads/message_content_part_param.py index d11442a3a9..dc09a01c27 100644 --- a/src/openai/types/beta/threads/message_content_part_param.py +++ b/src/openai/types/beta/threads/message_content_part_param.py @@ -3,6 +3,7 @@ from __future__ import annotations from typing import Union +from typing_extensions import TypeAlias from .text_content_block_param import TextContentBlockParam from .image_url_content_block_param import ImageURLContentBlockParam @@ -10,4 +11,4 @@ __all__ = ["MessageContentPartParam"] -MessageContentPartParam = Union[ImageFileContentBlockParam, ImageURLContentBlockParam, TextContentBlockParam] +MessageContentPartParam: TypeAlias = Union[ImageFileContentBlockParam, ImageURLContentBlockParam, TextContentBlockParam] diff --git a/src/openai/types/beta/threads/message_create_params.py b/src/openai/types/beta/threads/message_create_params.py index b1b12293b7..2b450deb5d 100644 --- a/src/openai/types/beta/threads/message_create_params.py +++ b/src/openai/types/beta/threads/message_create_params.py @@ -3,7 +3,7 @@ from __future__ import annotations from typing import Union, Iterable, Optional -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Literal, Required, TypeAlias, TypedDict from .message_content_part_param import MessageContentPartParam from ..code_interpreter_tool_param import CodeInterpreterToolParam @@ -41,7 +41,7 @@ class AttachmentToolFileSearch(TypedDict, total=False): """The type of tool being defined: `file_search`""" -AttachmentTool = Union[CodeInterpreterToolParam, AttachmentToolFileSearch] +AttachmentTool: TypeAlias = Union[CodeInterpreterToolParam, AttachmentToolFileSearch] class Attachment(TypedDict, total=False): diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index dca757ab5f..e0c42fd23f 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -3,7 +3,7 @@ from __future__ import annotations from typing import Union, Iterable, Optional -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Literal, Required, TypeAlias, TypedDict from ...chat_model import ChatModel from ..assistant_tool_param import AssistantToolParam @@ -154,7 +154,7 @@ class AdditionalMessageAttachmentToolFileSearch(TypedDict, total=False): """The type of tool being defined: `file_search`""" -AdditionalMessageAttachmentTool = Union[CodeInterpreterToolParam, AdditionalMessageAttachmentToolFileSearch] +AdditionalMessageAttachmentTool: TypeAlias = Union[CodeInterpreterToolParam, AdditionalMessageAttachmentToolFileSearch] class AdditionalMessageAttachment(TypedDict, total=False): diff --git a/src/openai/types/beta/threads/run_status.py b/src/openai/types/beta/threads/run_status.py index 6666d00e5a..47c7cbd007 100644 --- a/src/openai/types/beta/threads/run_status.py +++ b/src/openai/types/beta/threads/run_status.py @@ -1,10 +1,10 @@ # File generated from our OpenAPI 
spec by Stainless. See CONTRIBUTING.md for details. -from typing_extensions import Literal +from typing_extensions import Literal, TypeAlias __all__ = ["RunStatus"] -RunStatus = Literal[ +RunStatus: TypeAlias = Literal[ "queued", "in_progress", "requires_action", diff --git a/src/openai/types/beta/threads/runs/code_interpreter_tool_call.py b/src/openai/types/beta/threads/runs/code_interpreter_tool_call.py index 2f07243684..e7df4e19c4 100644 --- a/src/openai/types/beta/threads/runs/code_interpreter_tool_call.py +++ b/src/openai/types/beta/threads/runs/code_interpreter_tool_call.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import List, Union -from typing_extensions import Literal, Annotated +from typing_extensions import Literal, Annotated, TypeAlias from ....._utils import PropertyInfo from ....._models import BaseModel @@ -39,7 +39,7 @@ class CodeInterpreterOutputImage(BaseModel): """Always `image`.""" -CodeInterpreterOutput = Annotated[ +CodeInterpreterOutput: TypeAlias = Annotated[ Union[CodeInterpreterOutputLogs, CodeInterpreterOutputImage], PropertyInfo(discriminator="type") ] diff --git a/src/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py b/src/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py index eff76355b3..9d7a1563cd 100644 --- a/src/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py +++ b/src/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import List, Union, Optional -from typing_extensions import Literal, Annotated +from typing_extensions import Literal, Annotated, TypeAlias from ....._utils import PropertyInfo from ....._models import BaseModel @@ -10,7 +10,7 @@ __all__ = ["CodeInterpreterToolCallDelta", "CodeInterpreter", "CodeInterpreterOutput"] -CodeInterpreterOutput = Annotated[ +CodeInterpreterOutput: TypeAlias = Annotated[ Union[CodeInterpreterLogs, CodeInterpreterOutputImage], PropertyInfo(discriminator="type") ] diff --git a/src/openai/types/beta/threads/runs/run_step.py b/src/openai/types/beta/threads/runs/run_step.py index 7c81dcac2b..e3163c508b 100644 --- a/src/openai/types/beta/threads/runs/run_step.py +++ b/src/openai/types/beta/threads/runs/run_step.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import Union, Optional -from typing_extensions import Literal, Annotated +from typing_extensions import Literal, Annotated, TypeAlias from ....._utils import PropertyInfo from ....._models import BaseModel @@ -19,7 +19,9 @@ class LastError(BaseModel): """A human-readable description of the error.""" -StepDetails = Annotated[Union[MessageCreationStepDetails, ToolCallsStepDetails], PropertyInfo(discriminator="type")] +StepDetails: TypeAlias = Annotated[ + Union[MessageCreationStepDetails, ToolCallsStepDetails], PropertyInfo(discriminator="type") +] class Usage(BaseModel): diff --git a/src/openai/types/beta/threads/runs/run_step_delta.py b/src/openai/types/beta/threads/runs/run_step_delta.py index d6b4aefeb9..1139088fb4 100644 --- a/src/openai/types/beta/threads/runs/run_step_delta.py +++ b/src/openai/types/beta/threads/runs/run_step_delta.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
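The recurring `Annotated[Union[...], PropertyInfo(discriminator="type")]` pattern in these hunks marks the unions as discriminated: the `type` field selects which variant to parse instead of trying each member in turn. `PropertyInfo` is SDK-internal, so as a rough analogy only, here is the same idea with plain pydantic v2 `Field(discriminator=...)`; the model names and fields below are illustrative, not the real run-step types:

    from typing import Union
    from typing_extensions import Annotated, Literal, TypeAlias

    from pydantic import BaseModel, Field, TypeAdapter

    class MessageCreation(BaseModel):
        type: Literal["message_creation"]
        message_id: str

    class ToolCalls(BaseModel):
        type: Literal["tool_calls"]
        count: int

    StepDetails: TypeAlias = Annotated[
        Union[MessageCreation, ToolCalls], Field(discriminator="type")
    ]

    step = TypeAdapter(StepDetails).validate_python({"type": "tool_calls", "count": 2})
    print(type(step).__name__)  # -> ToolCalls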
from typing import Union, Optional -from typing_extensions import Annotated +from typing_extensions import Annotated, TypeAlias from ....._utils import PropertyInfo from ....._models import BaseModel @@ -10,7 +10,9 @@ __all__ = ["RunStepDelta", "StepDetails"] -StepDetails = Annotated[Union[RunStepDeltaMessageDelta, ToolCallDeltaObject], PropertyInfo(discriminator="type")] +StepDetails: TypeAlias = Annotated[ + Union[RunStepDeltaMessageDelta, ToolCallDeltaObject], PropertyInfo(discriminator="type") +] class RunStepDelta(BaseModel): diff --git a/src/openai/types/beta/threads/runs/tool_call.py b/src/openai/types/beta/threads/runs/tool_call.py index 77d86b46d9..565e3109be 100644 --- a/src/openai/types/beta/threads/runs/tool_call.py +++ b/src/openai/types/beta/threads/runs/tool_call.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import Union -from typing_extensions import Annotated +from typing_extensions import Annotated, TypeAlias from ....._utils import PropertyInfo from .function_tool_call import FunctionToolCall @@ -10,6 +10,6 @@ __all__ = ["ToolCall"] -ToolCall = Annotated[ +ToolCall: TypeAlias = Annotated[ Union[CodeInterpreterToolCall, FileSearchToolCall, FunctionToolCall], PropertyInfo(discriminator="type") ] diff --git a/src/openai/types/beta/threads/runs/tool_call_delta.py b/src/openai/types/beta/threads/runs/tool_call_delta.py index 90cfe0657e..f0b8070c97 100644 --- a/src/openai/types/beta/threads/runs/tool_call_delta.py +++ b/src/openai/types/beta/threads/runs/tool_call_delta.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import Union -from typing_extensions import Annotated +from typing_extensions import Annotated, TypeAlias from ....._utils import PropertyInfo from .function_tool_call_delta import FunctionToolCallDelta @@ -10,7 +10,7 @@ __all__ = ["ToolCallDelta"] -ToolCallDelta = Annotated[ +ToolCallDelta: TypeAlias = Annotated[ Union[CodeInterpreterToolCallDelta, FileSearchToolCallDelta, FunctionToolCallDelta], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/beta/vector_store_create_params.py b/src/openai/types/beta/vector_store_create_params.py index 365d9923b8..4f74af49f8 100644 --- a/src/openai/types/beta/vector_store_create_params.py +++ b/src/openai/types/beta/vector_store_create_params.py @@ -3,7 +3,7 @@ from __future__ import annotations from typing import List, Union, Optional -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Literal, Required, TypeAlias, TypedDict __all__ = [ "VectorStoreCreateParams", @@ -72,7 +72,7 @@ class ChunkingStrategyStatic(TypedDict, total=False): """Always `static`.""" -ChunkingStrategy = Union[ChunkingStrategyAuto, ChunkingStrategyStatic] +ChunkingStrategy: TypeAlias = Union[ChunkingStrategyAuto, ChunkingStrategyStatic] class ExpiresAfter(TypedDict, total=False): diff --git a/src/openai/types/beta/vector_stores/file_batch_create_params.py b/src/openai/types/beta/vector_stores/file_batch_create_params.py index 9b98d0699e..e1c3303cf3 100644 --- a/src/openai/types/beta/vector_stores/file_batch_create_params.py +++ b/src/openai/types/beta/vector_stores/file_batch_create_params.py @@ -3,7 +3,7 @@ from __future__ import annotations from typing import List, Union -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Literal, Required, TypeAlias, TypedDict __all__ = [ "FileBatchCreateParams", @@ -56,6 +56,6 @@ class 
ChunkingStrategyStaticChunkingStrategyRequestParam(TypedDict, total=False) """Always `static`.""" -ChunkingStrategy = Union[ +ChunkingStrategy: TypeAlias = Union[ ChunkingStrategyAutoChunkingStrategyRequestParam, ChunkingStrategyStaticChunkingStrategyRequestParam ] diff --git a/src/openai/types/beta/vector_stores/file_create_params.py b/src/openai/types/beta/vector_stores/file_create_params.py index 2ae63f1462..cfb80657c6 100644 --- a/src/openai/types/beta/vector_stores/file_create_params.py +++ b/src/openai/types/beta/vector_stores/file_create_params.py @@ -3,7 +3,7 @@ from __future__ import annotations from typing import Union -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Literal, Required, TypeAlias, TypedDict __all__ = [ "FileCreateParams", @@ -56,6 +56,6 @@ class ChunkingStrategyStaticChunkingStrategyRequestParam(TypedDict, total=False) """Always `static`.""" -ChunkingStrategy = Union[ +ChunkingStrategy: TypeAlias = Union[ ChunkingStrategyAutoChunkingStrategyRequestParam, ChunkingStrategyStaticChunkingStrategyRequestParam ] diff --git a/src/openai/types/beta/vector_stores/vector_store_file.py b/src/openai/types/beta/vector_stores/vector_store_file.py index d9d7625f86..4762de0ebd 100644 --- a/src/openai/types/beta/vector_stores/vector_store_file.py +++ b/src/openai/types/beta/vector_stores/vector_store_file.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import Union, Optional -from typing_extensions import Literal, Annotated +from typing_extensions import Literal, Annotated, TypeAlias from ...._utils import PropertyInfo from ...._models import BaseModel @@ -51,7 +51,9 @@ class ChunkingStrategyOther(BaseModel): """Always `other`.""" -ChunkingStrategy = Annotated[Union[ChunkingStrategyStatic, ChunkingStrategyOther], PropertyInfo(discriminator="type")] +ChunkingStrategy: TypeAlias = Annotated[ + Union[ChunkingStrategyStatic, ChunkingStrategyOther], PropertyInfo(discriminator="type") +] class VectorStoreFile(BaseModel): diff --git a/src/openai/types/chat/chat_completion_content_part_param.py b/src/openai/types/chat/chat_completion_content_part_param.py index f9b5f71e43..e0c6e480f2 100644 --- a/src/openai/types/chat/chat_completion_content_part_param.py +++ b/src/openai/types/chat/chat_completion_content_part_param.py @@ -3,10 +3,13 @@ from __future__ import annotations from typing import Union +from typing_extensions import TypeAlias from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam from .chat_completion_content_part_image_param import ChatCompletionContentPartImageParam __all__ = ["ChatCompletionContentPartParam"] -ChatCompletionContentPartParam = Union[ChatCompletionContentPartTextParam, ChatCompletionContentPartImageParam] +ChatCompletionContentPartParam: TypeAlias = Union[ + ChatCompletionContentPartTextParam, ChatCompletionContentPartImageParam +] diff --git a/src/openai/types/chat/chat_completion_message_param.py b/src/openai/types/chat/chat_completion_message_param.py index a3644a5310..ec65d94cae 100644 --- a/src/openai/types/chat/chat_completion_message_param.py +++ b/src/openai/types/chat/chat_completion_message_param.py @@ -3,6 +3,7 @@ from __future__ import annotations from typing import Union +from typing_extensions import TypeAlias from .chat_completion_tool_message_param import ChatCompletionToolMessageParam from .chat_completion_user_message_param import ChatCompletionUserMessageParam @@ -12,7 +13,7 @@ __all__ = 
["ChatCompletionMessageParam"] -ChatCompletionMessageParam = Union[ +ChatCompletionMessageParam: TypeAlias = Union[ ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam, ChatCompletionAssistantMessageParam, diff --git a/src/openai/types/chat/chat_completion_role.py b/src/openai/types/chat/chat_completion_role.py index 1fd83888d3..c2ebef74c8 100644 --- a/src/openai/types/chat/chat_completion_role.py +++ b/src/openai/types/chat/chat_completion_role.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing_extensions import Literal +from typing_extensions import Literal, TypeAlias __all__ = ["ChatCompletionRole"] -ChatCompletionRole = Literal["system", "user", "assistant", "tool", "function"] +ChatCompletionRole: TypeAlias = Literal["system", "user", "assistant", "tool", "function"] diff --git a/src/openai/types/chat/chat_completion_tool_choice_option_param.py b/src/openai/types/chat/chat_completion_tool_choice_option_param.py index 1d3c2506ab..7dedf041b7 100644 --- a/src/openai/types/chat/chat_completion_tool_choice_option_param.py +++ b/src/openai/types/chat/chat_completion_tool_choice_option_param.py @@ -3,10 +3,12 @@ from __future__ import annotations from typing import Union -from typing_extensions import Literal +from typing_extensions import Literal, TypeAlias from .chat_completion_named_tool_choice_param import ChatCompletionNamedToolChoiceParam __all__ = ["ChatCompletionToolChoiceOptionParam"] -ChatCompletionToolChoiceOptionParam = Union[Literal["none", "auto", "required"], ChatCompletionNamedToolChoiceParam] +ChatCompletionToolChoiceOptionParam: TypeAlias = Union[ + Literal["none", "auto", "required"], ChatCompletionNamedToolChoiceParam +] diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 783922539f..9e81881b9e 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -3,7 +3,7 @@ from __future__ import annotations from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Literal, Required, TypeAlias, TypedDict from ...types import shared_params from ..chat_model import ChatModel @@ -221,7 +221,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): """ -FunctionCall = Union[Literal["none", "auto"], ChatCompletionFunctionCallOptionParam] +FunctionCall: TypeAlias = Union[Literal["none", "auto"], ChatCompletionFunctionCallOptionParam] class Function(TypedDict, total=False): diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py index 87b2acb90a..edb7b732bf 100644 --- a/src/openai/types/chat_model.py +++ b/src/openai/types/chat_model.py @@ -1,10 +1,10 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing_extensions import Literal +from typing_extensions import Literal, TypeAlias __all__ = ["ChatModel"] -ChatModel = Literal[ +ChatModel: TypeAlias = Literal[ "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-mini", diff --git a/src/openai/types/file_content.py b/src/openai/types/file_content.py index b4aa08a9a3..d89eee623e 100644 --- a/src/openai/types/file_content.py +++ b/src/openai/types/file_content.py @@ -1,6 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+from typing_extensions import TypeAlias __all__ = ["FileContent"] -FileContent = str +FileContent: TypeAlias = str diff --git a/src/openai/types/image_model.py b/src/openai/types/image_model.py index ce6535ff2c..1672369bea 100644 --- a/src/openai/types/image_model.py +++ b/src/openai/types/image_model.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing_extensions import Literal +from typing_extensions import Literal, TypeAlias __all__ = ["ImageModel"] -ImageModel = Literal["dall-e-2", "dall-e-3"] +ImageModel: TypeAlias = Literal["dall-e-2", "dall-e-3"] diff --git a/src/openai/types/moderation_model.py b/src/openai/types/moderation_model.py index 73362596f3..f549aeeb7a 100644 --- a/src/openai/types/moderation_model.py +++ b/src/openai/types/moderation_model.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing_extensions import Literal +from typing_extensions import Literal, TypeAlias __all__ = ["ModerationModel"] -ModerationModel = Literal["text-moderation-latest", "text-moderation-stable"] +ModerationModel: TypeAlias = Literal["text-moderation-latest", "text-moderation-stable"] diff --git a/src/openai/types/shared/function_parameters.py b/src/openai/types/shared/function_parameters.py index c9524e4cb8..a3d83e3496 100644 --- a/src/openai/types/shared/function_parameters.py +++ b/src/openai/types/shared/function_parameters.py @@ -1,7 +1,8 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import Dict +from typing_extensions import TypeAlias __all__ = ["FunctionParameters"] -FunctionParameters = Dict[str, object] +FunctionParameters: TypeAlias = Dict[str, object] diff --git a/src/openai/types/shared_params/function_parameters.py b/src/openai/types/shared_params/function_parameters.py index 5b40efb78f..45fc742d3b 100644 --- a/src/openai/types/shared_params/function_parameters.py +++ b/src/openai/types/shared_params/function_parameters.py @@ -3,7 +3,8 @@ from __future__ import annotations from typing import Dict +from typing_extensions import TypeAlias __all__ = ["FunctionParameters"] -FunctionParameters = Dict[str, object] +FunctionParameters: TypeAlias = Dict[str, object] From fb5cb8d7a9d99d505e7824f717bf0b62f2e4fa05 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2024 11:50:14 +0000 Subject: [PATCH 048/192] chore(internal): bump pyright (#1599) --- requirements-dev.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index 6941447b96..673f0c9a87 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -70,7 +70,7 @@ pydantic-core==2.18.2 # via pydantic pygments==2.18.0 # via rich -pyright==1.1.364 +pyright==1.1.374 pytest==7.1.1 # via pytest-asyncio pytest-asyncio==0.21.1 From 3534fce496940e73804e0a10c81aac682e0741bd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2024 13:20:50 +0000 Subject: [PATCH 049/192] feat(client): add `retries_taken` to raw response class (#1601) --- src/openai/_base_client.py | 10 +++ src/openai/_legacy_response.py | 18 ++++- src/openai/_response.py | 5 ++ tests/test_client.py | 122 +++++++++++++++++++++++++++++++++ 4 files changed, 154 insertions(+), 1 deletion(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 4b93ab298c..c8fce0bea4 
100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -1051,6 +1051,7 @@ def _request( response=response, stream=stream, stream_cls=stream_cls, + retries_taken=options.get_max_retries(self.max_retries) - retries, ) def _retry_request( @@ -1092,6 +1093,7 @@ def _process_response( response: httpx.Response, stream: bool, stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None, + retries_taken: int = 0, ) -> ResponseT: if response.request.headers.get(RAW_RESPONSE_HEADER) == "true": return cast( @@ -1103,6 +1105,7 @@ def _process_response( stream=stream, stream_cls=stream_cls, options=options, + retries_taken=retries_taken, ), ) @@ -1122,6 +1125,7 @@ def _process_response( stream=stream, stream_cls=stream_cls, options=options, + retries_taken=retries_taken, ), ) @@ -1135,6 +1139,7 @@ def _process_response( stream=stream, stream_cls=stream_cls, options=options, + retries_taken=retries_taken, ) if bool(response.request.headers.get(RAW_RESPONSE_HEADER)): return cast(ResponseT, api_response) @@ -1625,6 +1630,7 @@ async def _request( response=response, stream=stream, stream_cls=stream_cls, + retries_taken=options.get_max_retries(self.max_retries) - retries, ) async def _retry_request( @@ -1664,6 +1670,7 @@ async def _process_response( response: httpx.Response, stream: bool, stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None, + retries_taken: int = 0, ) -> ResponseT: if response.request.headers.get(RAW_RESPONSE_HEADER) == "true": return cast( @@ -1675,6 +1682,7 @@ async def _process_response( stream=stream, stream_cls=stream_cls, options=options, + retries_taken=retries_taken, ), ) @@ -1694,6 +1702,7 @@ async def _process_response( stream=stream, stream_cls=stream_cls, options=options, + retries_taken=retries_taken, ), ) @@ -1707,6 +1716,7 @@ async def _process_response( stream=stream, stream_cls=stream_cls, options=options, + retries_taken=retries_taken, ) if bool(response.request.headers.get(RAW_RESPONSE_HEADER)): return cast(ResponseT, api_response) diff --git a/src/openai/_legacy_response.py b/src/openai/_legacy_response.py index 1de906b167..66d7606a60 100644 --- a/src/openai/_legacy_response.py +++ b/src/openai/_legacy_response.py @@ -5,7 +5,18 @@ import logging import datetime import functools -from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, Iterator, AsyncIterator, cast, overload +from typing import ( + TYPE_CHECKING, + Any, + Union, + Generic, + TypeVar, + Callable, + Iterator, + AsyncIterator, + cast, + overload, +) from typing_extensions import Awaitable, ParamSpec, override, deprecated, get_origin import anyio @@ -53,6 +64,9 @@ class LegacyAPIResponse(Generic[R]): http_response: httpx.Response + retries_taken: int + """The number of retries made. If no retries happened this will be `0`""" + def __init__( self, *, @@ -62,6 +76,7 @@ def __init__( stream: bool, stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None, options: FinalRequestOptions, + retries_taken: int = 0, ) -> None: self._cast_to = cast_to self._client = client @@ -70,6 +85,7 @@ def __init__( self._stream_cls = stream_cls self._options = options self.http_response = raw + self.retries_taken = retries_taken @property def request_id(self) -> str | None: diff --git a/src/openai/_response.py b/src/openai/_response.py index 4ba2ae681c..3bf4de4287 100644 --- a/src/openai/_response.py +++ b/src/openai/_response.py @@ -55,6 +55,9 @@ class BaseAPIResponse(Generic[R]): http_response: httpx.Response + retries_taken: int + """The number of retries made. 
If no retries happened this will be `0`""" + def __init__( self, *, @@ -64,6 +67,7 @@ def __init__( stream: bool, stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None, options: FinalRequestOptions, + retries_taken: int = 0, ) -> None: self._cast_to = cast_to self._client = client @@ -72,6 +76,7 @@ def __init__( self._stream_cls = stream_cls self._options = options self.http_response = raw + self.retries_taken = retries_taken @property def headers(self) -> httpx.Headers: diff --git a/tests/test_client.py b/tests/test_client.py index c1e545e66f..49e71653c5 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -758,6 +758,65 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> Non assert _get_open_connections(self.client) == 0 + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + def test_retries_taken(self, client: OpenAI, failures_before_success: int, respx_mock: MockRouter) -> None: + client = client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) + + response = client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "content", + "role": "system", + } + ], + model="gpt-4-turbo", + ) + + assert response.retries_taken == failures_before_success + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + def test_retries_taken_new_response_class( + self, client: OpenAI, failures_before_success: int, respx_mock: MockRouter + ) -> None: + client = client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) + + with client.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "content", + "role": "system", + } + ], + model="gpt-4-turbo", + ) as response: + assert response.retries_taken == failures_before_success + class TestAsyncOpenAI: client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) @@ -1488,3 +1547,66 @@ async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) ) assert _get_open_connections(self.client) == 0 + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + @pytest.mark.asyncio + async def test_retries_taken( + self, async_client: AsyncOpenAI, failures_before_success: int, respx_mock: MockRouter + ) -> None: + client = async_client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) + + 
response = await client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "content", + "role": "system", + } + ], + model="gpt-4-turbo", + ) + + assert response.retries_taken == failures_before_success + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + @pytest.mark.asyncio + async def test_retries_taken_new_response_class( + self, async_client: AsyncOpenAI, failures_before_success: int, respx_mock: MockRouter + ) -> None: + client = async_client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) + + async with client.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "content", + "role": "system", + } + ], + model="gpt-4-turbo", + ) as response: + assert response.retries_taken == failures_before_success From 4dafd6397694b3d157a6306584fab9b9cf1aad6a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2024 14:31:39 +0000 Subject: [PATCH 050/192] chore(internal): test updates (#1602) --- src/openai/_utils/_reflection.py | 2 +- tests/test_client.py | 7 +++++-- tests/utils.py | 10 +++++++--- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/src/openai/_utils/_reflection.py b/src/openai/_utils/_reflection.py index 9a53c7bd21..89aa712ac4 100644 --- a/src/openai/_utils/_reflection.py +++ b/src/openai/_utils/_reflection.py @@ -34,7 +34,7 @@ def assert_signatures_in_sync( if custom_param.annotation != source_param.annotation: errors.append( - f"types for the `{name}` param are do not match; source={repr(source_param.annotation)} checking={repr(source_param.annotation)}" + f"types for the `{name}` param are do not match; source={repr(source_param.annotation)} checking={repr(custom_param.annotation)}" ) continue diff --git a/tests/test_client.py b/tests/test_client.py index 49e71653c5..2402ffa82f 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -17,6 +17,7 @@ from pydantic import ValidationError from openai import OpenAI, AsyncOpenAI, APIResponseValidationError +from openai._types import Omit from openai._models import BaseModel, FinalRequestOptions from openai._constants import RAW_RESPONSE_HEADER from openai._streaming import Stream, AsyncStream @@ -328,7 +329,8 @@ def test_validate_headers(self) -> None: assert request.headers.get("Authorization") == f"Bearer {api_key}" with pytest.raises(OpenAIError): - client2 = OpenAI(base_url=base_url, api_key=None, _strict_response_validation=True) + with update_env(**{"OPENAI_API_KEY": Omit()}): + client2 = OpenAI(base_url=base_url, api_key=None, _strict_response_validation=True) _ = client2 def test_default_query_option(self) -> None: @@ -1103,7 +1105,8 @@ def test_validate_headers(self) -> None: assert request.headers.get("Authorization") == f"Bearer {api_key}" with pytest.raises(OpenAIError): - client2 = AsyncOpenAI(base_url=base_url, api_key=None, _strict_response_validation=True) + with update_env(**{"OPENAI_API_KEY": Omit()}): + client2 = AsyncOpenAI(base_url=base_url, api_key=None, _strict_response_validation=True) _ = client2 def test_default_query_option(self) -> None: diff --git 
a/tests/utils.py b/tests/utils.py index 060b99339f..165f4e5bfd 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -8,7 +8,7 @@ from datetime import date, datetime from typing_extensions import Literal, get_args, get_origin, assert_type -from openai._types import NoneType +from openai._types import Omit, NoneType from openai._utils import ( is_dict, is_list, @@ -139,11 +139,15 @@ def _assert_list_type(type_: type[object], value: object) -> None: @contextlib.contextmanager -def update_env(**new_env: str) -> Iterator[None]: +def update_env(**new_env: str | Omit) -> Iterator[None]: old = os.environ.copy() try: - os.environ.update(new_env) + for name, value in new_env.items(): + if isinstance(value, Omit): + os.environ.pop(name, None) + else: + os.environ[name] = value yield None finally: From 2c2b74ea54b68a6aa6ade5c07d8f523052df071c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 6 Aug 2024 10:10:58 +0000 Subject: [PATCH 051/192] chore(internal): bump ruff version (#1604) --- pyproject.toml | 12 ++++--- requirements-dev.lock | 2 +- src/openai/_base_client.py | 63 +++++++++++---------------------- src/openai/_compat.py | 24 +++++-------- src/openai/_files.py | 12 +++---- src/openai/_legacy_response.py | 6 ++-- src/openai/_response.py | 12 +++---- src/openai/_types.py | 9 ++--- src/openai/_utils/_proxy.py | 3 +- src/openai/_utils/_utils.py | 18 ++++------ tests/test_deepcopy.py | 3 +- tests/test_legacy_response.py | 3 +- tests/test_response.py | 12 +++---- tests/test_utils/test_typing.py | 15 +++----- 14 files changed, 68 insertions(+), 126 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 37968f39ee..43a0102ec4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -77,8 +77,8 @@ format = { chain = [ "check:ruff", "typecheck", ]} -"check:ruff" = "ruff ." -"fix:ruff" = "ruff --fix ." +"check:ruff" = "ruff check ." +"fix:ruff" = "ruff check --fix ." typecheck = { chain = [ "typecheck:pyright", @@ -162,6 +162,11 @@ reportPrivateUsage = false line-length = 120 output-format = "grouped" target-version = "py37" + +[tool.ruff.format] +docstring-code-format = true + +[tool.ruff.lint] select = [ # isort "I", @@ -192,9 +197,6 @@ unfixable = [ ] ignore-init-module-imports = true -[tool.ruff.format] -docstring-code-format = true - [tool.ruff.lint.flake8-tidy-imports.banned-api] "functools.lru_cache".msg = "This function does not retain type information for the wrapped function's arguments; The `lru_cache` function from `_utils` should be used instead" diff --git a/requirements-dev.lock b/requirements-dev.lock index 673f0c9a87..aa6d1a804b 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -80,7 +80,7 @@ pytz==2023.3.post1 # via dirty-equals respx==0.20.2 rich==13.7.1 -ruff==0.1.9 +ruff==0.5.6 setuptools==68.2.2 # via nodeenv six==1.16.0 diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index c8fce0bea4..3388d69fab 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -125,16 +125,14 @@ def __init__( self, *, url: URL, - ) -> None: - ... + ) -> None: ... @overload def __init__( self, *, params: Query, - ) -> None: - ... + ) -> None: ... def __init__( self, @@ -167,8 +165,7 @@ def has_next_page(self) -> bool: return False return self.next_page_info() is not None - def next_page_info(self) -> Optional[PageInfo]: - ... + def next_page_info(self) -> Optional[PageInfo]: ... def _get_page_items(self) -> Iterable[_T]: # type: ignore[empty-body] ... 
@@ -904,8 +901,7 @@ def request( *, stream: Literal[True], stream_cls: Type[_StreamT], - ) -> _StreamT: - ... + ) -> _StreamT: ... @overload def request( @@ -915,8 +911,7 @@ def request( remaining_retries: Optional[int] = None, *, stream: Literal[False] = False, - ) -> ResponseT: - ... + ) -> ResponseT: ... @overload def request( @@ -927,8 +922,7 @@ def request( *, stream: bool = False, stream_cls: Type[_StreamT] | None = None, - ) -> ResponseT | _StreamT: - ... + ) -> ResponseT | _StreamT: ... def request( self, @@ -1172,8 +1166,7 @@ def get( cast_to: Type[ResponseT], options: RequestOptions = {}, stream: Literal[False] = False, - ) -> ResponseT: - ... + ) -> ResponseT: ... @overload def get( @@ -1184,8 +1177,7 @@ def get( options: RequestOptions = {}, stream: Literal[True], stream_cls: type[_StreamT], - ) -> _StreamT: - ... + ) -> _StreamT: ... @overload def get( @@ -1196,8 +1188,7 @@ def get( options: RequestOptions = {}, stream: bool, stream_cls: type[_StreamT] | None = None, - ) -> ResponseT | _StreamT: - ... + ) -> ResponseT | _StreamT: ... def get( self, @@ -1223,8 +1214,7 @@ def post( options: RequestOptions = {}, files: RequestFiles | None = None, stream: Literal[False] = False, - ) -> ResponseT: - ... + ) -> ResponseT: ... @overload def post( @@ -1237,8 +1227,7 @@ def post( files: RequestFiles | None = None, stream: Literal[True], stream_cls: type[_StreamT], - ) -> _StreamT: - ... + ) -> _StreamT: ... @overload def post( @@ -1251,8 +1240,7 @@ def post( files: RequestFiles | None = None, stream: bool, stream_cls: type[_StreamT] | None = None, - ) -> ResponseT | _StreamT: - ... + ) -> ResponseT | _StreamT: ... def post( self, @@ -1485,8 +1473,7 @@ async def request( *, stream: Literal[False] = False, remaining_retries: Optional[int] = None, - ) -> ResponseT: - ... + ) -> ResponseT: ... @overload async def request( @@ -1497,8 +1484,7 @@ async def request( stream: Literal[True], stream_cls: type[_AsyncStreamT], remaining_retries: Optional[int] = None, - ) -> _AsyncStreamT: - ... + ) -> _AsyncStreamT: ... @overload async def request( @@ -1509,8 +1495,7 @@ async def request( stream: bool, stream_cls: type[_AsyncStreamT] | None = None, remaining_retries: Optional[int] = None, - ) -> ResponseT | _AsyncStreamT: - ... + ) -> ResponseT | _AsyncStreamT: ... async def request( self, @@ -1739,8 +1724,7 @@ async def get( cast_to: Type[ResponseT], options: RequestOptions = {}, stream: Literal[False] = False, - ) -> ResponseT: - ... + ) -> ResponseT: ... @overload async def get( @@ -1751,8 +1735,7 @@ async def get( options: RequestOptions = {}, stream: Literal[True], stream_cls: type[_AsyncStreamT], - ) -> _AsyncStreamT: - ... + ) -> _AsyncStreamT: ... @overload async def get( @@ -1763,8 +1746,7 @@ async def get( options: RequestOptions = {}, stream: bool, stream_cls: type[_AsyncStreamT] | None = None, - ) -> ResponseT | _AsyncStreamT: - ... + ) -> ResponseT | _AsyncStreamT: ... async def get( self, @@ -1788,8 +1770,7 @@ async def post( files: RequestFiles | None = None, options: RequestOptions = {}, stream: Literal[False] = False, - ) -> ResponseT: - ... + ) -> ResponseT: ... @overload async def post( @@ -1802,8 +1783,7 @@ async def post( options: RequestOptions = {}, stream: Literal[True], stream_cls: type[_AsyncStreamT], - ) -> _AsyncStreamT: - ... + ) -> _AsyncStreamT: ... @overload async def post( @@ -1816,8 +1796,7 @@ async def post( options: RequestOptions = {}, stream: bool, stream_cls: type[_AsyncStreamT] | None = None, - ) -> ResponseT | _AsyncStreamT: - ... 
+ ) -> ResponseT | _AsyncStreamT: ... async def post( self, diff --git a/src/openai/_compat.py b/src/openai/_compat.py index c919b5adb3..7c6f91a870 100644 --- a/src/openai/_compat.py +++ b/src/openai/_compat.py @@ -159,22 +159,19 @@ def model_parse(model: type[_ModelT], data: Any) -> _ModelT: # generic models if TYPE_CHECKING: - class GenericModel(pydantic.BaseModel): - ... + class GenericModel(pydantic.BaseModel): ... else: if PYDANTIC_V2: # there no longer needs to be a distinction in v2 but # we still have to create our own subclass to avoid # inconsistent MRO ordering errors - class GenericModel(pydantic.BaseModel): - ... + class GenericModel(pydantic.BaseModel): ... else: import pydantic.generics - class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): - ... + class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ... # cached properties @@ -193,26 +190,21 @@ class typed_cached_property(Generic[_T]): func: Callable[[Any], _T] attrname: str | None - def __init__(self, func: Callable[[Any], _T]) -> None: - ... + def __init__(self, func: Callable[[Any], _T]) -> None: ... @overload - def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: - ... + def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ... @overload - def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: - ... + def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ... def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self: raise NotImplementedError() - def __set_name__(self, owner: type[Any], name: str) -> None: - ... + def __set_name__(self, owner: type[Any], name: str) -> None: ... # __set__ is not defined at runtime, but @cached_property is designed to be settable - def __set__(self, instance: object, value: _T) -> None: - ... + def __set__(self, instance: object, value: _T) -> None: ... else: try: from functools import cached_property as cached_property diff --git a/src/openai/_files.py b/src/openai/_files.py index ad7b668b4b..801a0d2928 100644 --- a/src/openai/_files.py +++ b/src/openai/_files.py @@ -39,13 +39,11 @@ def assert_is_file_content(obj: object, *, key: str | None = None) -> None: @overload -def to_httpx_files(files: None) -> None: - ... +def to_httpx_files(files: None) -> None: ... @overload -def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: - ... +def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ... def to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None: @@ -83,13 +81,11 @@ def _read_file_content(file: FileContent) -> HttpxFileContent: @overload -async def async_to_httpx_files(files: None) -> None: - ... +async def async_to_httpx_files(files: None) -> None: ... @overload -async def async_to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: - ... +async def async_to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ... async def async_to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None: diff --git a/src/openai/_legacy_response.py b/src/openai/_legacy_response.py index 66d7606a60..c42fb8b83e 100644 --- a/src/openai/_legacy_response.py +++ b/src/openai/_legacy_response.py @@ -92,12 +92,10 @@ def request_id(self) -> str | None: return self.http_response.headers.get("x-request-id") # type: ignore[no-any-return] @overload - def parse(self, *, to: type[_T]) -> _T: - ... + def parse(self, *, to: type[_T]) -> _T: ... @overload - def parse(self) -> R: - ... + def parse(self) -> R: ... 
def parse(self, *, to: type[_T] | None = None) -> R | _T: """Returns the rich python representation of this response's data. diff --git a/src/openai/_response.py b/src/openai/_response.py index 3bf4de4287..f9d91786f6 100644 --- a/src/openai/_response.py +++ b/src/openai/_response.py @@ -268,12 +268,10 @@ def request_id(self) -> str | None: return self.http_response.headers.get("x-request-id") # type: ignore[no-any-return] @overload - def parse(self, *, to: type[_T]) -> _T: - ... + def parse(self, *, to: type[_T]) -> _T: ... @overload - def parse(self) -> R: - ... + def parse(self) -> R: ... def parse(self, *, to: type[_T] | None = None) -> R | _T: """Returns the rich python representation of this response's data. @@ -376,12 +374,10 @@ def request_id(self) -> str | None: return self.http_response.headers.get("x-request-id") # type: ignore[no-any-return] @overload - async def parse(self, *, to: type[_T]) -> _T: - ... + async def parse(self, *, to: type[_T]) -> _T: ... @overload - async def parse(self) -> R: - ... + async def parse(self) -> R: ... async def parse(self, *, to: type[_T] | None = None) -> R | _T: """Returns the rich python representation of this response's data. diff --git a/src/openai/_types.py b/src/openai/_types.py index de9b1dd48b..5611b2d38f 100644 --- a/src/openai/_types.py +++ b/src/openai/_types.py @@ -112,8 +112,7 @@ class NotGiven: For example: ```py - def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: - ... + def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ... get(timeout=1) # 1s timeout @@ -163,16 +162,14 @@ def build( *, response: Response, data: object, - ) -> _T: - ... + ) -> _T: ... Headers = Mapping[str, Union[str, Omit]] class HeadersLikeProtocol(Protocol): - def get(self, __key: str) -> str | None: - ... + def get(self, __key: str) -> str | None: ... HeadersLike = Union[Headers, HeadersLikeProtocol] diff --git a/src/openai/_utils/_proxy.py b/src/openai/_utils/_proxy.py index c46a62a698..ffd883e9dd 100644 --- a/src/openai/_utils/_proxy.py +++ b/src/openai/_utils/_proxy.py @@ -59,5 +59,4 @@ def __as_proxied__(self) -> T: return cast(T, self) @abstractmethod - def __load__(self) -> T: - ... + def __load__(self) -> T: ... diff --git a/src/openai/_utils/_utils.py b/src/openai/_utils/_utils.py index 34797c2905..2fc5a1c65a 100644 --- a/src/openai/_utils/_utils.py +++ b/src/openai/_utils/_utils.py @@ -211,20 +211,17 @@ def required_args(*variants: Sequence[str]) -> Callable[[CallableT], CallableT]: Example usage: ```py @overload - def foo(*, a: str) -> str: - ... + def foo(*, a: str) -> str: ... @overload - def foo(*, b: bool) -> str: - ... + def foo(*, b: bool) -> str: ... # This enforces the same constraints that a static type checker would # i.e. that either a or b must be passed to the function @required_args(["a"], ["b"]) - def foo(*, a: str | None = None, b: bool | None = None) -> str: - ... + def foo(*, a: str | None = None, b: bool | None = None) -> str: ... ``` """ @@ -286,18 +283,15 @@ def wrapper(*args: object, **kwargs: object) -> object: @overload -def strip_not_given(obj: None) -> None: - ... +def strip_not_given(obj: None) -> None: ... @overload -def strip_not_given(obj: Mapping[_K, _V | NotGiven]) -> dict[_K, _V]: - ... +def strip_not_given(obj: Mapping[_K, _V | NotGiven]) -> dict[_K, _V]: ... @overload -def strip_not_given(obj: object) -> object: - ... +def strip_not_given(obj: object) -> object: ... 
def strip_not_given(obj: object | None) -> object: diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py index 8cf65ce94e..86a2adb1a2 100644 --- a/tests/test_deepcopy.py +++ b/tests/test_deepcopy.py @@ -41,8 +41,7 @@ def test_nested_list() -> None: assert_different_identities(obj1[1], obj2[1]) -class MyObject: - ... +class MyObject: ... def test_ignores_other_types() -> None: diff --git a/tests/test_legacy_response.py b/tests/test_legacy_response.py index 45025f81d0..3659ee12c1 100644 --- a/tests/test_legacy_response.py +++ b/tests/test_legacy_response.py @@ -12,8 +12,7 @@ from openai._legacy_response import LegacyAPIResponse -class PydanticModel(pydantic.BaseModel): - ... +class PydanticModel(pydantic.BaseModel): ... def test_response_parse_mismatched_basemodel(client: OpenAI) -> None: diff --git a/tests/test_response.py b/tests/test_response.py index af153b67c4..6ea1be1a1a 100644 --- a/tests/test_response.py +++ b/tests/test_response.py @@ -19,16 +19,13 @@ from openai._base_client import FinalRequestOptions -class ConcreteBaseAPIResponse(APIResponse[bytes]): - ... +class ConcreteBaseAPIResponse(APIResponse[bytes]): ... -class ConcreteAPIResponse(APIResponse[List[str]]): - ... +class ConcreteAPIResponse(APIResponse[List[str]]): ... -class ConcreteAsyncAPIResponse(APIResponse[httpx.Response]): - ... +class ConcreteAsyncAPIResponse(APIResponse[httpx.Response]): ... def test_extract_response_type_direct_classes() -> None: @@ -56,8 +53,7 @@ def test_extract_response_type_binary_response() -> None: assert extract_response_type(AsyncBinaryAPIResponse) == bytes -class PydanticModel(pydantic.BaseModel): - ... +class PydanticModel(pydantic.BaseModel): ... def test_response_parse_mismatched_basemodel(client: OpenAI) -> None: diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py index 690960802a..535935b9e1 100644 --- a/tests/test_utils/test_typing.py +++ b/tests/test_utils/test_typing.py @@ -9,24 +9,19 @@ _T3 = TypeVar("_T3") -class BaseGeneric(Generic[_T]): - ... +class BaseGeneric(Generic[_T]): ... -class SubclassGeneric(BaseGeneric[_T]): - ... +class SubclassGeneric(BaseGeneric[_T]): ... -class BaseGenericMultipleTypeArgs(Generic[_T, _T2, _T3]): - ... +class BaseGenericMultipleTypeArgs(Generic[_T, _T2, _T3]): ... -class SubclassGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T, _T2, _T3]): - ... +class SubclassGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T, _T2, _T3]): ... -class SubclassDifferentOrderGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T2, _T, _T3]): - ... +class SubclassDifferentOrderGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T2, _T, _T3]): ... 
def test_extract_type_var() -> None: From e279088e935519bfef005adfcb180e868a25682d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 6 Aug 2024 15:02:40 +0000 Subject: [PATCH 052/192] chore(internal): update pydantic compat helper function (#1607) --- src/openai/_compat.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/openai/_compat.py b/src/openai/_compat.py index 7c6f91a870..21fe6941ce 100644 --- a/src/openai/_compat.py +++ b/src/openai/_compat.py @@ -7,7 +7,7 @@ import pydantic from pydantic.fields import FieldInfo -from ._types import StrBytesIntFloat +from ._types import IncEx, StrBytesIntFloat _T = TypeVar("_T") _ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel) @@ -133,17 +133,20 @@ def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str: def model_dump( model: pydantic.BaseModel, *, + exclude: IncEx = None, exclude_unset: bool = False, exclude_defaults: bool = False, ) -> dict[str, Any]: if PYDANTIC_V2: return model.model_dump( + exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, ) return cast( "dict[str, Any]", model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast] + exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, ), From 9c600b0ab83d292e515a4728ad7acda8a06471e3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 6 Aug 2024 17:29:45 +0000 Subject: [PATCH 053/192] feat(api): updates (#1608) - This commit removes the `AssistantResponseFormat` type --- .stats.yml | 2 +- api.md | 13 ++- src/openai/_client.py | 4 +- src/openai/resources/beta/assistants.py | 20 +++++ .../resources/beta/threads/runs/runs.py | 30 +++++++ src/openai/resources/beta/threads/threads.py | 30 +++++++ src/openai/resources/chat/completions.py | 12 +++ src/openai/resources/fine_tuning/jobs/jobs.py | 12 +-- src/openai/types/__init__.py | 3 + src/openai/types/beta/__init__.py | 2 - src/openai/types/beta/assistant.py | 5 ++ .../types/beta/assistant_create_params.py | 5 ++ .../types/beta/assistant_response_format.py | 13 --- .../beta/assistant_response_format_option.py | 8 +- .../assistant_response_format_option_param.py | 9 +- .../beta/assistant_response_format_param.py | 12 --- .../types/beta/assistant_update_params.py | 5 ++ src/openai/types/beta/file_search_tool.py | 4 +- .../types/beta/file_search_tool_param.py | 4 +- .../beta/thread_create_and_run_params.py | 5 ++ src/openai/types/beta/threads/__init__.py | 2 + .../types/beta/threads/message_content.py | 4 +- .../beta/threads/message_content_delta.py | 4 +- .../beta/threads/refusal_content_block.py | 14 ++++ .../types/beta/threads/refusal_delta_block.py | 18 ++++ src/openai/types/beta/threads/run.py | 5 ++ .../types/beta/threads/run_create_params.py | 5 ++ .../beta/vector_stores/vector_store_file.py | 2 +- src/openai/types/chat/__init__.py | 3 + src/openai/types/chat/chat_completion.py | 3 + ...chat_completion_assistant_message_param.py | 15 +++- .../types/chat/chat_completion_chunk.py | 6 ++ ...t_completion_content_part_refusal_param.py | 15 ++++ .../types/chat/chat_completion_message.py | 3 + .../chat_completion_system_message_param.py | 5 +- .../chat_completion_tool_message_param.py | 5 +- .../types/chat/completion_create_params.py | 9 +- src/openai/types/chat_model.py | 1 + .../types/fine_tuning/job_create_params.py | 6 +- src/openai/types/shared/__init__.py | 3 + 
.../types/shared/function_definition.py | 9 ++ .../shared/response_format_json_object.py | 12 +++ .../shared/response_format_json_schema.py | 44 ++++++++++ .../types/shared/response_format_text.py | 12 +++ src/openai/types/shared_params/__init__.py | 3 + .../shared_params/function_definition.py | 10 +++ .../response_format_json_object.py | 12 +++ .../response_format_json_schema.py | 42 ++++++++++ .../shared_params/response_format_text.py | 12 +++ tests/api_resources/beta/test_assistants.py | 24 +++--- tests/api_resources/beta/test_threads.py | 16 ++-- tests/api_resources/beta/threads/test_runs.py | 16 ++-- tests/api_resources/chat/test_completions.py | 84 +++++++++++-------- tests/api_resources/fine_tuning/test_jobs.py | 16 ++-- tests/api_resources/test_models.py | 24 +++--- tests/test_client.py | 16 ++-- 56 files changed, 524 insertions(+), 154 deletions(-) delete mode 100644 src/openai/types/beta/assistant_response_format.py delete mode 100644 src/openai/types/beta/assistant_response_format_param.py create mode 100644 src/openai/types/beta/threads/refusal_content_block.py create mode 100644 src/openai/types/beta/threads/refusal_delta_block.py create mode 100644 src/openai/types/chat/chat_completion_content_part_refusal_param.py create mode 100644 src/openai/types/shared/response_format_json_object.py create mode 100644 src/openai/types/shared/response_format_json_schema.py create mode 100644 src/openai/types/shared/response_format_text.py create mode 100644 src/openai/types/shared_params/response_format_json_object.py create mode 100644 src/openai/types/shared_params/response_format_json_schema.py create mode 100644 src/openai/types/shared_params/response_format_text.py diff --git a/.stats.yml b/.stats.yml index 6cc7757636..ac652c9271 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b04761ffd2adad3cc19a6dc6fc696ac445878219972f891881a967340fa9a6b0.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-4097c2f86beb3f3bb021775cd1dfa240e960caf842aeefc2e08da4dc0851ea79.yml diff --git a/api.md b/api.md index 31be4e06a7..cb78f55ca6 100644 --- a/api.md +++ b/api.md @@ -1,7 +1,14 @@ # Shared Types ```python -from openai.types import ErrorObject, FunctionDefinition, FunctionParameters +from openai.types import ( + ErrorObject, + FunctionDefinition, + FunctionParameters, + ResponseFormatJSONObject, + ResponseFormatJSONSchema, + ResponseFormatText, +) ``` # Completions @@ -35,6 +42,7 @@ from openai.types.chat import ( ChatCompletionChunk, ChatCompletionContentPart, ChatCompletionContentPartImage, + ChatCompletionContentPartRefusal, ChatCompletionContentPartText, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, @@ -288,7 +296,6 @@ Types: ```python from openai.types.beta import ( - AssistantResponseFormat, AssistantResponseFormatOption, AssistantToolChoice, AssistantToolChoiceFunction, @@ -381,6 +388,8 @@ from openai.types.beta.threads import ( MessageDeleted, MessageDelta, MessageDeltaEvent, + RefusalContentBlock, + RefusalDeltaBlock, Text, TextContentBlock, TextContentBlockParam, diff --git a/src/openai/_client.py b/src/openai/_client.py index 8b404e234d..d3ee6cf0f1 100644 --- a/src/openai/_client.py +++ b/src/openai/_client.py @@ -151,7 +151,7 @@ def __init__( @property @override def qs(self) -> Querystring: - return Querystring(array_format="comma") + return Querystring(array_format="brackets") @property @override @@ -365,7 
+365,7 @@ def __init__( @property @override def qs(self) -> Querystring: - return Querystring(array_format="comma") + return Querystring(array_format="brackets") @property @override diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index b4dc3cfdd6..441390d24b 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -88,6 +88,11 @@ def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -233,6 +238,11 @@ def update( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -453,6 +463,11 @@ async def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -598,6 +613,11 @@ async def update( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index 92e6c2e1c8..d84a7161aa 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -134,6 +134,11 @@ def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
@@ -264,6 +269,11 @@ def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -390,6 +400,11 @@ def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -924,6 +939,11 @@ async def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -1054,6 +1074,11 @@ async def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -1180,6 +1205,11 @@ async def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index daecb74b7c..6ec4a14a7e 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -314,6 +314,11 @@ def create_and_run( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
+ Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -443,6 +448,11 @@ def create_and_run( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -568,6 +578,11 @@ def create_and_run( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -942,6 +957,11 @@ async def create_and_run( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -1071,6 +1091,11 @@ async def create_and_run( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -1196,6 +1221,11 @@ async def create_and_run( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index 07a35f577b..3dcd3774d7 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -142,6 +142,8 @@ def create( [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. 
Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. @@ -338,6 +340,8 @@ def create( [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. @@ -527,6 +531,8 @@ def create( [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. @@ -791,6 +797,8 @@ async def create( [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. @@ -987,6 +995,8 @@ async def create( [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. @@ -1176,6 +1186,8 @@ async def create( [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. 
diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py index 61bd3bfbe5..cbd3cbbfba 100644 --- a/src/openai/resources/fine_tuning/jobs/jobs.py +++ b/src/openai/resources/fine_tuning/jobs/jobs.py @@ -49,7 +49,7 @@ def with_streaming_response(self) -> JobsWithStreamingResponse: def create( self, *, - model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]], + model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]], training_file: str, hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN, @@ -74,7 +74,7 @@ def create( Args: model: The name of the model to fine-tune. You can select one of the - [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). training_file: The ID of an uploaded file that contains training data. @@ -104,7 +104,7 @@ def create( name. For example, a `suffix` of "custom-model-name" would produce a model name like - `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. validation_file: The ID of an uploaded file that contains validation data. @@ -329,7 +329,7 @@ def with_streaming_response(self) -> AsyncJobsWithStreamingResponse: async def create( self, *, - model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]], + model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]], training_file: str, hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN, @@ -354,7 +354,7 @@ async def create( Args: model: The name of the model to fine-tune. You can select one of the - [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). training_file: The ID of an uploaded file that contains training data. @@ -384,7 +384,7 @@ async def create( name. For example, a `suffix` of "custom-model-name" would produce a model name like - `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. validation_file: The ID of an uploaded file that contains validation data. 
diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index 84916962cc..f621fb67c5 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -9,6 +9,9 @@ ErrorObject as ErrorObject, FunctionDefinition as FunctionDefinition, FunctionParameters as FunctionParameters, + ResponseFormatText as ResponseFormatText, + ResponseFormatJSONObject as ResponseFormatJSONObject, + ResponseFormatJSONSchema as ResponseFormatJSONSchema, ) from .upload import Upload as Upload from .embedding import Embedding as Embedding diff --git a/src/openai/types/beta/__init__.py b/src/openai/types/beta/__init__.py index d851a3619c..9c5ddfdbe0 100644 --- a/src/openai/types/beta/__init__.py +++ b/src/openai/types/beta/__init__.py @@ -23,7 +23,6 @@ from .assistant_create_params import AssistantCreateParams as AssistantCreateParams from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams -from .assistant_response_format import AssistantResponseFormat as AssistantResponseFormat from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam @@ -31,7 +30,6 @@ from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption from .thread_create_and_run_params import ThreadCreateAndRunParams as ThreadCreateAndRunParams from .assistant_tool_choice_function import AssistantToolChoiceFunction as AssistantToolChoiceFunction -from .assistant_response_format_param import AssistantResponseFormatParam as AssistantResponseFormatParam from .assistant_response_format_option import AssistantResponseFormatOption as AssistantResponseFormatOption from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam as AssistantToolChoiceOptionParam from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam as AssistantToolChoiceFunctionParam diff --git a/src/openai/types/beta/assistant.py b/src/openai/types/beta/assistant.py index 4e5adc766e..c6a0a4cfcf 100644 --- a/src/openai/types/beta/assistant.py +++ b/src/openai/types/beta/assistant.py @@ -89,6 +89,11 @@ class Assistant(BaseModel): [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index c10f7f57ad..84cd4425d1 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -60,6 +60,11 @@ class AssistantCreateParams(TypedDict, total=False): [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. 
Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. diff --git a/src/openai/types/beta/assistant_response_format.py b/src/openai/types/beta/assistant_response_format.py deleted file mode 100644 index f53bdaf62a..0000000000 --- a/src/openai/types/beta/assistant_response_format.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["AssistantResponseFormat"] - - -class AssistantResponseFormat(BaseModel): - type: Optional[Literal["text", "json_object"]] = None - """Must be one of `text` or `json_object`.""" diff --git a/src/openai/types/beta/assistant_response_format_option.py b/src/openai/types/beta/assistant_response_format_option.py index 6ce390f6d6..6f06a3442f 100644 --- a/src/openai/types/beta/assistant_response_format_option.py +++ b/src/openai/types/beta/assistant_response_format_option.py @@ -3,8 +3,12 @@ from typing import Union from typing_extensions import Literal, TypeAlias -from .assistant_response_format import AssistantResponseFormat +from ..shared.response_format_text import ResponseFormatText +from ..shared.response_format_json_object import ResponseFormatJSONObject +from ..shared.response_format_json_schema import ResponseFormatJSONSchema __all__ = ["AssistantResponseFormatOption"] -AssistantResponseFormatOption: TypeAlias = Union[Literal["none", "auto"], AssistantResponseFormat] +AssistantResponseFormatOption: TypeAlias = Union[ + Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema +] diff --git a/src/openai/types/beta/assistant_response_format_option_param.py b/src/openai/types/beta/assistant_response_format_option_param.py index 8100088723..680a060c3c 100644 --- a/src/openai/types/beta/assistant_response_format_option_param.py +++ b/src/openai/types/beta/assistant_response_format_option_param.py @@ -5,8 +5,13 @@ from typing import Union from typing_extensions import Literal, TypeAlias -from .assistant_response_format_param import AssistantResponseFormatParam +from ...types import shared_params __all__ = ["AssistantResponseFormatOptionParam"] -AssistantResponseFormatOptionParam: TypeAlias = Union[Literal["none", "auto"], AssistantResponseFormatParam] +AssistantResponseFormatOptionParam: TypeAlias = Union[ + Literal["auto"], + shared_params.ResponseFormatText, + shared_params.ResponseFormatJSONObject, + shared_params.ResponseFormatJSONSchema, +] diff --git a/src/openai/types/beta/assistant_response_format_param.py b/src/openai/types/beta/assistant_response_format_param.py deleted file mode 100644 index 96e1d02115..0000000000 --- a/src/openai/types/beta/assistant_response_format_param.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["AssistantResponseFormatParam"] - - -class AssistantResponseFormatParam(TypedDict, total=False): - type: Literal["text", "json_object"] - """Must be one of `text` or `json_object`.""" diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py index b401e1a891..ade565819f 100644 --- a/src/openai/types/beta/assistant_update_params.py +++ b/src/openai/types/beta/assistant_update_params.py @@ -49,6 +49,11 @@ class AssistantUpdateParams(TypedDict, total=False): [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. diff --git a/src/openai/types/beta/file_search_tool.py b/src/openai/types/beta/file_search_tool.py index e2711b9b3d..26ab1cb83f 100644 --- a/src/openai/types/beta/file_search_tool.py +++ b/src/openai/types/beta/file_search_tool.py @@ -12,8 +12,8 @@ class FileSearch(BaseModel): max_num_results: Optional[int] = None """The maximum number of results the file search tool should output. - The default is 20 for gpt-4\\** models and 5 for gpt-3.5-turbo. This number should - be between 1 and 50 inclusive. + The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number + should be between 1 and 50 inclusive. Note that the file search tool may output fewer than `max_num_results` results. See the diff --git a/src/openai/types/beta/file_search_tool_param.py b/src/openai/types/beta/file_search_tool_param.py index 115f86a444..666719f8cd 100644 --- a/src/openai/types/beta/file_search_tool_param.py +++ b/src/openai/types/beta/file_search_tool_param.py @@ -11,8 +11,8 @@ class FileSearch(TypedDict, total=False): max_num_results: int """The maximum number of results the file search tool should output. - The default is 20 for gpt-4\\** models and 5 for gpt-3.5-turbo. This number should - be between 1 and 50 inclusive. + The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number + should be between 1 and 50 inclusive. Note that the file search tool may output fewer than `max_num_results` results. See the diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index 62cff921e2..7490b25ef3 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -100,6 +100,11 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
diff --git a/src/openai/types/beta/threads/__init__.py b/src/openai/types/beta/threads/__init__.py index 023d76fc13..70853177bd 100644 --- a/src/openai/types/beta/threads/__init__.py +++ b/src/openai/types/beta/threads/__init__.py @@ -25,11 +25,13 @@ from .text_content_block import TextContentBlock as TextContentBlock from .message_delta_event import MessageDeltaEvent as MessageDeltaEvent from .message_list_params import MessageListParams as MessageListParams +from .refusal_delta_block import RefusalDeltaBlock as RefusalDeltaBlock from .file_path_annotation import FilePathAnnotation as FilePathAnnotation from .image_url_delta_block import ImageURLDeltaBlock as ImageURLDeltaBlock from .message_content_delta import MessageContentDelta as MessageContentDelta from .message_create_params import MessageCreateParams as MessageCreateParams from .message_update_params import MessageUpdateParams as MessageUpdateParams +from .refusal_content_block import RefusalContentBlock as RefusalContentBlock from .image_file_delta_block import ImageFileDeltaBlock as ImageFileDeltaBlock from .image_url_content_block import ImageURLContentBlock as ImageURLContentBlock from .file_citation_annotation import FileCitationAnnotation as FileCitationAnnotation diff --git a/src/openai/types/beta/threads/message_content.py b/src/openai/types/beta/threads/message_content.py index 7b718c3ca9..b313d35af6 100644 --- a/src/openai/types/beta/threads/message_content.py +++ b/src/openai/types/beta/threads/message_content.py @@ -5,11 +5,13 @@ from ...._utils import PropertyInfo from .text_content_block import TextContentBlock +from .refusal_content_block import RefusalContentBlock from .image_url_content_block import ImageURLContentBlock from .image_file_content_block import ImageFileContentBlock __all__ = ["MessageContent"] MessageContent: TypeAlias = Annotated[ - Union[ImageFileContentBlock, ImageURLContentBlock, TextContentBlock], PropertyInfo(discriminator="type") + Union[ImageFileContentBlock, ImageURLContentBlock, TextContentBlock, RefusalContentBlock], + PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/beta/threads/message_content_delta.py b/src/openai/types/beta/threads/message_content_delta.py index 667172c08f..b6e7dfa45a 100644 --- a/src/openai/types/beta/threads/message_content_delta.py +++ b/src/openai/types/beta/threads/message_content_delta.py @@ -5,11 +5,13 @@ from ...._utils import PropertyInfo from .text_delta_block import TextDeltaBlock +from .refusal_delta_block import RefusalDeltaBlock from .image_url_delta_block import ImageURLDeltaBlock from .image_file_delta_block import ImageFileDeltaBlock __all__ = ["MessageContentDelta"] MessageContentDelta: TypeAlias = Annotated[ - Union[ImageFileDeltaBlock, TextDeltaBlock, ImageURLDeltaBlock], PropertyInfo(discriminator="type") + Union[ImageFileDeltaBlock, TextDeltaBlock, RefusalDeltaBlock, ImageURLDeltaBlock], + PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/beta/threads/refusal_content_block.py b/src/openai/types/beta/threads/refusal_content_block.py new file mode 100644 index 0000000000..d54f948554 --- /dev/null +++ b/src/openai/types/beta/threads/refusal_content_block.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["RefusalContentBlock"] + + +class RefusalContentBlock(BaseModel): + refusal: str + + type: Literal["refusal"] + """Always `refusal`.""" diff --git a/src/openai/types/beta/threads/refusal_delta_block.py b/src/openai/types/beta/threads/refusal_delta_block.py new file mode 100644 index 0000000000..dbd8e62697 --- /dev/null +++ b/src/openai/types/beta/threads/refusal_delta_block.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["RefusalDeltaBlock"] + + +class RefusalDeltaBlock(BaseModel): + index: int + """The index of the refusal part in the message.""" + + type: Literal["refusal"] + """Always `refusal`.""" + + refusal: Optional[str] = None diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py index 81d10d4a56..0579e229d8 100644 --- a/src/openai/types/beta/threads/run.py +++ b/src/openai/types/beta/threads/run.py @@ -171,6 +171,11 @@ class Run(BaseModel): [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index e0c42fd23f..d3e6d9c476 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -97,6 +97,11 @@ class RunCreateParamsBase(TypedDict, total=False): [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
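With `RefusalContentBlock` joining the `MessageContent` union (and `RefusalDeltaBlock` as its streaming counterpart), message-reading code gains a third block type next to text and image blocks. A rough sketch of handling it, assuming an existing thread whose run has completed; the thread ID is a placeholder:

import openai

client = openai.OpenAI()

messages = client.beta.threads.messages.list(thread_id="thread_abc123")  # placeholder ID
for message in messages:
    for block in message.content:
        if block.type == "text":
            print(block.text.value)
        elif block.type == "refusal":
            # New block type from this change set: the model declined to answer.
            print("refusal:", block.refusal)
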
diff --git a/src/openai/types/beta/vector_stores/vector_store_file.py b/src/openai/types/beta/vector_stores/vector_store_file.py index 4762de0ebd..65096e8dad 100644 --- a/src/openai/types/beta/vector_stores/vector_store_file.py +++ b/src/openai/types/beta/vector_stores/vector_store_file.py @@ -17,7 +17,7 @@ class LastError(BaseModel): - code: Literal["internal_error", "file_not_found", "parsing_error", "unhandled_mime_type"] + code: Literal["server_error", "unsupported_file", "invalid_file"] """One of `server_error` or `rate_limit_exceeded`.""" message: str diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py index 0ba812ff9b..df3b48149c 100644 --- a/src/openai/types/chat/__init__.py +++ b/src/openai/types/chat/__init__.py @@ -37,6 +37,9 @@ from .chat_completion_tool_choice_option_param import ( ChatCompletionToolChoiceOptionParam as ChatCompletionToolChoiceOptionParam, ) +from .chat_completion_content_part_refusal_param import ( + ChatCompletionContentPartRefusalParam as ChatCompletionContentPartRefusalParam, +) from .chat_completion_function_call_option_param import ( ChatCompletionFunctionCallOptionParam as ChatCompletionFunctionCallOptionParam, ) diff --git a/src/openai/types/chat/chat_completion.py b/src/openai/types/chat/chat_completion.py index 5f4eaf3366..4b53e70890 100644 --- a/src/openai/types/chat/chat_completion.py +++ b/src/openai/types/chat/chat_completion.py @@ -15,6 +15,9 @@ class ChoiceLogprobs(BaseModel): content: Optional[List[ChatCompletionTokenLogprob]] = None """A list of message content tokens with log probability information.""" + refusal: Optional[List[ChatCompletionTokenLogprob]] = None + """A list of message refusal tokens with log probability information.""" + class Choice(BaseModel): finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"] diff --git a/src/openai/types/chat/chat_completion_assistant_message_param.py b/src/openai/types/chat/chat_completion_assistant_message_param.py index 8f7357b96c..2429d41d33 100644 --- a/src/openai/types/chat/chat_completion_assistant_message_param.py +++ b/src/openai/types/chat/chat_completion_assistant_message_param.py @@ -2,12 +2,16 @@ from __future__ import annotations -from typing import Iterable, Optional -from typing_extensions import Literal, Required, TypedDict +from typing import Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict +from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam from .chat_completion_message_tool_call_param import ChatCompletionMessageToolCallParam +from .chat_completion_content_part_refusal_param import ChatCompletionContentPartRefusalParam -__all__ = ["ChatCompletionAssistantMessageParam", "FunctionCall"] +__all__ = ["ChatCompletionAssistantMessageParam", "ContentArrayOfContentPart", "FunctionCall"] + +ContentArrayOfContentPart: TypeAlias = Union[ChatCompletionContentPartTextParam, ChatCompletionContentPartRefusalParam] class FunctionCall(TypedDict, total=False): @@ -27,7 +31,7 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" - content: Optional[str] + content: Union[str, Iterable[ContentArrayOfContentPart], None] """The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. @@ -47,5 +51,8 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): role. 
""" + refusal: Optional[str] + """The refusal message by the assistant.""" + tool_calls: Iterable[ChatCompletionMessageToolCallParam] """The tool calls generated by the model, such as function calls.""" diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py index 65643c7e60..9ec6dc4bdb 100644 --- a/src/openai/types/chat/chat_completion_chunk.py +++ b/src/openai/types/chat/chat_completion_chunk.py @@ -67,6 +67,9 @@ class ChoiceDelta(BaseModel): model. """ + refusal: Optional[str] = None + """The refusal message generated by the model.""" + role: Optional[Literal["system", "user", "assistant", "tool"]] = None """The role of the author of this message.""" @@ -77,6 +80,9 @@ class ChoiceLogprobs(BaseModel): content: Optional[List[ChatCompletionTokenLogprob]] = None """A list of message content tokens with log probability information.""" + refusal: Optional[List[ChatCompletionTokenLogprob]] = None + """A list of message refusal tokens with log probability information.""" + class Choice(BaseModel): delta: ChoiceDelta diff --git a/src/openai/types/chat/chat_completion_content_part_refusal_param.py b/src/openai/types/chat/chat_completion_content_part_refusal_param.py new file mode 100644 index 0000000000..c18c7db770 --- /dev/null +++ b/src/openai/types/chat/chat_completion_content_part_refusal_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ChatCompletionContentPartRefusalParam"] + + +class ChatCompletionContentPartRefusalParam(TypedDict, total=False): + refusal: Required[str] + """The refusal message generated by the model.""" + + type: Required[Literal["refusal"]] + """The type of the content part.""" diff --git a/src/openai/types/chat/chat_completion_message.py b/src/openai/types/chat/chat_completion_message.py index 8db7d17d24..492bb68c85 100644 --- a/src/openai/types/chat/chat_completion_message.py +++ b/src/openai/types/chat/chat_completion_message.py @@ -26,6 +26,9 @@ class ChatCompletionMessage(BaseModel): content: Optional[str] = None """The contents of the message.""" + refusal: Optional[str] = None + """The refusal message generated by the model.""" + role: Literal["assistant"] """The role of the author of this message.""" diff --git a/src/openai/types/chat/chat_completion_system_message_param.py b/src/openai/types/chat/chat_completion_system_message_param.py index 94bb3f636c..172ccea09e 100644 --- a/src/openai/types/chat/chat_completion_system_message_param.py +++ b/src/openai/types/chat/chat_completion_system_message_param.py @@ -2,13 +2,16 @@ from __future__ import annotations +from typing import Union, Iterable from typing_extensions import Literal, Required, TypedDict +from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam + __all__ = ["ChatCompletionSystemMessageParam"] class ChatCompletionSystemMessageParam(TypedDict, total=False): - content: Required[str] + content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] """The contents of the system message.""" role: Required[Literal["system"]] diff --git a/src/openai/types/chat/chat_completion_tool_message_param.py b/src/openai/types/chat/chat_completion_tool_message_param.py index 5c590e033f..eb5e270e47 100644 --- a/src/openai/types/chat/chat_completion_tool_message_param.py +++ b/src/openai/types/chat/chat_completion_tool_message_param.py @@ -2,13 +2,16 
@@ from __future__ import annotations +from typing import Union, Iterable from typing_extensions import Literal, Required, TypedDict +from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam + __all__ = ["ChatCompletionToolMessageParam"] class ChatCompletionToolMessageParam(TypedDict, total=False): - content: Required[str] + content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] """The contents of the tool message.""" role: Required[Literal["tool"]] diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 9e81881b9e..bf648a3858 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -121,7 +121,8 @@ class CompletionCreateParamsBase(TypedDict, total=False): response_format: ResponseFormat """An object specifying the format that the model must output. - Compatible with + Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. @@ -250,9 +251,9 @@ class Function(TypedDict, total=False): """ -class ResponseFormat(TypedDict, total=False): - type: Literal["text", "json_object"] - """Must be one of `text` or `json_object`.""" +ResponseFormat: TypeAlias = Union[ + shared_params.ResponseFormatText, shared_params.ResponseFormatJSONObject, shared_params.ResponseFormatJSONSchema +] class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase): diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py index edb7b732bf..686f26b783 100644 --- a/src/openai/types/chat_model.py +++ b/src/openai/types/chat_model.py @@ -6,6 +6,7 @@ ChatModel: TypeAlias = Literal[ "gpt-4o", + "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", diff --git a/src/openai/types/fine_tuning/job_create_params.py b/src/openai/types/fine_tuning/job_create_params.py index c5196e4406..e9be2ef1ca 100644 --- a/src/openai/types/fine_tuning/job_create_params.py +++ b/src/openai/types/fine_tuning/job_create_params.py @@ -9,11 +9,11 @@ class JobCreateParams(TypedDict, total=False): - model: Required[Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]]] + model: Required[Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]]] """The name of the model to fine-tune. You can select one of the - [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). """ training_file: Required[str] @@ -54,7 +54,7 @@ class JobCreateParams(TypedDict, total=False): name. For example, a `suffix` of "custom-model-name" would produce a model name like - `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. 
""" validation_file: Optional[str] diff --git a/src/openai/types/shared/__init__.py b/src/openai/types/shared/__init__.py index e085744e29..c8776bca0e 100644 --- a/src/openai/types/shared/__init__.py +++ b/src/openai/types/shared/__init__.py @@ -3,3 +3,6 @@ from .error_object import ErrorObject as ErrorObject from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters +from .response_format_text import ResponseFormatText as ResponseFormatText +from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject +from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema diff --git a/src/openai/types/shared/function_definition.py b/src/openai/types/shared/function_definition.py index 49f5e67c50..06baa23170 100644 --- a/src/openai/types/shared/function_definition.py +++ b/src/openai/types/shared/function_definition.py @@ -32,3 +32,12 @@ class FunctionDefinition(BaseModel): Omitting `parameters` defines a function with an empty parameter list. """ + + strict: Optional[bool] = None + """Whether to enable strict schema adherence when generating the function call. + + If set to true, the model will follow the exact schema defined in the + `parameters` field. Only a subset of JSON Schema is supported when `strict` is + `true`. Learn more about Structured Outputs in the + [function calling guide](docs/guides/function-calling). + """ diff --git a/src/openai/types/shared/response_format_json_object.py b/src/openai/types/shared/response_format_json_object.py new file mode 100644 index 0000000000..107728dd2e --- /dev/null +++ b/src/openai/types/shared/response_format_json_object.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFormatJSONObject"] + + +class ResponseFormatJSONObject(BaseModel): + type: Literal["json_object"] + """The type of response format being defined: `json_object`""" diff --git a/src/openai/types/shared/response_format_json_schema.py b/src/openai/types/shared/response_format_json_schema.py new file mode 100644 index 0000000000..3194a4fe91 --- /dev/null +++ b/src/openai/types/shared/response_format_json_schema.py @@ -0,0 +1,44 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, Optional +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from ..._models import BaseModel + +__all__ = ["ResponseFormatJSONSchema", "JSONSchema"] + + +class JSONSchema(BaseModel): + name: str + """The name of the response format. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + description: Optional[str] = None + """ + A description of what the response format is for, used by the model to determine + how to respond in the format. + """ + + schema_: Optional[Dict[str, object]] = FieldInfo(alias="schema", default=None) + """The schema for the response format, described as a JSON Schema object.""" + + strict: Optional[bool] = None + """Whether to enable strict schema adherence when generating the output. + + If set to true, the model will always follow the exact schema defined in the + `schema` field. Only a subset of JSON Schema is supported when `strict` is + `true`. 
To learn more, read the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + """ + + +class ResponseFormatJSONSchema(BaseModel): + json_schema: JSONSchema + + type: Literal["json_schema"] + """The type of response format being defined: `json_schema`""" diff --git a/src/openai/types/shared/response_format_text.py b/src/openai/types/shared/response_format_text.py new file mode 100644 index 0000000000..6721fe0973 --- /dev/null +++ b/src/openai/types/shared/response_format_text.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFormatText"] + + +class ResponseFormatText(BaseModel): + type: Literal["text"] + """The type of response format being defined: `text`""" diff --git a/src/openai/types/shared_params/__init__.py b/src/openai/types/shared_params/__init__.py index ef638cb279..ab4057d59f 100644 --- a/src/openai/types/shared_params/__init__.py +++ b/src/openai/types/shared_params/__init__.py @@ -2,3 +2,6 @@ from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters +from .response_format_text import ResponseFormatText as ResponseFormatText +from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject +from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema diff --git a/src/openai/types/shared_params/function_definition.py b/src/openai/types/shared_params/function_definition.py index 29ccc548d4..f41392f154 100644 --- a/src/openai/types/shared_params/function_definition.py +++ b/src/openai/types/shared_params/function_definition.py @@ -2,6 +2,7 @@ from __future__ import annotations +from typing import Optional from typing_extensions import Required, TypedDict from ...types import shared_params @@ -33,3 +34,12 @@ class FunctionDefinition(TypedDict, total=False): Omitting `parameters` defines a function with an empty parameter list. """ + + strict: Optional[bool] + """Whether to enable strict schema adherence when generating the function call. + + If set to true, the model will follow the exact schema defined in the + `parameters` field. Only a subset of JSON Schema is supported when `strict` is + `true`. Learn more about Structured Outputs in the + [function calling guide](docs/guides/function-calling). + """ diff --git a/src/openai/types/shared_params/response_format_json_object.py b/src/openai/types/shared_params/response_format_json_object.py new file mode 100644 index 0000000000..8419c6cb56 --- /dev/null +++ b/src/openai/types/shared_params/response_format_json_object.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseFormatJSONObject"] + + +class ResponseFormatJSONObject(TypedDict, total=False): + type: Required[Literal["json_object"]] + """The type of response format being defined: `json_object`""" diff --git a/src/openai/types/shared_params/response_format_json_schema.py b/src/openai/types/shared_params/response_format_json_schema.py new file mode 100644 index 0000000000..4b60fae8ee --- /dev/null +++ b/src/openai/types/shared_params/response_format_json_schema.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Dict, Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseFormatJSONSchema", "JSONSchema"] + + +class JSONSchema(TypedDict, total=False): + name: Required[str] + """The name of the response format. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + description: str + """ + A description of what the response format is for, used by the model to determine + how to respond in the format. + """ + + schema: Dict[str, object] + """The schema for the response format, described as a JSON Schema object.""" + + strict: Optional[bool] + """Whether to enable strict schema adherence when generating the output. + + If set to true, the model will always follow the exact schema defined in the + `schema` field. Only a subset of JSON Schema is supported when `strict` is + `true`. To learn more, read the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + """ + + +class ResponseFormatJSONSchema(TypedDict, total=False): + json_schema: Required[JSONSchema] + + type: Required[Literal["json_schema"]] + """The type of response format being defined: `json_schema`""" diff --git a/src/openai/types/shared_params/response_format_text.py b/src/openai/types/shared_params/response_format_text.py new file mode 100644 index 0000000000..5bec7fc503 --- /dev/null +++ b/src/openai/types/shared_params/response_format_text.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseFormatText"] + + +class ResponseFormatText(TypedDict, total=False): + type: Required[Literal["text"]] + """The type of response format being defined: `text`""" diff --git a/tests/api_resources/beta/test_assistants.py b/tests/api_resources/beta/test_assistants.py index 14f279bbb5..fbd5ff0597 100644 --- a/tests/api_resources/beta/test_assistants.py +++ b/tests/api_resources/beta/test_assistants.py @@ -24,19 +24,19 @@ class TestAssistants: @parametrize def test_method_create(self, client: OpenAI) -> None: assistant = client.beta.assistants.create( - model="gpt-4-turbo", + model="gpt-4o", ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: assistant = client.beta.assistants.create( - model="gpt-4-turbo", + model="gpt-4o", description="description", instructions="instructions", metadata={}, name="name", - response_format="none", + response_format="auto", temperature=1, tool_resources={ "code_interpreter": {"file_ids": ["string", "string", "string"]}, @@ -59,7 +59,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.beta.assistants.with_raw_response.create( - model="gpt-4-turbo", + model="gpt-4o", ) assert response.is_closed is True @@ -70,7 +70,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.beta.assistants.with_streaming_response.create( - model="gpt-4-turbo", + model="gpt-4o", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -134,7 +134,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: 
metadata={}, model="model", name="name", - response_format="none", + response_format="auto", temperature=1, tool_resources={ "code_interpreter": {"file_ids": ["string", "string", "string"]}, @@ -256,19 +256,19 @@ class TestAsyncAssistants: @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.create( - model="gpt-4-turbo", + model="gpt-4o", ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.create( - model="gpt-4-turbo", + model="gpt-4o", description="description", instructions="instructions", metadata={}, name="name", - response_format="none", + response_format="auto", temperature=1, tool_resources={ "code_interpreter": {"file_ids": ["string", "string", "string"]}, @@ -291,7 +291,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.assistants.with_raw_response.create( - model="gpt-4-turbo", + model="gpt-4o", ) assert response.is_closed is True @@ -302,7 +302,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.assistants.with_streaming_response.create( - model="gpt-4-turbo", + model="gpt-4o", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -366,7 +366,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> metadata={}, model="model", name="name", - response_format="none", + response_format="auto", temperature=1, tool_resources={ "code_interpreter": {"file_ids": ["string", "string", "string"]}, diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py index d45a1a18d1..36ce75b8e7 100644 --- a/tests/api_resources/beta/test_threads.py +++ b/tests/api_resources/beta/test_threads.py @@ -302,9 +302,9 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) max_completion_tokens=256, max_prompt_tokens=256, metadata={}, - model="gpt-4-turbo", + model="gpt-4o", parallel_tool_calls=True, - response_format="none", + response_format="auto", stream=False, temperature=1, thread={ @@ -473,9 +473,9 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) max_completion_tokens=256, max_prompt_tokens=256, metadata={}, - model="gpt-4-turbo", + model="gpt-4o", parallel_tool_calls=True, - response_format="none", + response_format="auto", temperature=1, thread={ "messages": [ @@ -912,9 +912,9 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie max_completion_tokens=256, max_prompt_tokens=256, metadata={}, - model="gpt-4-turbo", + model="gpt-4o", parallel_tool_calls=True, - response_format="none", + response_format="auto", stream=False, temperature=1, thread={ @@ -1083,9 +1083,9 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie max_completion_tokens=256, max_prompt_tokens=256, metadata={}, - model="gpt-4-turbo", + model="gpt-4o", parallel_tool_calls=True, - response_format="none", + response_format="auto", temperature=1, thread={ "messages": [ diff --git a/tests/api_resources/beta/threads/test_runs.py 
b/tests/api_resources/beta/threads/test_runs.py index ff242126b2..548c14f45d 100644 --- a/tests/api_resources/beta/threads/test_runs.py +++ b/tests/api_resources/beta/threads/test_runs.py @@ -133,9 +133,9 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: max_completion_tokens=256, max_prompt_tokens=256, metadata={}, - model="gpt-4-turbo", + model="gpt-4o", parallel_tool_calls=True, - response_format="none", + response_format="auto", stream=False, temperature=1, tool_choice="none", @@ -297,9 +297,9 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: max_completion_tokens=256, max_prompt_tokens=256, metadata={}, - model="gpt-4-turbo", + model="gpt-4o", parallel_tool_calls=True, - response_format="none", + response_format="auto", temperature=1, tool_choice="none", tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], @@ -799,9 +799,9 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn max_completion_tokens=256, max_prompt_tokens=256, metadata={}, - model="gpt-4-turbo", + model="gpt-4o", parallel_tool_calls=True, - response_format="none", + response_format="auto", stream=False, temperature=1, tool_choice="none", @@ -963,9 +963,9 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn max_completion_tokens=256, max_prompt_tokens=256, metadata={}, - model="gpt-4-turbo", + model="gpt-4o", parallel_tool_calls=True, - response_format="none", + response_format="auto", temperature=1, tool_choice="none", tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index ca5cada7f3..f31fd06dd9 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -24,11 +24,11 @@ def test_method_create_overload_1(self, client: OpenAI) -> None: completion = client.chat.completions.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", ) assert_matches_type(ChatCompletion, completion, path=["response"]) @@ -37,12 +37,12 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: completion = client.chat.completions.create( messages=[ { - "content": "content", + "content": "string", "role": "system", "name": "name", } ], - model="gpt-4-turbo", + model="gpt-4o", frequency_penalty=-2, function_call="none", functions=[ @@ -58,7 +58,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: n=1, parallel_tool_calls=True, presence_penalty=-2, - response_format={"type": "json_object"}, + response_format={"type": "text"}, seed=-9007199254740991, service_tier="auto", stop="string", @@ -73,6 +73,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "description": "description", "name": "name", "parameters": {"foo": "bar"}, + "strict": True, }, }, { @@ -81,6 +82,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "description": "description", "name": "name", "parameters": {"foo": "bar"}, + "strict": True, }, }, { @@ -89,6 +91,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "description": "description", "name": "name", "parameters": {"foo": "bar"}, + "strict": True, }, }, ], @@ -103,11 +106,11 @@ def test_raw_response_create_overload_1(self, 
client: OpenAI) -> None: response = client.chat.completions.with_raw_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", ) assert response.is_closed is True @@ -120,11 +123,11 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None: with client.chat.completions.with_streaming_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -139,11 +142,11 @@ def test_method_create_overload_2(self, client: OpenAI) -> None: completion_stream = client.chat.completions.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", stream=True, ) completion_stream.response.close() @@ -153,12 +156,12 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: completion_stream = client.chat.completions.create( messages=[ { - "content": "content", + "content": "string", "role": "system", "name": "name", } ], - model="gpt-4-turbo", + model="gpt-4o", stream=True, frequency_penalty=-2, function_call="none", @@ -175,7 +178,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: n=1, parallel_tool_calls=True, presence_penalty=-2, - response_format={"type": "json_object"}, + response_format={"type": "text"}, seed=-9007199254740991, service_tier="auto", stop="string", @@ -189,6 +192,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "description": "description", "name": "name", "parameters": {"foo": "bar"}, + "strict": True, }, }, { @@ -197,6 +201,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "description": "description", "name": "name", "parameters": {"foo": "bar"}, + "strict": True, }, }, { @@ -205,6 +210,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "description": "description", "name": "name", "parameters": {"foo": "bar"}, + "strict": True, }, }, ], @@ -219,11 +225,11 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None: response = client.chat.completions.with_raw_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", stream=True, ) @@ -236,11 +242,11 @@ def test_streaming_response_create_overload_2(self, client: OpenAI) -> None: with client.chat.completions.with_streaming_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", stream=True, ) as response: assert not response.is_closed @@ -260,11 +266,11 @@ async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None completion = await async_client.chat.completions.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", ) assert_matches_type(ChatCompletion, completion, path=["response"]) @@ -273,12 +279,12 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn completion = await async_client.chat.completions.create( messages=[ { - "content": "content", + "content": "string", "role": "system", "name": "name", } ], - model="gpt-4-turbo", + model="gpt-4o", frequency_penalty=-2, function_call="none", 
functions=[ @@ -294,7 +300,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn n=1, parallel_tool_calls=True, presence_penalty=-2, - response_format={"type": "json_object"}, + response_format={"type": "text"}, seed=-9007199254740991, service_tier="auto", stop="string", @@ -309,6 +315,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "description": "description", "name": "name", "parameters": {"foo": "bar"}, + "strict": True, }, }, { @@ -317,6 +324,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "description": "description", "name": "name", "parameters": {"foo": "bar"}, + "strict": True, }, }, { @@ -325,6 +333,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "description": "description", "name": "name", "parameters": {"foo": "bar"}, + "strict": True, }, }, ], @@ -339,11 +348,11 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) - response = await async_client.chat.completions.with_raw_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", ) assert response.is_closed is True @@ -356,11 +365,11 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpe async with async_client.chat.completions.with_streaming_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -375,11 +384,11 @@ async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None completion_stream = await async_client.chat.completions.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", stream=True, ) await completion_stream.response.aclose() @@ -389,12 +398,12 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn completion_stream = await async_client.chat.completions.create( messages=[ { - "content": "content", + "content": "string", "role": "system", "name": "name", } ], - model="gpt-4-turbo", + model="gpt-4o", stream=True, frequency_penalty=-2, function_call="none", @@ -411,7 +420,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn n=1, parallel_tool_calls=True, presence_penalty=-2, - response_format={"type": "json_object"}, + response_format={"type": "text"}, seed=-9007199254740991, service_tier="auto", stop="string", @@ -425,6 +434,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "description": "description", "name": "name", "parameters": {"foo": "bar"}, + "strict": True, }, }, { @@ -433,6 +443,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "description": "description", "name": "name", "parameters": {"foo": "bar"}, + "strict": True, }, }, { @@ -441,6 +452,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "description": "description", "name": "name", "parameters": {"foo": "bar"}, + "strict": True, }, }, ], @@ -455,11 +467,11 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) - response = await async_client.chat.completions.with_raw_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - 
model="gpt-4-turbo", + model="gpt-4o", stream=True, ) @@ -472,11 +484,11 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncOpe async with async_client.chat.completions.with_streaming_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", stream=True, ) as response: assert not response.is_closed diff --git a/tests/api_resources/fine_tuning/test_jobs.py b/tests/api_resources/fine_tuning/test_jobs.py index 3353547ad7..e19b22b0b1 100644 --- a/tests/api_resources/fine_tuning/test_jobs.py +++ b/tests/api_resources/fine_tuning/test_jobs.py @@ -24,7 +24,7 @@ class TestJobs: @parametrize def test_method_create(self, client: OpenAI) -> None: job = client.fine_tuning.jobs.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", training_file="file-abc123", ) assert_matches_type(FineTuningJob, job, path=["response"]) @@ -32,7 +32,7 @@ def test_method_create(self, client: OpenAI) -> None: @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: job = client.fine_tuning.jobs.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", training_file="file-abc123", hyperparameters={ "batch_size": "auto", @@ -77,7 +77,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.fine_tuning.jobs.with_raw_response.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", training_file="file-abc123", ) @@ -89,7 +89,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.fine_tuning.jobs.with_streaming_response.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", training_file="file-abc123", ) as response: assert not response.is_closed @@ -263,7 +263,7 @@ class TestAsyncJobs: @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: job = await async_client.fine_tuning.jobs.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", training_file="file-abc123", ) assert_matches_type(FineTuningJob, job, path=["response"]) @@ -271,7 +271,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: job = await async_client.fine_tuning.jobs.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", training_file="file-abc123", hyperparameters={ "batch_size": "auto", @@ -316,7 +316,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.fine_tuning.jobs.with_raw_response.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", training_file="file-abc123", ) @@ -328,7 +328,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.fine_tuning.jobs.with_streaming_response.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", training_file="file-abc123", ) as response: assert not response.is_closed diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index 71f8e5834b..8791507c3e 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -21,14 +21,14 @@ class TestModels: @parametrize def 
test_method_retrieve(self, client: OpenAI) -> None: model = client.models.retrieve( - "gpt-3.5-turbo", + "gpt-4o-mini", ) assert_matches_type(Model, model, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.models.with_raw_response.retrieve( - "gpt-3.5-turbo", + "gpt-4o-mini", ) assert response.is_closed is True @@ -39,7 +39,7 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None: @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.models.with_streaming_response.retrieve( - "gpt-3.5-turbo", + "gpt-4o-mini", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -84,14 +84,14 @@ def test_streaming_response_list(self, client: OpenAI) -> None: @parametrize def test_method_delete(self, client: OpenAI) -> None: model = client.models.delete( - "ft:gpt-3.5-turbo:acemeco:suffix:abc123", + "ft:gpt-4o-mini:acemeco:suffix:abc123", ) assert_matches_type(ModelDeleted, model, path=["response"]) @parametrize def test_raw_response_delete(self, client: OpenAI) -> None: response = client.models.with_raw_response.delete( - "ft:gpt-3.5-turbo:acemeco:suffix:abc123", + "ft:gpt-4o-mini:acemeco:suffix:abc123", ) assert response.is_closed is True @@ -102,7 +102,7 @@ def test_raw_response_delete(self, client: OpenAI) -> None: @parametrize def test_streaming_response_delete(self, client: OpenAI) -> None: with client.models.with_streaming_response.delete( - "ft:gpt-3.5-turbo:acemeco:suffix:abc123", + "ft:gpt-4o-mini:acemeco:suffix:abc123", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -126,14 +126,14 @@ class TestAsyncModels: @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: model = await async_client.models.retrieve( - "gpt-3.5-turbo", + "gpt-4o-mini", ) assert_matches_type(Model, model, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.models.with_raw_response.retrieve( - "gpt-3.5-turbo", + "gpt-4o-mini", ) assert response.is_closed is True @@ -144,7 +144,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.models.with_streaming_response.retrieve( - "gpt-3.5-turbo", + "gpt-4o-mini", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -189,14 +189,14 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_delete(self, async_client: AsyncOpenAI) -> None: model = await async_client.models.delete( - "ft:gpt-3.5-turbo:acemeco:suffix:abc123", + "ft:gpt-4o-mini:acemeco:suffix:abc123", ) assert_matches_type(ModelDeleted, model, path=["response"]) @parametrize async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: response = await async_client.models.with_raw_response.delete( - "ft:gpt-3.5-turbo:acemeco:suffix:abc123", + "ft:gpt-4o-mini:acemeco:suffix:abc123", ) assert response.is_closed is True @@ -207,7 +207,7 @@ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: async with async_client.models.with_streaming_response.delete( - 
"ft:gpt-3.5-turbo:acemeco:suffix:abc123", + "ft:gpt-4o-mini:acemeco:suffix:abc123", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/test_client.py b/tests/test_client.py index 2402ffa82f..054ae0ff4e 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -780,11 +780,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: response = client.chat.completions.with_raw_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", ) assert response.retries_taken == failures_before_success @@ -811,11 +811,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: with client.chat.completions.with_streaming_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", ) as response: assert response.retries_taken == failures_before_success @@ -1574,11 +1574,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: response = await client.chat.completions.with_raw_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", ) assert response.retries_taken == failures_before_success @@ -1606,10 +1606,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: async with client.chat.completions.with_streaming_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", ) as response: assert response.retries_taken == failures_before_success From ca11432d9b1b3af13cd2ddb8be715878f5339a72 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 8 Aug 2024 20:21:24 +0000 Subject: [PATCH 054/192] chore(internal): updates (#1624) --- .stats.yml | 2 +- pyproject.toml | 3 +- src/openai/resources/chat/completions.py | 30 +++++++++++++++++++ .../types/chat/completion_create_params.py | 5 ++++ src/openai/types/chat_model.py | 2 +- 5 files changed, 38 insertions(+), 4 deletions(-) diff --git a/.stats.yml b/.stats.yml index ac652c9271..cad2c64cd0 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-4097c2f86beb3f3bb021775cd1dfa240e960caf842aeefc2e08da4dc0851ea79.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-97797a9363b9960b5f2fbdc84426a2b91e75533ecd409fe99e37c231180a4339.yml diff --git a/pyproject.toml b/pyproject.toml index 43a0102ec4..0dc0dcd4b8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -195,7 +195,6 @@ unfixable = [ "T201", "T203", ] -ignore-init-module-imports = true [tool.ruff.lint.flake8-tidy-imports.banned-api] "functools.lru_cache".msg = "This function does not retain type information for the wrapped function's arguments; The `lru_cache` function from `_utils` should be used instead" @@ -207,7 +206,7 @@ combine-as-imports = true extra-standard-library = ["typing_extensions"] known-first-party = ["openai", "tests"] -[tool.ruff.per-file-ignores] +[tool.ruff.lint.per-file-ignores] "bin/**.py" = ["T201", "T203"] "scripts/**.py" = ["T201", "T203"] "tests/**.py" = ["T201", "T203"] diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index 3dcd3774d7..d1be712e33 100644 --- 
a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -147,6 +147,11 @@ def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -345,6 +350,11 @@ def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -536,6 +546,11 @@ def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -802,6 +817,11 @@ async def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -1000,6 +1020,11 @@ async def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -1191,6 +1216,11 @@ async def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
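The same Structured Outputs paragraph is threaded through each `create` overload in the chat completions resource. Pulling the pieces of this commit range together — the `json_schema` response format, the `strict` flag on function definitions, and the new `refusal` field on `ChatCompletionMessage` — a usage sketch could look like the following; the schema and field values are illustrative:

import openai

client = openai.OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": "List two prime numbers as JSON."}],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "primes",  # illustrative name
            "schema": {
                "type": "object",
                "properties": {"primes": {"type": "array", "items": {"type": "integer"}}},
                "required": ["primes"],
                "additionalProperties": False,
            },
            "strict": True,
        },
    },
)

message = completion.choices[0].message
if message.refusal:  # field added in this change set
    print("model refused:", message.refusal)
else:
    print(message.content)
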
diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index bf648a3858..61126b37ac 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -126,6 +126,11 @@ class CompletionCreateParamsBase(TypedDict, total=False): [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py index 686f26b783..09bc081f7a 100644 --- a/src/openai/types/chat_model.py +++ b/src/openai/types/chat_model.py @@ -6,8 +6,8 @@ ChatModel: TypeAlias = Literal[ "gpt-4o", - "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", From dc5911e451378b61924eeeaac96c8f629c161223 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 9 Aug 2024 16:32:50 +0000 Subject: [PATCH 055/192] chore(ci): codeowners file (#1627) --- .github/CODEOWNERS | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 3ce5f8d004..d58c8454c5 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1,4 @@ +# This file is used to automatically assign reviewers to PRs +# For more information see: https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners + * @openai/sdks-team From b9b512aa6f24d4613b5934d695cf5715b681acf6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 9 Aug 2024 17:43:42 +0000 Subject: [PATCH 056/192] chore(ci): bump prism mock server version (#1630) --- scripts/mock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/mock b/scripts/mock index f586157699..d2814ae6a0 100755 --- a/scripts/mock +++ b/scripts/mock @@ -21,7 +21,7 @@ echo "==> Starting mock server with URL ${URL}" # Run prism mock on the given spec if [ "$1" == "--daemon" ]; then - npm exec --package=@stainless-api/prism-cli@5.8.4 -- prism mock "$URL" &> .prism.log & + npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" &> .prism.log & # Wait for server to come online echo -n "Waiting for server" @@ -37,5 +37,5 @@ if [ "$1" == "--daemon" ]; then echo else - npm exec --package=@stainless-api/prism-cli@5.8.4 -- prism mock "$URL" + npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" fi From 4954535dd3805c187539d6a11896fb8d73dc68f0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 9 Aug 2024 19:05:02 +0000 Subject: [PATCH 057/192] chore(internal): ensure package is importable in lint cmd (#1631) --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 0dc0dcd4b8..c2ca31abaa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -76,10 +76,13 @@ format = { chain = [ "lint" = { chain = [ "check:ruff", "typecheck", + "check:importable", ]} "check:ruff" = "ruff 
check ." "fix:ruff" = "ruff check --fix ." +"check:importable" = "python -c 'import openai'" + typecheck = { chain = [ "typecheck:pyright", "typecheck:mypy" From 60d2e1a83ef124d0a66d93d24b749a42bf6f4ffe Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 19:21:15 +0000 Subject: [PATCH 058/192] chore(internal): update some imports (#1642) --- .../beta/assistant_response_format_option_param.py | 9 ++++----- src/openai/types/beta/function_tool_param.py | 4 ++-- src/openai/types/chat/chat_completion_tool_param.py | 4 ++-- src/openai/types/chat/completion_create_params.py | 11 ++++++----- src/openai/types/shared_params/function_definition.py | 4 ++-- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/openai/types/beta/assistant_response_format_option_param.py b/src/openai/types/beta/assistant_response_format_option_param.py index 680a060c3c..5e724a4d98 100644 --- a/src/openai/types/beta/assistant_response_format_option_param.py +++ b/src/openai/types/beta/assistant_response_format_option_param.py @@ -5,13 +5,12 @@ from typing import Union from typing_extensions import Literal, TypeAlias -from ...types import shared_params +from ..shared_params.response_format_text import ResponseFormatText +from ..shared_params.response_format_json_object import ResponseFormatJSONObject +from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema __all__ = ["AssistantResponseFormatOptionParam"] AssistantResponseFormatOptionParam: TypeAlias = Union[ - Literal["auto"], - shared_params.ResponseFormatText, - shared_params.ResponseFormatJSONObject, - shared_params.ResponseFormatJSONSchema, + Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema ] diff --git a/src/openai/types/beta/function_tool_param.py b/src/openai/types/beta/function_tool_param.py index b44c0d47ef..d906e02b88 100644 --- a/src/openai/types/beta/function_tool_param.py +++ b/src/openai/types/beta/function_tool_param.py @@ -4,13 +4,13 @@ from typing_extensions import Literal, Required, TypedDict -from ...types import shared_params +from ..shared_params.function_definition import FunctionDefinition __all__ = ["FunctionToolParam"] class FunctionToolParam(TypedDict, total=False): - function: Required[shared_params.FunctionDefinition] + function: Required[FunctionDefinition] type: Required[Literal["function"]] """The type of tool being defined: `function`""" diff --git a/src/openai/types/chat/chat_completion_tool_param.py b/src/openai/types/chat/chat_completion_tool_param.py index 0cf6ea7268..6c2b1a36f0 100644 --- a/src/openai/types/chat/chat_completion_tool_param.py +++ b/src/openai/types/chat/chat_completion_tool_param.py @@ -4,13 +4,13 @@ from typing_extensions import Literal, Required, TypedDict -from ...types import shared_params +from ..shared_params.function_definition import FunctionDefinition __all__ = ["ChatCompletionToolParam"] class ChatCompletionToolParam(TypedDict, total=False): - function: Required[shared_params.FunctionDefinition] + function: Required[FunctionDefinition] type: Required[Literal["function"]] """The type of the tool. 
Currently, only `function` is supported.""" diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 61126b37ac..91435dcedd 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -5,12 +5,15 @@ from typing import Dict, List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict -from ...types import shared_params from ..chat_model import ChatModel from .chat_completion_tool_param import ChatCompletionToolParam from .chat_completion_message_param import ChatCompletionMessageParam +from ..shared_params.function_parameters import FunctionParameters +from ..shared_params.response_format_text import ResponseFormatText from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam from .chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam +from ..shared_params.response_format_json_object import ResponseFormatJSONObject +from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema from .chat_completion_function_call_option_param import ChatCompletionFunctionCallOptionParam __all__ = [ @@ -244,7 +247,7 @@ class Function(TypedDict, total=False): how to call the function. """ - parameters: shared_params.FunctionParameters + parameters: FunctionParameters """The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for @@ -256,9 +259,7 @@ class Function(TypedDict, total=False): """ -ResponseFormat: TypeAlias = Union[ - shared_params.ResponseFormatText, shared_params.ResponseFormatJSONObject, shared_params.ResponseFormatJSONSchema -] +ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema] class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase): diff --git a/src/openai/types/shared_params/function_definition.py b/src/openai/types/shared_params/function_definition.py index f41392f154..d45ec13f1e 100644 --- a/src/openai/types/shared_params/function_definition.py +++ b/src/openai/types/shared_params/function_definition.py @@ -5,7 +5,7 @@ from typing import Optional from typing_extensions import Required, TypedDict -from ...types import shared_params +from .function_parameters import FunctionParameters __all__ = ["FunctionDefinition"] @@ -24,7 +24,7 @@ class FunctionDefinition(TypedDict, total=False): how to call the function. """ - parameters: shared_params.FunctionParameters + parameters: FunctionParameters """The parameters the functions accepts, described as a JSON Schema object. 
See the [guide](https://platform.openai.com/docs/guides/function-calling) for From 10fd7a73830a03e3b03611c521fe39aea0b601b0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 19:48:03 +0000 Subject: [PATCH 059/192] chore(examples): minor formatting changes (#1644) --- tests/api_resources/beta/test_assistants.py | 4 +- tests/api_resources/beta/test_threads.py | 56 +++++++++---------- tests/api_resources/beta/threads/test_runs.py | 36 ++++++------ tests/api_resources/chat/test_completions.py | 56 +++++++++---------- tests/api_resources/fine_tuning/test_jobs.py | 12 ++-- tests/api_resources/test_images.py | 12 ++-- 6 files changed, 88 insertions(+), 88 deletions(-) diff --git a/tests/api_resources/beta/test_assistants.py b/tests/api_resources/beta/test_assistants.py index fbd5ff0597..642935cdaf 100644 --- a/tests/api_resources/beta/test_assistants.py +++ b/tests/api_resources/beta/test_assistants.py @@ -44,8 +44,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "vector_store_ids": ["string"], "vector_stores": [ { - "file_ids": ["string", "string", "string"], "chunking_strategy": {"type": "auto"}, + "file_ids": ["string", "string", "string"], "metadata": {}, } ], @@ -276,8 +276,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "vector_store_ids": ["string"], "vector_stores": [ { - "file_ids": ["string", "string", "string"], "chunking_strategy": {"type": "auto"}, + "file_ids": ["string", "string", "string"], "metadata": {}, } ], diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py index 36ce75b8e7..6fb36199a4 100644 --- a/tests/api_resources/beta/test_threads.py +++ b/tests/api_resources/beta/test_threads.py @@ -31,8 +31,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: thread = client.beta.threads.create( messages=[ { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -62,8 +62,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -93,8 +93,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -131,8 +131,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "vector_store_ids": ["string"], "vector_stores": [ { - "file_ids": ["string", "string", "string"], "chunking_strategy": {"type": "auto"}, + "file_ids": ["string", "string", "string"], "metadata": {}, } ], @@ -310,8 +310,8 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) thread={ "messages": [ { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -341,8 +341,8 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -372,8 +372,8 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -403,20 +403,20 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) "metadata": {}, }, ], + 
"metadata": {}, "tool_resources": { "code_interpreter": {"file_ids": ["string", "string", "string"]}, "file_search": { "vector_store_ids": ["string"], "vector_stores": [ { - "file_ids": ["string", "string", "string"], "chunking_strategy": {"type": "auto"}, + "file_ids": ["string", "string", "string"], "metadata": {}, } ], }, }, - "metadata": {}, }, tool_choice="none", tool_resources={ @@ -480,8 +480,8 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) thread={ "messages": [ { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -511,8 +511,8 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -542,8 +542,8 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -573,20 +573,20 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) "metadata": {}, }, ], + "metadata": {}, "tool_resources": { "code_interpreter": {"file_ids": ["string", "string", "string"]}, "file_search": { "vector_store_ids": ["string"], "vector_stores": [ { - "file_ids": ["string", "string", "string"], "chunking_strategy": {"type": "auto"}, + "file_ids": ["string", "string", "string"], "metadata": {}, } ], }, }, - "metadata": {}, }, tool_choice="none", tool_resources={ @@ -641,8 +641,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> thread = await async_client.beta.threads.create( messages=[ { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -672,8 +672,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -703,8 +703,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -741,8 +741,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "vector_store_ids": ["string"], "vector_stores": [ { - "file_ids": ["string", "string", "string"], "chunking_strategy": {"type": "auto"}, + "file_ids": ["string", "string", "string"], "metadata": {}, } ], @@ -920,8 +920,8 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie thread={ "messages": [ { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -951,8 +951,8 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -982,8 +982,8 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -1013,20 +1013,20 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie "metadata": {}, }, ], + "metadata": {}, "tool_resources": { "code_interpreter": {"file_ids": ["string", "string", "string"]}, "file_search": { "vector_store_ids": ["string"], "vector_stores": [ { - "file_ids": 
["string", "string", "string"], "chunking_strategy": {"type": "auto"}, + "file_ids": ["string", "string", "string"], "metadata": {}, } ], }, }, - "metadata": {}, }, tool_choice="none", tool_resources={ @@ -1090,8 +1090,8 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie thread={ "messages": [ { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -1121,8 +1121,8 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -1152,8 +1152,8 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -1183,20 +1183,20 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie "metadata": {}, }, ], + "metadata": {}, "tool_resources": { "code_interpreter": {"file_ids": ["string", "string", "string"]}, "file_search": { "vector_store_ids": ["string"], "vector_stores": [ { - "file_ids": ["string", "string", "string"], "chunking_strategy": {"type": "auto"}, + "file_ids": ["string", "string", "string"], "metadata": {}, } ], }, }, - "metadata": {}, }, tool_choice="none", tool_resources={ diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py index 548c14f45d..0c7ff2f146 100644 --- a/tests/api_resources/beta/threads/test_runs.py +++ b/tests/api_resources/beta/threads/test_runs.py @@ -36,8 +36,8 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: additional_instructions="additional_instructions", additional_messages=[ { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -67,8 +67,8 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -98,8 +98,8 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -200,8 +200,8 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: additional_instructions="additional_instructions", additional_messages=[ { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -231,8 +231,8 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -262,8 +262,8 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -565,16 +565,16 @@ def test_method_submit_tool_outputs_with_all_params_overload_1(self, client: Ope thread_id="thread_id", tool_outputs=[ { - "tool_call_id": "tool_call_id", "output": "output", + "tool_call_id": "tool_call_id", }, { - "tool_call_id": "tool_call_id", "output": "output", + "tool_call_id": "tool_call_id", }, { - "tool_call_id": "tool_call_id", "output": "output", + "tool_call_id": "tool_call_id", }, ], stream=False, @@ -702,8 +702,8 @@ async def 
test_method_create_with_all_params_overload_1(self, async_client: Asyn additional_instructions="additional_instructions", additional_messages=[ { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -733,8 +733,8 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -764,8 +764,8 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -866,8 +866,8 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn additional_instructions="additional_instructions", additional_messages=[ { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -897,8 +897,8 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -928,8 +928,8 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "metadata": {}, }, { - "role": "user", "content": "string", + "role": "user", "attachments": [ { "file_id": "file_id", @@ -1231,16 +1231,16 @@ async def test_method_submit_tool_outputs_with_all_params_overload_1(self, async thread_id="thread_id", tool_outputs=[ { - "tool_call_id": "tool_call_id", "output": "output", + "tool_call_id": "tool_call_id", }, { - "tool_call_id": "tool_call_id", "output": "output", + "tool_call_id": "tool_call_id", }, { - "tool_call_id": "tool_call_id", "output": "output", + "tool_call_id": "tool_call_id", }, ], stream=False, diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index f31fd06dd9..01ce3f1b0d 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -47,8 +47,8 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: function_call="none", functions=[ { - "description": "description", "name": "name", + "description": "description", "parameters": {"foo": "bar"}, } ], @@ -68,31 +68,31 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: tool_choice="none", tools=[ { - "type": "function", "function": { - "description": "description", "name": "name", + "description": "description", "parameters": {"foo": "bar"}, "strict": True, }, + "type": "function", }, { - "type": "function", "function": { - "description": "description", "name": "name", + "description": "description", "parameters": {"foo": "bar"}, "strict": True, }, + "type": "function", }, { - "type": "function", "function": { - "description": "description", "name": "name", + "description": "description", "parameters": {"foo": "bar"}, "strict": True, }, + "type": "function", }, ], top_logprobs=0, @@ -167,8 +167,8 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: function_call="none", functions=[ { - "description": "description", "name": "name", + "description": "description", "parameters": {"foo": "bar"}, } ], @@ -187,31 +187,31 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: tool_choice="none", tools=[ { - "type": "function", "function": { - "description": "description", "name": "name", + "description": "description", 
"parameters": {"foo": "bar"}, "strict": True, }, + "type": "function", }, { - "type": "function", "function": { - "description": "description", "name": "name", + "description": "description", "parameters": {"foo": "bar"}, "strict": True, }, + "type": "function", }, { - "type": "function", "function": { - "description": "description", "name": "name", + "description": "description", "parameters": {"foo": "bar"}, "strict": True, }, + "type": "function", }, ], top_logprobs=0, @@ -289,8 +289,8 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn function_call="none", functions=[ { - "description": "description", "name": "name", + "description": "description", "parameters": {"foo": "bar"}, } ], @@ -310,31 +310,31 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn tool_choice="none", tools=[ { - "type": "function", "function": { - "description": "description", "name": "name", + "description": "description", "parameters": {"foo": "bar"}, "strict": True, }, + "type": "function", }, { - "type": "function", "function": { - "description": "description", "name": "name", + "description": "description", "parameters": {"foo": "bar"}, "strict": True, }, + "type": "function", }, { - "type": "function", "function": { - "description": "description", "name": "name", + "description": "description", "parameters": {"foo": "bar"}, "strict": True, }, + "type": "function", }, ], top_logprobs=0, @@ -409,8 +409,8 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn function_call="none", functions=[ { - "description": "description", "name": "name", + "description": "description", "parameters": {"foo": "bar"}, } ], @@ -429,31 +429,31 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn tool_choice="none", tools=[ { - "type": "function", "function": { - "description": "description", "name": "name", + "description": "description", "parameters": {"foo": "bar"}, "strict": True, }, + "type": "function", }, { - "type": "function", "function": { - "description": "description", "name": "name", + "description": "description", "parameters": {"foo": "bar"}, "strict": True, }, + "type": "function", }, { - "type": "function", "function": { - "description": "description", "name": "name", + "description": "description", "parameters": {"foo": "bar"}, "strict": True, }, + "type": "function", }, ], top_logprobs=0, diff --git a/tests/api_resources/fine_tuning/test_jobs.py b/tests/api_resources/fine_tuning/test_jobs.py index e19b22b0b1..018ed82764 100644 --- a/tests/api_resources/fine_tuning/test_jobs.py +++ b/tests/api_resources/fine_tuning/test_jobs.py @@ -44,8 +44,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "type": "wandb", "wandb": { "project": "my-wandb-project", - "name": "name", "entity": "entity", + "name": "name", "tags": ["custom-tag", "custom-tag", "custom-tag"], }, }, @@ -53,8 +53,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "type": "wandb", "wandb": { "project": "my-wandb-project", - "name": "name", "entity": "entity", + "name": "name", "tags": ["custom-tag", "custom-tag", "custom-tag"], }, }, @@ -62,8 +62,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "type": "wandb", "wandb": { "project": "my-wandb-project", - "name": "name", "entity": "entity", + "name": "name", "tags": ["custom-tag", "custom-tag", "custom-tag"], }, }, @@ -283,8 +283,8 @@ async def test_method_create_with_all_params(self, async_client: 
AsyncOpenAI) -> "type": "wandb", "wandb": { "project": "my-wandb-project", - "name": "name", "entity": "entity", + "name": "name", "tags": ["custom-tag", "custom-tag", "custom-tag"], }, }, @@ -292,8 +292,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "type": "wandb", "wandb": { "project": "my-wandb-project", - "name": "name", "entity": "entity", + "name": "name", "tags": ["custom-tag", "custom-tag", "custom-tag"], }, }, @@ -301,8 +301,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "type": "wandb", "wandb": { "project": "my-wandb-project", - "name": "name", "entity": "entity", + "name": "name", "tags": ["custom-tag", "custom-tag", "custom-tag"], }, }, diff --git a/tests/api_resources/test_images.py b/tests/api_resources/test_images.py index 2e31f3354a..9bc9719bc5 100644 --- a/tests/api_resources/test_images.py +++ b/tests/api_resources/test_images.py @@ -31,7 +31,7 @@ def test_method_create_variation_with_all_params(self, client: OpenAI) -> None: model="dall-e-2", n=1, response_format="url", - size="1024x1024", + size="256x256", user="user-1234", ) assert_matches_type(ImagesResponse, image, path=["response"]) @@ -77,7 +77,7 @@ def test_method_edit_with_all_params(self, client: OpenAI) -> None: model="dall-e-2", n=1, response_format="url", - size="1024x1024", + size="256x256", user="user-1234", ) assert_matches_type(ImagesResponse, image, path=["response"]) @@ -123,7 +123,7 @@ def test_method_generate_with_all_params(self, client: OpenAI) -> None: n=1, quality="standard", response_format="url", - size="1024x1024", + size="256x256", style="vivid", user="user-1234", ) @@ -171,7 +171,7 @@ async def test_method_create_variation_with_all_params(self, async_client: Async model="dall-e-2", n=1, response_format="url", - size="1024x1024", + size="256x256", user="user-1234", ) assert_matches_type(ImagesResponse, image, path=["response"]) @@ -217,7 +217,7 @@ async def test_method_edit_with_all_params(self, async_client: AsyncOpenAI) -> N model="dall-e-2", n=1, response_format="url", - size="1024x1024", + size="256x256", user="user-1234", ) assert_matches_type(ImagesResponse, image, path=["response"]) @@ -263,7 +263,7 @@ async def test_method_generate_with_all_params(self, async_client: AsyncOpenAI) n=1, quality="standard", response_format="url", - size="1024x1024", + size="256x256", style="vivid", user="user-1234", ) From 654fdbfd9973bec43e6ab3d8486895e27015c4af Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 21:25:41 +0000 Subject: [PATCH 060/192] chore: sync openapi url (#1646) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index cad2c64cd0..2371b7b8d4 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-97797a9363b9960b5f2fbdc84426a2b91e75533ecd409fe99e37c231180a4339.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-285bce7dcdae7eea5fe84a8d6e5af2c1473d65ea193109370fb2257851eef7eb.yml From c5a74dd0b6c2f8d97927104712930bef87a3e8cb Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 15 Aug 2024 20:19:37 +0000 Subject: [PATCH 061/192] chore(internal): use different 32bit detection method (#1652) --- src/openai/_base_client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 3388d69fab..f374449dbc 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -1,5 +1,6 @@ from __future__ import annotations +import sys import json import time import uuid @@ -2012,7 +2013,6 @@ def get_python_version() -> str: def get_architecture() -> Arch: try: - python_bitness, _ = platform.architecture() machine = platform.machine().lower() except Exception: return "unknown" @@ -2028,7 +2028,7 @@ def get_architecture() -> Arch: return "x64" # TODO: untested - if python_bitness == "32bit": + if sys.maxsize <= 2**32: return "x32" if machine: From 40e542b0cad5b5d06e38a00b3b28bebde7c34170 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 15 Aug 2024 21:59:01 +0000 Subject: [PATCH 062/192] chore(types): define FilePurpose enum (#1653) --- .stats.yml | 2 +- api.md | 2 +- src/openai/resources/files.py | 8 ++++---- src/openai/resources/uploads/uploads.py | 8 ++++---- src/openai/types/__init__.py | 1 + src/openai/types/file_create_params.py | 5 +++-- src/openai/types/file_purpose.py | 7 +++++++ src/openai/types/upload_create_params.py | 6 ++++-- 8 files changed, 25 insertions(+), 14 deletions(-) create mode 100644 src/openai/types/file_purpose.py diff --git a/.stats.yml b/.stats.yml index 2371b7b8d4..185585b675 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-285bce7dcdae7eea5fe84a8d6e5af2c1473d65ea193109370fb2257851eef7eb.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8ff62fa1091460d68fbd36d72c17d91b709917bebf2983c9c4de5784bc384a2e.yml diff --git a/api.md b/api.md index cb78f55ca6..2e766b7ca4 100644 --- a/api.md +++ b/api.md @@ -82,7 +82,7 @@ Methods: Types: ```python -from openai.types import FileContent, FileDeleted, FileObject +from openai.types import FileContent, FileDeleted, FileObject, FilePurpose ``` Methods: diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py index f9db4f9ff9..4d2b51ab56 100644 --- a/src/openai/resources/files.py +++ b/src/openai/resources/files.py @@ -4,12 +4,11 @@ import typing_extensions from typing import Mapping, cast -from typing_extensions import Literal import httpx from .. import _legacy_response -from ..types import file_list_params, file_create_params +from ..types import FilePurpose, file_list_params, file_create_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes from .._utils import ( extract_files, @@ -31,6 +30,7 @@ from .._base_client import AsyncPaginator, make_request_options from ..types.file_object import FileObject from ..types.file_deleted import FileDeleted +from ..types.file_purpose import FilePurpose __all__ = ["Files", "AsyncFiles"] @@ -48,7 +48,7 @@ def create( self, *, file: FileTypes, - purpose: Literal["assistants", "batch", "fine-tune", "vision"], + purpose: FilePurpose, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -307,7 +307,7 @@ async def create( self, *, file: FileTypes, - purpose: Literal["assistants", "batch", "fine-tune", "vision"], + purpose: FilePurpose, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, diff --git a/src/openai/resources/uploads/uploads.py b/src/openai/resources/uploads/uploads.py index 4100423d3e..3590a3843f 100644 --- a/src/openai/resources/uploads/uploads.py +++ b/src/openai/resources/uploads/uploads.py @@ -3,7 +3,6 @@ from __future__ import annotations from typing import List -from typing_extensions import Literal import httpx @@ -16,7 +15,7 @@ PartsWithStreamingResponse, AsyncPartsWithStreamingResponse, ) -from ...types import upload_create_params, upload_complete_params +from ...types import FilePurpose, upload_create_params, upload_complete_params from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ..._utils import ( maybe_transform, @@ -27,6 +26,7 @@ from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ..._base_client import make_request_options from ...types.upload import Upload +from ...types.file_purpose import FilePurpose __all__ = ["Uploads", "AsyncUploads"] @@ -50,7 +50,7 @@ def create( bytes: int, filename: str, mime_type: str, - purpose: Literal["assistants", "batch", "fine-tune", "vision"], + purpose: FilePurpose, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -233,7 +233,7 @@ async def create( bytes: int, filename: str, mime_type: str, - purpose: Literal["assistants", "batch", "fine-tune", "vision"], + purpose: FilePurpose, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index f621fb67c5..ad9284fbd5 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -24,6 +24,7 @@ from .image_model import ImageModel as ImageModel from .file_content import FileContent as FileContent from .file_deleted import FileDeleted as FileDeleted +from .file_purpose import FilePurpose as FilePurpose from .model_deleted import ModelDeleted as ModelDeleted from .images_response import ImagesResponse as ImagesResponse from .completion_usage import CompletionUsage as CompletionUsage diff --git a/src/openai/types/file_create_params.py b/src/openai/types/file_create_params.py index 8b1c296f39..ecf7503358 100644 --- a/src/openai/types/file_create_params.py +++ b/src/openai/types/file_create_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Required, TypedDict from .._types import FileTypes +from .file_purpose import FilePurpose __all__ = ["FileCreateParams"] @@ -13,7 +14,7 @@ class FileCreateParams(TypedDict, total=False): file: Required[FileTypes] """The File object (not file name) to be uploaded.""" - purpose: Required[Literal["assistants", "batch", "fine-tune", "vision"]] + purpose: Required[FilePurpose] """The intended purpose of the uploaded file. Use "assistants" for diff --git a/src/openai/types/file_purpose.py b/src/openai/types/file_purpose.py new file mode 100644 index 0000000000..32dc352c62 --- /dev/null +++ b/src/openai/types/file_purpose.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["FilePurpose"] + +FilePurpose: TypeAlias = Literal["assistants", "batch", "fine-tune", "vision"] diff --git a/src/openai/types/upload_create_params.py b/src/openai/types/upload_create_params.py index 3165ebcc7a..2ebabe6c66 100644 --- a/src/openai/types/upload_create_params.py +++ b/src/openai/types/upload_create_params.py @@ -2,7 +2,9 @@ from __future__ import annotations -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Required, TypedDict + +from .file_purpose import FilePurpose __all__ = ["UploadCreateParams"] @@ -21,7 +23,7 @@ class UploadCreateParams(TypedDict, total=False): supported MIME types for assistants and vision. """ - purpose: Required[Literal["assistants", "batch", "fine-tune", "vision"]] + purpose: Required[FilePurpose] """The intended purpose of the uploaded file. 
See the From 8cde9ad539a647ad61fa69e83bd69d9a7256ea12 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 Aug 2024 13:40:42 +0000 Subject: [PATCH 063/192] feat(api): add chatgpt-4o-latest model (#1656) --- .stats.yml | 2 +- src/openai/types/chat_model.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 185585b675..e9aeeaaeff 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8ff62fa1091460d68fbd36d72c17d91b709917bebf2983c9c4de5784bc384a2e.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8e569a23f15a599dd4aee8a53431962bcba4985ab6cfb66c53c1434b99026b37.yml diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py index 09bc081f7a..2372d5e14e 100644 --- a/src/openai/types/chat_model.py +++ b/src/openai/types/chat_model.py @@ -8,6 +8,7 @@ "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", + "chatgpt-4o-latest", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", From 0f2facfb4b0e899b94d20cd9670bd949fad02a16 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 19 Aug 2024 21:25:16 +0000 Subject: [PATCH 064/192] chore(client): fix parsing union responses when non-json is returned (#1665) --- src/openai/_models.py | 2 ++ tests/test_legacy_response.py | 22 +++++++++++++++++++- tests/test_response.py | 39 ++++++++++++++++++++++++++++++++++- 3 files changed, 61 insertions(+), 2 deletions(-) diff --git a/src/openai/_models.py b/src/openai/_models.py index 5148d5a7b3..d386eaa3a4 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -380,6 +380,8 @@ def is_basemodel(type_: type) -> bool: def is_basemodel_type(type_: type) -> TypeGuard[type[BaseModel] | type[GenericModel]]: origin = get_origin(type_) or type_ + if not inspect.isclass(origin): + return False return issubclass(origin, BaseModel) or issubclass(origin, GenericModel) diff --git a/tests/test_legacy_response.py b/tests/test_legacy_response.py index 3659ee12c1..3c2df53e58 100644 --- a/tests/test_legacy_response.py +++ b/tests/test_legacy_response.py @@ -1,5 +1,5 @@ import json -from typing import cast +from typing import Any, Union, cast from typing_extensions import Annotated import httpx @@ -81,3 +81,23 @@ def test_response_parse_annotated_type(client: OpenAI) -> None: ) assert obj.foo == "hello!" 
assert obj.bar == 2 + + +class OtherModel(pydantic.BaseModel): + a: str + + +@pytest.mark.parametrize("client", [False], indirect=True) # loose validation +def test_response_parse_expect_model_union_non_json_content(client: OpenAI) -> None: + response = LegacyAPIResponse( + raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + obj = response.parse(to=cast(Any, Union[CustomModel, OtherModel])) + assert isinstance(obj, str) + assert obj == "foo" diff --git a/tests/test_response.py b/tests/test_response.py index 6ea1be1a1a..b7d88bdbde 100644 --- a/tests/test_response.py +++ b/tests/test_response.py @@ -1,5 +1,5 @@ import json -from typing import List, cast +from typing import Any, List, Union, cast from typing_extensions import Annotated import httpx @@ -188,3 +188,40 @@ async def test_async_response_parse_annotated_type(async_client: AsyncOpenAI) -> ) assert obj.foo == "hello!" assert obj.bar == 2 + + +class OtherModel(BaseModel): + a: str + + +@pytest.mark.parametrize("client", [False], indirect=True) # loose validation +def test_response_parse_expect_model_union_non_json_content(client: OpenAI) -> None: + response = APIResponse( + raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + obj = response.parse(to=cast(Any, Union[CustomModel, OtherModel])) + assert isinstance(obj, str) + assert obj == "foo" + + +@pytest.mark.asyncio +@pytest.mark.parametrize("async_client", [False], indirect=True) # loose validation +async def test_async_response_parse_expect_model_union_non_json_content(async_client: AsyncOpenAI) -> None: + response = AsyncAPIResponse( + raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}), + client=async_client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + obj = await response.parse(to=cast(Any, Union[CustomModel, OtherModel])) + assert isinstance(obj, str) + assert obj == "foo" From c21c7a353cba403d9e81c22df595d54558824ce2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 20 Aug 2024 01:49:10 +0000 Subject: [PATCH 065/192] chore(ci): also run pydantic v1 tests (#1666) --- scripts/test | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/test b/scripts/test index b3ace9013b..4fa5698b8f 100755 --- a/scripts/test +++ b/scripts/test @@ -54,3 +54,6 @@ fi echo "==> Running tests" rye run pytest "$@" + +echo "==> Running Pydantic v1 tests" +rye run nox -s test-pydantic-v1 -- "$@" From 71fdd9836d5d0c78083f6212533404e9a27e0d75 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 29 Aug 2024 16:08:57 +0000 Subject: [PATCH 066/192] feat(api): add file search result details to run steps (#1681) --- .stats.yml | 2 +- api.md | 3 +- .../resources/beta/threads/runs/runs.py | 71 ++++++++++++++++++- .../resources/beta/threads/runs/steps.py | 59 +++++++++++++-- src/openai/types/beta/file_search_tool.py | 26 ++++++- .../types/beta/file_search_tool_param.py | 26 ++++++- .../types/beta/threads/run_create_params.py | 15 +++- .../types/beta/threads/runs/__init__.py | 2 + 
.../threads/runs/file_search_tool_call.py | 58 ++++++++++++++- .../beta/threads/runs/run_step_include.py | 7 ++ .../beta/threads/runs/step_list_params.py | 15 ++++ .../beta/threads/runs/step_retrieve_params.py | 28 ++++++++ .../beta/threads/runs/test_steps.py | 22 ++++++ tests/api_resources/beta/threads/test_runs.py | 4 ++ 14 files changed, 322 insertions(+), 16 deletions(-) create mode 100644 src/openai/types/beta/threads/runs/run_step_include.py create mode 100644 src/openai/types/beta/threads/runs/step_retrieve_params.py diff --git a/.stats.yml b/.stats.yml index e9aeeaaeff..fd4f271361 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8e569a23f15a599dd4aee8a53431962bcba4985ab6cfb66c53c1434b99026b37.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-1dbac0e95bdb5a89a0dd3d93265475a378214551b7d8c22862928e0d87ace94b.yml diff --git a/api.md b/api.md index 2e766b7ca4..e5ae2ad169 100644 --- a/api.md +++ b/api.md @@ -349,6 +349,7 @@ from openai.types.beta.threads.runs import ( RunStepDelta, RunStepDeltaEvent, RunStepDeltaMessageDelta, + RunStepInclude, ToolCall, ToolCallDelta, ToolCallDeltaObject, @@ -358,7 +359,7 @@ from openai.types.beta.threads.runs import ( Methods: -- client.beta.threads.runs.steps.retrieve(step_id, \*, thread_id, run_id) -> RunStep +- client.beta.threads.runs.steps.retrieve(step_id, \*, thread_id, run_id, \*\*params) -> RunStep - client.beta.threads.runs.steps.list(run_id, \*, thread_id, \*\*params) -> SyncCursorPage[RunStep] ### Messages diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index d84a7161aa..2d2eed44df 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Union, Iterable, Optional, overload +from typing import List, Union, Iterable, Optional, overload from typing_extensions import Literal import httpx @@ -38,6 +38,7 @@ from .....types.beta.threads.run import Run from .....types.beta.assistant_tool_param import AssistantToolParam from .....types.beta.assistant_stream_event import AssistantStreamEvent +from .....types.beta.threads.runs.run_step_include import RunStepInclude from .....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam from .....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -63,6 +64,7 @@ def create( thread_id: str, *, assistant_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -93,6 +95,14 @@ def create( [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + additional_instructions: Appends additional instructions at the end of the instructions for the run. 
This is useful for modifying the behavior on a per-run basis without overriding other instructions. @@ -195,6 +205,7 @@ def create( *, assistant_id: str, stream: Literal[True], + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -228,6 +239,14 @@ def create( events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + additional_instructions: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. @@ -326,6 +345,7 @@ def create( *, assistant_id: str, stream: bool, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -359,6 +379,14 @@ def create( events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + additional_instructions: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. @@ -456,6 +484,7 @@ def create( thread_id: str, *, assistant_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -505,7 +534,11 @@ def create( run_create_params.RunCreateParams, ), options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"include": include}, run_create_params.RunCreateParams), ), cast_to=Run, stream=stream or False, @@ -868,6 +901,7 @@ async def create( thread_id: str, *, assistant_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -898,6 +932,14 @@ async def create( [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. + include: A list of additional fields to include in the response. 
Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + additional_instructions: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. @@ -1000,6 +1042,7 @@ async def create( *, assistant_id: str, stream: Literal[True], + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -1033,6 +1076,14 @@ async def create( events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + additional_instructions: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. @@ -1131,6 +1182,7 @@ async def create( *, assistant_id: str, stream: bool, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -1164,6 +1216,14 @@ async def create( events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + additional_instructions: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. 
@@ -1261,6 +1321,7 @@ async def create( thread_id: str, *, assistant_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -1310,7 +1371,11 @@ async def create( run_create_params.RunCreateParams, ), options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform({"include": include}, run_create_params.RunCreateParams), ), cast_to=Run, stream=stream or False, diff --git a/src/openai/resources/beta/threads/runs/steps.py b/src/openai/resources/beta/threads/runs/steps.py index 96b16dfa0a..3d2d40a3fb 100644 --- a/src/openai/resources/beta/threads/runs/steps.py +++ b/src/openai/resources/beta/threads/runs/steps.py @@ -2,20 +2,25 @@ from __future__ import annotations +from typing import List from typing_extensions import Literal import httpx from ..... import _legacy_response from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ....._utils import maybe_transform +from ....._utils import ( + maybe_transform, + async_maybe_transform, +) from ....._compat import cached_property from ....._resource import SyncAPIResource, AsyncAPIResource from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from .....pagination import SyncCursorPage, AsyncCursorPage from ....._base_client import AsyncPaginator, make_request_options -from .....types.beta.threads.runs import step_list_params +from .....types.beta.threads.runs import step_list_params, step_retrieve_params from .....types.beta.threads.runs.run_step import RunStep +from .....types.beta.threads.runs.run_step_include import RunStepInclude __all__ = ["Steps", "AsyncSteps"] @@ -35,6 +40,7 @@ def retrieve( *, thread_id: str, run_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -46,6 +52,14 @@ def retrieve( Retrieves a run step. Args: + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. 
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -64,7 +78,11 @@ def retrieve( return self._get( f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams), ), cast_to=RunStep, ) @@ -76,6 +94,7 @@ def list( thread_id: str, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -99,6 +118,14 @@ def list( ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. @@ -130,6 +157,7 @@ def list( { "after": after, "before": before, + "include": include, "limit": limit, "order": order, }, @@ -155,6 +183,7 @@ async def retrieve( *, thread_id: str, run_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -166,6 +195,14 @@ async def retrieve( Retrieves a run step. Args: + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -184,7 +221,11 @@ async def retrieve( return await self._get( f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams), ), cast_to=RunStep, ) @@ -196,6 +237,7 @@ def list( thread_id: str, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
@@ -219,6 +261,14 @@ def list( ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. @@ -250,6 +300,7 @@ def list( { "after": after, "before": before, + "include": include, "limit": limit, "order": order, }, diff --git a/src/openai/types/beta/file_search_tool.py b/src/openai/types/beta/file_search_tool.py index 26ab1cb83f..4015b3da09 100644 --- a/src/openai/types/beta/file_search_tool.py +++ b/src/openai/types/beta/file_search_tool.py @@ -5,7 +5,21 @@ from ..._models import BaseModel -__all__ = ["FileSearchTool", "FileSearch"] +__all__ = ["FileSearchTool", "FileSearch", "FileSearchRankingOptions"] + + +class FileSearchRankingOptions(BaseModel): + ranker: Optional[Literal["auto", "default_2024_08_21"]] = None + """The ranker to use for the file search. + + If not specified will use the `auto` ranker. + """ + + score_threshold: Optional[float] = None + """The score threshold for the file search. + + All values must be a floating point number between 0 and 1. + """ class FileSearch(BaseModel): @@ -17,7 +31,15 @@ class FileSearch(BaseModel): Note that the file search tool may output fewer than `max_num_results` results. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + """ + + ranking_options: Optional[FileSearchRankingOptions] = None + """The ranking options for the file search. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. """ diff --git a/src/openai/types/beta/file_search_tool_param.py b/src/openai/types/beta/file_search_tool_param.py index 666719f8cd..97e651b0da 100644 --- a/src/openai/types/beta/file_search_tool_param.py +++ b/src/openai/types/beta/file_search_tool_param.py @@ -4,7 +4,21 @@ from typing_extensions import Literal, Required, TypedDict -__all__ = ["FileSearchToolParam", "FileSearch"] +__all__ = ["FileSearchToolParam", "FileSearch", "FileSearchRankingOptions"] + + +class FileSearchRankingOptions(TypedDict, total=False): + ranker: Literal["auto", "default_2024_08_21"] + """The ranker to use for the file search. + + If not specified will use the `auto` ranker. + """ + + score_threshold: float + """The score threshold for the file search. + + All values must be a floating point number between 0 and 1. + """ class FileSearch(TypedDict, total=False): @@ -16,7 +30,15 @@ class FileSearch(TypedDict, total=False): Note that the file search tool may output fewer than `max_num_results` results. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. 
+ """ + + ranking_options: FileSearchRankingOptions + """The ranking options for the file search. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. """ diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index d3e6d9c476..8bb73ddc78 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -2,11 +2,12 @@ from __future__ import annotations -from typing import Union, Iterable, Optional +from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict from ...chat_model import ChatModel from ..assistant_tool_param import AssistantToolParam +from .runs.run_step_include import RunStepInclude from .message_content_part_param import MessageContentPartParam from ..code_interpreter_tool_param import CodeInterpreterToolParam from ..assistant_tool_choice_option_param import AssistantToolChoiceOptionParam @@ -32,6 +33,18 @@ class RunCreateParamsBase(TypedDict, total=False): execute this run. """ + include: List[RunStepInclude] + """A list of additional fields to include in the response. + + Currently the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + """ + additional_instructions: Optional[str] """Appends additional instructions at the end of the instructions for the run. diff --git a/src/openai/types/beta/threads/runs/__init__.py b/src/openai/types/beta/threads/runs/__init__.py index a312ce3df2..467d5d793d 100644 --- a/src/openai/types/beta/threads/runs/__init__.py +++ b/src/openai/types/beta/threads/runs/__init__.py @@ -6,9 +6,11 @@ from .tool_call import ToolCall as ToolCall from .run_step_delta import RunStepDelta as RunStepDelta from .tool_call_delta import ToolCallDelta as ToolCallDelta +from .run_step_include import RunStepInclude as RunStepInclude from .step_list_params import StepListParams as StepListParams from .function_tool_call import FunctionToolCall as FunctionToolCall from .run_step_delta_event import RunStepDeltaEvent as RunStepDeltaEvent +from .step_retrieve_params import StepRetrieveParams as StepRetrieveParams from .code_interpreter_logs import CodeInterpreterLogs as CodeInterpreterLogs from .file_search_tool_call import FileSearchToolCall as FileSearchToolCall from .tool_call_delta_object import ToolCallDeltaObject as ToolCallDeltaObject diff --git a/src/openai/types/beta/threads/runs/file_search_tool_call.py b/src/openai/types/beta/threads/runs/file_search_tool_call.py index 57c0ca9a90..da4d58dc37 100644 --- a/src/openai/types/beta/threads/runs/file_search_tool_call.py +++ b/src/openai/types/beta/threads/runs/file_search_tool_call.py @@ -1,17 +1,71 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+from typing import List, Optional from typing_extensions import Literal from ....._models import BaseModel -__all__ = ["FileSearchToolCall"] +__all__ = [ + "FileSearchToolCall", + "FileSearch", + "FileSearchRankingOptions", + "FileSearchResult", + "FileSearchResultContent", +] + + +class FileSearchRankingOptions(BaseModel): + ranker: Literal["default_2024_08_21"] + """The ranker used for the file search.""" + + score_threshold: float + """The score threshold for the file search. + + All values must be a floating point number between 0 and 1. + """ + + +class FileSearchResultContent(BaseModel): + text: Optional[str] = None + """The text content of the file.""" + + type: Optional[Literal["text"]] = None + """The type of the content.""" + + +class FileSearchResult(BaseModel): + file_id: str + """The ID of the file that result was found in.""" + + file_name: str + """The name of the file that result was found in.""" + + score: float + """The score of the result. + + All values must be a floating point number between 0 and 1. + """ + + content: Optional[List[FileSearchResultContent]] = None + """The content of the result that was found. + + The content is only included if requested via the include query parameter. + """ + + +class FileSearch(BaseModel): + ranking_options: Optional[FileSearchRankingOptions] = None + """The ranking options for the file search.""" + + results: Optional[List[FileSearchResult]] = None + """The results of the file search.""" class FileSearchToolCall(BaseModel): id: str """The ID of the tool call object.""" - file_search: object + file_search: FileSearch """For now, this is always going to be an empty object.""" type: Literal["file_search"] diff --git a/src/openai/types/beta/threads/runs/run_step_include.py b/src/openai/types/beta/threads/runs/run_step_include.py new file mode 100644 index 0000000000..8e76c1b716 --- /dev/null +++ b/src/openai/types/beta/threads/runs/run_step_include.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["RunStepInclude"] + +RunStepInclude: TypeAlias = Literal["step_details.tool_calls[*].file_search.results[*].content"] diff --git a/src/openai/types/beta/threads/runs/step_list_params.py b/src/openai/types/beta/threads/runs/step_list_params.py index 606d444539..3931bd7e0c 100644 --- a/src/openai/types/beta/threads/runs/step_list_params.py +++ b/src/openai/types/beta/threads/runs/step_list_params.py @@ -2,8 +2,11 @@ from __future__ import annotations +from typing import List from typing_extensions import Literal, Required, TypedDict +from .run_step_include import RunStepInclude + __all__ = ["StepListParams"] @@ -28,6 +31,18 @@ class StepListParams(TypedDict, total=False): of the list. """ + include: List[RunStepInclude] + """A list of additional fields to include in the response. + + Currently the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + """ + limit: int """A limit on the number of objects to be returned. 
diff --git a/src/openai/types/beta/threads/runs/step_retrieve_params.py b/src/openai/types/beta/threads/runs/step_retrieve_params.py new file mode 100644 index 0000000000..22c1c049f4 --- /dev/null +++ b/src/openai/types/beta/threads/runs/step_retrieve_params.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Required, TypedDict + +from .run_step_include import RunStepInclude + +__all__ = ["StepRetrieveParams"] + + +class StepRetrieveParams(TypedDict, total=False): + thread_id: Required[str] + + run_id: Required[str] + + include: List[RunStepInclude] + """A list of additional fields to include in the response. + + Currently the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + """ diff --git a/tests/api_resources/beta/threads/runs/test_steps.py b/tests/api_resources/beta/threads/runs/test_steps.py index d5edeb823e..ea3e682158 100644 --- a/tests/api_resources/beta/threads/runs/test_steps.py +++ b/tests/api_resources/beta/threads/runs/test_steps.py @@ -27,6 +27,16 @@ def test_method_retrieve(self, client: OpenAI) -> None: ) assert_matches_type(RunStep, step, path=["response"]) + @parametrize + def test_method_retrieve_with_all_params(self, client: OpenAI) -> None: + step = client.beta.threads.runs.steps.retrieve( + step_id="step_id", + thread_id="thread_id", + run_id="run_id", + include=["step_details.tool_calls[*].file_search.results[*].content"], + ) + assert_matches_type(RunStep, step, path=["response"]) + @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.beta.threads.runs.steps.with_raw_response.retrieve( @@ -93,6 +103,7 @@ def test_method_list_with_all_params(self, client: OpenAI) -> None: thread_id="thread_id", after="after", before="before", + include=["step_details.tool_calls[*].file_search.results[*].content"], limit=0, order="asc", ) @@ -151,6 +162,16 @@ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: ) assert_matches_type(RunStep, step, path=["response"]) + @parametrize + async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None: + step = await async_client.beta.threads.runs.steps.retrieve( + step_id="step_id", + thread_id="thread_id", + run_id="run_id", + include=["step_details.tool_calls[*].file_search.results[*].content"], + ) + assert_matches_type(RunStep, step, path=["response"]) + @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.steps.with_raw_response.retrieve( @@ -217,6 +238,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> N thread_id="thread_id", after="after", before="before", + include=["step_details.tool_calls[*].file_search.results[*].content"], limit=0, order="asc", ) diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py index 0c7ff2f146..cb0718ede7 100644 --- a/tests/api_resources/beta/threads/test_runs.py +++ b/tests/api_resources/beta/threads/test_runs.py @@ -33,6 +33,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: run = client.beta.threads.runs.create( 
thread_id="thread_id", assistant_id="assistant_id", + include=["step_details.tool_calls[*].file_search.results[*].content"], additional_instructions="additional_instructions", additional_messages=[ { @@ -197,6 +198,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: thread_id="thread_id", assistant_id="assistant_id", stream=True, + include=["step_details.tool_calls[*].file_search.results[*].content"], additional_instructions="additional_instructions", additional_messages=[ { @@ -699,6 +701,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn run = await async_client.beta.threads.runs.create( thread_id="thread_id", assistant_id="assistant_id", + include=["step_details.tool_calls[*].file_search.results[*].content"], additional_instructions="additional_instructions", additional_messages=[ { @@ -863,6 +866,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn thread_id="thread_id", assistant_id="assistant_id", stream=True, + include=["step_details.tool_calls[*].file_search.results[*].content"], additional_instructions="additional_instructions", additional_messages=[ { From 8e07457c2ba8b8244ad831ce194e30657a666e29 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 19:14:05 +0000 Subject: [PATCH 067/192] chore: pyproject.toml formatting changes (#1687) --- pyproject.toml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c2ca31abaa..ab8cf5cf38 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,6 @@ dependencies = [ "distro>=1.7.0, <2", "sniffio", "cached-property; python_version < '3.8'", - ] requires-python = ">= 3.7.1" classifiers = [ @@ -36,8 +35,6 @@ classifiers = [ "License :: OSI Approved :: Apache Software License" ] - - [project.urls] Homepage = "https://github.com/openai/openai-python" Repository = "https://github.com/openai/openai-python" @@ -59,7 +56,6 @@ dev-dependencies = [ "dirty-equals>=0.6.0", "importlib-metadata>=6.7.0", "rich>=13.7.1", - ] [tool.rye.scripts] From 5f9435b735fac7a2b539a886b44ce61159d5552a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 19:26:51 +0000 Subject: [PATCH 068/192] feat(vector store): improve chunking strategy type names (#1690) --- .stats.yml | 2 +- api.md | 12 ++++- .../beta/vector_stores/file_batches.py | 10 ++-- .../resources/beta/vector_stores/files.py | 10 ++-- .../beta/vector_stores/vector_stores.py | 12 +++-- src/openai/types/beta/__init__.py | 7 +++ .../types/beta/assistant_create_params.py | 44 ++-------------- .../beta/auto_file_chunking_strategy_param.py | 12 +++++ .../types/beta/file_chunking_strategy.py | 14 +++++ .../beta/file_chunking_strategy_param.py | 13 +++++ .../other_file_chunking_strategy_object.py | 12 +++++ .../beta/static_file_chunking_strategy.py | 22 ++++++++ .../static_file_chunking_strategy_object.py | 15 ++++++ .../static_file_chunking_strategy_param.py | 22 ++++++++ .../beta/thread_create_and_run_params.py | 43 ++-------------- src/openai/types/beta/thread_create_params.py | 42 ++------------- .../types/beta/vector_store_create_params.py | 47 +++-------------- .../vector_stores/file_batch_create_params.py | 51 +++---------------- .../beta/vector_stores/file_create_params.py | 50 +++--------------- .../beta/vector_stores/vector_store_file.py | 49 ++---------------- 20 files changed, 189 
insertions(+), 300 deletions(-) create mode 100644 src/openai/types/beta/auto_file_chunking_strategy_param.py create mode 100644 src/openai/types/beta/file_chunking_strategy.py create mode 100644 src/openai/types/beta/file_chunking_strategy_param.py create mode 100644 src/openai/types/beta/other_file_chunking_strategy_object.py create mode 100644 src/openai/types/beta/static_file_chunking_strategy.py create mode 100644 src/openai/types/beta/static_file_chunking_strategy_object.py create mode 100644 src/openai/types/beta/static_file_chunking_strategy_param.py diff --git a/.stats.yml b/.stats.yml index fd4f271361..903c159960 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-1dbac0e95bdb5a89a0dd3d93265475a378214551b7d8c22862928e0d87ace94b.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-85a85e0c08de456441431c0ae4e9c078cc8f9748c29430b9a9058340db6389ee.yml diff --git a/api.md b/api.md index e5ae2ad169..8267f03967 100644 --- a/api.md +++ b/api.md @@ -221,7 +221,17 @@ Methods: Types: ```python -from openai.types.beta import VectorStore, VectorStoreDeleted +from openai.types.beta import ( + AutoFileChunkingStrategyParam, + FileChunkingStrategy, + FileChunkingStrategyParam, + OtherFileChunkingStrategyObject, + StaticFileChunkingStrategy, + StaticFileChunkingStrategyObject, + StaticFileChunkingStrategyParam, + VectorStore, + VectorStoreDeleted, +) ``` Methods: diff --git a/src/openai/resources/beta/vector_stores/file_batches.py b/src/openai/resources/beta/vector_stores/file_batches.py index a3ddf84b1d..54b26ee0eb 100644 --- a/src/openai/resources/beta/vector_stores/file_batches.py +++ b/src/openai/resources/beta/vector_stores/file_batches.py @@ -17,8 +17,10 @@ from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ....pagination import SyncCursorPage, AsyncCursorPage +from ....types.beta import FileChunkingStrategyParam from ...._base_client import AsyncPaginator, make_request_options from ....types.beta.vector_stores import file_batch_create_params, file_batch_list_files_params +from ....types.beta.file_chunking_strategy_param import FileChunkingStrategyParam from ....types.beta.vector_stores.vector_store_file import VectorStoreFile from ....types.beta.vector_stores.vector_store_file_batch import VectorStoreFileBatch @@ -39,7 +41,7 @@ def create( vector_store_id: str, *, file_ids: List[str], - chunking_strategy: file_batch_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -56,7 +58,7 @@ def create( files. chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` - strategy. + strategy. Only applicable if `file_ids` is non-empty. 
extra_headers: Send extra headers @@ -249,7 +251,7 @@ async def create( vector_store_id: str, *, file_ids: List[str], - chunking_strategy: file_batch_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -266,7 +268,7 @@ async def create( files. chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` - strategy. + strategy. Only applicable if `file_ids` is non-empty. extra_headers: Send extra headers diff --git a/src/openai/resources/beta/vector_stores/files.py b/src/openai/resources/beta/vector_stores/files.py index 16bfd2d66f..53d34366cf 100644 --- a/src/openai/resources/beta/vector_stores/files.py +++ b/src/openai/resources/beta/vector_stores/files.py @@ -16,8 +16,10 @@ from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ....pagination import SyncCursorPage, AsyncCursorPage +from ....types.beta import FileChunkingStrategyParam from ...._base_client import AsyncPaginator, make_request_options from ....types.beta.vector_stores import file_list_params, file_create_params +from ....types.beta.file_chunking_strategy_param import FileChunkingStrategyParam from ....types.beta.vector_stores.vector_store_file import VectorStoreFile from ....types.beta.vector_stores.vector_store_file_deleted import VectorStoreFileDeleted @@ -38,7 +40,7 @@ def create( vector_store_id: str, *, file_id: str, - chunking_strategy: file_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -57,7 +59,7 @@ def create( files. chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` - strategy. + strategy. Only applicable if `file_ids` is non-empty. extra_headers: Send extra headers @@ -249,7 +251,7 @@ async def create( vector_store_id: str, *, file_id: str, - chunking_strategy: file_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -268,7 +270,7 @@ async def create( files. chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` - strategy. + strategy. Only applicable if `file_ids` is non-empty. 
extra_headers: Send extra headers diff --git a/src/openai/resources/beta/vector_stores/vector_stores.py b/src/openai/resources/beta/vector_stores/vector_stores.py index 58374a9572..c93b3bc41f 100644 --- a/src/openai/resources/beta/vector_stores/vector_stores.py +++ b/src/openai/resources/beta/vector_stores/vector_stores.py @@ -33,10 +33,16 @@ AsyncFileBatchesWithStreamingResponse, ) from ....pagination import SyncCursorPage, AsyncCursorPage -from ....types.beta import vector_store_list_params, vector_store_create_params, vector_store_update_params +from ....types.beta import ( + FileChunkingStrategyParam, + vector_store_list_params, + vector_store_create_params, + vector_store_update_params, +) from ...._base_client import AsyncPaginator, make_request_options from ....types.beta.vector_store import VectorStore from ....types.beta.vector_store_deleted import VectorStoreDeleted +from ....types.beta.file_chunking_strategy_param import FileChunkingStrategyParam __all__ = ["VectorStores", "AsyncVectorStores"] @@ -61,7 +67,7 @@ def with_streaming_response(self) -> VectorStoresWithStreamingResponse: def create( self, *, - chunking_strategy: vector_store_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, @@ -328,7 +334,7 @@ def with_streaming_response(self) -> AsyncVectorStoresWithStreamingResponse: async def create( self, *, - chunking_strategy: vector_store_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, diff --git a/src/openai/types/beta/__init__.py b/src/openai/types/beta/__init__.py index 9c5ddfdbe0..7f76fed0cd 100644 --- a/src/openai/types/beta/__init__.py +++ b/src/openai/types/beta/__init__.py @@ -19,6 +19,7 @@ from .assistant_tool_choice import AssistantToolChoice as AssistantToolChoice from .code_interpreter_tool import CodeInterpreterTool as CodeInterpreterTool from .assistant_stream_event import AssistantStreamEvent as AssistantStreamEvent +from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam from .assistant_create_params import AssistantCreateParams as AssistantCreateParams from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams @@ -28,11 +29,17 @@ from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam from .code_interpreter_tool_param import CodeInterpreterToolParam as CodeInterpreterToolParam from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption +from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam from .thread_create_and_run_params import ThreadCreateAndRunParams as ThreadCreateAndRunParams +from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy from .assistant_tool_choice_function import AssistantToolChoiceFunction as AssistantToolChoiceFunction from .assistant_response_format_option import AssistantResponseFormatOption as AssistantResponseFormatOption +from 
.auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam as AssistantToolChoiceOptionParam +from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject +from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam as AssistantToolChoiceFunctionParam +from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject from .assistant_response_format_option_param import ( AssistantResponseFormatOptionParam as AssistantResponseFormatOptionParam, ) diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index 84cd4425d1..c1360b5b66 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -3,10 +3,11 @@ from __future__ import annotations from typing import List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict +from typing_extensions import Required, TypedDict from ..chat_model import ChatModel from .assistant_tool_param import AssistantToolParam +from .file_chunking_strategy_param import FileChunkingStrategyParam from .assistant_response_format_option_param import AssistantResponseFormatOptionParam __all__ = [ @@ -15,10 +16,6 @@ "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", "ToolResourcesFileSearchVectorStore", - "ToolResourcesFileSearchVectorStoreChunkingStrategy", - "ToolResourcesFileSearchVectorStoreChunkingStrategyAuto", - "ToolResourcesFileSearchVectorStoreChunkingStrategyStatic", - "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic", ] @@ -118,43 +115,12 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False): """ -class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): - type: Required[Literal["auto"]] - """Always `auto`.""" - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False): - chunk_overlap_tokens: Required[int] - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - """ - - max_chunk_size_tokens: Required[int] - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. - """ - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False): - static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic] - - type: Required[Literal["static"]] - """Always `static`.""" - - -ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ - ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic -] - - class ToolResourcesFileSearchVectorStore(TypedDict, total=False): - chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy + chunking_strategy: FileChunkingStrategyParam """The chunking strategy used to chunk the file(s). - If not set, will use the `auto` strategy. + If not set, will use the `auto` strategy. Only applicable if `file_ids` is + non-empty. 
""" file_ids: List[str] diff --git a/src/openai/types/beta/auto_file_chunking_strategy_param.py b/src/openai/types/beta/auto_file_chunking_strategy_param.py new file mode 100644 index 0000000000..6f17836bac --- /dev/null +++ b/src/openai/types/beta/auto_file_chunking_strategy_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["AutoFileChunkingStrategyParam"] + + +class AutoFileChunkingStrategyParam(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" diff --git a/src/openai/types/beta/file_chunking_strategy.py b/src/openai/types/beta/file_chunking_strategy.py new file mode 100644 index 0000000000..406d69dd0e --- /dev/null +++ b/src/openai/types/beta/file_chunking_strategy.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ..._utils import PropertyInfo +from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject +from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject + +__all__ = ["FileChunkingStrategy"] + +FileChunkingStrategy: TypeAlias = Annotated[ + Union[StaticFileChunkingStrategyObject, OtherFileChunkingStrategyObject], PropertyInfo(discriminator="type") +] diff --git a/src/openai/types/beta/file_chunking_strategy_param.py b/src/openai/types/beta/file_chunking_strategy_param.py new file mode 100644 index 0000000000..46383358e5 --- /dev/null +++ b/src/openai/types/beta/file_chunking_strategy_param.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam +from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam + +__all__ = ["FileChunkingStrategyParam"] + +FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyParam] diff --git a/src/openai/types/beta/other_file_chunking_strategy_object.py b/src/openai/types/beta/other_file_chunking_strategy_object.py new file mode 100644 index 0000000000..89da560be4 --- /dev/null +++ b/src/openai/types/beta/other_file_chunking_strategy_object.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["OtherFileChunkingStrategyObject"] + + +class OtherFileChunkingStrategyObject(BaseModel): + type: Literal["other"] + """Always `other`.""" diff --git a/src/openai/types/beta/static_file_chunking_strategy.py b/src/openai/types/beta/static_file_chunking_strategy.py new file mode 100644 index 0000000000..ba80e1a2b9 --- /dev/null +++ b/src/openai/types/beta/static_file_chunking_strategy.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + + + +from ..._models import BaseModel + +__all__ = ["StaticFileChunkingStrategy"] + + +class StaticFileChunkingStrategy(BaseModel): + chunk_overlap_tokens: int + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. 
+ """ + + max_chunk_size_tokens: int + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ diff --git a/src/openai/types/beta/static_file_chunking_strategy_object.py b/src/openai/types/beta/static_file_chunking_strategy_object.py new file mode 100644 index 0000000000..896c4b8320 --- /dev/null +++ b/src/openai/types/beta/static_file_chunking_strategy_object.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel +from .static_file_chunking_strategy import StaticFileChunkingStrategy + +__all__ = ["StaticFileChunkingStrategyObject"] + + +class StaticFileChunkingStrategyObject(BaseModel): + static: StaticFileChunkingStrategy + + type: Literal["static"] + """Always `static`.""" diff --git a/src/openai/types/beta/static_file_chunking_strategy_param.py b/src/openai/types/beta/static_file_chunking_strategy_param.py new file mode 100644 index 0000000000..f917ac5647 --- /dev/null +++ b/src/openai/types/beta/static_file_chunking_strategy_param.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["StaticFileChunkingStrategyParam"] + + +class StaticFileChunkingStrategyParam(TypedDict, total=False): + chunk_overlap_tokens: Required[int] + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: Required[int] + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. 
+ """ diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index 7490b25ef3..cd3d9f29d4 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -9,6 +9,7 @@ from .function_tool_param import FunctionToolParam from .file_search_tool_param import FileSearchToolParam from .code_interpreter_tool_param import CodeInterpreterToolParam +from .file_chunking_strategy_param import FileChunkingStrategyParam from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam from .threads.message_content_part_param import MessageContentPartParam from .assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -24,10 +25,6 @@ "ThreadToolResourcesCodeInterpreter", "ThreadToolResourcesFileSearch", "ThreadToolResourcesFileSearchVectorStore", - "ThreadToolResourcesFileSearchVectorStoreChunkingStrategy", - "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto", - "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic", - "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", @@ -218,44 +215,12 @@ class ThreadToolResourcesCodeInterpreter(TypedDict, total=False): """ -class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): - type: Required[Literal["auto"]] - """Always `auto`.""" - - -class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False): - chunk_overlap_tokens: Required[int] - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - """ - - max_chunk_size_tokens: Required[int] - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. - """ - - -class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False): - static: Required[ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic] - - type: Required[Literal["static"]] - """Always `static`.""" - - -ThreadToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ - ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto, - ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic, -] - - class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False): - chunking_strategy: ThreadToolResourcesFileSearchVectorStoreChunkingStrategy + chunking_strategy: FileChunkingStrategyParam """The chunking strategy used to chunk the file(s). - If not set, will use the `auto` strategy. + If not set, will use the `auto` strategy. Only applicable if `file_ids` is + non-empty. 
""" file_ids: List[str] diff --git a/src/openai/types/beta/thread_create_params.py b/src/openai/types/beta/thread_create_params.py index f9561aa48c..729164b481 100644 --- a/src/openai/types/beta/thread_create_params.py +++ b/src/openai/types/beta/thread_create_params.py @@ -6,6 +6,7 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict from .code_interpreter_tool_param import CodeInterpreterToolParam +from .file_chunking_strategy_param import FileChunkingStrategyParam from .threads.message_content_part_param import MessageContentPartParam __all__ = [ @@ -18,10 +19,6 @@ "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", "ToolResourcesFileSearchVectorStore", - "ToolResourcesFileSearchVectorStoreChunkingStrategy", - "ToolResourcesFileSearchVectorStoreChunkingStrategyAuto", - "ToolResourcesFileSearchVectorStoreChunkingStrategyStatic", - "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic", ] @@ -99,43 +96,12 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False): """ -class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): - type: Required[Literal["auto"]] - """Always `auto`.""" - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False): - chunk_overlap_tokens: Required[int] - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - """ - - max_chunk_size_tokens: Required[int] - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. - """ - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False): - static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic] - - type: Required[Literal["static"]] - """Always `static`.""" - - -ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ - ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic -] - - class ToolResourcesFileSearchVectorStore(TypedDict, total=False): - chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy + chunking_strategy: FileChunkingStrategyParam """The chunking strategy used to chunk the file(s). - If not set, will use the `auto` strategy. + If not set, will use the `auto` strategy. Only applicable if `file_ids` is + non-empty. """ file_ids: List[str] diff --git a/src/openai/types/beta/vector_store_create_params.py b/src/openai/types/beta/vector_store_create_params.py index 4f74af49f8..a8f03a89b9 100644 --- a/src/openai/types/beta/vector_store_create_params.py +++ b/src/openai/types/beta/vector_store_create_params.py @@ -2,21 +2,16 @@ from __future__ import annotations -from typing import List, Union, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict +from typing import List, Optional +from typing_extensions import Literal, Required, TypedDict -__all__ = [ - "VectorStoreCreateParams", - "ChunkingStrategy", - "ChunkingStrategyAuto", - "ChunkingStrategyStatic", - "ChunkingStrategyStaticStatic", - "ExpiresAfter", -] +from .file_chunking_strategy_param import FileChunkingStrategyParam + +__all__ = ["VectorStoreCreateParams", "ExpiresAfter"] class VectorStoreCreateParams(TypedDict, total=False): - chunking_strategy: ChunkingStrategy + chunking_strategy: FileChunkingStrategyParam """The chunking strategy used to chunk the file(s). 
If not set, will use the `auto` strategy. Only applicable if `file_ids` is @@ -45,36 +40,6 @@ class VectorStoreCreateParams(TypedDict, total=False): """The name of the vector store.""" -class ChunkingStrategyAuto(TypedDict, total=False): - type: Required[Literal["auto"]] - """Always `auto`.""" - - -class ChunkingStrategyStaticStatic(TypedDict, total=False): - chunk_overlap_tokens: Required[int] - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - """ - - max_chunk_size_tokens: Required[int] - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. - """ - - -class ChunkingStrategyStatic(TypedDict, total=False): - static: Required[ChunkingStrategyStaticStatic] - - type: Required[Literal["static"]] - """Always `static`.""" - - -ChunkingStrategy: TypeAlias = Union[ChunkingStrategyAuto, ChunkingStrategyStatic] - - class ExpiresAfter(TypedDict, total=False): anchor: Required[Literal["last_active_at"]] """Anchor timestamp after which the expiration policy applies. diff --git a/src/openai/types/beta/vector_stores/file_batch_create_params.py b/src/openai/types/beta/vector_stores/file_batch_create_params.py index e1c3303cf3..e42ea99cd1 100644 --- a/src/openai/types/beta/vector_stores/file_batch_create_params.py +++ b/src/openai/types/beta/vector_stores/file_batch_create_params.py @@ -2,16 +2,12 @@ from __future__ import annotations -from typing import List, Union -from typing_extensions import Literal, Required, TypeAlias, TypedDict +from typing import List +from typing_extensions import Required, TypedDict -__all__ = [ - "FileBatchCreateParams", - "ChunkingStrategy", - "ChunkingStrategyAutoChunkingStrategyRequestParam", - "ChunkingStrategyStaticChunkingStrategyRequestParam", - "ChunkingStrategyStaticChunkingStrategyRequestParamStatic", -] +from ..file_chunking_strategy_param import FileChunkingStrategyParam + +__all__ = ["FileBatchCreateParams"] class FileBatchCreateParams(TypedDict, total=False): @@ -22,40 +18,9 @@ class FileBatchCreateParams(TypedDict, total=False): files. """ - chunking_strategy: ChunkingStrategy + chunking_strategy: FileChunkingStrategyParam """The chunking strategy used to chunk the file(s). - If not set, will use the `auto` strategy. - """ - - -class ChunkingStrategyAutoChunkingStrategyRequestParam(TypedDict, total=False): - type: Required[Literal["auto"]] - """Always `auto`.""" - - -class ChunkingStrategyStaticChunkingStrategyRequestParamStatic(TypedDict, total=False): - chunk_overlap_tokens: Required[int] - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. + If not set, will use the `auto` strategy. Only applicable if `file_ids` is + non-empty. """ - - max_chunk_size_tokens: Required[int] - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. 
- """ - - -class ChunkingStrategyStaticChunkingStrategyRequestParam(TypedDict, total=False): - static: Required[ChunkingStrategyStaticChunkingStrategyRequestParamStatic] - - type: Required[Literal["static"]] - """Always `static`.""" - - -ChunkingStrategy: TypeAlias = Union[ - ChunkingStrategyAutoChunkingStrategyRequestParam, ChunkingStrategyStaticChunkingStrategyRequestParam -] diff --git a/src/openai/types/beta/vector_stores/file_create_params.py b/src/openai/types/beta/vector_stores/file_create_params.py index cfb80657c6..d074d766e6 100644 --- a/src/openai/types/beta/vector_stores/file_create_params.py +++ b/src/openai/types/beta/vector_stores/file_create_params.py @@ -2,16 +2,11 @@ from __future__ import annotations -from typing import Union -from typing_extensions import Literal, Required, TypeAlias, TypedDict +from typing_extensions import Required, TypedDict -__all__ = [ - "FileCreateParams", - "ChunkingStrategy", - "ChunkingStrategyAutoChunkingStrategyRequestParam", - "ChunkingStrategyStaticChunkingStrategyRequestParam", - "ChunkingStrategyStaticChunkingStrategyRequestParamStatic", -] +from ..file_chunking_strategy_param import FileChunkingStrategyParam + +__all__ = ["FileCreateParams"] class FileCreateParams(TypedDict, total=False): @@ -22,40 +17,9 @@ class FileCreateParams(TypedDict, total=False): files. """ - chunking_strategy: ChunkingStrategy + chunking_strategy: FileChunkingStrategyParam """The chunking strategy used to chunk the file(s). - If not set, will use the `auto` strategy. - """ - - -class ChunkingStrategyAutoChunkingStrategyRequestParam(TypedDict, total=False): - type: Required[Literal["auto"]] - """Always `auto`.""" - - -class ChunkingStrategyStaticChunkingStrategyRequestParamStatic(TypedDict, total=False): - chunk_overlap_tokens: Required[int] - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. + If not set, will use the `auto` strategy. Only applicable if `file_ids` is + non-empty. """ - - max_chunk_size_tokens: Required[int] - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. - """ - - -class ChunkingStrategyStaticChunkingStrategyRequestParam(TypedDict, total=False): - static: Required[ChunkingStrategyStaticChunkingStrategyRequestParamStatic] - - type: Required[Literal["static"]] - """Always `static`.""" - - -ChunkingStrategy: TypeAlias = Union[ - ChunkingStrategyAutoChunkingStrategyRequestParam, ChunkingStrategyStaticChunkingStrategyRequestParam -] diff --git a/src/openai/types/beta/vector_stores/vector_store_file.py b/src/openai/types/beta/vector_stores/vector_store_file.py index 65096e8dad..e4608e159c 100644 --- a/src/openai/types/beta/vector_stores/vector_store_file.py +++ b/src/openai/types/beta/vector_stores/vector_store_file.py @@ -1,19 +1,12 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import Union, Optional -from typing_extensions import Literal, Annotated, TypeAlias +from typing import Optional +from typing_extensions import Literal -from ...._utils import PropertyInfo from ...._models import BaseModel +from ..file_chunking_strategy import FileChunkingStrategy -__all__ = [ - "VectorStoreFile", - "LastError", - "ChunkingStrategy", - "ChunkingStrategyStatic", - "ChunkingStrategyStaticStatic", - "ChunkingStrategyOther", -] +__all__ = ["VectorStoreFile", "LastError"] class LastError(BaseModel): @@ -24,38 +17,6 @@ class LastError(BaseModel): """A human-readable description of the error.""" -class ChunkingStrategyStaticStatic(BaseModel): - chunk_overlap_tokens: int - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - """ - - max_chunk_size_tokens: int - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. - """ - - -class ChunkingStrategyStatic(BaseModel): - static: ChunkingStrategyStaticStatic - - type: Literal["static"] - """Always `static`.""" - - -class ChunkingStrategyOther(BaseModel): - type: Literal["other"] - """Always `other`.""" - - -ChunkingStrategy: TypeAlias = Annotated[ - Union[ChunkingStrategyStatic, ChunkingStrategyOther], PropertyInfo(discriminator="type") -] - - class VectorStoreFile(BaseModel): id: str """The identifier, which can be referenced in API endpoints.""" @@ -93,5 +54,5 @@ class VectorStoreFile(BaseModel): attached to. """ - chunking_strategy: Optional[ChunkingStrategy] = None + chunking_strategy: Optional[FileChunkingStrategy] = None """The strategy used to chunk the file.""" From 65753fd690b9a98ef3bbfa2193fbb0937dbdd867 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 14:31:32 +0000 Subject: [PATCH 069/192] chore: add docstrings to raw response properties (#1696) --- src/openai/resources/audio/audio.py | 22 +++++++++++++++++++ src/openai/resources/audio/speech.py | 22 +++++++++++++++++++ src/openai/resources/audio/transcriptions.py | 22 +++++++++++++++++++ src/openai/resources/audio/translations.py | 22 +++++++++++++++++++ src/openai/resources/batches.py | 22 +++++++++++++++++++ src/openai/resources/beta/assistants.py | 22 +++++++++++++++++++ src/openai/resources/beta/beta.py | 22 +++++++++++++++++++ src/openai/resources/beta/threads/messages.py | 22 +++++++++++++++++++ .../resources/beta/threads/runs/runs.py | 22 +++++++++++++++++++ .../resources/beta/threads/runs/steps.py | 22 +++++++++++++++++++ src/openai/resources/beta/threads/threads.py | 22 +++++++++++++++++++ .../beta/vector_stores/file_batches.py | 22 +++++++++++++++++++ .../resources/beta/vector_stores/files.py | 22 +++++++++++++++++++ .../beta/vector_stores/vector_stores.py | 22 +++++++++++++++++++ src/openai/resources/chat/chat.py | 22 +++++++++++++++++++ src/openai/resources/chat/completions.py | 22 +++++++++++++++++++ src/openai/resources/completions.py | 22 +++++++++++++++++++ src/openai/resources/embeddings.py | 22 +++++++++++++++++++ src/openai/resources/files.py | 22 +++++++++++++++++++ .../resources/fine_tuning/fine_tuning.py | 22 +++++++++++++++++++ .../resources/fine_tuning/jobs/checkpoints.py | 22 +++++++++++++++++++ src/openai/resources/fine_tuning/jobs/jobs.py | 22 +++++++++++++++++++ src/openai/resources/images.py | 22 +++++++++++++++++++ src/openai/resources/models.py | 22 
+++++++++++++++++++ src/openai/resources/moderations.py | 22 +++++++++++++++++++ src/openai/resources/uploads/parts.py | 22 +++++++++++++++++++ src/openai/resources/uploads/uploads.py | 22 +++++++++++++++++++ 27 files changed, 594 insertions(+) diff --git a/src/openai/resources/audio/audio.py b/src/openai/resources/audio/audio.py index 537ad573d0..18bd7b812c 100644 --- a/src/openai/resources/audio/audio.py +++ b/src/openai/resources/audio/audio.py @@ -47,10 +47,21 @@ def speech(self) -> Speech: @cached_property def with_raw_response(self) -> AudioWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AudioWithRawResponse(self) @cached_property def with_streaming_response(self) -> AudioWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AudioWithStreamingResponse(self) @@ -69,10 +80,21 @@ def speech(self) -> AsyncSpeech: @cached_property def with_raw_response(self) -> AsyncAudioWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncAudioWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncAudioWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncAudioWithStreamingResponse(self) diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py index a0df9ec487..6085ae8afe 100644 --- a/src/openai/resources/audio/speech.py +++ b/src/openai/resources/audio/speech.py @@ -31,10 +31,21 @@ class Speech(SyncAPIResource): @cached_property def with_raw_response(self) -> SpeechWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return SpeechWithRawResponse(self) @cached_property def with_streaming_response(self) -> SpeechWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return SpeechWithStreamingResponse(self) def create( @@ -104,10 +115,21 @@ def create( class AsyncSpeech(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncSpeechWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncSpeechWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncSpeechWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncSpeechWithStreamingResponse(self) async def create( diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py index 1ee962411c..a6009143d4 100644 --- a/src/openai/resources/audio/transcriptions.py +++ b/src/openai/resources/audio/transcriptions.py @@ -29,10 +29,21 @@ class Transcriptions(SyncAPIResource): @cached_property def with_raw_response(self) -> TranscriptionsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return TranscriptionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> TranscriptionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return TranscriptionsWithStreamingResponse(self) def create( @@ -125,10 +136,21 @@ def create( class AsyncTranscriptions(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncTranscriptionsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncTranscriptionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncTranscriptionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncTranscriptionsWithStreamingResponse(self) async def create( diff --git a/src/openai/resources/audio/translations.py b/src/openai/resources/audio/translations.py index ed97ccf840..7ec647fb6b 100644 --- a/src/openai/resources/audio/translations.py +++ b/src/openai/resources/audio/translations.py @@ -28,10 +28,21 @@ class Translations(SyncAPIResource): @cached_property def with_raw_response(self) -> TranslationsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return TranslationsWithRawResponse(self) @cached_property def with_streaming_response(self) -> TranslationsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return TranslationsWithStreamingResponse(self) def create( @@ -109,10 +120,21 @@ def create( class AsyncTranslations(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncTranslationsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncTranslationsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncTranslationsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncTranslationsWithStreamingResponse(self) async def create( diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py index 4e345dd505..ee62faf774 100644 --- a/src/openai/resources/batches.py +++ b/src/openai/resources/batches.py @@ -27,10 +27,21 @@ class Batches(SyncAPIResource): @cached_property def with_raw_response(self) -> BatchesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return BatchesWithRawResponse(self) @cached_property def with_streaming_response(self) -> BatchesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return BatchesWithStreamingResponse(self) def create( @@ -221,10 +232,21 @@ def cancel( class AsyncBatches(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncBatchesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncBatchesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncBatchesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncBatchesWithStreamingResponse(self) async def create( diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index 441390d24b..1e57944eb3 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -35,10 +35,21 @@ class Assistants(SyncAPIResource): @cached_property def with_raw_response(self) -> AssistantsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AssistantsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AssistantsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AssistantsWithStreamingResponse(self) def create( @@ -410,10 +421,21 @@ def delete( class AsyncAssistants(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncAssistantsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncAssistantsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncAssistantsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncAssistantsWithStreamingResponse(self) async def create( diff --git a/src/openai/resources/beta/beta.py b/src/openai/resources/beta/beta.py index 0d9806678f..78ea0e017f 100644 --- a/src/openai/resources/beta/beta.py +++ b/src/openai/resources/beta/beta.py @@ -49,10 +49,21 @@ def threads(self) -> Threads: @cached_property def with_raw_response(self) -> BetaWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return BetaWithRawResponse(self) @cached_property def with_streaming_response(self) -> BetaWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return BetaWithStreamingResponse(self) @@ -71,10 +82,21 @@ def threads(self) -> AsyncThreads: @cached_property def with_raw_response(self) -> AsyncBetaWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncBetaWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncBetaWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncBetaWithStreamingResponse(self) diff --git a/src/openai/resources/beta/threads/messages.py b/src/openai/resources/beta/threads/messages.py index 5b4f1f2955..0cf7a8d5ea 100644 --- a/src/openai/resources/beta/threads/messages.py +++ b/src/openai/resources/beta/threads/messages.py @@ -29,10 +29,21 @@ class Messages(SyncAPIResource): @cached_property def with_raw_response(self) -> MessagesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return MessagesWithRawResponse(self) @cached_property def with_streaming_response(self) -> MessagesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return MessagesWithStreamingResponse(self) def create( @@ -292,10 +303,21 @@ def delete( class AsyncMessages(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncMessagesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncMessagesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncMessagesWithStreamingResponse(self) async def create( diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index 2d2eed44df..a17e0016c7 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -52,10 +52,21 @@ def steps(self) -> Steps: @cached_property def with_raw_response(self) -> RunsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return RunsWithRawResponse(self) @cached_property def with_streaming_response(self) -> RunsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return RunsWithStreamingResponse(self) @overload @@ -889,10 +900,21 @@ def steps(self) -> AsyncSteps: @cached_property def with_raw_response(self) -> AsyncRunsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncRunsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncRunsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncRunsWithStreamingResponse(self) @overload diff --git a/src/openai/resources/beta/threads/runs/steps.py b/src/openai/resources/beta/threads/runs/steps.py index 3d2d40a3fb..5d6d55f9d9 100644 --- a/src/openai/resources/beta/threads/runs/steps.py +++ b/src/openai/resources/beta/threads/runs/steps.py @@ -28,10 +28,21 @@ class Steps(SyncAPIResource): @cached_property def with_raw_response(self) -> StepsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return StepsWithRawResponse(self) @cached_property def with_streaming_response(self) -> StepsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return StepsWithStreamingResponse(self) def retrieve( @@ -171,10 +182,21 @@ def list( class AsyncSteps(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncStepsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncStepsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncStepsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncStepsWithStreamingResponse(self) async def retrieve( diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index 6ec4a14a7e..27777251ad 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -63,10 +63,21 @@ def messages(self) -> Messages: @cached_property def with_raw_response(self) -> ThreadsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return ThreadsWithRawResponse(self) @cached_property def with_streaming_response(self) -> ThreadsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return ThreadsWithStreamingResponse(self) def create( @@ -706,10 +717,21 @@ def messages(self) -> AsyncMessages: @cached_property def with_raw_response(self) -> AsyncThreadsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncThreadsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncThreadsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncThreadsWithStreamingResponse(self) async def create( diff --git a/src/openai/resources/beta/vector_stores/file_batches.py b/src/openai/resources/beta/vector_stores/file_batches.py index 54b26ee0eb..34fcd8c61b 100644 --- a/src/openai/resources/beta/vector_stores/file_batches.py +++ b/src/openai/resources/beta/vector_stores/file_batches.py @@ -30,10 +30,21 @@ class FileBatches(SyncAPIResource): @cached_property def with_raw_response(self) -> FileBatchesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return FileBatchesWithRawResponse(self) @cached_property def with_streaming_response(self) -> FileBatchesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return FileBatchesWithStreamingResponse(self) def create( @@ -240,10 +251,21 @@ def list_files( class AsyncFileBatches(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncFileBatchesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncFileBatchesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncFileBatchesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncFileBatchesWithStreamingResponse(self) async def create( diff --git a/src/openai/resources/beta/vector_stores/files.py b/src/openai/resources/beta/vector_stores/files.py index 53d34366cf..e96b492ac0 100644 --- a/src/openai/resources/beta/vector_stores/files.py +++ b/src/openai/resources/beta/vector_stores/files.py @@ -29,10 +29,21 @@ class Files(SyncAPIResource): @cached_property def with_raw_response(self) -> FilesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return FilesWithRawResponse(self) @cached_property def with_streaming_response(self) -> FilesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return FilesWithStreamingResponse(self) def create( @@ -240,10 +251,21 @@ def delete( class AsyncFiles(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncFilesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncFilesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncFilesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncFilesWithStreamingResponse(self) async def create( diff --git a/src/openai/resources/beta/vector_stores/vector_stores.py b/src/openai/resources/beta/vector_stores/vector_stores.py index c93b3bc41f..06e26852b4 100644 --- a/src/openai/resources/beta/vector_stores/vector_stores.py +++ b/src/openai/resources/beta/vector_stores/vector_stores.py @@ -58,10 +58,21 @@ def file_batches(self) -> FileBatches: @cached_property def with_raw_response(self) -> VectorStoresWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return VectorStoresWithRawResponse(self) @cached_property def with_streaming_response(self) -> VectorStoresWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return VectorStoresWithStreamingResponse(self) def create( @@ -325,10 +336,21 @@ def file_batches(self) -> AsyncFileBatches: @cached_property def with_raw_response(self) -> AsyncVectorStoresWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncVectorStoresWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncVectorStoresWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncVectorStoresWithStreamingResponse(self) async def create( diff --git a/src/openai/resources/chat/chat.py b/src/openai/resources/chat/chat.py index d14d055506..dc23a15a8e 100644 --- a/src/openai/resources/chat/chat.py +++ b/src/openai/resources/chat/chat.py @@ -23,10 +23,21 @@ def completions(self) -> Completions: @cached_property def with_raw_response(self) -> ChatWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return ChatWithRawResponse(self) @cached_property def with_streaming_response(self) -> ChatWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return ChatWithStreamingResponse(self) @@ -37,10 +48,21 @@ def completions(self) -> AsyncCompletions: @cached_property def with_raw_response(self) -> AsyncChatWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncChatWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncChatWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncChatWithStreamingResponse(self) diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index d1be712e33..29fd69947a 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -34,10 +34,21 @@ class Completions(SyncAPIResource): @cached_property def with_raw_response(self) -> CompletionsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return CompletionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> CompletionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return CompletionsWithStreamingResponse(self) @overload @@ -704,10 +715,21 @@ def create( class AsyncCompletions(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncCompletionsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncCompletionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncCompletionsWithStreamingResponse(self) @overload diff --git a/src/openai/resources/completions.py b/src/openai/resources/completions.py index d33862b405..79d150edd8 100644 --- a/src/openai/resources/completions.py +++ b/src/openai/resources/completions.py @@ -29,10 +29,21 @@ class Completions(SyncAPIResource): @cached_property def with_raw_response(self) -> CompletionsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return CompletionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> CompletionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return CompletionsWithStreamingResponse(self) @overload @@ -560,10 +571,21 @@ def create( class AsyncCompletions(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncCompletionsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncCompletionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncCompletionsWithStreamingResponse(self) @overload diff --git a/src/openai/resources/embeddings.py b/src/openai/resources/embeddings.py index 3b06eea37e..6d24a1a1f8 100644 --- a/src/openai/resources/embeddings.py +++ b/src/openai/resources/embeddings.py @@ -26,10 +26,21 @@ class Embeddings(SyncAPIResource): @cached_property def with_raw_response(self) -> EmbeddingsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return EmbeddingsWithRawResponse(self) @cached_property def with_streaming_response(self) -> EmbeddingsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return EmbeddingsWithStreamingResponse(self) def create( @@ -105,10 +116,21 @@ def create( class AsyncEmbeddings(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncEmbeddingsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncEmbeddingsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncEmbeddingsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncEmbeddingsWithStreamingResponse(self) async def create( diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py index 4d2b51ab56..ee668e9bc2 100644 --- a/src/openai/resources/files.py +++ b/src/openai/resources/files.py @@ -38,10 +38,21 @@ class Files(SyncAPIResource): @cached_property def with_raw_response(self) -> FilesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return FilesWithRawResponse(self) @cached_property def with_streaming_response(self) -> FilesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return FilesWithStreamingResponse(self) def create( @@ -297,10 +308,21 @@ def retrieve_content( class AsyncFiles(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncFilesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncFilesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncFilesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncFilesWithStreamingResponse(self) async def create( diff --git a/src/openai/resources/fine_tuning/fine_tuning.py b/src/openai/resources/fine_tuning/fine_tuning.py index 0404fed6ec..c386de3c2a 100644 --- a/src/openai/resources/fine_tuning/fine_tuning.py +++ b/src/openai/resources/fine_tuning/fine_tuning.py @@ -24,10 +24,21 @@ def jobs(self) -> Jobs: @cached_property def with_raw_response(self) -> FineTuningWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return FineTuningWithRawResponse(self) @cached_property def with_streaming_response(self) -> FineTuningWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return FineTuningWithStreamingResponse(self) @@ -38,10 +49,21 @@ def jobs(self) -> AsyncJobs: @cached_property def with_raw_response(self) -> AsyncFineTuningWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncFineTuningWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncFineTuningWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncFineTuningWithStreamingResponse(self) diff --git a/src/openai/resources/fine_tuning/jobs/checkpoints.py b/src/openai/resources/fine_tuning/jobs/checkpoints.py index 5b5a1043d7..be08b6ea9e 100644 --- a/src/openai/resources/fine_tuning/jobs/checkpoints.py +++ b/src/openai/resources/fine_tuning/jobs/checkpoints.py @@ -21,10 +21,21 @@ class Checkpoints(SyncAPIResource): @cached_property def with_raw_response(self) -> CheckpointsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return CheckpointsWithRawResponse(self) @cached_property def with_streaming_response(self) -> CheckpointsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return CheckpointsWithStreamingResponse(self) def list( @@ -81,10 +92,21 @@ def list( class AsyncCheckpoints(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncCheckpointsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncCheckpointsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncCheckpointsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncCheckpointsWithStreamingResponse(self) def list( diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py index cbd3cbbfba..88ac6107a4 100644 --- a/src/openai/resources/fine_tuning/jobs/jobs.py +++ b/src/openai/resources/fine_tuning/jobs/jobs.py @@ -40,10 +40,21 @@ def checkpoints(self) -> Checkpoints: @cached_property def with_raw_response(self) -> JobsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return JobsWithRawResponse(self) @cached_property def with_streaming_response(self) -> JobsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return JobsWithStreamingResponse(self) def create( @@ -320,10 +331,21 @@ def checkpoints(self) -> AsyncCheckpoints: @cached_property def with_raw_response(self) -> AsyncJobsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncJobsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncJobsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncJobsWithStreamingResponse(self) async def create( diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py index 0913b572cb..e9629d48fd 100644 --- a/src/openai/resources/images.py +++ b/src/openai/resources/images.py @@ -29,10 +29,21 @@ class Images(SyncAPIResource): @cached_property def with_raw_response(self) -> ImagesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return ImagesWithRawResponse(self) @cached_property def with_streaming_response(self) -> ImagesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return ImagesWithStreamingResponse(self) def create_variation( @@ -275,10 +286,21 @@ def generate( class AsyncImages(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncImagesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncImagesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncImagesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncImagesWithStreamingResponse(self) async def create_variation( diff --git a/src/openai/resources/models.py b/src/openai/resources/models.py index 5d0eb6f602..b3d185b553 100644 --- a/src/openai/resources/models.py +++ b/src/openai/resources/models.py @@ -20,10 +20,21 @@ class Models(SyncAPIResource): @cached_property def with_raw_response(self) -> ModelsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return ModelsWithRawResponse(self) @cached_property def with_streaming_response(self) -> ModelsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return ModelsWithStreamingResponse(self) def retrieve( @@ -122,10 +133,21 @@ def delete( class AsyncModels(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncModelsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncModelsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncModelsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncModelsWithStreamingResponse(self) async def retrieve( diff --git a/src/openai/resources/moderations.py b/src/openai/resources/moderations.py index b9ad9972f0..5283554373 100644 --- a/src/openai/resources/moderations.py +++ b/src/openai/resources/moderations.py @@ -26,10 +26,21 @@ class Moderations(SyncAPIResource): @cached_property def with_raw_response(self) -> ModerationsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return ModerationsWithRawResponse(self) @cached_property def with_streaming_response(self) -> ModerationsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return ModerationsWithStreamingResponse(self) def create( @@ -86,10 +97,21 @@ def create( class AsyncModerations(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncModerationsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncModerationsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncModerationsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncModerationsWithStreamingResponse(self) async def create( diff --git a/src/openai/resources/uploads/parts.py b/src/openai/resources/uploads/parts.py index 3ec2592b1e..d46e5ea1bb 100644 --- a/src/openai/resources/uploads/parts.py +++ b/src/openai/resources/uploads/parts.py @@ -27,10 +27,21 @@ class Parts(SyncAPIResource): @cached_property def with_raw_response(self) -> PartsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return PartsWithRawResponse(self) @cached_property def with_streaming_response(self) -> PartsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return PartsWithStreamingResponse(self) def create( @@ -91,10 +102,21 @@ def create( class AsyncParts(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncPartsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncPartsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncPartsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncPartsWithStreamingResponse(self) async def create( diff --git a/src/openai/resources/uploads/uploads.py b/src/openai/resources/uploads/uploads.py index 3590a3843f..dadd01d342 100644 --- a/src/openai/resources/uploads/uploads.py +++ b/src/openai/resources/uploads/uploads.py @@ -38,10 +38,21 @@ def parts(self) -> Parts: @cached_property def with_raw_response(self) -> UploadsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return UploadsWithRawResponse(self) @cached_property def with_streaming_response(self) -> UploadsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return UploadsWithStreamingResponse(self) def create( @@ -221,10 +232,21 @@ def parts(self) -> AsyncParts: @cached_property def with_raw_response(self) -> AsyncUploadsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncUploadsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncUploadsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncUploadsWithStreamingResponse(self) async def create( From e36484ebe958985692d5913f12f9b3e6c077e242 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 14:41:46 +0000 Subject: [PATCH 070/192] docs(readme): add section on determining installed version (#1697) --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index c5a78cb585..a47b1a2d7c 100644 --- a/README.md +++ b/README.md @@ -507,6 +507,17 @@ We take backwards-compatibility seriously and work hard to ensure you can rely o We are keen for your feedback; please open an [issue](https://www.github.com/openai/openai-python/issues) with questions, bugs, or suggestions. +### Determining the installed version + +If you've upgraded to the latest version but aren't seeing any new features you were expecting then your python environment is likely still using an older version. + +You can determine the version that is being used at runtime with: + +```py +import openai +print(openai.__version__) +``` + ## Requirements Python 3.7 or higher. 
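For reference, the `.with_raw_response` and `.with_streaming_response` accessors documented throughout the diffs above are used as prefixes on an ordinary method call. A minimal sketch follows; it is not part of any patch in this series, and the client configuration, model name, and prompt are illustrative:

```py
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Prefixing a call with `.with_raw_response` returns the raw response wrapper,
# so HTTP headers are available alongside the parsed body via `.parse()`.
raw = client.chat.completions.with_raw_response.create(
    model="gpt-4o-mini",  # illustrative model name
    messages=[{"role": "user", "content": "Say hello."}],
)
print(raw.headers.get("x-request-id"))
completion = raw.parse()  # the usual parsed ChatCompletion object
print(completion.choices[0].message.content)

# `.with_streaming_response` does not eagerly read the body; it is consumed
# inside a context manager, for example line by line.
with client.chat.completions.with_streaming_response.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say hello."}],
) as response:
    for line in response.iter_lines():
        print(line)
```

The raw wrapper exposes headers and `.parse()`, while the streaming variant defers reading the body until it is consumed inside the `with` block.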
From a64f5d4e5c3bf0e10f3346b4f777478dfe97abb7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 12 Sep 2024 14:11:58 +0000 Subject: [PATCH 071/192] fix(types): correctly mark stream discriminator as optional (#1706) --- src/openai/types/beta/thread_create_and_run_params.py | 2 +- src/openai/types/beta/threads/run_create_params.py | 2 +- src/openai/types/beta/threads/run_submit_tool_outputs_params.py | 2 +- src/openai/types/chat/completion_create_params.py | 2 +- src/openai/types/completion_create_params.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index cd3d9f29d4..370c2f9bce 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -332,7 +332,7 @@ class TruncationStrategy(TypedDict, total=False): """ -class ThreadCreateAndRunParamsNonStreaming(ThreadCreateAndRunParamsBase): +class ThreadCreateAndRunParamsNonStreaming(ThreadCreateAndRunParamsBase, total=False): stream: Optional[Literal[False]] """ If `true`, returns a stream of events that happen during the Run as server-sent diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index 8bb73ddc78..7c5f571d58 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -225,7 +225,7 @@ class TruncationStrategy(TypedDict, total=False): """ -class RunCreateParamsNonStreaming(RunCreateParamsBase): +class RunCreateParamsNonStreaming(RunCreateParamsBase, total=False): stream: Optional[Literal[False]] """ If `true`, returns a stream of events that happen during the Run as server-sent diff --git a/src/openai/types/beta/threads/run_submit_tool_outputs_params.py b/src/openai/types/beta/threads/run_submit_tool_outputs_params.py index ccb5e5e97e..147728603a 100644 --- a/src/openai/types/beta/threads/run_submit_tool_outputs_params.py +++ b/src/openai/types/beta/threads/run_submit_tool_outputs_params.py @@ -31,7 +31,7 @@ class ToolOutput(TypedDict, total=False): """ -class RunSubmitToolOutputsParamsNonStreaming(RunSubmitToolOutputsParamsBase): +class RunSubmitToolOutputsParamsNonStreaming(RunSubmitToolOutputsParamsBase, total=False): stream: Optional[Literal[False]] """ If `true`, returns a stream of events that happen during the Run as server-sent diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 91435dcedd..b86dab742b 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -262,7 +262,7 @@ class Function(TypedDict, total=False): ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema] -class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase): +class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False): stream: Optional[Literal[False]] """If set, partial message deltas will be sent, like in ChatGPT. 
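The `total=False` additions in this fix matter because TypedDict totality is applied per class: keys declared on a subclass default to required even when the base class is non-total, so without the flag a type checker would insist that `stream` be passed. A minimal sketch with hypothetical class names, not taken from the SDK itself:

```py
from typing import Optional
from typing_extensions import Literal, TypedDict


class CreateParamsBase(TypedDict, total=False):
    model: str  # keys declared here are already optional for type checkers


# Totality is per class: without `total=False` on the subclass, `stream`
# would be treated as required even though the base class is non-total.
class CreateParamsNonStreaming(CreateParamsBase, total=False):
    stream: Optional[Literal[False]]


params: CreateParamsNonStreaming = {"model": "gpt-4o"}  # `stream` may be omitted
```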
diff --git a/src/openai/types/completion_create_params.py b/src/openai/types/completion_create_params.py index 9fe22fe3c9..6c112b3902 100644 --- a/src/openai/types/completion_create_params.py +++ b/src/openai/types/completion_create_params.py @@ -160,7 +160,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): """ -class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase): +class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False): stream: Optional[Literal[False]] """Whether to stream back partial progress. From 2bfc9698798e3c1a37c466ffe2c88677e615bc7f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 12 Sep 2024 16:53:04 +0000 Subject: [PATCH 072/192] feat(api): add o1 models (#1708) See https://platform.openai.com/docs/guides/reasoning for details. --- .stats.yml | 2 +- src/openai/resources/beta/assistants.py | 24 +-- .../resources/beta/threads/runs/runs.py | 36 ++-- src/openai/resources/beta/threads/threads.py | 36 ++-- src/openai/resources/chat/completions.py | 172 ++++++++++++------ src/openai/resources/fine_tuning/jobs/jobs.py | 4 +- src/openai/types/beta/assistant.py | 6 +- .../types/beta/assistant_create_params.py | 6 +- .../types/beta/assistant_update_params.py | 6 +- src/openai/types/beta/file_search_tool.py | 15 +- .../types/beta/file_search_tool_param.py | 15 +- .../beta/thread_create_and_run_params.py | 6 +- src/openai/types/beta/threads/run.py | 6 +- .../types/beta/threads/run_create_params.py | 6 +- .../types/chat/completion_create_params.py | 30 ++- src/openai/types/chat_model.py | 6 +- src/openai/types/completion_usage.py | 12 +- .../types/fine_tuning/job_create_params.py | 2 +- tests/api_resources/chat/test_completions.py | 4 + 19 files changed, 239 insertions(+), 155 deletions(-) diff --git a/.stats.yml b/.stats.yml index 903c159960..de3167f3a8 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-85a85e0c08de456441431c0ae4e9c078cc8f9748c29430b9a9058340db6389ee.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-501122aa32adaa2abb3d4487880ab9cdf2141addce2e6c3d1bd9bb6b44c318a8.yml diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index 1e57944eb3..5d8c6ec331 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -100,11 +100,11 @@ def create( and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -250,11 +250,11 @@ def update( and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. 
Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -486,11 +486,11 @@ async def create( and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -636,11 +636,11 @@ async def update( and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index a17e0016c7..ef0edf0e36 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -156,11 +156,11 @@ def create( and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -300,11 +300,11 @@ def create( and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
- Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -440,11 +440,11 @@ def create( and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -1004,11 +1004,11 @@ async def create( and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -1148,11 +1148,11 @@ async def create( and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -1288,11 +1288,11 @@ async def create( and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index 27777251ad..3b0e310e4f 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -326,11 +326,11 @@ def create_and_run( and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -460,11 +460,11 @@ def create_and_run( and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -590,11 +590,11 @@ def create_and_run( and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -980,11 +980,11 @@ async def create_and_run( and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -1114,11 +1114,11 @@ async def create_and_run( and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. 
**Important:** when using JSON mode, you **must** also instruct the model to @@ -1244,11 +1244,11 @@ async def create_and_run( and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index 29fd69947a..d25a8c4dfb 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -62,6 +62,7 @@ def create( functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -130,13 +131,17 @@ def create( returns the log probabilities of each output token returned in the `content` of `message`. + max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. - The total length of input tokens and generated tokens is limited by the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the @@ -159,11 +164,11 @@ def create( all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -183,8 +188,11 @@ def create( service_tier: Specifies the latency tier to use for processing the request. 
This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are - exhausted. + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarentee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. @@ -259,6 +267,7 @@ def create( functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -333,13 +342,17 @@ def create( returns the log probabilities of each output token returned in the `content` of `message`. + max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. - The total length of input tokens and generated tokens is limited by the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the @@ -362,11 +375,11 @@ def create( all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -386,8 +399,11 @@ def create( service_tier: Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are - exhausted. + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarentee. 
- If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. @@ -455,6 +471,7 @@ def create( functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -529,13 +546,17 @@ def create( returns the log probabilities of each output token returned in the `content` of `message`. + max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. - The total length of input tokens and generated tokens is limited by the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the @@ -558,11 +579,11 @@ def create( all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -582,8 +603,11 @@ def create( service_tier: Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are - exhausted. + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarentee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. 
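The parameter documentation in the hunks above pairs naturally with a short usage sketch. The snippet below is illustrative only and is not part of the patch; it assumes an `OPENAI_API_KEY` environment variable and an SDK build that already exposes `max_completion_tokens`, and the model name and token limit are arbitrary choices.

```python
import openai

# Reads the API key from the OPENAI_API_KEY environment variable.
client = openai.OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[
        # JSON mode requires explicitly instructing the model to produce JSON.
        {"role": "system", "content": "Reply with a single JSON object."},
        {"role": "user", "content": "List three prime numbers under the key 'primes'."},
    ],
    # Preferred over the deprecated `max_tokens`; bounds visible and reasoning tokens.
    max_completion_tokens=256,
    # JSON mode ensures the generated message is valid JSON.
    response_format={"type": "json_object"},
)
print(completion.choices[0].message.content)
```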
@@ -650,6 +674,7 @@ def create( functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -684,6 +709,7 @@ def create( "functions": functions, "logit_bias": logit_bias, "logprobs": logprobs, + "max_completion_tokens": max_completion_tokens, "max_tokens": max_tokens, "n": n, "parallel_tool_calls": parallel_tool_calls, @@ -743,6 +769,7 @@ async def create( functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -811,13 +838,17 @@ async def create( returns the log probabilities of each output token returned in the `content` of `message`. + max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. - The total length of input tokens and generated tokens is limited by the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the @@ -840,11 +871,11 @@ async def create( all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -864,8 +895,11 @@ async def create( service_tier: Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are - exhausted. + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarentee. 
- If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. @@ -940,6 +974,7 @@ async def create( functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -1014,13 +1049,17 @@ async def create( returns the log probabilities of each output token returned in the `content` of `message`. + max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. - The total length of input tokens and generated tokens is limited by the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the @@ -1043,11 +1082,11 @@ async def create( all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -1067,8 +1106,11 @@ async def create( service_tier: Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are - exhausted. + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarentee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. 
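Because `max_tokens` is documented above as incompatible with the o1 series while `max_completion_tokens` also accounts for reasoning tokens, a hypothetical async sketch (not part of the patch; the model name, prompt, and limit are assumptions) might look like this:

```python
import asyncio

import openai


async def main() -> None:
    client = openai.AsyncOpenAI()
    completion = await client.chat.completions.create(
        model="o1-mini",
        messages=[{"role": "user", "content": "Briefly explain what a prime number is."}],
        # Upper bound on generated tokens, including hidden reasoning tokens.
        max_completion_tokens=2048,
    )
    print(completion.choices[0].message.content)


asyncio.run(main())
```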
@@ -1136,6 +1178,7 @@ async def create( functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -1210,13 +1253,17 @@ async def create( returns the log probabilities of each output token returned in the `content` of `message`. + max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. - The total length of input tokens and generated tokens is limited by the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the @@ -1239,11 +1286,11 @@ async def create( all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -1263,8 +1310,11 @@ async def create( service_tier: Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are - exhausted. + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarentee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. 
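The `response_format` documentation above also covers Structured Outputs. A minimal illustrative sketch follows; the schema, its name, and the model choice are assumptions for the example rather than anything taken from the patch.

```python
import openai

client = openai.OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": "Name a city and the country it is in."}],
    # Structured Outputs: the response is constrained to the supplied JSON schema.
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "city_info",
            "strict": True,
            "schema": {
                "type": "object",
                "properties": {
                    "city": {"type": "string"},
                    "country": {"type": "string"},
                },
                "required": ["city", "country"],
                "additionalProperties": False,
            },
        },
    },
)
print(completion.choices[0].message.content)
```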
@@ -1331,6 +1381,7 @@ async def create( functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -1365,6 +1416,7 @@ async def create( "functions": functions, "logit_bias": logit_bias, "logprobs": logprobs, + "max_completion_tokens": max_completion_tokens, "max_tokens": max_tokens, "n": n, "parallel_tool_calls": parallel_tool_calls, diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py index 88ac6107a4..7eb0c5dbfc 100644 --- a/src/openai/resources/fine_tuning/jobs/jobs.py +++ b/src/openai/resources/fine_tuning/jobs/jobs.py @@ -111,7 +111,7 @@ def create( job parameters should produce the same results, but may differ in rare cases. If a seed is not specified, one will be generated for you. - suffix: A string of up to 18 characters that will be added to your fine-tuned model + suffix: A string of up to 64 characters that will be added to your fine-tuned model name. For example, a `suffix` of "custom-model-name" would produce a model name like @@ -402,7 +402,7 @@ async def create( job parameters should produce the same results, but may differ in rare cases. If a seed is not specified, one will be generated for you. - suffix: A string of up to 18 characters that will be added to your fine-tuned model + suffix: A string of up to 64 characters that will be added to your fine-tuned model name. For example, a `suffix` of "custom-model-name" would produce a model name like diff --git a/src/openai/types/beta/assistant.py b/src/openai/types/beta/assistant.py index c6a0a4cfcf..b4da08745d 100644 --- a/src/openai/types/beta/assistant.py +++ b/src/openai/types/beta/assistant.py @@ -90,11 +90,11 @@ class Assistant(BaseModel): and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index c1360b5b66..eca4da0a2b 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -58,11 +58,11 @@ class AssistantCreateParams(TypedDict, total=False): and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
- Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py index ade565819f..5396233937 100644 --- a/src/openai/types/beta/assistant_update_params.py +++ b/src/openai/types/beta/assistant_update_params.py @@ -50,11 +50,11 @@ class AssistantUpdateParams(TypedDict, total=False): and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to diff --git a/src/openai/types/beta/file_search_tool.py b/src/openai/types/beta/file_search_tool.py index 4015b3da09..aee6593e89 100644 --- a/src/openai/types/beta/file_search_tool.py +++ b/src/openai/types/beta/file_search_tool.py @@ -9,16 +9,16 @@ class FileSearchRankingOptions(BaseModel): - ranker: Optional[Literal["auto", "default_2024_08_21"]] = None - """The ranker to use for the file search. + score_threshold: float + """The score threshold for the file search. - If not specified will use the `auto` ranker. + All values must be a floating point number between 0 and 1. """ - score_threshold: Optional[float] = None - """The score threshold for the file search. + ranker: Optional[Literal["auto", "default_2024_08_21"]] = None + """The ranker to use for the file search. - All values must be a floating point number between 0 and 1. + If not specified will use the `auto` ranker. """ @@ -38,6 +38,9 @@ class FileSearch(BaseModel): ranking_options: Optional[FileSearchRankingOptions] = None """The ranking options for the file search. + If not specified, the file search tool will use the `auto` ranker and a + score_threshold of 0. + See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. diff --git a/src/openai/types/beta/file_search_tool_param.py b/src/openai/types/beta/file_search_tool_param.py index 97e651b0da..5ce91207ba 100644 --- a/src/openai/types/beta/file_search_tool_param.py +++ b/src/openai/types/beta/file_search_tool_param.py @@ -8,16 +8,16 @@ class FileSearchRankingOptions(TypedDict, total=False): - ranker: Literal["auto", "default_2024_08_21"] - """The ranker to use for the file search. + score_threshold: Required[float] + """The score threshold for the file search. - If not specified will use the `auto` ranker. + All values must be a floating point number between 0 and 1. """ - score_threshold: float - """The score threshold for the file search. + ranker: Literal["auto", "default_2024_08_21"] + """The ranker to use for the file search. - All values must be a floating point number between 0 and 1. + If not specified will use the `auto` ranker. 
""" @@ -37,6 +37,9 @@ class FileSearch(TypedDict, total=False): ranking_options: FileSearchRankingOptions """The ranking options for the file search. + If not specified, the file search tool will use the `auto` ranker and a + score_threshold of 0. + See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index 370c2f9bce..20d525fa1a 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -98,11 +98,11 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py index 0579e229d8..5abc1de295 100644 --- a/src/openai/types/beta/threads/run.py +++ b/src/openai/types/beta/threads/run.py @@ -172,11 +172,11 @@ class Run(BaseModel): and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index 7c5f571d58..824cb1a041 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -111,11 +111,11 @@ class RunCreateParamsBase(TypedDict, total=False): and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. 
**Important:** when using JSON mode, you **must** also instruct the model to diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index b86dab742b..4ed89b00f5 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -87,15 +87,22 @@ class CompletionCreateParamsBase(TypedDict, total=False): `content` of `message`. """ + max_completion_tokens: Optional[int] + """ + An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + """ + max_tokens: Optional[int] """ The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. - The total length of input tokens and generated tokens is limited by the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). """ n: Optional[int] @@ -130,11 +137,11 @@ class CompletionCreateParamsBase(TypedDict, total=False): all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -160,8 +167,11 @@ class CompletionCreateParamsBase(TypedDict, total=False): This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are - exhausted. + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarentee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. 
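To illustrate the `service_tier` behaviour described in that docstring, here is a hypothetical request; it is not part of the patch, and whether scale tier credits are actually consumed depends on the Project's configuration rather than on anything in this snippet.

```python
import openai

client = openai.OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say hello."}],
    # 'auto' uses scale tier credits when the Project is Scale tier enabled,
    # otherwise the request falls back to the default service tier.
    service_tier="auto",
)
print(completion.choices[0].message.content)
```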
diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py index 2372d5e14e..f8438c75c8 100644 --- a/src/openai/types/chat_model.py +++ b/src/openai/types/chat_model.py @@ -5,9 +5,13 @@ __all__ = ["ChatModel"] ChatModel: TypeAlias = Literal[ + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", "gpt-4o", - "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", "chatgpt-4o-latest", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", diff --git a/src/openai/types/completion_usage.py b/src/openai/types/completion_usage.py index 0d57b96595..a4b9116e35 100644 --- a/src/openai/types/completion_usage.py +++ b/src/openai/types/completion_usage.py @@ -1,10 +1,15 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - +from typing import Optional from .._models import BaseModel -__all__ = ["CompletionUsage"] +__all__ = ["CompletionUsage", "CompletionTokensDetails"] + + +class CompletionTokensDetails(BaseModel): + reasoning_tokens: Optional[int] = None + """Tokens generated by the model for reasoning.""" class CompletionUsage(BaseModel): @@ -16,3 +21,6 @@ class CompletionUsage(BaseModel): total_tokens: int """Total number of tokens used in the request (prompt + completion).""" + + completion_tokens_details: Optional[CompletionTokensDetails] = None + """Breakdown of tokens used in a completion.""" diff --git a/src/openai/types/fine_tuning/job_create_params.py b/src/openai/types/fine_tuning/job_create_params.py index e9be2ef1ca..8f5ea86274 100644 --- a/src/openai/types/fine_tuning/job_create_params.py +++ b/src/openai/types/fine_tuning/job_create_params.py @@ -50,7 +50,7 @@ class JobCreateParams(TypedDict, total=False): suffix: Optional[str] """ - A string of up to 18 characters that will be added to your fine-tuned model + A string of up to 64 characters that will be added to your fine-tuned model name. 
For example, a `suffix` of "custom-model-name" would produce a model name like diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 01ce3f1b0d..df7bc799df 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -54,6 +54,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: ], logit_bias={"foo": 0}, logprobs=True, + max_completion_tokens=0, max_tokens=0, n=1, parallel_tool_calls=True, @@ -174,6 +175,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: ], logit_bias={"foo": 0}, logprobs=True, + max_completion_tokens=0, max_tokens=0, n=1, parallel_tool_calls=True, @@ -296,6 +298,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn ], logit_bias={"foo": 0}, logprobs=True, + max_completion_tokens=0, max_tokens=0, n=1, parallel_tool_calls=True, @@ -416,6 +419,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn ], logit_bias={"foo": 0}, logprobs=True, + max_completion_tokens=0, max_tokens=0, n=1, parallel_tool_calls=True, From 36fa41816a480fae21c795ddd2fb241ded97ff13 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 13 Sep 2024 15:30:06 +0000 Subject: [PATCH 073/192] docs: update CONTRIBUTING.md (#1710) --- CONTRIBUTING.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0f1f31488e..176b8ffc2d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -31,13 +31,13 @@ $ pip install -r requirements-dev.lock ## Modifying/Adding code -Most of the SDK is generated code, and any modified code will be overridden on the next generation. The -`src/openai/lib/` and `examples/` directories are exceptions and will never be overridden. +Most of the SDK is generated code. Modifications to code will be persisted between generations, but may +result in merge conflicts between manual patches and changes from the generator. The generator will never +modify the contents of the `src/openai/lib/` and `examples/` directories. ## Adding and running examples -All files in the `examples/` directory are not modified by the Stainless generator and can be freely edited or -added to. +All files in the `examples/` directory are not modified by the generator and can be freely edited or added to. 
```bash # add an example to examples/.py From 798f169f52bc78061dd183cbc161ffcaccb779d7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 12:03:54 +0000 Subject: [PATCH 074/192] chore(internal): bump ruff (#1714) --- requirements-dev.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index aa6d1a804b..000f260f7f 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -80,7 +80,7 @@ pytz==2023.3.post1 # via dirty-equals respx==0.20.2 rich==13.7.1 -ruff==0.5.6 +ruff==0.6.5 setuptools==68.2.2 # via nodeenv six==1.16.0 From e5c3a173ef882c49fd6bb67ab1af4c4501630440 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 12:24:14 +0000 Subject: [PATCH 075/192] chore(internal): update spec link (#1716) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index de3167f3a8..2fc39385e9 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-501122aa32adaa2abb3d4487880ab9cdf2141addce2e6c3d1bd9bb6b44c318a8.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-ff407aa10917e62f2b0c12d1ad2c4f1258ed083bd45753c70eaaf5b1cf8356ae.yml From 180a5f4565af3f9f4f557091f7f0274ecd699b2e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 12:44:15 +0000 Subject: [PATCH 076/192] chore(internal): bump pyright / mypy version (#1717) --- requirements-dev.lock | 4 ++-- src/openai/_utils/_utils.py | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index 000f260f7f..e464a89bfb 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -49,7 +49,7 @@ markdown-it-py==3.0.0 # via rich mdurl==0.1.2 # via markdown-it-py -mypy==1.10.1 +mypy==1.11.2 mypy-extensions==1.0.0 # via mypy nodeenv==1.8.0 @@ -70,7 +70,7 @@ pydantic-core==2.18.2 # via pydantic pygments==2.18.0 # via rich -pyright==1.1.374 +pyright==1.1.380 pytest==7.1.1 # via pytest-asyncio pytest-asyncio==0.21.1 diff --git a/src/openai/_utils/_utils.py b/src/openai/_utils/_utils.py index 2fc5a1c65a..0bba17caad 100644 --- a/src/openai/_utils/_utils.py +++ b/src/openai/_utils/_utils.py @@ -363,12 +363,13 @@ def file_from_path(path: str) -> FileTypes: def get_required_header(headers: HeadersLike, header: str) -> str: lower_header = header.lower() - if isinstance(headers, Mapping): - for k, v in headers.items(): + if is_mapping_t(headers): + # mypy doesn't understand the type narrowing here + for k, v in headers.items(): # type: ignore if k.lower() == lower_header and isinstance(v, str): return v - """ to deal with the case where the header looks like Stainless-Event-Id """ + # to deal with the case where the header looks like Stainless-Event-Id intercaps_header = re.sub(r"([^\w])(\w)", lambda pat: pat.group(1) + pat.group(2).upper(), header.capitalize()) for normalized_header in [header, lower_header, header.upper(), intercaps_header]: From e588b5cb770b9446ae4ad17dffa630a686358429 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 19 Sep 2024 14:48:19 +0000 Subject: [PATCH 077/192] fix(client): handle domains with underscores (#1726) 
--- src/openai/_base_client.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index f374449dbc..2545ddf967 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -490,12 +490,17 @@ def _build_request( if not files: files = cast(HttpxRequestFiles, ForceMultipartDict()) + prepared_url = self._prepare_url(http://github.com/options.url) + if "_" in prepared_url.host: + # work around https://github.com/encode/httpx/discussions/2880 + kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")} + # TODO: report this error to httpx return self._client.build_request( # pyright: ignore[reportUnknownMemberType] headers=headers, timeout=self.timeout if isinstance(options.timeout, NotGiven) else options.timeout, method=options.method, - url=self._prepare_url(http://github.com/options.url), + url=prepared_url, # the `Query` type that we use is incompatible with qs' # `Params` type as it needs to be typed as `Mapping[str, object]` # so that passing a `TypedDict` doesn't cause an error. From 4601581a4abfe5632b62013b3399a2d7584cb31f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 19 Sep 2024 16:11:52 +0000 Subject: [PATCH 078/192] feat(client): send retry count header (#1728) --- src/openai/_base_client.py | 101 ++++++++++++++++++++----------------- tests/test_client.py | 4 ++ 2 files changed, 58 insertions(+), 47 deletions(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 2545ddf967..d632ed5951 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -401,14 +401,7 @@ def _make_status_error( ) -> _exceptions.APIStatusError: raise NotImplementedError() - def _remaining_retries( - self, - remaining_retries: Optional[int], - options: FinalRequestOptions, - ) -> int: - return remaining_retries if remaining_retries is not None else options.get_max_retries(self.max_retries) - - def _build_headers(self, options: FinalRequestOptions) -> httpx.Headers: + def _build_headers(self, options: FinalRequestOptions, *, retries_taken: int = 0) -> httpx.Headers: custom_headers = options.headers or {} headers_dict = _merge_mappings(self.default_headers, custom_headers) self._validate_headers(headers_dict, custom_headers) @@ -420,6 +413,8 @@ def _build_headers(self, options: FinalRequestOptions) -> httpx.Headers: if idempotency_header and options.method.lower() != "get" and idempotency_header not in headers: headers[idempotency_header] = options.idempotency_key or self._idempotency_key() + headers.setdefault("x-stainless-retry-count", str(retries_taken)) + return headers def _prepare_url(http://github.com/self,%20url:%20str) -> URL: @@ -441,6 +436,8 @@ def _make_sse_decoder(self) -> SSEDecoder | SSEBytesDecoder: def _build_request( self, options: FinalRequestOptions, + *, + retries_taken: int = 0, ) -> httpx.Request: if log.isEnabledFor(logging.DEBUG): log.debug("Request options: %s", model_dump(options, exclude_unset=True)) @@ -456,7 +453,7 @@ def _build_request( else: raise RuntimeError(f"Unexpected JSON data type, {type(json_data)}, cannot merge with `extra_body`") - headers = self._build_headers(options) + headers = self._build_headers(options, retries_taken=retries_taken) params = _merge_mappings(self.default_query, options.params) content_type = headers.get("Content-Type") files = options.files @@ -939,12 +936,17 @@ def request( stream: bool = False, 
stream_cls: type[_StreamT] | None = None, ) -> ResponseT | _StreamT: + if remaining_retries is not None: + retries_taken = options.get_max_retries(self.max_retries) - remaining_retries + else: + retries_taken = 0 + return self._request( cast_to=cast_to, options=options, stream=stream, stream_cls=stream_cls, - remaining_retries=remaining_retries, + retries_taken=retries_taken, ) def _request( @@ -952,7 +954,7 @@ def _request( *, cast_to: Type[ResponseT], options: FinalRequestOptions, - remaining_retries: int | None, + retries_taken: int, stream: bool, stream_cls: type[_StreamT] | None, ) -> ResponseT | _StreamT: @@ -964,8 +966,8 @@ def _request( cast_to = self._maybe_override_cast_to(cast_to, options) options = self._prepare_options(options) - retries = self._remaining_retries(remaining_retries, options) - request = self._build_request(options) + remaining_retries = options.get_max_retries(self.max_retries) - retries_taken + request = self._build_request(options, retries_taken=retries_taken) self._prepare_request(request) kwargs: HttpxSendArgs = {} @@ -983,11 +985,11 @@ def _request( except httpx.TimeoutException as err: log.debug("Encountered httpx.TimeoutException", exc_info=True) - if retries > 0: + if remaining_retries > 0: return self._retry_request( input_options, cast_to, - retries, + retries_taken=retries_taken, stream=stream, stream_cls=stream_cls, response_headers=None, @@ -998,11 +1000,11 @@ def _request( except Exception as err: log.debug("Encountered Exception", exc_info=True) - if retries > 0: + if remaining_retries > 0: return self._retry_request( input_options, cast_to, - retries, + retries_taken=retries_taken, stream=stream, stream_cls=stream_cls, response_headers=None, @@ -1026,13 +1028,13 @@ def _request( except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code log.debug("Encountered httpx.HTTPStatusError", exc_info=True) - if retries > 0 and self._should_retry(err.response): + if remaining_retries > 0 and self._should_retry(err.response): err.response.close() return self._retry_request( input_options, cast_to, - retries, - err.response.headers, + retries_taken=retries_taken, + response_headers=err.response.headers, stream=stream, stream_cls=stream_cls, ) @@ -1051,26 +1053,26 @@ def _request( response=response, stream=stream, stream_cls=stream_cls, - retries_taken=options.get_max_retries(self.max_retries) - retries, + retries_taken=retries_taken, ) def _retry_request( self, options: FinalRequestOptions, cast_to: Type[ResponseT], - remaining_retries: int, - response_headers: httpx.Headers | None, *, + retries_taken: int, + response_headers: httpx.Headers | None, stream: bool, stream_cls: type[_StreamT] | None, ) -> ResponseT | _StreamT: - remaining = remaining_retries - 1 - if remaining == 1: + remaining_retries = options.get_max_retries(self.max_retries) - retries_taken + if remaining_retries == 1: log.debug("1 retry left") else: - log.debug("%i retries left", remaining) + log.debug("%i retries left", remaining_retries) - timeout = self._calculate_retry_timeout(remaining, options, response_headers) + timeout = self._calculate_retry_timeout(remaining_retries, options, response_headers) log.info("Retrying request to %s in %f seconds", options.url, timeout) # In a synchronous context we are blocking the entire thread. 
Up to the library user to run the client in a @@ -1080,7 +1082,7 @@ def _retry_request( return self._request( options=options, cast_to=cast_to, - remaining_retries=remaining, + retries_taken=retries_taken + 1, stream=stream, stream_cls=stream_cls, ) @@ -1512,12 +1514,17 @@ async def request( stream_cls: type[_AsyncStreamT] | None = None, remaining_retries: Optional[int] = None, ) -> ResponseT | _AsyncStreamT: + if remaining_retries is not None: + retries_taken = options.get_max_retries(self.max_retries) - remaining_retries + else: + retries_taken = 0 + return await self._request( cast_to=cast_to, options=options, stream=stream, stream_cls=stream_cls, - remaining_retries=remaining_retries, + retries_taken=retries_taken, ) async def _request( @@ -1527,7 +1534,7 @@ async def _request( *, stream: bool, stream_cls: type[_AsyncStreamT] | None, - remaining_retries: int | None, + retries_taken: int, ) -> ResponseT | _AsyncStreamT: if self._platform is None: # `get_platform` can make blocking IO calls so we @@ -1542,8 +1549,8 @@ async def _request( cast_to = self._maybe_override_cast_to(cast_to, options) options = await self._prepare_options(options) - retries = self._remaining_retries(remaining_retries, options) - request = self._build_request(options) + remaining_retries = options.get_max_retries(self.max_retries) - retries_taken + request = self._build_request(options, retries_taken=retries_taken) await self._prepare_request(request) kwargs: HttpxSendArgs = {} @@ -1559,11 +1566,11 @@ async def _request( except httpx.TimeoutException as err: log.debug("Encountered httpx.TimeoutException", exc_info=True) - if retries > 0: + if remaining_retries > 0: return await self._retry_request( input_options, cast_to, - retries, + retries_taken=retries_taken, stream=stream, stream_cls=stream_cls, response_headers=None, @@ -1574,11 +1581,11 @@ async def _request( except Exception as err: log.debug("Encountered Exception", exc_info=True) - if retries > 0: + if retries_taken > 0: return await self._retry_request( input_options, cast_to, - retries, + retries_taken=retries_taken, stream=stream, stream_cls=stream_cls, response_headers=None, @@ -1596,13 +1603,13 @@ async def _request( except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code log.debug("Encountered httpx.HTTPStatusError", exc_info=True) - if retries > 0 and self._should_retry(err.response): + if remaining_retries > 0 and self._should_retry(err.response): await err.response.aclose() return await self._retry_request( input_options, cast_to, - retries, - err.response.headers, + retries_taken=retries_taken, + response_headers=err.response.headers, stream=stream, stream_cls=stream_cls, ) @@ -1621,26 +1628,26 @@ async def _request( response=response, stream=stream, stream_cls=stream_cls, - retries_taken=options.get_max_retries(self.max_retries) - retries, + retries_taken=retries_taken, ) async def _retry_request( self, options: FinalRequestOptions, cast_to: Type[ResponseT], - remaining_retries: int, - response_headers: httpx.Headers | None, *, + retries_taken: int, + response_headers: httpx.Headers | None, stream: bool, stream_cls: type[_AsyncStreamT] | None, ) -> ResponseT | _AsyncStreamT: - remaining = remaining_retries - 1 - if remaining == 1: + remaining_retries = options.get_max_retries(self.max_retries) - retries_taken + if remaining_retries == 1: log.debug("1 retry left") else: - log.debug("%i retries left", remaining) + log.debug("%i retries left", remaining_retries) - timeout = self._calculate_retry_timeout(remaining, options, 
response_headers) + timeout = self._calculate_retry_timeout(remaining_retries, options, response_headers) log.info("Retrying request to %s in %f seconds", options.url, timeout) await anyio.sleep(timeout) @@ -1648,7 +1655,7 @@ async def _retry_request( return await self._request( options=options, cast_to=cast_to, - remaining_retries=remaining, + retries_taken=retries_taken + 1, stream=stream, stream_cls=stream_cls, ) diff --git a/tests/test_client.py b/tests/test_client.py index 054ae0ff4e..a520998965 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -788,6 +788,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: ) assert response.retries_taken == failures_before_success + assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @@ -818,6 +819,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: model="gpt-4o", ) as response: assert response.retries_taken == failures_before_success + assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success class TestAsyncOpenAI: @@ -1582,6 +1584,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: ) assert response.retries_taken == failures_before_success + assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @@ -1613,3 +1616,4 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: model="gpt-4o", ) as response: assert response.retries_taken == failures_before_success + assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success From 9a3ad4ce544f2603dac0cdb9b855cd10ce98dc37 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 19 Sep 2024 16:21:25 +0000 Subject: [PATCH 079/192] chore(types): improve type name for embedding models (#1730) --- .stats.yml | 2 +- api.md | 2 +- src/openai/resources/embeddings.py | 5 +++-- src/openai/types/__init__.py | 1 + src/openai/types/embedding_create_params.py | 4 +++- src/openai/types/embedding_model.py | 7 +++++++ 6 files changed, 16 insertions(+), 5 deletions(-) create mode 100644 src/openai/types/embedding_model.py diff --git a/.stats.yml b/.stats.yml index 2fc39385e9..0151c5a105 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-ff407aa10917e62f2b0c12d1ad2c4f1258ed083bd45753c70eaaf5b1cf8356ae.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-de1981b64ac229493473670d618500c6362c195f1057eb7de00bd1bc9184fbd5.yml diff --git a/api.md b/api.md index 8267f03967..fa76718148 100644 --- a/api.md +++ b/api.md @@ -70,7 +70,7 @@ Methods: Types: ```python -from openai.types import CreateEmbeddingResponse, Embedding +from openai.types import CreateEmbeddingResponse, Embedding, EmbeddingModel ``` Methods: diff --git a/src/openai/resources/embeddings.py b/src/openai/resources/embeddings.py index 6d24a1a1f8..3a2763904b 100644 --- a/src/openai/resources/embeddings.py +++ b/src/openai/resources/embeddings.py @@ -18,6 +18,7 @@ from .._resource import SyncAPIResource, AsyncAPIResource 
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from .._base_client import make_request_options +from ..types.embedding_model import EmbeddingModel from ..types.create_embedding_response import CreateEmbeddingResponse __all__ = ["Embeddings", "AsyncEmbeddings"] @@ -47,7 +48,7 @@ def create( self, *, input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]], - model: Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]], + model: Union[str, EmbeddingModel], dimensions: int | NotGiven = NOT_GIVEN, encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -137,7 +138,7 @@ async def create( self, *, input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]], - model: Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]], + model: Union[str, EmbeddingModel], dimensions: int | NotGiven = NOT_GIVEN, encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index ad9284fbd5..4dbc1b6b7b 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -26,6 +26,7 @@ from .file_deleted import FileDeleted as FileDeleted from .file_purpose import FilePurpose as FilePurpose from .model_deleted import ModelDeleted as ModelDeleted +from .embedding_model import EmbeddingModel as EmbeddingModel from .images_response import ImagesResponse as ImagesResponse from .completion_usage import CompletionUsage as CompletionUsage from .file_list_params import FileListParams as FileListParams diff --git a/src/openai/types/embedding_create_params.py b/src/openai/types/embedding_create_params.py index 930b3b7914..1548cdbd77 100644 --- a/src/openai/types/embedding_create_params.py +++ b/src/openai/types/embedding_create_params.py @@ -5,6 +5,8 @@ from typing import List, Union, Iterable from typing_extensions import Literal, Required, TypedDict +from .embedding_model import EmbeddingModel + __all__ = ["EmbeddingCreateParams"] @@ -20,7 +22,7 @@ class EmbeddingCreateParams(TypedDict, total=False): for counting tokens. """ - model: Required[Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]]] + model: Required[Union[str, EmbeddingModel]] """ID of the model to use. You can use the diff --git a/src/openai/types/embedding_model.py b/src/openai/types/embedding_model.py new file mode 100644 index 0000000000..075ff97644 --- /dev/null +++ b/src/openai/types/embedding_model.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal, TypeAlias + +__all__ = ["EmbeddingModel"] + +EmbeddingModel: TypeAlias = Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"] From 3121db4b81db3706cc47b4e19f2fe9d27de18dd0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 23 Sep 2024 11:15:14 +0000 Subject: [PATCH 080/192] chore(internal): update pydantic v1 compat helpers (#1737) --- src/openai/_compat.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/openai/_compat.py b/src/openai/_compat.py index 21fe6941ce..162a6fbe4f 100644 --- a/src/openai/_compat.py +++ b/src/openai/_compat.py @@ -136,12 +136,14 @@ def model_dump( exclude: IncEx = None, exclude_unset: bool = False, exclude_defaults: bool = False, + warnings: bool = True, ) -> dict[str, Any]: if PYDANTIC_V2: return model.model_dump( exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, + warnings=warnings, ) return cast( "dict[str, Any]", From e94611551ef30984d10a54574ee44bbc659d5c48 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 14:50:12 +0000 Subject: [PATCH 081/192] chore(internal): use `typing_extensions.overload` instead of `typing` (#1740) --- src/openai/resources/beta/threads/runs/runs.py | 4 ++-- src/openai/resources/beta/threads/threads.py | 4 ++-- src/openai/resources/chat/completions.py | 4 ++-- src/openai/resources/completions.py | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index ef0edf0e36..eb6a156e2f 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -2,8 +2,8 @@ from __future__ import annotations -from typing import List, Union, Iterable, Optional, overload -from typing_extensions import Literal +from typing import List, Union, Iterable, Optional +from typing_extensions import Literal, overload import httpx diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index 3b0e310e4f..8d49dedf56 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -2,8 +2,8 @@ from __future__ import annotations -from typing import Union, Iterable, Optional, overload -from typing_extensions import Literal +from typing import Union, Iterable, Optional +from typing_extensions import Literal, overload import httpx diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index d25a8c4dfb..28676d53eb 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -2,8 +2,8 @@ from __future__ import annotations -from typing import Dict, List, Union, Iterable, Optional, overload -from typing_extensions import Literal +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, overload import httpx diff --git a/src/openai/resources/completions.py b/src/openai/resources/completions.py index 79d150edd8..7f5a3fc4ff 100644 --- a/src/openai/resources/completions.py +++ b/src/openai/resources/completions.py @@ -2,8 +2,8 @@ from __future__ import annotations -from typing import Dict, List, Union, Iterable, Optional, overload -from typing_extensions import Literal +from typing import Dict, List, Union, Iterable, Optional +from 
typing_extensions import Literal, overload import httpx From c118d98a74713a0fdab2837dea66a01582f52792 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Sep 2024 12:02:56 +0000 Subject: [PATCH 082/192] fix(audio): correct response_format translations type (#1743) --- .stats.yml | 2 +- api.md | 2 +- src/openai/resources/audio/transcriptions.py | 14 ++++++++------ src/openai/resources/audio/translations.py | 14 ++++++++------ src/openai/types/__init__.py | 1 + .../types/audio/transcription_create_params.py | 7 ++++--- .../types/audio/translation_create_params.py | 7 ++++--- src/openai/types/audio_response_format.py | 7 +++++++ tests/api_resources/audio/test_translations.py | 4 ++-- 9 files changed, 36 insertions(+), 22 deletions(-) create mode 100644 src/openai/types/audio_response_format.py diff --git a/.stats.yml b/.stats.yml index 0151c5a105..e8bca3c6d8 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-de1981b64ac229493473670d618500c6362c195f1057eb7de00bd1bc9184fbd5.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-073331021d48db6af646a3552ab0c682efe31b7fb4e59a109ed1ba539f9b89c5.yml diff --git a/api.md b/api.md index fa76718148..9e4b62bf51 100644 --- a/api.md +++ b/api.md @@ -113,7 +113,7 @@ Methods: Types: ```python -from openai.types import AudioModel +from openai.types import AudioModel, AudioResponseFormat ``` ## Transcriptions diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py index a6009143d4..fd042d1ac3 100644 --- a/src/openai/resources/audio/transcriptions.py +++ b/src/openai/resources/audio/transcriptions.py @@ -8,6 +8,7 @@ import httpx from ... import _legacy_response +from ...types import AudioResponseFormat from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes from ..._utils import ( extract_files, @@ -22,6 +23,7 @@ from ..._base_client import make_request_options from ...types.audio_model import AudioModel from ...types.audio.transcription import Transcription +from ...types.audio_response_format import AudioResponseFormat __all__ = ["Transcriptions", "AsyncTranscriptions"] @@ -53,7 +55,7 @@ def create( model: Union[str, AudioModel], language: str | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, - response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, + response_format: AudioResponseFormat | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -83,8 +85,8 @@ def create( [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should match the audio language. - response_format: The format of the transcript output, in one of these options: `json`, `text`, - `srt`, `verbose_json`, or `vtt`. + response_format: The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. temperature: The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and @@ -160,7 +162,7 @@ async def create( model: Union[str, AudioModel], language: str | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, - response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, + response_format: AudioResponseFormat | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -190,8 +192,8 @@ async def create( [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should match the audio language. - response_format: The format of the transcript output, in one of these options: `json`, `text`, - `srt`, `verbose_json`, or `vtt`. + response_format: The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and diff --git a/src/openai/resources/audio/translations.py b/src/openai/resources/audio/translations.py index 7ec647fb6b..fe08dd550e 100644 --- a/src/openai/resources/audio/translations.py +++ b/src/openai/resources/audio/translations.py @@ -7,6 +7,7 @@ import httpx from ... import _legacy_response +from ...types import AudioResponseFormat from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes from ..._utils import ( extract_files, @@ -21,6 +22,7 @@ from ..._base_client import make_request_options from ...types.audio_model import AudioModel from ...types.audio.translation import Translation +from ...types.audio_response_format import AudioResponseFormat __all__ = ["Translations", "AsyncTranslations"] @@ -51,7 +53,7 @@ def create( file: FileTypes, model: Union[str, AudioModel], prompt: str | NotGiven = NOT_GIVEN, - response_format: str | NotGiven = NOT_GIVEN, + response_format: AudioResponseFormat | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -75,8 +77,8 @@ def create( [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should be in English. - response_format: The format of the transcript output, in one of these options: `json`, `text`, - `srt`, `verbose_json`, or `vtt`. + response_format: The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and @@ -143,7 +145,7 @@ async def create( file: FileTypes, model: Union[str, AudioModel], prompt: str | NotGiven = NOT_GIVEN, - response_format: str | NotGiven = NOT_GIVEN, + response_format: AudioResponseFormat | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
@@ -167,8 +169,8 @@ async def create( [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should be in English. - response_format: The format of the transcript output, in one of these options: `json`, `text`, - `srt`, `verbose_json`, or `vtt`. + response_format: The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index 4dbc1b6b7b..6223be883d 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -38,6 +38,7 @@ from .batch_create_params import BatchCreateParams as BatchCreateParams from .batch_request_counts import BatchRequestCounts as BatchRequestCounts from .upload_create_params import UploadCreateParams as UploadCreateParams +from .audio_response_format import AudioResponseFormat as AudioResponseFormat from .image_generate_params import ImageGenerateParams as ImageGenerateParams from .upload_complete_params import UploadCompleteParams as UploadCompleteParams from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams diff --git a/src/openai/types/audio/transcription_create_params.py b/src/openai/types/audio/transcription_create_params.py index a825fefecb..5ac2bb91e5 100644 --- a/src/openai/types/audio/transcription_create_params.py +++ b/src/openai/types/audio/transcription_create_params.py @@ -7,6 +7,7 @@ from ..._types import FileTypes from ..audio_model import AudioModel +from ..audio_response_format import AudioResponseFormat __all__ = ["TranscriptionCreateParams"] @@ -41,10 +42,10 @@ class TranscriptionCreateParams(TypedDict, total=False): should match the audio language. """ - response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] + response_format: AudioResponseFormat """ - The format of the transcript output, in one of these options: `json`, `text`, - `srt`, `verbose_json`, or `vtt`. + The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. """ temperature: float diff --git a/src/openai/types/audio/translation_create_params.py b/src/openai/types/audio/translation_create_params.py index 054996a134..6859ed9d30 100644 --- a/src/openai/types/audio/translation_create_params.py +++ b/src/openai/types/audio/translation_create_params.py @@ -7,6 +7,7 @@ from ..._types import FileTypes from ..audio_model import AudioModel +from ..audio_response_format import AudioResponseFormat __all__ = ["TranslationCreateParams"] @@ -33,10 +34,10 @@ class TranslationCreateParams(TypedDict, total=False): should be in English. """ - response_format: str + response_format: AudioResponseFormat """ - The format of the transcript output, in one of these options: `json`, `text`, - `srt`, `verbose_json`, or `vtt`. + The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. """ temperature: float diff --git a/src/openai/types/audio_response_format.py b/src/openai/types/audio_response_format.py new file mode 100644 index 0000000000..f8c8d45945 --- /dev/null +++ b/src/openai/types/audio_response_format.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal, TypeAlias + +__all__ = ["AudioResponseFormat"] + +AudioResponseFormat: TypeAlias = Literal["json", "text", "srt", "verbose_json", "vtt"] diff --git a/tests/api_resources/audio/test_translations.py b/tests/api_resources/audio/test_translations.py index c6c87c2fef..b048a1af12 100644 --- a/tests/api_resources/audio/test_translations.py +++ b/tests/api_resources/audio/test_translations.py @@ -31,7 +31,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: file=b"raw file contents", model="whisper-1", prompt="prompt", - response_format="response_format", + response_format="json", temperature=0, ) assert_matches_type(Translation, translation, path=["response"]) @@ -80,7 +80,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> file=b"raw file contents", model="whisper-1", prompt="prompt", - response_format="response_format", + response_format="json", temperature=0, ) assert_matches_type(Translation, translation, path=["response"]) From 65701fb21569d820436c039f970e15154568709f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Sep 2024 12:36:51 +0000 Subject: [PATCH 083/192] feat(client): allow overriding retry count header (#1745) --- src/openai/_base_client.py | 5 +- tests/test_client.py | 130 +++++++++++++++++++++++++++++++++++++ 2 files changed, 134 insertions(+), 1 deletion(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index d632ed5951..c4c9803e74 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -413,7 +413,10 @@ def _build_headers(self, options: FinalRequestOptions, *, retries_taken: int = 0 if idempotency_header and options.method.lower() != "get" and idempotency_header not in headers: headers[idempotency_header] = options.idempotency_key or self._idempotency_key() - headers.setdefault("x-stainless-retry-count", str(retries_taken)) + # Don't set the retry count header if it was already set or removed by the caller. We check + # `custom_headers`, which can contain `Omit()`, instead of `headers` to account for the removal case. 
+ if "x-stainless-retry-count" not in (header.lower() for header in custom_headers): + headers["x-stainless-retry-count"] = str(retries_taken) return headers diff --git a/tests/test_client.py b/tests/test_client.py index a520998965..463174465c 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -790,6 +790,70 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + def test_omit_retry_count_header( + self, client: OpenAI, failures_before_success: int, respx_mock: MockRouter + ) -> None: + client = client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) + + response = client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="gpt-4o", + extra_headers={"x-stainless-retry-count": Omit()}, + ) + + assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + def test_overwrite_retry_count_header( + self, client: OpenAI, failures_before_success: int, respx_mock: MockRouter + ) -> None: + client = client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) + + response = client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="gpt-4o", + extra_headers={"x-stainless-retry-count": "42"}, + ) + + assert response.http_request.headers.get("x-stainless-retry-count") == "42" + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @@ -1586,6 +1650,72 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + @pytest.mark.asyncio + async def test_omit_retry_count_header( + self, async_client: AsyncOpenAI, failures_before_success: int, respx_mock: MockRouter + ) -> None: + client = async_client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + return httpx.Response(500) + return httpx.Response(200) + + 
respx_mock.post("/chat/completions").mock(side_effect=retry_handler) + + response = await client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="gpt-4o", + extra_headers={"x-stainless-retry-count": Omit()}, + ) + + assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + @pytest.mark.asyncio + async def test_overwrite_retry_count_header( + self, async_client: AsyncOpenAI, failures_before_success: int, respx_mock: MockRouter + ) -> None: + client = async_client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) + + response = await client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="gpt-4o", + extra_headers={"x-stainless-retry-count": "42"}, + ) + + assert response.http_request.headers.get("x-stainless-retry-count") == "42" + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) From 763972e9e3ca5f80a86674e7380fb9cc43249bd7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 26 Sep 2024 17:25:28 +0000 Subject: [PATCH 084/192] feat(api): add omni-moderation model (#1750) --- .stats.yml | 2 +- api.md | 9 ++- src/openai/resources/moderations.py | 49 +++++++------ src/openai/types/__init__.py | 3 + src/openai/types/moderation.py | 70 ++++++++++++++++++- src/openai/types/moderation_create_params.py | 26 +++---- .../types/moderation_image_url_input_param.py | 20 ++++++ src/openai/types/moderation_model.py | 4 +- .../moderation_multi_modal_input_param.py | 13 ++++ .../types/moderation_text_input_param.py | 15 ++++ tests/api_resources/test_moderations.py | 4 +- 11 files changed, 172 insertions(+), 43 deletions(-) create mode 100644 src/openai/types/moderation_image_url_input_param.py create mode 100644 src/openai/types/moderation_multi_modal_input_param.py create mode 100644 src/openai/types/moderation_text_input_param.py diff --git a/.stats.yml b/.stats.yml index e8bca3c6d8..0998368a4c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-073331021d48db6af646a3552ab0c682efe31b7fb4e59a109ed1ba539f9b89c5.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-17ddd746c775ca4d4fbe64e5621ac30756ef09c061ff6313190b6ec162222d4c.yml diff --git a/api.md b/api.md index 9e4b62bf51..785962f429 100644 --- a/api.md +++ b/api.md @@ -157,7 +157,14 @@ Methods: Types: ```python -from openai.types import Moderation, ModerationModel, ModerationCreateResponse +from openai.types import ( + Moderation, + ModerationImageURLInput, + ModerationModel, + ModerationMultiModalInput, + ModerationTextInput, + ModerationCreateResponse, +) ``` Methods: diff --git a/src/openai/resources/moderations.py b/src/openai/resources/moderations.py index 
5283554373..8b73da57b2 100644 --- a/src/openai/resources/moderations.py +++ b/src/openai/resources/moderations.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Union +from typing import List, Union, Iterable import httpx @@ -19,6 +19,7 @@ from .._base_client import make_request_options from ..types.moderation_model import ModerationModel from ..types.moderation_create_response import ModerationCreateResponse +from ..types.moderation_multi_modal_input_param import ModerationMultiModalInputParam __all__ = ["Moderations", "AsyncModerations"] @@ -46,7 +47,7 @@ def with_streaming_response(self) -> ModerationsWithStreamingResponse: def create( self, *, - input: Union[str, List[str]], + input: Union[str, List[str], Iterable[ModerationMultiModalInputParam]], model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -55,20 +56,19 @@ def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ModerationCreateResponse: - """ - Classifies if text is potentially harmful. + """Classifies if text and/or image inputs are potentially harmful. - Args: - input: The input text to classify + Learn more in + the [moderation guide](https://platform.openai.com/docs/guides/moderation). - model: Two content moderations models are available: `text-moderation-stable` and - `text-moderation-latest`. + Args: + input: Input (or inputs) to classify. Can be a single string, an array of strings, or + an array of multi-modal input objects similar to other models. - The default is `text-moderation-latest` which will be automatically upgraded - over time. This ensures you are always using our most accurate model. If you use - `text-moderation-stable`, we will provide advanced notice before updating the - model. Accuracy of `text-moderation-stable` may be slightly lower than for - `text-moderation-latest`. + model: The content moderation model you would like to use. Learn more in + [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + learn about available models + [here](https://platform.openai.com/docs/models/moderation). extra_headers: Send extra headers @@ -117,7 +117,7 @@ def with_streaming_response(self) -> AsyncModerationsWithStreamingResponse: async def create( self, *, - input: Union[str, List[str]], + input: Union[str, List[str], Iterable[ModerationMultiModalInputParam]], model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -126,20 +126,19 @@ async def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ModerationCreateResponse: - """ - Classifies if text is potentially harmful. + """Classifies if text and/or image inputs are potentially harmful. - Args: - input: The input text to classify + Learn more in + the [moderation guide](https://platform.openai.com/docs/guides/moderation). - model: Two content moderations models are available: `text-moderation-stable` and - `text-moderation-latest`. + Args: + input: Input (or inputs) to classify. 
Can be a single string, an array of strings, or + an array of multi-modal input objects similar to other models. - The default is `text-moderation-latest` which will be automatically upgraded - over time. This ensures you are always using our most accurate model. If you use - `text-moderation-stable`, we will provide advanced notice before updating the - model. Accuracy of `text-moderation-stable` may be slightly lower than for - `text-moderation-latest`. + model: The content moderation model you would like to use. Learn more in + [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + learn about available models + [here](https://platform.openai.com/docs/models/moderation). extra_headers: Send extra headers diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index 6223be883d..7677be01b2 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -46,4 +46,7 @@ from .moderation_create_params import ModerationCreateParams as ModerationCreateParams from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse from .moderation_create_response import ModerationCreateResponse as ModerationCreateResponse +from .moderation_text_input_param import ModerationTextInputParam as ModerationTextInputParam from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams +from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam +from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam diff --git a/src/openai/types/moderation.py b/src/openai/types/moderation.py index 5aa691823a..e4ec182ce2 100644 --- a/src/openai/types/moderation.py +++ b/src/openai/types/moderation.py @@ -1,11 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import List +from typing_extensions import Literal from pydantic import Field as FieldInfo from .._models import BaseModel -__all__ = ["Moderation", "Categories", "CategoryScores"] +__all__ = ["Moderation", "Categories", "CategoryAppliedInputTypes", "CategoryScores"] class Categories(BaseModel): @@ -36,6 +38,20 @@ class Categories(BaseModel): orientation, disability status, or caste. """ + illicit: bool + """ + Content that includes instructions or advice that facilitate the planning or + execution of wrongdoing, or that gives advice or instruction on how to commit + illicit acts. For example, "how to shoplift" would fit this category. + """ + + illicit_violent: bool = FieldInfo(alias="illicit/violent") + """ + Content that includes instructions or advice that facilitate the planning or + execution of wrongdoing that also includes violence, or that gives advice or + instruction on the procurement of any weapon. 
+ """ + self_harm: bool = FieldInfo(alias="self-harm") """ Content that promotes, encourages, or depicts acts of self-harm, such as @@ -72,6 +88,47 @@ class Categories(BaseModel): """Content that depicts death, violence, or physical injury in graphic detail.""" +class CategoryAppliedInputTypes(BaseModel): + harassment: List[Literal["text"]] + """The applied input type(s) for the category 'harassment'.""" + + harassment_threatening: List[Literal["text"]] = FieldInfo(alias="harassment/threatening") + """The applied input type(s) for the category 'harassment/threatening'.""" + + hate: List[Literal["text"]] + """The applied input type(s) for the category 'hate'.""" + + hate_threatening: List[Literal["text"]] = FieldInfo(alias="hate/threatening") + """The applied input type(s) for the category 'hate/threatening'.""" + + illicit: List[Literal["text"]] + """The applied input type(s) for the category 'illicit'.""" + + illicit_violent: List[Literal["text"]] = FieldInfo(alias="illicit/violent") + """The applied input type(s) for the category 'illicit/violent'.""" + + self_harm: List[Literal["text", "image"]] = FieldInfo(alias="self-harm") + """The applied input type(s) for the category 'self-harm'.""" + + self_harm_instructions: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/instructions") + """The applied input type(s) for the category 'self-harm/instructions'.""" + + self_harm_intent: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/intent") + """The applied input type(s) for the category 'self-harm/intent'.""" + + sexual: List[Literal["text", "image"]] + """The applied input type(s) for the category 'sexual'.""" + + sexual_minors: List[Literal["text"]] = FieldInfo(alias="sexual/minors") + """The applied input type(s) for the category 'sexual/minors'.""" + + violence: List[Literal["text", "image"]] + """The applied input type(s) for the category 'violence'.""" + + violence_graphic: List[Literal["text", "image"]] = FieldInfo(alias="violence/graphic") + """The applied input type(s) for the category 'violence/graphic'.""" + + class CategoryScores(BaseModel): harassment: float """The score for the category 'harassment'.""" @@ -85,6 +142,12 @@ class CategoryScores(BaseModel): hate_threatening: float = FieldInfo(alias="hate/threatening") """The score for the category 'hate/threatening'.""" + illicit: float + """The score for the category 'illicit'.""" + + illicit_violent: float = FieldInfo(alias="illicit/violent") + """The score for the category 'illicit/violent'.""" + self_harm: float = FieldInfo(alias="self-harm") """The score for the category 'self-harm'.""" @@ -111,6 +174,11 @@ class Moderation(BaseModel): categories: Categories """A list of the categories, and whether they are flagged or not.""" + category_applied_input_types: CategoryAppliedInputTypes + """ + A list of the categories along with the input type(s) that the score applies to. 
+ """ + category_scores: CategoryScores """A list of the categories along with their scores as predicted by model.""" diff --git a/src/openai/types/moderation_create_params.py b/src/openai/types/moderation_create_params.py index 337682194d..3193fd9c2d 100644 --- a/src/openai/types/moderation_create_params.py +++ b/src/openai/types/moderation_create_params.py @@ -2,26 +2,28 @@ from __future__ import annotations -from typing import List, Union +from typing import List, Union, Iterable from typing_extensions import Required, TypedDict from .moderation_model import ModerationModel +from .moderation_multi_modal_input_param import ModerationMultiModalInputParam __all__ = ["ModerationCreateParams"] class ModerationCreateParams(TypedDict, total=False): - input: Required[Union[str, List[str]]] - """The input text to classify""" + input: Required[Union[str, List[str], Iterable[ModerationMultiModalInputParam]]] + """Input (or inputs) to classify. - model: Union[str, ModerationModel] + Can be a single string, an array of strings, or an array of multi-modal input + objects similar to other models. """ - Two content moderations models are available: `text-moderation-stable` and - `text-moderation-latest`. - - The default is `text-moderation-latest` which will be automatically upgraded - over time. This ensures you are always using our most accurate model. If you use - `text-moderation-stable`, we will provide advanced notice before updating the - model. Accuracy of `text-moderation-stable` may be slightly lower than for - `text-moderation-latest`. + + model: Union[str, ModerationModel] + """The content moderation model you would like to use. + + Learn more in + [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + learn about available models + [here](https://platform.openai.com/docs/models/moderation). """ diff --git a/src/openai/types/moderation_image_url_input_param.py b/src/openai/types/moderation_image_url_input_param.py new file mode 100644 index 0000000000..9a69a6a257 --- /dev/null +++ b/src/openai/types/moderation_image_url_input_param.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ModerationImageURLInputParam", "ImageURL"] + + +class ImageURL(TypedDict, total=False): + url: Required[str] + """Either a URL of the image or the base64 encoded image data.""" + + +class ModerationImageURLInputParam(TypedDict, total=False): + image_url: Required[ImageURL] + """Contains either an image URL or a data URL for a base64 encoded image.""" + + type: Required[Literal["image_url"]] + """Always `image_url`.""" diff --git a/src/openai/types/moderation_model.py b/src/openai/types/moderation_model.py index f549aeeb7a..64954c4547 100644 --- a/src/openai/types/moderation_model.py +++ b/src/openai/types/moderation_model.py @@ -4,4 +4,6 @@ __all__ = ["ModerationModel"] -ModerationModel: TypeAlias = Literal["text-moderation-latest", "text-moderation-stable"] +ModerationModel: TypeAlias = Literal[ + "omni-moderation-latest", "omni-moderation-2024-09-26", "text-moderation-latest", "text-moderation-stable" +] diff --git a/src/openai/types/moderation_multi_modal_input_param.py b/src/openai/types/moderation_multi_modal_input_param.py new file mode 100644 index 0000000000..4314e7b031 --- /dev/null +++ b/src/openai/types/moderation_multi_modal_input_param.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from .moderation_text_input_param import ModerationTextInputParam +from .moderation_image_url_input_param import ModerationImageURLInputParam + +__all__ = ["ModerationMultiModalInputParam"] + +ModerationMultiModalInputParam: TypeAlias = Union[ModerationImageURLInputParam, ModerationTextInputParam] diff --git a/src/openai/types/moderation_text_input_param.py b/src/openai/types/moderation_text_input_param.py new file mode 100644 index 0000000000..e5da53337b --- /dev/null +++ b/src/openai/types/moderation_text_input_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ModerationTextInputParam"] + + +class ModerationTextInputParam(TypedDict, total=False): + text: Required[str] + """A string of text to classify.""" + + type: Required[Literal["text"]] + """Always `text`.""" diff --git a/tests/api_resources/test_moderations.py b/tests/api_resources/test_moderations.py index 94b9ecd31b..bbdeb63e49 100644 --- a/tests/api_resources/test_moderations.py +++ b/tests/api_resources/test_moderations.py @@ -28,7 +28,7 @@ def test_method_create(self, client: OpenAI) -> None: def test_method_create_with_all_params(self, client: OpenAI) -> None: moderation = client.moderations.create( input="I want to kill them.", - model="text-moderation-stable", + model="omni-moderation-2024-09-26", ) assert_matches_type(ModerationCreateResponse, moderation, path=["response"]) @@ -71,7 +71,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: moderation = await async_client.moderations.create( input="I want to kill them.", - model="text-moderation-stable", + model="omni-moderation-2024-09-26", ) assert_matches_type(ModerationCreateResponse, moderation, path=["response"]) From 1f3f47846f0dff3d53c9936808ff112752112189 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 27 Sep 2024 22:58:34 +0000 Subject: [PATCH 085/192] fix(client): correct types for transcriptions / translations (#1757) --- .stats.yml | 2 +- api.md | 14 ++++-- src/openai/resources/audio/transcriptions.py | 46 ++++++++++------- src/openai/resources/audio/translations.py | 46 ++++++++++------- src/openai/types/audio/__init__.py | 6 +++ .../audio/transcription_create_response.py | 11 +++++ .../types/audio/transcription_segment.py | 49 +++++++++++++++++++ .../types/audio/transcription_verbose.py | 26 ++++++++++ src/openai/types/audio/transcription_word.py | 18 +++++++ .../audio/translation_create_response.py | 11 +++++ src/openai/types/audio/translation_verbose.py | 22 +++++++++ .../audio/test_transcriptions.py | 18 +++---- .../api_resources/audio/test_translations.py | 18 +++---- 13 files changed, 228 insertions(+), 59 deletions(-) create mode 100644 src/openai/types/audio/transcription_create_response.py create mode 100644 src/openai/types/audio/transcription_segment.py create mode 100644 src/openai/types/audio/transcription_verbose.py create mode 100644 src/openai/types/audio/transcription_word.py create mode 100644 src/openai/types/audio/translation_create_response.py create mode 100644 src/openai/types/audio/translation_verbose.py diff --git a/.stats.yml b/.stats.yml index 0998368a4c..68789976bf 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-17ddd746c775ca4d4fbe64e5621ac30756ef09c061ff6313190b6ec162222d4c.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-71e58a77027c67e003fdd1b1ac8ac11557d8bfabc7666d1a827c6b1ca8ab98b5.yml diff --git a/api.md b/api.md index 785962f429..3a8a7c595c 100644 --- a/api.md +++ b/api.md @@ -121,24 +121,30 @@ from openai.types import AudioModel, AudioResponseFormat Types: ```python -from openai.types.audio import Transcription +from openai.types.audio import ( + Transcription, + TranscriptionSegment, + TranscriptionVerbose, + TranscriptionWord, + 
TranscriptionCreateResponse, +) ``` Methods: -- client.audio.transcriptions.create(\*\*params) -> Transcription +- client.audio.transcriptions.create(\*\*params) -> TranscriptionCreateResponse ## Translations Types: ```python -from openai.types.audio import Translation +from openai.types.audio import Translation, TranslationVerbose, TranslationCreateResponse ``` Methods: -- client.audio.translations.create(\*\*params) -> Translation +- client.audio.translations.create(\*\*params) -> TranslationCreateResponse ## Speech diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py index fd042d1ac3..ccff507a41 100644 --- a/src/openai/resources/audio/transcriptions.py +++ b/src/openai/resources/audio/transcriptions.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Union, Mapping, cast +from typing import Any, List, Union, Mapping, cast from typing_extensions import Literal import httpx @@ -22,8 +22,8 @@ from ...types.audio import transcription_create_params from ..._base_client import make_request_options from ...types.audio_model import AudioModel -from ...types.audio.transcription import Transcription from ...types.audio_response_format import AudioResponseFormat +from ...types.audio.transcription_create_response import TranscriptionCreateResponse __all__ = ["Transcriptions", "AsyncTranscriptions"] @@ -64,7 +64,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Transcription: + ) -> TranscriptionCreateResponse: """ Transcribes audio into the input language. @@ -124,14 +124,19 @@ def create( # sent to the server will contain a `boundary` parameter, e.g. # multipart/form-data; boundary=---abc-- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._post( - "/audio/transcriptions", - body=maybe_transform(body, transcription_create_params.TranscriptionCreateParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + return cast( + TranscriptionCreateResponse, + self._post( + "/audio/transcriptions", + body=maybe_transform(body, transcription_create_params.TranscriptionCreateParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast( + Any, TranscriptionCreateResponse + ), # Union types cannot be passed in as arguments in the type system ), - cast_to=Transcription, ) @@ -171,7 +176,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Transcription: + ) -> TranscriptionCreateResponse: """ Transcribes audio into the input language. @@ -231,14 +236,19 @@ async def create( # sent to the server will contain a `boundary` parameter, e.g. 
# multipart/form-data; boundary=---abc-- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return await self._post( - "/audio/transcriptions", - body=await async_maybe_transform(body, transcription_create_params.TranscriptionCreateParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + return cast( + TranscriptionCreateResponse, + await self._post( + "/audio/transcriptions", + body=await async_maybe_transform(body, transcription_create_params.TranscriptionCreateParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast( + Any, TranscriptionCreateResponse + ), # Union types cannot be passed in as arguments in the type system ), - cast_to=Transcription, ) diff --git a/src/openai/resources/audio/translations.py b/src/openai/resources/audio/translations.py index fe08dd550e..27475f1a59 100644 --- a/src/openai/resources/audio/translations.py +++ b/src/openai/resources/audio/translations.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Union, Mapping, cast +from typing import Any, Union, Mapping, cast import httpx @@ -21,8 +21,8 @@ from ...types.audio import translation_create_params from ..._base_client import make_request_options from ...types.audio_model import AudioModel -from ...types.audio.translation import Translation from ...types.audio_response_format import AudioResponseFormat +from ...types.audio.translation_create_response import TranslationCreateResponse __all__ = ["Translations", "AsyncTranslations"] @@ -61,7 +61,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Translation: + ) -> TranslationCreateResponse: """ Translates audio into English. @@ -108,14 +108,19 @@ def create( # sent to the server will contain a `boundary` parameter, e.g. # multipart/form-data; boundary=---abc-- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._post( - "/audio/translations", - body=maybe_transform(body, translation_create_params.TranslationCreateParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + return cast( + TranslationCreateResponse, + self._post( + "/audio/translations", + body=maybe_transform(body, translation_create_params.TranslationCreateParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast( + Any, TranslationCreateResponse + ), # Union types cannot be passed in as arguments in the type system ), - cast_to=Translation, ) @@ -153,7 +158,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Translation: + ) -> TranslationCreateResponse: """ Translates audio into English. @@ -200,14 +205,19 @@ async def create( # sent to the server will contain a `boundary` parameter, e.g. 
# multipart/form-data; boundary=---abc-- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return await self._post( - "/audio/translations", - body=await async_maybe_transform(body, translation_create_params.TranslationCreateParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + return cast( + TranslationCreateResponse, + await self._post( + "/audio/translations", + body=await async_maybe_transform(body, translation_create_params.TranslationCreateParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast( + Any, TranslationCreateResponse + ), # Union types cannot be passed in as arguments in the type system ), - cast_to=Translation, ) diff --git a/src/openai/types/audio/__init__.py b/src/openai/types/audio/__init__.py index 1de5c0ff82..822e0f3a8d 100644 --- a/src/openai/types/audio/__init__.py +++ b/src/openai/types/audio/__init__.py @@ -5,6 +5,12 @@ from .translation import Translation as Translation from .speech_model import SpeechModel as SpeechModel from .transcription import Transcription as Transcription +from .transcription_word import TranscriptionWord as TranscriptionWord +from .translation_verbose import TranslationVerbose as TranslationVerbose from .speech_create_params import SpeechCreateParams as SpeechCreateParams +from .transcription_segment import TranscriptionSegment as TranscriptionSegment +from .transcription_verbose import TranscriptionVerbose as TranscriptionVerbose from .translation_create_params import TranslationCreateParams as TranslationCreateParams from .transcription_create_params import TranscriptionCreateParams as TranscriptionCreateParams +from .translation_create_response import TranslationCreateResponse as TranslationCreateResponse +from .transcription_create_response import TranscriptionCreateResponse as TranscriptionCreateResponse diff --git a/src/openai/types/audio/transcription_create_response.py b/src/openai/types/audio/transcription_create_response.py new file mode 100644 index 0000000000..2f7bed8114 --- /dev/null +++ b/src/openai/types/audio/transcription_create_response.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import TypeAlias + +from .transcription import Transcription +from .transcription_verbose import TranscriptionVerbose + +__all__ = ["TranscriptionCreateResponse"] + +TranscriptionCreateResponse: TypeAlias = Union[Transcription, TranscriptionVerbose] diff --git a/src/openai/types/audio/transcription_segment.py b/src/openai/types/audio/transcription_segment.py new file mode 100644 index 0000000000..522c401ebb --- /dev/null +++ b/src/openai/types/audio/transcription_segment.py @@ -0,0 +1,49 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List + +from ..._models import BaseModel + +__all__ = ["TranscriptionSegment"] + + +class TranscriptionSegment(BaseModel): + id: int + """Unique identifier of the segment.""" + + avg_logprob: float + """Average logprob of the segment. + + If the value is lower than -1, consider the logprobs failed. + """ + + compression_ratio: float + """Compression ratio of the segment. + + If the value is greater than 2.4, consider the compression failed. 
+ """ + + end: float + """End time of the segment in seconds.""" + + no_speech_prob: float + """Probability of no speech in the segment. + + If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this + segment silent. + """ + + seek: int + """Seek offset of the segment.""" + + start: float + """Start time of the segment in seconds.""" + + temperature: float + """Temperature parameter used for generating the segment.""" + + text: str + """Text content of the segment.""" + + tokens: List[int] + """Array of token IDs for the text content.""" diff --git a/src/openai/types/audio/transcription_verbose.py b/src/openai/types/audio/transcription_verbose.py new file mode 100644 index 0000000000..3b18fa4871 --- /dev/null +++ b/src/openai/types/audio/transcription_verbose.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from .transcription_word import TranscriptionWord +from .transcription_segment import TranscriptionSegment + +__all__ = ["TranscriptionVerbose"] + + +class TranscriptionVerbose(BaseModel): + duration: str + """The duration of the input audio.""" + + language: str + """The language of the input audio.""" + + text: str + """The transcribed text.""" + + segments: Optional[List[TranscriptionSegment]] = None + """Segments of the transcribed text and their corresponding details.""" + + words: Optional[List[TranscriptionWord]] = None + """Extracted words and their corresponding timestamps.""" diff --git a/src/openai/types/audio/transcription_word.py b/src/openai/types/audio/transcription_word.py new file mode 100644 index 0000000000..55b3c00880 --- /dev/null +++ b/src/openai/types/audio/transcription_word.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + + + +from ..._models import BaseModel + +__all__ = ["TranscriptionWord"] + + +class TranscriptionWord(BaseModel): + end: float + """End time of the word in seconds.""" + + start: float + """Start time of the word in seconds.""" + + word: str + """The text content of the word.""" diff --git a/src/openai/types/audio/translation_create_response.py b/src/openai/types/audio/translation_create_response.py new file mode 100644 index 0000000000..9953813c08 --- /dev/null +++ b/src/openai/types/audio/translation_create_response.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import TypeAlias + +from .translation import Translation +from .translation_verbose import TranslationVerbose + +__all__ = ["TranslationCreateResponse"] + +TranslationCreateResponse: TypeAlias = Union[Translation, TranslationVerbose] diff --git a/src/openai/types/audio/translation_verbose.py b/src/openai/types/audio/translation_verbose.py new file mode 100644 index 0000000000..5901ae7535 --- /dev/null +++ b/src/openai/types/audio/translation_verbose.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ..._models import BaseModel +from .transcription_segment import TranscriptionSegment + +__all__ = ["TranslationVerbose"] + + +class TranslationVerbose(BaseModel): + duration: str + """The duration of the input audio.""" + + language: str + """The language of the output translation (always `english`).""" + + text: str + """The translated text.""" + + segments: Optional[List[TranscriptionSegment]] = None + """Segments of the translated text and their corresponding details.""" diff --git a/tests/api_resources/audio/test_transcriptions.py b/tests/api_resources/audio/test_transcriptions.py index a459a34c68..3db013b079 100644 --- a/tests/api_resources/audio/test_transcriptions.py +++ b/tests/api_resources/audio/test_transcriptions.py @@ -9,7 +9,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type -from openai.types.audio import Transcription +from openai.types.audio import TranscriptionCreateResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010/") @@ -23,7 +23,7 @@ def test_method_create(self, client: OpenAI) -> None: file=b"raw file contents", model="whisper-1", ) - assert_matches_type(Transcription, transcription, path=["response"]) + assert_matches_type(TranscriptionCreateResponse, transcription, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: @@ -36,7 +36,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: temperature=0, timestamp_granularities=["word", "segment"], ) - assert_matches_type(Transcription, transcription, path=["response"]) + assert_matches_type(TranscriptionCreateResponse, transcription, path=["response"]) @parametrize def test_raw_response_create(self, client: OpenAI) -> None: @@ -48,7 +48,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" transcription = response.parse() - assert_matches_type(Transcription, transcription, path=["response"]) + assert_matches_type(TranscriptionCreateResponse, transcription, path=["response"]) @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: @@ -60,7 +60,7 @@ def test_streaming_response_create(self, client: OpenAI) -> None: assert response.http_request.headers.get("X-Stainless-Lang") == "python" transcription = response.parse() - assert_matches_type(Transcription, transcription, path=["response"]) + assert_matches_type(TranscriptionCreateResponse, transcription, path=["response"]) assert cast(Any, response.is_closed) is True @@ -74,7 +74,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: file=b"raw file contents", model="whisper-1", ) - assert_matches_type(Transcription, transcription, path=["response"]) + assert_matches_type(TranscriptionCreateResponse, transcription, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: @@ -87,7 +87,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> temperature=0, timestamp_granularities=["word", "segment"], ) - assert_matches_type(Transcription, transcription, path=["response"]) + assert_matches_type(TranscriptionCreateResponse, transcription, path=["response"]) @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: @@ -99,7 +99,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: assert 
response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" transcription = response.parse() - assert_matches_type(Transcription, transcription, path=["response"]) + assert_matches_type(TranscriptionCreateResponse, transcription, path=["response"]) @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: @@ -111,6 +111,6 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> Non assert response.http_request.headers.get("X-Stainless-Lang") == "python" transcription = await response.parse() - assert_matches_type(Transcription, transcription, path=["response"]) + assert_matches_type(TranscriptionCreateResponse, transcription, path=["response"]) assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/audio/test_translations.py b/tests/api_resources/audio/test_translations.py index b048a1af12..e12ab7e6c0 100644 --- a/tests/api_resources/audio/test_translations.py +++ b/tests/api_resources/audio/test_translations.py @@ -9,7 +9,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type -from openai.types.audio import Translation +from openai.types.audio import TranslationCreateResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010/") @@ -23,7 +23,7 @@ def test_method_create(self, client: OpenAI) -> None: file=b"raw file contents", model="whisper-1", ) - assert_matches_type(Translation, translation, path=["response"]) + assert_matches_type(TranslationCreateResponse, translation, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: @@ -34,7 +34,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: response_format="json", temperature=0, ) - assert_matches_type(Translation, translation, path=["response"]) + assert_matches_type(TranslationCreateResponse, translation, path=["response"]) @parametrize def test_raw_response_create(self, client: OpenAI) -> None: @@ -46,7 +46,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" translation = response.parse() - assert_matches_type(Translation, translation, path=["response"]) + assert_matches_type(TranslationCreateResponse, translation, path=["response"]) @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: @@ -58,7 +58,7 @@ def test_streaming_response_create(self, client: OpenAI) -> None: assert response.http_request.headers.get("X-Stainless-Lang") == "python" translation = response.parse() - assert_matches_type(Translation, translation, path=["response"]) + assert_matches_type(TranslationCreateResponse, translation, path=["response"]) assert cast(Any, response.is_closed) is True @@ -72,7 +72,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: file=b"raw file contents", model="whisper-1", ) - assert_matches_type(Translation, translation, path=["response"]) + assert_matches_type(TranslationCreateResponse, translation, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: @@ -83,7 +83,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> response_format="json", temperature=0, ) - assert_matches_type(Translation, translation, path=["response"]) + assert_matches_type(TranslationCreateResponse, translation, path=["response"]) @parametrize async def 
test_raw_response_create(self, async_client: AsyncOpenAI) -> None: @@ -95,7 +95,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" translation = response.parse() - assert_matches_type(Translation, translation, path=["response"]) + assert_matches_type(TranslationCreateResponse, translation, path=["response"]) @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: @@ -107,6 +107,6 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> Non assert response.http_request.headers.get("X-Stainless-Lang") == "python" translation = await response.parse() - assert_matches_type(Translation, translation, path=["response"]) + assert_matches_type(TranslationCreateResponse, translation, path=["response"]) assert cast(Any, response.is_closed) is True From b5e39d936fb26d3afa9d7cbda20d99b58d4b4fa6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2024 14:51:54 +0000 Subject: [PATCH 086/192] chore(docs): fix maxium typo (#1762) --- .stats.yml | 2 +- src/openai/resources/beta/assistants.py | 8 ++++---- src/openai/resources/beta/threads/messages.py | 8 ++++---- .../resources/beta/threads/runs/runs.py | 16 +++++++-------- src/openai/resources/beta/threads/threads.py | 20 +++++++++---------- .../beta/vector_stores/vector_stores.py | 8 ++++---- src/openai/types/batch.py | 2 +- src/openai/types/beta/assistant.py | 2 +- .../types/beta/assistant_create_params.py | 4 ++-- .../types/beta/assistant_update_params.py | 2 +- src/openai/types/beta/thread.py | 2 +- .../beta/thread_create_and_run_params.py | 8 ++++---- src/openai/types/beta/thread_create_params.py | 6 +++--- src/openai/types/beta/thread_update_params.py | 2 +- src/openai/types/beta/threads/message.py | 2 +- .../beta/threads/message_create_params.py | 2 +- .../beta/threads/message_update_params.py | 2 +- src/openai/types/beta/threads/run.py | 2 +- .../types/beta/threads/run_create_params.py | 4 ++-- .../types/beta/threads/run_update_params.py | 2 +- .../types/beta/threads/runs/run_step.py | 2 +- src/openai/types/beta/vector_store.py | 2 +- .../types/beta/vector_store_create_params.py | 2 +- .../types/beta/vector_store_update_params.py | 2 +- 24 files changed, 56 insertions(+), 56 deletions(-) diff --git a/.stats.yml b/.stats.yml index 68789976bf..67778eef99 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-71e58a77027c67e003fdd1b1ac8ac11557d8bfabc7666d1a827c6b1ca8ab98b5.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8ad878332083dd506a478a293db78dc9e7b1b2124f2682e1d991225bc5bbcc3b.yml diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index 5d8c6ec331..2ebef183b6 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -89,7 +89,7 @@ def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. name: The name of the assistant. 
The maximum length is 256 characters. @@ -233,7 +233,7 @@ def update( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: ID of the model to use. You can use the @@ -475,7 +475,7 @@ async def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. name: The name of the assistant. The maximum length is 256 characters. @@ -619,7 +619,7 @@ async def update( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: ID of the model to use. You can use the diff --git a/src/openai/resources/beta/threads/messages.py b/src/openai/resources/beta/threads/messages.py index 0cf7a8d5ea..9e6ae8811a 100644 --- a/src/openai/resources/beta/threads/messages.py +++ b/src/openai/resources/beta/threads/messages.py @@ -79,7 +79,7 @@ def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. extra_headers: Send extra headers @@ -166,7 +166,7 @@ def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. extra_headers: Send extra headers @@ -353,7 +353,7 @@ async def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. extra_headers: Send extra headers @@ -440,7 +440,7 @@ async def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
extra_headers: Send extra headers diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index eb6a156e2f..68efaf1782 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -138,7 +138,7 @@ def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -282,7 +282,7 @@ def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -422,7 +422,7 @@ def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -612,7 +612,7 @@ def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. extra_headers: Send extra headers @@ -986,7 +986,7 @@ async def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -1130,7 +1130,7 @@ async def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -1270,7 +1270,7 @@ async def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -1460,7 +1460,7 @@ async def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. extra_headers: Send extra headers diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index 8d49dedf56..17f5c6970b 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -102,7 +102,7 @@ def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. tool_resources: A set of resources that are made available to the assistant's tools in this @@ -188,7 +188,7 @@ def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. tool_resources: A set of resources that are made available to the assistant's tools in this @@ -308,7 +308,7 @@ def create_and_run( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -442,7 +442,7 @@ def create_and_run( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -572,7 +572,7 @@ def create_and_run( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -756,7 +756,7 @@ async def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
tool_resources: A set of resources that are made available to the assistant's tools in this @@ -842,7 +842,7 @@ async def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. tool_resources: A set of resources that are made available to the assistant's tools in this @@ -962,7 +962,7 @@ async def create_and_run( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -1096,7 +1096,7 @@ async def create_and_run( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -1226,7 +1226,7 @@ async def create_and_run( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to diff --git a/src/openai/resources/beta/vector_stores/vector_stores.py b/src/openai/resources/beta/vector_stores/vector_stores.py index 06e26852b4..d69add7b26 100644 --- a/src/openai/resources/beta/vector_stores/vector_stores.py +++ b/src/openai/resources/beta/vector_stores/vector_stores.py @@ -105,7 +105,7 @@ def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. name: The name of the vector store. @@ -193,7 +193,7 @@ def update( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. name: The name of the vector store. @@ -383,7 +383,7 @@ async def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. name: The name of the vector store. 
@@ -471,7 +471,7 @@ async def update( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. name: The name of the vector store. diff --git a/src/openai/types/batch.py b/src/openai/types/batch.py index 90f6d79572..ac3d7ea119 100644 --- a/src/openai/types/batch.py +++ b/src/openai/types/batch.py @@ -75,7 +75,7 @@ class Batch(BaseModel): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ output_file_id: Optional[str] = None diff --git a/src/openai/types/beta/assistant.py b/src/openai/types/beta/assistant.py index b4da08745d..ea97de440f 100644 --- a/src/openai/types/beta/assistant.py +++ b/src/openai/types/beta/assistant.py @@ -56,7 +56,7 @@ class Assistant(BaseModel): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ model: str diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index eca4da0a2b..e11f842f05 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -44,7 +44,7 @@ class AssistantCreateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ name: Optional[str] @@ -135,7 +135,7 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False): This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can - be a maxium of 512 characters long. + be a maximum of 512 characters long. """ diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py index 5396233937..c4598df507 100644 --- a/src/openai/types/beta/assistant_update_params.py +++ b/src/openai/types/beta/assistant_update_params.py @@ -26,7 +26,7 @@ class AssistantUpdateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ model: str diff --git a/src/openai/types/beta/thread.py b/src/openai/types/beta/thread.py index 6f7a6c7d0c..37d50ccb93 100644 --- a/src/openai/types/beta/thread.py +++ b/src/openai/types/beta/thread.py @@ -45,7 +45,7 @@ class Thread(BaseModel): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. 
""" object: Literal["thread"] diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index 20d525fa1a..64ee6a8710 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -72,7 +72,7 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ model: Union[str, ChatModel, None] @@ -202,7 +202,7 @@ class ThreadMessage(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ @@ -235,7 +235,7 @@ class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False): This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can - be a maxium of 512 characters long. + be a maximum of 512 characters long. """ @@ -275,7 +275,7 @@ class Thread(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ tool_resources: Optional[ThreadToolResources] diff --git a/src/openai/types/beta/thread_create_params.py b/src/openai/types/beta/thread_create_params.py index 729164b481..3ac6c7d69b 100644 --- a/src/openai/types/beta/thread_create_params.py +++ b/src/openai/types/beta/thread_create_params.py @@ -34,7 +34,7 @@ class ThreadCreateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ tool_resources: Optional[ToolResources] @@ -83,7 +83,7 @@ class Message(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ @@ -116,7 +116,7 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False): This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can - be a maxium of 512 characters long. + be a maximum of 512 characters long. """ diff --git a/src/openai/types/beta/thread_update_params.py b/src/openai/types/beta/thread_update_params.py index 7210ab77c9..78c5ec4f2e 100644 --- a/src/openai/types/beta/thread_update_params.py +++ b/src/openai/types/beta/thread_update_params.py @@ -14,7 +14,7 @@ class ThreadUpdateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. 
""" tool_resources: Optional[ToolResources] diff --git a/src/openai/types/beta/threads/message.py b/src/openai/types/beta/threads/message.py index 298a1d4273..63c5c4800a 100644 --- a/src/openai/types/beta/threads/message.py +++ b/src/openai/types/beta/threads/message.py @@ -71,7 +71,7 @@ class Message(BaseModel): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ object: Literal["thread.message"] diff --git a/src/openai/types/beta/threads/message_create_params.py b/src/openai/types/beta/threads/message_create_params.py index 2b450deb5d..2c4edfdf71 100644 --- a/src/openai/types/beta/threads/message_create_params.py +++ b/src/openai/types/beta/threads/message_create_params.py @@ -32,7 +32,7 @@ class MessageCreateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ diff --git a/src/openai/types/beta/threads/message_update_params.py b/src/openai/types/beta/threads/message_update_params.py index 7000f33122..e8f8cc910c 100644 --- a/src/openai/types/beta/threads/message_update_params.py +++ b/src/openai/types/beta/threads/message_update_params.py @@ -16,5 +16,5 @@ class MessageUpdateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py index 5abc1de295..e8f2b74dee 100644 --- a/src/openai/types/beta/threads/run.py +++ b/src/openai/types/beta/threads/run.py @@ -138,7 +138,7 @@ class Run(BaseModel): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ model: str diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index 824cb1a041..9767b142e1 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -85,7 +85,7 @@ class RunCreateParamsBase(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ model: Union[str, ChatModel, None] @@ -204,7 +204,7 @@ class AdditionalMessage(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. 
""" diff --git a/src/openai/types/beta/threads/run_update_params.py b/src/openai/types/beta/threads/run_update_params.py index e595eac882..cb4f053645 100644 --- a/src/openai/types/beta/threads/run_update_params.py +++ b/src/openai/types/beta/threads/run_update_params.py @@ -16,5 +16,5 @@ class RunUpdateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ diff --git a/src/openai/types/beta/threads/runs/run_step.py b/src/openai/types/beta/threads/runs/run_step.py index e3163c508b..0445ae360d 100644 --- a/src/openai/types/beta/threads/runs/run_step.py +++ b/src/openai/types/beta/threads/runs/run_step.py @@ -75,7 +75,7 @@ class RunStep(BaseModel): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ object: Literal["thread.run.step"] diff --git a/src/openai/types/beta/vector_store.py b/src/openai/types/beta/vector_store.py index 488961b444..2d3ceea80c 100644 --- a/src/openai/types/beta/vector_store.py +++ b/src/openai/types/beta/vector_store.py @@ -53,7 +53,7 @@ class VectorStore(BaseModel): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ name: str diff --git a/src/openai/types/beta/vector_store_create_params.py b/src/openai/types/beta/vector_store_create_params.py index a8f03a89b9..4fc7c38927 100644 --- a/src/openai/types/beta/vector_store_create_params.py +++ b/src/openai/types/beta/vector_store_create_params.py @@ -33,7 +33,7 @@ class VectorStoreCreateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ name: str diff --git a/src/openai/types/beta/vector_store_update_params.py b/src/openai/types/beta/vector_store_update_params.py index 0f9593e476..ff6c068efb 100644 --- a/src/openai/types/beta/vector_store_update_params.py +++ b/src/openai/types/beta/vector_store_update_params.py @@ -17,7 +17,7 @@ class VectorStoreUpdateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. 
""" name: Optional[str] From 7c04ae7806c87d43eb646ae6f3d12ac4d725ab04 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 17:52:00 +0000 Subject: [PATCH 087/192] feat(api): support storing chat completions, enabling evals and model distillation in the dashboard (#1766) Learn more at http://openai.com/devday2024 --- .stats.yml | 2 +- src/openai/resources/chat/completions.py | 104 ++++++++++++++++-- .../types/chat/completion_create_params.py | 18 ++- src/openai/types/chat_model.py | 1 + src/openai/types/completion_usage.py | 16 ++- tests/api_resources/chat/test_completions.py | 8 ++ 6 files changed, 134 insertions(+), 15 deletions(-) diff --git a/.stats.yml b/.stats.yml index 67778eef99..ece287351b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8ad878332083dd506a478a293db78dc9e7b1b2124f2682e1d991225bc5bbcc3b.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-52b934aee6468039ec7f4ce046a282b5fbce114afc708e70f17121df654f71da.yml diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index 28676d53eb..eff194d00c 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -64,6 +64,7 @@ def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -71,6 +72,7 @@ def create( seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -90,8 +92,12 @@ def create( Creates a model response for the given chat conversation. Args: - messages: A list of messages comprising the conversation so far. - [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + messages: A list of messages comprising the conversation so far. Depending on the + [model](https://platform.openai.com/docs/models) you use, different message + types (modalities) are supported, like + [text](https://platform.openai.com/docs/guides/text-generation), + [images](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) @@ -143,6 +149,9 @@ def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). + metadata: Developer-defined tags and values used for filtering completions in the + [dashboard](https://platform.openai.com/completions). + n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. 
@@ -202,6 +211,9 @@ def create( stop: Up to 4 sequences where the API will stop generating further tokens. + store: Whether or not to store the output of this completion request for traffic + logging in the [dashboard](https://platform.openai.com/completions). + stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) @@ -269,6 +281,7 @@ def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -276,6 +289,7 @@ def create( seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, @@ -294,8 +308,12 @@ def create( Creates a model response for the given chat conversation. Args: - messages: A list of messages comprising the conversation so far. - [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + messages: A list of messages comprising the conversation so far. Depending on the + [model](https://platform.openai.com/docs/models) you use, different message + types (modalities) are supported, like + [text](https://platform.openai.com/docs/guides/text-generation), + [images](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) @@ -354,6 +372,9 @@ def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). + metadata: Developer-defined tags and values used for filtering completions in the + [dashboard](https://platform.openai.com/completions). + n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @@ -413,6 +434,9 @@ def create( stop: Up to 4 sequences where the API will stop generating further tokens. + store: Whether or not to store the output of this completion request for traffic + logging in the [dashboard](https://platform.openai.com/completions). + stream_options: Options for streaming response. Only set this when you set `stream: true`. temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will @@ -473,6 +497,7 @@ def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -480,6 +505,7 @@ def create( seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, @@ -498,8 +524,12 @@ def create( Creates a model response for the given chat conversation. Args: - messages: A list of messages comprising the conversation so far. - [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + messages: A list of messages comprising the conversation so far. Depending on the + [model](https://platform.openai.com/docs/models) you use, different message + types (modalities) are supported, like + [text](https://platform.openai.com/docs/guides/text-generation), + [images](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) @@ -558,6 +588,9 @@ def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). + metadata: Developer-defined tags and values used for filtering completions in the + [dashboard](https://platform.openai.com/completions). + n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @@ -617,6 +650,9 @@ def create( stop: Up to 4 sequences where the API will stop generating further tokens. + store: Whether or not to store the output of this completion request for traffic + logging in the [dashboard](https://platform.openai.com/completions). + stream_options: Options for streaming response. Only set this when you set `stream: true`. temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will @@ -676,6 +712,7 @@ def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -683,6 +720,7 @@ def create( seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -711,6 +749,7 @@ def create( "logprobs": logprobs, "max_completion_tokens": max_completion_tokens, "max_tokens": max_tokens, + "metadata": metadata, "n": n, "parallel_tool_calls": parallel_tool_calls, "presence_penalty": presence_penalty, @@ -718,6 +757,7 @@ def create( "seed": seed, "service_tier": service_tier, "stop": stop, + "store": store, "stream": stream, "stream_options": stream_options, "temperature": temperature, @@ -771,6 +811,7 @@ async def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -778,6 +819,7 @@ async def create( seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -797,8 +839,12 @@ async def create( Creates a model response for the given chat conversation. Args: - messages: A list of messages comprising the conversation so far. - [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + messages: A list of messages comprising the conversation so far. Depending on the + [model](https://platform.openai.com/docs/models) you use, different message + types (modalities) are supported, like + [text](https://platform.openai.com/docs/guides/text-generation), + [images](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) @@ -850,6 +896,9 @@ async def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). + metadata: Developer-defined tags and values used for filtering completions in the + [dashboard](https://platform.openai.com/completions). + n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. 
@@ -909,6 +958,9 @@ async def create( stop: Up to 4 sequences where the API will stop generating further tokens. + store: Whether or not to store the output of this completion request for traffic + logging in the [dashboard](https://platform.openai.com/completions). + stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) @@ -976,6 +1028,7 @@ async def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -983,6 +1036,7 @@ async def create( seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, @@ -1001,8 +1055,12 @@ async def create( Creates a model response for the given chat conversation. Args: - messages: A list of messages comprising the conversation so far. - [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + messages: A list of messages comprising the conversation so far. Depending on the + [model](https://platform.openai.com/docs/models) you use, different message + types (modalities) are supported, like + [text](https://platform.openai.com/docs/guides/text-generation), + [images](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) @@ -1061,6 +1119,9 @@ async def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). + metadata: Developer-defined tags and values used for filtering completions in the + [dashboard](https://platform.openai.com/completions). + n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @@ -1120,6 +1181,9 @@ async def create( stop: Up to 4 sequences where the API will stop generating further tokens. + store: Whether or not to store the output of this completion request for traffic + logging in the [dashboard](https://platform.openai.com/completions). + stream_options: Options for streaming response. Only set this when you set `stream: true`. temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will @@ -1180,6 +1244,7 @@ async def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -1187,6 +1252,7 @@ async def create( seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, @@ -1205,8 +1271,12 @@ async def create( Creates a model response for the given chat conversation. Args: - messages: A list of messages comprising the conversation so far. - [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + messages: A list of messages comprising the conversation so far. Depending on the + [model](https://platform.openai.com/docs/models) you use, different message + types (modalities) are supported, like + [text](https://platform.openai.com/docs/guides/text-generation), + [images](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) @@ -1265,6 +1335,9 @@ async def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). + metadata: Developer-defined tags and values used for filtering completions in the + [dashboard](https://platform.openai.com/completions). + n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @@ -1324,6 +1397,9 @@ async def create( stop: Up to 4 sequences where the API will stop generating further tokens. + store: Whether or not to store the output of this completion request for traffic + logging in the [dashboard](https://platform.openai.com/completions). + stream_options: Options for streaming response. Only set this when you set `stream: true`. temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will @@ -1383,6 +1459,7 @@ async def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -1390,6 +1467,7 @@ async def create( seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1418,6 +1496,7 @@ async def create( "logprobs": logprobs, "max_completion_tokens": max_completion_tokens, "max_tokens": max_tokens, + "metadata": metadata, "n": n, "parallel_tool_calls": parallel_tool_calls, "presence_penalty": presence_penalty, @@ -1425,6 +1504,7 @@ async def create( "seed": seed, "service_tier": service_tier, "stop": stop, + "store": store, "stream": stream, "stream_options": stream_options, "temperature": temperature, diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 4ed89b00f5..3f55dfbe6e 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -30,7 +30,11 @@ class CompletionCreateParamsBase(TypedDict, total=False): messages: Required[Iterable[ChatCompletionMessageParam]] """A list of messages comprising the conversation so far. - [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + Depending on the [model](https://platform.openai.com/docs/models) you use, + different message types (modalities) are supported, like + [text](https://platform.openai.com/docs/guides/text-generation), + [images](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio). """ model: Required[Union[str, ChatModel]] @@ -105,6 +109,12 @@ class CompletionCreateParamsBase(TypedDict, total=False): [o1 series models](https://platform.openai.com/docs/guides/reasoning). """ + metadata: Optional[Dict[str, str]] + """ + Developer-defined tags and values used for filtering completions in the + [dashboard](https://platform.openai.com/completions). + """ + n: Optional[int] """How many chat completion choices to generate for each input message. @@ -183,6 +193,12 @@ class CompletionCreateParamsBase(TypedDict, total=False): stop: Union[Optional[str], List[str]] """Up to 4 sequences where the API will stop generating further tokens.""" + store: Optional[bool] + """ + Whether or not to store the output of this completion request for traffic + logging in the [dashboard](https://platform.openai.com/completions). + """ + stream_options: Optional[ChatCompletionStreamOptionsParam] """Options for streaming response. 
Only set this when you set `stream: true`.""" diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py index f8438c75c8..f2d5674786 100644 --- a/src/openai/types/chat_model.py +++ b/src/openai/types/chat_model.py @@ -12,6 +12,7 @@ "gpt-4o", "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", + "gpt-4o-realtime-preview-2024-10-01", "chatgpt-4o-latest", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", diff --git a/src/openai/types/completion_usage.py b/src/openai/types/completion_usage.py index a4b9116e35..fe112833e0 100644 --- a/src/openai/types/completion_usage.py +++ b/src/openai/types/completion_usage.py @@ -4,14 +4,25 @@ from .._models import BaseModel -__all__ = ["CompletionUsage", "CompletionTokensDetails"] +__all__ = ["CompletionUsage", "CompletionTokensDetails", "PromptTokensDetails"] class CompletionTokensDetails(BaseModel): + audio_tokens: Optional[int] = None + """Audio input tokens generated by the model.""" + reasoning_tokens: Optional[int] = None """Tokens generated by the model for reasoning.""" +class PromptTokensDetails(BaseModel): + audio_tokens: Optional[int] = None + """Audio input tokens present in the prompt.""" + + cached_tokens: Optional[int] = None + """Cached tokens present in the prompt.""" + + class CompletionUsage(BaseModel): completion_tokens: int """Number of tokens in the generated completion.""" @@ -24,3 +35,6 @@ class CompletionUsage(BaseModel): completion_tokens_details: Optional[CompletionTokensDetails] = None """Breakdown of tokens used in a completion.""" + + prompt_tokens_details: Optional[PromptTokensDetails] = None + """Breakdown of tokens used in the prompt.""" diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index df7bc799df..d353139543 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -56,6 +56,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: logprobs=True, max_completion_tokens=0, max_tokens=0, + metadata={"foo": "string"}, n=1, parallel_tool_calls=True, presence_penalty=-2, @@ -63,6 +64,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: seed=-9007199254740991, service_tier="auto", stop="string", + store=True, stream=False, stream_options={"include_usage": True}, temperature=1, @@ -177,6 +179,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: logprobs=True, max_completion_tokens=0, max_tokens=0, + metadata={"foo": "string"}, n=1, parallel_tool_calls=True, presence_penalty=-2, @@ -184,6 +187,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: seed=-9007199254740991, service_tier="auto", stop="string", + store=True, stream_options={"include_usage": True}, temperature=1, tool_choice="none", @@ -300,6 +304,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn logprobs=True, max_completion_tokens=0, max_tokens=0, + metadata={"foo": "string"}, n=1, parallel_tool_calls=True, presence_penalty=-2, @@ -307,6 +312,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn seed=-9007199254740991, service_tier="auto", stop="string", + store=True, stream=False, stream_options={"include_usage": True}, temperature=1, @@ -421,6 +427,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn logprobs=True, max_completion_tokens=0, max_tokens=0, + metadata={"foo": "string"}, n=1, parallel_tool_calls=True, 
presence_penalty=-2, @@ -428,6 +435,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn seed=-9007199254740991, service_tier="auto", stop="string", + store=True, stream_options={"include_usage": True}, temperature=1, tool_choice="none", From 5d92191dd55f506d5b2ed5365c66df450b4f275d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 19:35:25 +0000 Subject: [PATCH 088/192] docs: improve and reference contributing documentation (#1767) --- CONTRIBUTING.md | 44 ++++++++++++++++++++++++-------------------- README.md | 4 ++++ 2 files changed, 28 insertions(+), 20 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 176b8ffc2d..37e060bdcf 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,9 +2,13 @@ ### With Rye -We use [Rye](https://rye.astral.sh/) to manage dependencies so we highly recommend [installing it](https://rye.astral.sh/guide/installation/) as it will automatically provision a Python environment with the expected Python version. +We use [Rye](https://rye.astral.sh/) to manage dependencies because it will automatically provision a Python environment with the expected Python version. To set it up, run: -After installing Rye, you'll just have to run this command: +```sh +$ ./scripts/bootstrap +``` + +Or [install Rye manually](https://rye.astral.sh/guide/installation/) and run: ```sh $ rye sync --all-features @@ -39,17 +43,17 @@ modify the contents of the `src/openai/lib/` and `examples/` directories. All files in the `examples/` directory are not modified by the generator and can be freely edited or added to. -```bash +```ts # add an example to examples/.py #!/usr/bin/env -S rye run python … ``` -``` -chmod +x examples/.py +```sh +$ chmod +x examples/.py # run the example against your api -./examples/.py +$ ./examples/.py ``` ## Using the repository from source @@ -58,8 +62,8 @@ If you’d like to use the repository from source, you can either install from g To install via git: -```bash -pip install git+ssh://git@github.com/openai/openai-python.git +```sh +$ pip install git+ssh://git@github.com/openai/openai-python.git ``` Alternatively, you can build from source and install the wheel file: @@ -68,29 +72,29 @@ Building this package will create two files in the `dist/` directory, a `.tar.gz To create a distributable version of the library, all you have to do is run this command: -```bash -rye build +```sh +$ rye build # or -python -m build +$ python -m build ``` Then to install: ```sh -pip install ./path-to-wheel-file.whl +$ pip install ./path-to-wheel-file.whl ``` ## Running tests Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests. -```bash +```sh # you will need npm installed -npx prism mock path/to/your/openapi.yml +$ npx prism mock path/to/your/openapi.yml ``` -```bash -rye run pytest +```sh +$ ./scripts/test ``` ## Linting and formatting @@ -100,14 +104,14 @@ This repository uses [ruff](https://github.com/astral-sh/ruff) and To lint: -```bash -rye run lint +```sh +$ ./scripts/lint ``` To format and fix all ruff issues automatically: -```bash -rye run format +```sh +$ ./scripts/format ``` ## Publishing and releases diff --git a/README.md b/README.md index a47b1a2d7c..8873dff24a 100644 --- a/README.md +++ b/README.md @@ -521,3 +521,7 @@ print(openai.__version__) ## Requirements Python 3.7 or higher. + +## Contributing + +See [the contributing documentation](./CONTRIBUTING.md). 
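For quick reference, here is a minimal sketch of how the new `store` and `metadata` chat-completion parameters introduced above might be used. The model name and metadata values are illustrative assumptions, not part of the patch; only parameters and fields that appear in the diff (`store`, `metadata`, `usage.prompt_tokens_details`) are exercised.

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

completion = client.chat.completions.create(
    model="gpt-4o",  # illustrative; any chat model accepted by the endpoint
    messages=[{"role": "user", "content": "Write a one-line greeting."}],
    store=True,  # opt in to storing this completion for the dashboard
    metadata={"project": "sdk-demo"},  # illustrative tags for filtering stored completions
)

print(completion.choices[0].message.content)

# The expanded usage breakdown added in the same change, when the API returns it:
usage = completion.usage
if usage is not None and usage.prompt_tokens_details is not None:
    print("cached prompt tokens:", usage.prompt_tokens_details.cached_tokens)
```

Stored completions can then be filtered in the dashboard by the keys supplied in `metadata`.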
From e446e75abf92132468ee3ac5495bb5a3107ae91a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 20:04:38 +0000 Subject: [PATCH 089/192] docs: fix typo in fenced code block language (#1769) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 37e060bdcf..52c2eb213a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -43,7 +43,7 @@ modify the contents of the `src/openai/lib/` and `examples/` directories. All files in the `examples/` directory are not modified by the generator and can be freely edited or added to. -```ts +```py # add an example to examples/.py #!/usr/bin/env -S rye run python From 10dd8980d03d1c373e6218dc5f43d48f47bee7b2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 14:39:22 +0000 Subject: [PATCH 090/192] chore(internal): add support for parsing bool response content (#1774) --- src/openai/_legacy_response.py | 3 ++ src/openai/_response.py | 3 ++ tests/test_legacy_response.py | 25 +++++++++++++++++ tests/test_response.py | 50 ++++++++++++++++++++++++++++++++++ 4 files changed, 81 insertions(+) diff --git a/src/openai/_legacy_response.py b/src/openai/_legacy_response.py index c42fb8b83e..83a76fe448 100644 --- a/src/openai/_legacy_response.py +++ b/src/openai/_legacy_response.py @@ -255,6 +255,9 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: if cast_to == float: return cast(R, float(response.text)) + if cast_to == bool: + return cast(R, response.text.lower() == "true") + origin = get_origin(cast_to) or cast_to if inspect.isclass(origin) and issubclass(origin, HttpxBinaryResponseContent): diff --git a/src/openai/_response.py b/src/openai/_response.py index f9d91786f6..2c23edf00b 100644 --- a/src/openai/_response.py +++ b/src/openai/_response.py @@ -192,6 +192,9 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: if cast_to == float: return cast(R, float(response.text)) + if cast_to == bool: + return cast(R, response.text.lower() == "true") + origin = get_origin(cast_to) or cast_to # handle the legacy binary response case diff --git a/tests/test_legacy_response.py b/tests/test_legacy_response.py index 3c2df53e58..4f24ce187d 100644 --- a/tests/test_legacy_response.py +++ b/tests/test_legacy_response.py @@ -32,6 +32,31 @@ def test_response_parse_mismatched_basemodel(client: OpenAI) -> None: response.parse(to=PydanticModel) +@pytest.mark.parametrize( + "content, expected", + [ + ("false", False), + ("true", True), + ("False", False), + ("True", True), + ("TrUe", True), + ("FalSe", False), + ], +) +def test_response_parse_bool(client: OpenAI, content: str, expected: bool) -> None: + response = LegacyAPIResponse( + raw=httpx.Response(200, content=content), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + result = response.parse(to=bool) + assert result is expected + + def test_response_parse_custom_stream(client: OpenAI) -> None: response = LegacyAPIResponse( raw=httpx.Response(200, content=b"foo"), diff --git a/tests/test_response.py b/tests/test_response.py index b7d88bdbde..d022306440 100644 --- a/tests/test_response.py +++ b/tests/test_response.py @@ -190,6 +190,56 @@ async def test_async_response_parse_annotated_type(async_client: AsyncOpenAI) -> assert obj.bar == 2 +@pytest.mark.parametrize( + "content, expected", + [ + 
("false", False), + ("true", True), + ("False", False), + ("True", True), + ("TrUe", True), + ("FalSe", False), + ], +) +def test_response_parse_bool(client: OpenAI, content: str, expected: bool) -> None: + response = APIResponse( + raw=httpx.Response(200, content=content), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + result = response.parse(to=bool) + assert result is expected + + +@pytest.mark.parametrize( + "content, expected", + [ + ("false", False), + ("true", True), + ("False", False), + ("True", True), + ("TrUe", True), + ("FalSe", False), + ], +) +async def test_async_response_parse_bool(client: AsyncOpenAI, content: str, expected: bool) -> None: + response = AsyncAPIResponse( + raw=httpx.Response(200, content=content), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + result = await response.parse(to=bool) + assert result is expected + + class OtherModel(BaseModel): a: str From 51e7ebb1a662aedc613cf26eca415d43fedba846 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 13:52:45 +0000 Subject: [PATCH 091/192] fix(client): avoid OverflowError with very large retry counts (#1779) --- src/openai/_base_client.py | 3 ++- tests/test_client.py | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index c4c9803e74..931cbd8534 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -690,7 +690,8 @@ def _calculate_retry_timeout( if retry_after is not None and 0 < retry_after <= 60: return retry_after - nb_retries = max_retries - remaining_retries + # Also cap retry count to 1000 to avoid any potential overflows with `pow` + nb_retries = min(max_retries - remaining_retries, 1000) # Apply exponential backoff, but not more than the max. 
sleep_seconds = min(INITIAL_RETRY_DELAY * pow(2.0, nb_retries), MAX_RETRY_DELAY) diff --git a/tests/test_client.py b/tests/test_client.py index 463174465c..1da35ddd22 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -697,6 +697,7 @@ class Model(BaseModel): [3, "", 0.5], [2, "", 0.5 * 2.0], [1, "", 0.5 * 4.0], + [-1100, "", 7.8], # test large number potentially overflowing ], ) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) @@ -1553,6 +1554,7 @@ class Model(BaseModel): [3, "", 0.5], [2, "", 0.5 * 2.0], [1, "", 0.5 * 4.0], + [-1100, "", 7.8], # test large number potentially overflowing ], ) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) From a929e0d6a8806e68b25a34be3a96873b02ce9565 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 16:43:09 +0000 Subject: [PATCH 092/192] chore: add repr to PageInfo class (#1780) --- src/openai/_base_client.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 931cbd8534..b2929df072 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -144,6 +144,12 @@ def __init__( self.url = url self.params = params + @override + def __repr__(self) -> str: + if self.url: + return f"{self.__class__.__name__}(url={self.url})" + return f"{self.__class__.__name__}(params={self.params})" + class BasePage(GenericModel, Generic[_T]): """ From 9f4f12b02596d2b331a4adbb2713903efd678759 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 16:49:46 +0000 Subject: [PATCH 093/192] feat(api): add gpt-4o-audio-preview model for chat completions (#1796) This enables audio inputs and outputs. 
https://platform.openai.com/docs/guides/audio --- .stats.yml | 2 +- api.md | 4 + src/openai/resources/chat/completions.py | 207 +++++++++++++++--- .../types/beta/assistant_stream_event.py | 5 +- src/openai/types/chat/__init__.py | 6 + ...chat_completion_assistant_message_param.py | 14 +- .../types/chat/chat_completion_audio.py | 27 +++ .../types/chat/chat_completion_audio_param.py | 21 ++ ...mpletion_content_part_input_audio_param.py | 22 ++ .../chat_completion_content_part_param.py | 3 +- .../types/chat/chat_completion_message.py | 8 + .../types/chat/chat_completion_modality.py | 7 + .../types/chat/completion_create_params.py | 30 ++- src/openai/types/chat_model.py | 3 + tests/api_resources/chat/test_completions.py | 20 ++ 15 files changed, 341 insertions(+), 38 deletions(-) create mode 100644 src/openai/types/chat/chat_completion_audio.py create mode 100644 src/openai/types/chat/chat_completion_audio_param.py create mode 100644 src/openai/types/chat/chat_completion_content_part_input_audio_param.py create mode 100644 src/openai/types/chat/chat_completion_modality.py diff --git a/.stats.yml b/.stats.yml index ece287351b..984e8a8d5f 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-52b934aee6468039ec7f4ce046a282b5fbce114afc708e70f17121df654f71da.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8729aaa35436531ab453224af10e67f89677db8f350f0346bb3537489edea649.yml diff --git a/api.md b/api.md index 3a8a7c595c..4600adf77a 100644 --- a/api.md +++ b/api.md @@ -39,9 +39,12 @@ Types: from openai.types.chat import ( ChatCompletion, ChatCompletionAssistantMessageParam, + ChatCompletionAudio, + ChatCompletionAudioParam, ChatCompletionChunk, ChatCompletionContentPart, ChatCompletionContentPartImage, + ChatCompletionContentPartInputAudio, ChatCompletionContentPartRefusal, ChatCompletionContentPartText, ChatCompletionFunctionCallOption, @@ -49,6 +52,7 @@ from openai.types.chat import ( ChatCompletionMessage, ChatCompletionMessageParam, ChatCompletionMessageToolCall, + ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionRole, ChatCompletionStreamOptions, diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index eff194d00c..03919aab2f 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -18,12 +18,17 @@ from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ..._streaming import Stream, AsyncStream -from ...types.chat import completion_create_params +from ...types.chat import ( + ChatCompletionAudioParam, + completion_create_params, +) from ..._base_client import make_request_options from ...types.chat_model import ChatModel from ...types.chat.chat_completion import ChatCompletion from ...types.chat.chat_completion_chunk import ChatCompletionChunk +from ...types.chat.chat_completion_modality import ChatCompletionModality from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam +from ...types.chat.chat_completion_audio_param import ChatCompletionAudioParam from ...types.chat.chat_completion_message_param import ChatCompletionMessageParam from ...types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam from ...types.chat.chat_completion_tool_choice_option_param import 
ChatCompletionToolChoiceOptionParam @@ -57,6 +62,7 @@ def create( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, @@ -65,6 +71,7 @@ def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -88,8 +95,12 @@ def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion: - """ - Creates a model response for the given chat conversation. + """Creates a model response for the given chat conversation. + + Learn more in the + [text generation](https://platform.openai.com/docs/guides/text-generation), + [vision](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio) guides. Args: messages: A list of messages comprising the conversation so far. Depending on the @@ -103,6 +114,10 @@ def create( [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + audio: Parameters for audio output. Required when audio output is requested with + `modalities: ["audio"]`. + [Learn more](https://platform.openai.com/docs/guides/audio). + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. @@ -150,7 +165,18 @@ def create( [o1 series models](https://platform.openai.com/docs/guides/reasoning). metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/completions). + [dashboard](https://platform.openai.com/chat-completions). + + modalities: Output types that you would like the model to generate for this request. Most + models are capable of generating text, which is the default: + + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](https://platform.openai.com/docs/guides/audio). To request that + this model generate both text and audio responses, you can use: + + `["text", "audio"]` n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the @@ -211,8 +237,9 @@ def create( stop: Up to 4 sequences where the API will stop generating further tokens. - store: Whether or not to store the output of this completion request for traffic - logging in the [dashboard](https://platform.openai.com/completions). + store: Whether or not to store the output of this chat completion request for use in + our [model distillation](https://platform.openai.com/docs/guides/distillation) + or [evals](https://platform.openai.com/docs/guides/evals) products. stream: If set, partial message deltas will be sent, like in ChatGPT. 
Tokens will be sent as data-only @@ -274,6 +301,7 @@ def create( messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], stream: Literal[True], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, @@ -282,6 +310,7 @@ def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -304,8 +333,12 @@ def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Stream[ChatCompletionChunk]: - """ - Creates a model response for the given chat conversation. + """Creates a model response for the given chat conversation. + + Learn more in the + [text generation](https://platform.openai.com/docs/guides/text-generation), + [vision](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio) guides. Args: messages: A list of messages comprising the conversation so far. Depending on the @@ -326,6 +359,10 @@ def create( message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + audio: Parameters for audio output. Required when audio output is requested with + `modalities: ["audio"]`. + [Learn more](https://platform.openai.com/docs/guides/audio). + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. @@ -373,7 +410,18 @@ def create( [o1 series models](https://platform.openai.com/docs/guides/reasoning). metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/completions). + [dashboard](https://platform.openai.com/chat-completions). + + modalities: Output types that you would like the model to generate for this request. Most + models are capable of generating text, which is the default: + + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](https://platform.openai.com/docs/guides/audio). To request that + this model generate both text and audio responses, you can use: + + `["text", "audio"]` n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the @@ -434,8 +482,9 @@ def create( stop: Up to 4 sequences where the API will stop generating further tokens. - store: Whether or not to store the output of this completion request for traffic - logging in the [dashboard](https://platform.openai.com/completions). + store: Whether or not to store the output of this chat completion request for use in + our [model distillation](https://platform.openai.com/docs/guides/distillation) + or [evals](https://platform.openai.com/docs/guides/evals) products. stream_options: Options for streaming response. Only set this when you set `stream: true`. 
@@ -490,6 +539,7 @@ def create( messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], stream: bool, + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, @@ -498,6 +548,7 @@ def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -520,8 +571,12 @@ def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion | Stream[ChatCompletionChunk]: - """ - Creates a model response for the given chat conversation. + """Creates a model response for the given chat conversation. + + Learn more in the + [text generation](https://platform.openai.com/docs/guides/text-generation), + [vision](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio) guides. Args: messages: A list of messages comprising the conversation so far. Depending on the @@ -542,6 +597,10 @@ def create( message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + audio: Parameters for audio output. Required when audio output is requested with + `modalities: ["audio"]`. + [Learn more](https://platform.openai.com/docs/guides/audio). + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. @@ -589,7 +648,18 @@ def create( [o1 series models](https://platform.openai.com/docs/guides/reasoning). metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/completions). + [dashboard](https://platform.openai.com/chat-completions). + + modalities: Output types that you would like the model to generate for this request. Most + models are capable of generating text, which is the default: + + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](https://platform.openai.com/docs/guides/audio). To request that + this model generate both text and audio responses, you can use: + + `["text", "audio"]` n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the @@ -650,8 +720,9 @@ def create( stop: Up to 4 sequences where the API will stop generating further tokens. - store: Whether or not to store the output of this completion request for traffic - logging in the [dashboard](https://platform.openai.com/completions). + store: Whether or not to store the output of this chat completion request for use in + our [model distillation](https://platform.openai.com/docs/guides/distillation) + or [evals](https://platform.openai.com/docs/guides/evals) products. stream_options: Options for streaming response. Only set this when you set `stream: true`. 
@@ -705,6 +776,7 @@ def create( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, @@ -713,6 +785,7 @@ def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -742,6 +815,7 @@ def create( { "messages": messages, "model": model, + "audio": audio, "frequency_penalty": frequency_penalty, "function_call": function_call, "functions": functions, @@ -750,6 +824,7 @@ def create( "max_completion_tokens": max_completion_tokens, "max_tokens": max_tokens, "metadata": metadata, + "modalities": modalities, "n": n, "parallel_tool_calls": parallel_tool_calls, "presence_penalty": presence_penalty, @@ -804,6 +879,7 @@ async def create( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, @@ -812,6 +888,7 @@ async def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -835,8 +912,12 @@ async def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion: - """ - Creates a model response for the given chat conversation. + """Creates a model response for the given chat conversation. + + Learn more in the + [text generation](https://platform.openai.com/docs/guides/text-generation), + [vision](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio) guides. Args: messages: A list of messages comprising the conversation so far. Depending on the @@ -850,6 +931,10 @@ async def create( [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + audio: Parameters for audio output. Required when audio output is requested with + `modalities: ["audio"]`. + [Learn more](https://platform.openai.com/docs/guides/audio). + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. @@ -897,7 +982,18 @@ async def create( [o1 series models](https://platform.openai.com/docs/guides/reasoning). metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/completions). 
+ [dashboard](https://platform.openai.com/chat-completions). + + modalities: Output types that you would like the model to generate for this request. Most + models are capable of generating text, which is the default: + + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](https://platform.openai.com/docs/guides/audio). To request that + this model generate both text and audio responses, you can use: + + `["text", "audio"]` n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the @@ -958,8 +1054,9 @@ async def create( stop: Up to 4 sequences where the API will stop generating further tokens. - store: Whether or not to store the output of this completion request for traffic - logging in the [dashboard](https://platform.openai.com/completions). + store: Whether or not to store the output of this chat completion request for use in + our [model distillation](https://platform.openai.com/docs/guides/distillation) + or [evals](https://platform.openai.com/docs/guides/evals) products. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only @@ -1021,6 +1118,7 @@ async def create( messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], stream: Literal[True], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, @@ -1029,6 +1127,7 @@ async def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -1051,8 +1150,12 @@ async def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncStream[ChatCompletionChunk]: - """ - Creates a model response for the given chat conversation. + """Creates a model response for the given chat conversation. + + Learn more in the + [text generation](https://platform.openai.com/docs/guides/text-generation), + [vision](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio) guides. Args: messages: A list of messages comprising the conversation so far. Depending on the @@ -1073,6 +1176,10 @@ async def create( message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + audio: Parameters for audio output. Required when audio output is requested with + `modalities: ["audio"]`. + [Learn more](https://platform.openai.com/docs/guides/audio). + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. @@ -1120,7 +1227,18 @@ async def create( [o1 series models](https://platform.openai.com/docs/guides/reasoning). metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/completions). + [dashboard](https://platform.openai.com/chat-completions). 
+ + modalities: Output types that you would like the model to generate for this request. Most + models are capable of generating text, which is the default: + + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](https://platform.openai.com/docs/guides/audio). To request that + this model generate both text and audio responses, you can use: + + `["text", "audio"]` n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the @@ -1181,8 +1299,9 @@ async def create( stop: Up to 4 sequences where the API will stop generating further tokens. - store: Whether or not to store the output of this completion request for traffic - logging in the [dashboard](https://platform.openai.com/completions). + store: Whether or not to store the output of this chat completion request for use in + our [model distillation](https://platform.openai.com/docs/guides/distillation) + or [evals](https://platform.openai.com/docs/guides/evals) products. stream_options: Options for streaming response. Only set this when you set `stream: true`. @@ -1237,6 +1356,7 @@ async def create( messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], stream: bool, + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, @@ -1245,6 +1365,7 @@ async def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -1267,8 +1388,12 @@ async def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]: - """ - Creates a model response for the given chat conversation. + """Creates a model response for the given chat conversation. + + Learn more in the + [text generation](https://platform.openai.com/docs/guides/text-generation), + [vision](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio) guides. Args: messages: A list of messages comprising the conversation so far. Depending on the @@ -1289,6 +1414,10 @@ async def create( message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + audio: Parameters for audio output. Required when audio output is requested with + `modalities: ["audio"]`. + [Learn more](https://platform.openai.com/docs/guides/audio). + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. @@ -1336,7 +1465,18 @@ async def create( [o1 series models](https://platform.openai.com/docs/guides/reasoning). metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/completions). + [dashboard](https://platform.openai.com/chat-completions). 
+ + modalities: Output types that you would like the model to generate for this request. Most + models are capable of generating text, which is the default: + + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](https://platform.openai.com/docs/guides/audio). To request that + this model generate both text and audio responses, you can use: + + `["text", "audio"]` n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the @@ -1397,8 +1537,9 @@ async def create( stop: Up to 4 sequences where the API will stop generating further tokens. - store: Whether or not to store the output of this completion request for traffic - logging in the [dashboard](https://platform.openai.com/completions). + store: Whether or not to store the output of this chat completion request for use in + our [model distillation](https://platform.openai.com/docs/guides/distillation) + or [evals](https://platform.openai.com/docs/guides/evals) products. stream_options: Options for streaming response. Only set this when you set `stream: true`. @@ -1452,6 +1593,7 @@ async def create( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, @@ -1460,6 +1602,7 @@ async def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -1489,6 +1632,7 @@ async def create( { "messages": messages, "model": model, + "audio": audio, "frequency_penalty": frequency_penalty, "function_call": function_call, "functions": functions, @@ -1497,6 +1641,7 @@ async def create( "max_completion_tokens": max_completion_tokens, "max_tokens": max_tokens, "metadata": metadata, + "modalities": modalities, "n": n, "parallel_tool_calls": parallel_tool_calls, "presence_penalty": presence_penalty, diff --git a/src/openai/types/beta/assistant_stream_event.py b/src/openai/types/beta/assistant_stream_event.py index f1d8898ff2..41d3a0c5ea 100644 --- a/src/openai/types/beta/assistant_stream_event.py +++ b/src/openai/types/beta/assistant_stream_event.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import Union +from typing import Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from .thread import Thread @@ -51,6 +51,9 @@ class ThreadCreated(BaseModel): event: Literal["thread.created"] + enabled: Optional[bool] = None + """Whether to enable input audio transcription.""" + class ThreadRunCreated(BaseModel): data: Run diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py index df3b48149c..b85365ecb1 100644 --- a/src/openai/types/chat/__init__.py +++ b/src/openai/types/chat/__init__.py @@ -4,10 +4,13 @@ from .chat_completion import ChatCompletion as ChatCompletion from .chat_completion_role import ChatCompletionRole as ChatCompletionRole +from .chat_completion_audio import ChatCompletionAudio as ChatCompletionAudio from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk from .chat_completion_message import ChatCompletionMessage as ChatCompletionMessage +from .chat_completion_modality import ChatCompletionModality as ChatCompletionModality from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam +from .chat_completion_audio_param import ChatCompletionAudioParam as ChatCompletionAudioParam from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall @@ -43,3 +46,6 @@ from .chat_completion_function_call_option_param import ( ChatCompletionFunctionCallOptionParam as ChatCompletionFunctionCallOptionParam, ) +from .chat_completion_content_part_input_audio_param import ( + ChatCompletionContentPartInputAudioParam as ChatCompletionContentPartInputAudioParam, +) diff --git a/src/openai/types/chat/chat_completion_assistant_message_param.py b/src/openai/types/chat/chat_completion_assistant_message_param.py index 2429d41d33..35e3a3d784 100644 --- a/src/openai/types/chat/chat_completion_assistant_message_param.py +++ b/src/openai/types/chat/chat_completion_assistant_message_param.py @@ -9,7 +9,13 @@ from .chat_completion_message_tool_call_param import ChatCompletionMessageToolCallParam from .chat_completion_content_part_refusal_param import ChatCompletionContentPartRefusalParam -__all__ = ["ChatCompletionAssistantMessageParam", "ContentArrayOfContentPart", "FunctionCall"] +__all__ = ["ChatCompletionAssistantMessageParam", "Audio", "ContentArrayOfContentPart", "FunctionCall"] + + +class Audio(TypedDict, total=False): + id: Required[str] + """Unique identifier for a previous audio response from the model.""" + ContentArrayOfContentPart: TypeAlias = Union[ChatCompletionContentPartTextParam, ChatCompletionContentPartRefusalParam] @@ -31,6 +37,12 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" + audio: Optional[Audio] + """Data about a previous audio response from the model. + + [Learn more](https://platform.openai.com/docs/guides/audio). + """ + content: Union[str, Iterable[ContentArrayOfContentPart], None] """The contents of the assistant message. 
diff --git a/src/openai/types/chat/chat_completion_audio.py b/src/openai/types/chat/chat_completion_audio.py new file mode 100644 index 0000000000..135ee8845c --- /dev/null +++ b/src/openai/types/chat/chat_completion_audio.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + + + +from ..._models import BaseModel + +__all__ = ["ChatCompletionAudio"] + + +class ChatCompletionAudio(BaseModel): + id: str + """Unique identifier for this audio response.""" + + data: str + """ + Base64 encoded audio bytes generated by the model, in the format specified in + the request. + """ + + expires_at: int + """ + The Unix timestamp (in seconds) for when this audio response will no longer be + accessible on the server for use in multi-turn conversations. + """ + + transcript: str + """Transcript of the audio generated by the model.""" diff --git a/src/openai/types/chat/chat_completion_audio_param.py b/src/openai/types/chat/chat_completion_audio_param.py new file mode 100644 index 0000000000..6a4ce9ac1f --- /dev/null +++ b/src/openai/types/chat/chat_completion_audio_param.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ChatCompletionAudioParam"] + + +class ChatCompletionAudioParam(TypedDict, total=False): + format: Required[Literal["wav", "mp3", "flac", "opus", "pcm16"]] + """Specifies the output audio format. + + Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`. + """ + + voice: Required[Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"]] + """Specifies the voice type. + + Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. + """ diff --git a/src/openai/types/chat/chat_completion_content_part_input_audio_param.py b/src/openai/types/chat/chat_completion_content_part_input_audio_param.py new file mode 100644 index 0000000000..0b1b1a80b1 --- /dev/null +++ b/src/openai/types/chat/chat_completion_content_part_input_audio_param.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ChatCompletionContentPartInputAudioParam", "InputAudio"] + + +class InputAudio(TypedDict, total=False): + data: Required[str] + """Base64 encoded audio data.""" + + format: Required[Literal["wav", "mp3"]] + """The format of the encoded audio data. Currently supports "wav" and "mp3".""" + + +class ChatCompletionContentPartInputAudioParam(TypedDict, total=False): + input_audio: Required[InputAudio] + + type: Required[Literal["input_audio"]] + """The type of the content part. 
Always `input_audio`.""" diff --git a/src/openai/types/chat/chat_completion_content_part_param.py b/src/openai/types/chat/chat_completion_content_part_param.py index e0c6e480f2..682d11f4c7 100644 --- a/src/openai/types/chat/chat_completion_content_part_param.py +++ b/src/openai/types/chat/chat_completion_content_part_param.py @@ -7,9 +7,10 @@ from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam from .chat_completion_content_part_image_param import ChatCompletionContentPartImageParam +from .chat_completion_content_part_input_audio_param import ChatCompletionContentPartInputAudioParam __all__ = ["ChatCompletionContentPartParam"] ChatCompletionContentPartParam: TypeAlias = Union[ - ChatCompletionContentPartTextParam, ChatCompletionContentPartImageParam + ChatCompletionContentPartTextParam, ChatCompletionContentPartImageParam, ChatCompletionContentPartInputAudioParam ] diff --git a/src/openai/types/chat/chat_completion_message.py b/src/openai/types/chat/chat_completion_message.py index 492bb68c85..704fa5d5d1 100644 --- a/src/openai/types/chat/chat_completion_message.py +++ b/src/openai/types/chat/chat_completion_message.py @@ -4,6 +4,7 @@ from typing_extensions import Literal from ..._models import BaseModel +from .chat_completion_audio import ChatCompletionAudio from .chat_completion_message_tool_call import ChatCompletionMessageToolCall __all__ = ["ChatCompletionMessage", "FunctionCall"] @@ -32,6 +33,13 @@ class ChatCompletionMessage(BaseModel): role: Literal["assistant"] """The role of the author of this message.""" + audio: Optional[ChatCompletionAudio] = None + """ + If the audio output modality is requested, this object contains data about the + audio response from the model. + [Learn more](https://platform.openai.com/docs/guides/audio). + """ + function_call: Optional[FunctionCall] = None """Deprecated and replaced by `tool_calls`. diff --git a/src/openai/types/chat/chat_completion_modality.py b/src/openai/types/chat/chat_completion_modality.py new file mode 100644 index 0000000000..8e3c145979 --- /dev/null +++ b/src/openai/types/chat/chat_completion_modality.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["ChatCompletionModality"] + +ChatCompletionModality: TypeAlias = Literal["text", "audio"] diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 3f55dfbe6e..af6a47c219 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -6,7 +6,9 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..chat_model import ChatModel +from .chat_completion_modality import ChatCompletionModality from .chat_completion_tool_param import ChatCompletionToolParam +from .chat_completion_audio_param import ChatCompletionAudioParam from .chat_completion_message_param import ChatCompletionMessageParam from ..shared_params.function_parameters import FunctionParameters from ..shared_params.response_format_text import ResponseFormatText @@ -45,6 +47,13 @@ class CompletionCreateParamsBase(TypedDict, total=False): table for details on which models work with the Chat API. """ + audio: Optional[ChatCompletionAudioParam] + """Parameters for audio output. + + Required when audio output is requested with `modalities: ["audio"]`. + [Learn more](https://platform.openai.com/docs/guides/audio). 
+ """ + frequency_penalty: Optional[float] """Number between -2.0 and 2.0. @@ -112,7 +121,21 @@ class CompletionCreateParamsBase(TypedDict, total=False): metadata: Optional[Dict[str, str]] """ Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/completions). + [dashboard](https://platform.openai.com/chat-completions). + """ + + modalities: Optional[List[ChatCompletionModality]] + """ + Output types that you would like the model to generate for this request. Most + models are capable of generating text, which is the default: + + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](https://platform.openai.com/docs/guides/audio). To request that + this model generate both text and audio responses, you can use: + + `["text", "audio"]` """ n: Optional[int] @@ -195,8 +218,9 @@ class CompletionCreateParamsBase(TypedDict, total=False): store: Optional[bool] """ - Whether or not to store the output of this completion request for traffic - logging in the [dashboard](https://platform.openai.com/completions). + Whether or not to store the output of this chat completion request for use in + our [model distillation](https://platform.openai.com/docs/guides/distillation) + or [evals](https://platform.openai.com/docs/guides/evals) products. """ stream_options: Optional[ChatCompletionStreamOptionsParam] diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py index f2d5674786..b801aa0914 100644 --- a/src/openai/types/chat_model.py +++ b/src/openai/types/chat_model.py @@ -12,7 +12,10 @@ "gpt-4o", "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", + "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-audio-preview", + "gpt-4o-audio-preview-2024-10-01", "chatgpt-4o-latest", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index d353139543..a341e78f7e 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -43,6 +43,10 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: } ], model="gpt-4o", + audio={ + "format": "wav", + "voice": "alloy", + }, frequency_penalty=-2, function_call="none", functions=[ @@ -57,6 +61,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: max_completion_tokens=0, max_tokens=0, metadata={"foo": "string"}, + modalities=["text", "audio"], n=1, parallel_tool_calls=True, presence_penalty=-2, @@ -166,6 +171,10 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: ], model="gpt-4o", stream=True, + audio={ + "format": "wav", + "voice": "alloy", + }, frequency_penalty=-2, function_call="none", functions=[ @@ -180,6 +189,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: max_completion_tokens=0, max_tokens=0, metadata={"foo": "string"}, + modalities=["text", "audio"], n=1, parallel_tool_calls=True, presence_penalty=-2, @@ -291,6 +301,10 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn } ], model="gpt-4o", + audio={ + "format": "wav", + "voice": "alloy", + }, frequency_penalty=-2, function_call="none", functions=[ @@ -305,6 +319,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn max_completion_tokens=0, max_tokens=0, metadata={"foo": "string"}, + modalities=["text", "audio"], n=1, parallel_tool_calls=True, 
presence_penalty=-2, @@ -414,6 +429,10 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn ], model="gpt-4o", stream=True, + audio={ + "format": "wav", + "voice": "alloy", + }, frequency_penalty=-2, function_call="none", functions=[ @@ -428,6 +447,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn max_completion_tokens=0, max_tokens=0, metadata={"foo": "string"}, + modalities=["text", "audio"], n=1, parallel_tool_calls=True, presence_penalty=-2, From b0648e7555dd21d17645a7b9f9468a32f2fb42cc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 17:40:28 +0000 Subject: [PATCH 094/192] chore(internal): update test syntax (#1798) --- tests/test_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_models.py b/tests/test_models.py index b703444248..117a90020e 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -245,7 +245,7 @@ class Model(BaseModel): assert m.foo is True m = Model.construct(foo="CARD_HOLDER") - assert m.foo is "CARD_HOLDER" + assert m.foo == "CARD_HOLDER" m = Model.construct(foo={"bar": False}) assert isinstance(m.foo, Submodel1) From 178b80d4b80aafcaf506a3e8046afc08576d0693 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 18 Oct 2024 18:12:59 +0000 Subject: [PATCH 095/192] chore(internal): bump ruff dependency (#1801) --- pyproject.toml | 3 ++- requirements-dev.lock | 2 +- src/openai/types/audio/transcription.py | 1 - src/openai/types/audio/transcription_word.py | 1 - src/openai/types/audio/translation.py | 1 - src/openai/types/batch_request_counts.py | 1 - src/openai/types/beta/assistant_tool_choice_function.py | 1 - src/openai/types/beta/static_file_chunking_strategy.py | 1 - src/openai/types/chat/chat_completion_audio.py | 1 - src/openai/types/fine_tuning/fine_tuning_job_integration.py | 1 - src/openai/types/model_deleted.py | 1 - 11 files changed, 3 insertions(+), 11 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index ab8cf5cf38..c4465c00ff 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -63,11 +63,12 @@ format = { chain = [ "format:ruff", "format:docs", "fix:ruff", + # run formatting again to fix any inconsistencies when imports are stripped + "format:ruff", ]} "format:black" = "black ." "format:docs" = "python scripts/utils/ruffen-docs.py README.md api.md" "format:ruff" = "ruff format" -"format:isort" = "isort ." "lint" = { chain = [ "check:ruff", diff --git a/requirements-dev.lock b/requirements-dev.lock index e464a89bfb..d5344f1c5b 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -80,7 +80,7 @@ pytz==2023.3.post1 # via dirty-equals respx==0.20.2 rich==13.7.1 -ruff==0.6.5 +ruff==0.6.9 setuptools==68.2.2 # via nodeenv six==1.16.0 diff --git a/src/openai/types/audio/transcription.py b/src/openai/types/audio/transcription.py index 0b6ab39e78..edb5f227fc 100644 --- a/src/openai/types/audio/transcription.py +++ b/src/openai/types/audio/transcription.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- from ..._models import BaseModel __all__ = ["Transcription"] diff --git a/src/openai/types/audio/transcription_word.py b/src/openai/types/audio/transcription_word.py index 55b3c00880..969da32509 100644 --- a/src/openai/types/audio/transcription_word.py +++ b/src/openai/types/audio/transcription_word.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - from ..._models import BaseModel __all__ = ["TranscriptionWord"] diff --git a/src/openai/types/audio/translation.py b/src/openai/types/audio/translation.py index 3d9ede2939..7c0e905189 100644 --- a/src/openai/types/audio/translation.py +++ b/src/openai/types/audio/translation.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - from ..._models import BaseModel __all__ = ["Translation"] diff --git a/src/openai/types/batch_request_counts.py b/src/openai/types/batch_request_counts.py index ef6c84a0a1..7e1d49fb88 100644 --- a/src/openai/types/batch_request_counts.py +++ b/src/openai/types/batch_request_counts.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - from .._models import BaseModel __all__ = ["BatchRequestCounts"] diff --git a/src/openai/types/beta/assistant_tool_choice_function.py b/src/openai/types/beta/assistant_tool_choice_function.py index d0d4255357..0c896d8087 100644 --- a/src/openai/types/beta/assistant_tool_choice_function.py +++ b/src/openai/types/beta/assistant_tool_choice_function.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - from ..._models import BaseModel __all__ = ["AssistantToolChoiceFunction"] diff --git a/src/openai/types/beta/static_file_chunking_strategy.py b/src/openai/types/beta/static_file_chunking_strategy.py index ba80e1a2b9..6080093517 100644 --- a/src/openai/types/beta/static_file_chunking_strategy.py +++ b/src/openai/types/beta/static_file_chunking_strategy.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - from ..._models import BaseModel __all__ = ["StaticFileChunkingStrategy"] diff --git a/src/openai/types/chat/chat_completion_audio.py b/src/openai/types/chat/chat_completion_audio.py index 135ee8845c..dd15508ebb 100644 --- a/src/openai/types/chat/chat_completion_audio.py +++ b/src/openai/types/chat/chat_completion_audio.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - from ..._models import BaseModel __all__ = ["ChatCompletionAudio"] diff --git a/src/openai/types/fine_tuning/fine_tuning_job_integration.py b/src/openai/types/fine_tuning/fine_tuning_job_integration.py index 4904b85c11..8ac55a0b44 100644 --- a/src/openai/types/fine_tuning/fine_tuning_job_integration.py +++ b/src/openai/types/fine_tuning/fine_tuning_job_integration.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - from .fine_tuning_job_wandb_integration_object import FineTuningJobWandbIntegrationObject __all__ = ["FineTuningJobIntegration"] diff --git a/src/openai/types/model_deleted.py b/src/openai/types/model_deleted.py index d9a48bb1b5..7f81e1b380 100644 --- a/src/openai/types/model_deleted.py +++ b/src/openai/types/model_deleted.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- from .._models import BaseModel __all__ = ["ModelDeleted"] From 13200876f6635286dcb4943e990cc5e279545787 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 21 Oct 2024 07:23:51 +0000 Subject: [PATCH 096/192] chore(tests): add more retry tests (#1806) --- src/openai/_base_client.py | 2 +- tests/test_client.py | 21 +++++++++++++++++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index b2929df072..e1d4849ae2 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -1591,7 +1591,7 @@ async def _request( except Exception as err: log.debug("Encountered Exception", exc_info=True) - if retries_taken > 0: + if remaining_retries > 0: return await self._retry_request( input_options, cast_to, diff --git a/tests/test_client.py b/tests/test_client.py index 1da35ddd22..ff07ec393b 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -10,6 +10,7 @@ import tracemalloc from typing import Any, Union, cast from unittest import mock +from typing_extensions import Literal import httpx import pytest @@ -764,7 +765,14 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> Non @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - def test_retries_taken(self, client: OpenAI, failures_before_success: int, respx_mock: MockRouter) -> None: + @pytest.mark.parametrize("failure_mode", ["status", "exception"]) + def test_retries_taken( + self, + client: OpenAI, + failures_before_success: int, + failure_mode: Literal["status", "exception"], + respx_mock: MockRouter, + ) -> None: client = client.with_options(max_retries=4) nb_retries = 0 @@ -773,6 +781,8 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: nonlocal nb_retries if nb_retries < failures_before_success: nb_retries += 1 + if failure_mode == "exception": + raise RuntimeError("oops") return httpx.Response(500) return httpx.Response(200) @@ -1623,8 +1633,13 @@ async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio + @pytest.mark.parametrize("failure_mode", ["status", "exception"]) async def test_retries_taken( - self, async_client: AsyncOpenAI, failures_before_success: int, respx_mock: MockRouter + self, + async_client: AsyncOpenAI, + failures_before_success: int, + failure_mode: Literal["status", "exception"], + respx_mock: MockRouter, ) -> None: client = async_client.with_options(max_retries=4) @@ -1634,6 +1649,8 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: nonlocal nb_retries if nb_retries < failures_before_success: nb_retries += 1 + if failure_mode == "exception": + raise RuntimeError("oops") return httpx.Response(500) return httpx.Response(200) From c3f53e98715161b8d0e925f862de14ad3e8d19dd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 21 Oct 2024 11:54:15 +0000 Subject: [PATCH 097/192] chore(internal): remove unused black config (#1807) --- pyproject.toml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c4465c00ff..affa761971 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -66,7 +66,6 @@ format = { chain = 
[ # run formatting again to fix any inconsistencies when imports are stripped "format:ruff", ]} -"format:black" = "black ." "format:docs" = "python scripts/utils/ruffen-docs.py README.md api.md" "format:ruff" = "ruff format" @@ -126,10 +125,6 @@ path = "README.md" pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)' replacement = '[\1](https://github.com/openai/openai-python/tree/main/\g<2>)' -[tool.black] -line-length = 120 -target-version = ["py37"] - [tool.pytest.ini_options] testpaths = ["tests"] addopts = "--tb=short" From 2ed3f4c63cc0b2cc4e64a6212fa900e4518de610 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 18:24:19 +0000 Subject: [PATCH 098/192] chore(internal): update spec version (#1810) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 984e8a8d5f..e1a430e50a 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8729aaa35436531ab453224af10e67f89677db8f350f0346bb3537489edea649.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-f9320ebf347140052c7f8b0bc5c7db24f5e367c368c8cb34c3606af4e2b6591b.yml From c403ef384fb4989fac4fb550e36140c5bedf1d94 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Oct 2024 21:57:02 +0000 Subject: [PATCH 099/192] chore(internal): update spec version (#1816) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index e1a430e50a..0b08725565 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-f9320ebf347140052c7f8b0bc5c7db24f5e367c368c8cb34c3606af4e2b6591b.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b60d5559d5150ecd3b49136064e5e251d832899770ff385b711378389afba370.yml From f1928897cac97eab4cc73bb085ecb9734edb6223 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 28 Oct 2024 10:01:39 +0000 Subject: [PATCH 100/192] chore(internal): bump pytest to v8 & pydantic (#1829) --- requirements-dev.lock | 21 +++++++++------------ requirements.lock | 8 ++++---- src/openai/_compat.py | 2 +- src/openai/_models.py | 10 +++++----- src/openai/_types.py | 6 ++++-- tests/conftest.py | 14 ++++++++------ 6 files changed, 31 insertions(+), 30 deletions(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index d5344f1c5b..d088464204 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -16,8 +16,6 @@ anyio==4.4.0 # via openai argcomplete==3.1.2 # via nox -attrs==23.1.0 - # via pytest certifi==2023.7.22 # via httpcore # via httpx @@ -28,8 +26,9 @@ distlib==0.3.7 # via virtualenv distro==1.8.0 # via openai -exceptiongroup==1.1.3 +exceptiongroup==1.2.2 # via anyio + # via pytest filelock==3.12.4 # via virtualenv h11==0.14.0 @@ -60,20 +59,18 @@ packaging==23.2 # via pytest platformdirs==3.11.0 # via virtualenv -pluggy==1.3.0 - # via pytest -py==1.11.0 +pluggy==1.5.0 # via pytest -pydantic==2.7.1 +pydantic==2.9.2 # via openai -pydantic-core==2.18.2 +pydantic-core==2.23.4 # via pydantic pygments==2.18.0 # via rich pyright==1.1.380 -pytest==7.1.1 +pytest==8.3.3 # via pytest-asyncio -pytest-asyncio==0.21.1 +pytest-asyncio==0.24.0 
python-dateutil==2.8.2 # via time-machine pytz==2023.3.post1 @@ -90,10 +87,10 @@ sniffio==1.3.0 # via httpx # via openai time-machine==2.9.0 -tomli==2.0.1 +tomli==2.0.2 # via mypy # via pytest -typing-extensions==4.8.0 +typing-extensions==4.12.2 # via anyio # via mypy # via openai diff --git a/requirements.lock b/requirements.lock index 61e8fb1983..191186945d 100644 --- a/requirements.lock +++ b/requirements.lock @@ -19,7 +19,7 @@ certifi==2023.7.22 # via httpx distro==1.8.0 # via openai -exceptiongroup==1.1.3 +exceptiongroup==1.2.2 # via anyio h11==0.14.0 # via httpcore @@ -30,15 +30,15 @@ httpx==0.25.2 idna==3.4 # via anyio # via httpx -pydantic==2.7.1 +pydantic==2.9.2 # via openai -pydantic-core==2.18.2 +pydantic-core==2.23.4 # via pydantic sniffio==1.3.0 # via anyio # via httpx # via openai -typing-extensions==4.8.0 +typing-extensions==4.12.2 # via anyio # via openai # via pydantic diff --git a/src/openai/_compat.py b/src/openai/_compat.py index 162a6fbe4f..d89920d955 100644 --- a/src/openai/_compat.py +++ b/src/openai/_compat.py @@ -133,7 +133,7 @@ def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str: def model_dump( model: pydantic.BaseModel, *, - exclude: IncEx = None, + exclude: IncEx | None = None, exclude_unset: bool = False, exclude_defaults: bool = False, warnings: bool = True, diff --git a/src/openai/_models.py b/src/openai/_models.py index d386eaa3a4..42551b769a 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -176,7 +176,7 @@ def __str__(self) -> str: # Based on https://github.com/samuelcolvin/pydantic/issues/1168#issuecomment-817742836. @classmethod @override - def construct( + def construct( # pyright: ignore[reportIncompatibleMethodOverride] cls: Type[ModelT], _fields_set: set[str] | None = None, **values: object, @@ -248,8 +248,8 @@ def model_dump( self, *, mode: Literal["json", "python"] | str = "python", - include: IncEx = None, - exclude: IncEx = None, + include: IncEx | None = None, + exclude: IncEx | None = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, @@ -303,8 +303,8 @@ def model_dump_json( self, *, indent: int | None = None, - include: IncEx = None, - exclude: IncEx = None, + include: IncEx | None = None, + exclude: IncEx | None = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, diff --git a/src/openai/_types.py b/src/openai/_types.py index 5611b2d38f..c8f4d5a922 100644 --- a/src/openai/_types.py +++ b/src/openai/_types.py @@ -16,7 +16,7 @@ Optional, Sequence, ) -from typing_extensions import Literal, Protocol, TypeAlias, TypedDict, override, runtime_checkable +from typing_extensions import Set, Literal, Protocol, TypeAlias, TypedDict, override, runtime_checkable import httpx import pydantic @@ -195,7 +195,9 @@ def get(self, __key: str) -> str | None: ... 
# Note: copied from Pydantic # https://github.com/pydantic/pydantic/blob/32ea570bf96e84234d2992e1ddf40ab8a565925a/pydantic/main.py#L49 -IncEx: TypeAlias = "set[int] | set[str] | dict[int, Any] | dict[str, Any] | None" +IncEx: TypeAlias = Union[ + Set[int], Set[str], Mapping[int, Union["IncEx", Literal[True]]], Mapping[str, Union["IncEx", Literal[True]]] +] PostParser = Callable[[Any], Any] diff --git a/tests/conftest.py b/tests/conftest.py index 15af57e770..fa82d39d86 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,11 +1,11 @@ from __future__ import annotations import os -import asyncio import logging from typing import TYPE_CHECKING, Iterator, AsyncIterator import pytest +from pytest_asyncio import is_async_test from openai import OpenAI, AsyncOpenAI @@ -17,11 +17,13 @@ logging.getLogger("openai").setLevel(logging.DEBUG) -@pytest.fixture(scope="session") -def event_loop() -> Iterator[asyncio.AbstractEventLoop]: - loop = asyncio.new_event_loop() - yield loop - loop.close() +# automatically add `pytest.mark.asyncio()` to all of our async tests +# so we don't have to add that boilerplate everywhere +def pytest_collection_modifyitems(items: list[pytest.Function]) -> None: + pytest_asyncio_tests = (item for item in items if is_async_test(item)) + session_scope_marker = pytest.mark.asyncio(loop_scope="session") + for async_test in pytest_asyncio_tests: + async_test.add_marker(session_scope_marker, append=False) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010/") From 82792b15d6e0ea12105e451b9eebabb409eba277 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 16:11:49 +0000 Subject: [PATCH 101/192] feat(api): add new, expressive voices for Realtime and Audio in Chat Completions (#1835) https://platform.openai.com/docs/changelog --- .stats.yml | 2 +- src/openai/types/chat/chat_completion_audio_param.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.stats.yml b/.stats.yml index 0b08725565..39413df445 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b60d5559d5150ecd3b49136064e5e251d832899770ff385b711378389afba370.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-7b0a5d715d94f75ac7795bd4d2175a0e3243af9b935a86c273f371e45583140f.yml diff --git a/src/openai/types/chat/chat_completion_audio_param.py b/src/openai/types/chat/chat_completion_audio_param.py index 6a4ce9ac1f..b92326d294 100644 --- a/src/openai/types/chat/chat_completion_audio_param.py +++ b/src/openai/types/chat/chat_completion_audio_param.py @@ -14,8 +14,9 @@ class ChatCompletionAudioParam(TypedDict, total=False): Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`. """ - voice: Required[Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"]] - """Specifies the voice type. + voice: Required[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] + """The voice the model uses to respond. - Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. + Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, + `shimmer`, and `verse`. 
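As an illustrative aside (not taken from the patch itself): the recursive `IncEx` alias above matches what pydantic v2 accepts for `include`/`exclude`, that is, a set of field names or a mapping whose values are either `True` or another spec. A small self-contained sketch with throwaway model names:

from pydantic import BaseModel

class Owner(BaseModel):
    name: str
    email: str

class Repo(BaseModel):
    name: str
    owner: Owner

repo = Repo(name="openai-python", owner=Owner(name="Ada", email="ada@example.com"))

# A nested exclude spec of the shape the alias describes; a leaf value may
# also be `True` to drop an entire field.
print(repo.model_dump(exclude={"owner": {"email"}}))
# -> {'name': 'openai-python', 'owner': {'name': 'Ada'}}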
""" From 5d816618f7b0f43d075df6f786b52ed4191de009 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 12:27:34 +0000 Subject: [PATCH 102/192] chore(internal): bump mypy (#1839) --- requirements-dev.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index d088464204..64600eb215 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -48,7 +48,7 @@ markdown-it-py==3.0.0 # via rich mdurl==0.1.2 # via markdown-it-py -mypy==1.11.2 +mypy==1.13.0 mypy-extensions==1.0.0 # via mypy nodeenv==1.8.0 From 83397eb9ef5372fda3919daa9d83b292444c323b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 2 Nov 2024 02:46:36 +0000 Subject: [PATCH 103/192] fix: don't use dicts as iterables in transform (#1842) --- src/openai/_utils/_transform.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/openai/_utils/_transform.py b/src/openai/_utils/_transform.py index 47e262a515..7e9663d369 100644 --- a/src/openai/_utils/_transform.py +++ b/src/openai/_utils/_transform.py @@ -173,6 +173,11 @@ def _transform_recursive( # Iterable[T] or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) ): + # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually + # intended as an iterable, so we don't transform it. + if isinstance(data, dict): + return cast(object, data) + inner_type = extract_type_arg(stripped_type, 0) return [_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data] From bbd759822bf79ca883f65f4b3011da9350f449f7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 14:59:58 +0000 Subject: [PATCH 104/192] fix: support json safe serialization for basemodel subclasses (#1844) --- src/openai/_compat.py | 6 ++++-- src/openai/_models.py | 9 ++++++--- src/openai/_utils/__init__.py | 1 + src/openai/_utils/_transform.py | 4 ++-- src/openai/_utils/_utils.py | 17 +++++++++++++++++ tests/test_models.py | 21 +++++++-------------- tests/test_transform.py | 15 +++++++++++++++ 7 files changed, 52 insertions(+), 21 deletions(-) diff --git a/src/openai/_compat.py b/src/openai/_compat.py index d89920d955..4794129c4d 100644 --- a/src/openai/_compat.py +++ b/src/openai/_compat.py @@ -2,7 +2,7 @@ from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload from datetime import date, datetime -from typing_extensions import Self +from typing_extensions import Self, Literal import pydantic from pydantic.fields import FieldInfo @@ -137,9 +137,11 @@ def model_dump( exclude_unset: bool = False, exclude_defaults: bool = False, warnings: bool = True, + mode: Literal["json", "python"] = "python", ) -> dict[str, Any]: - if PYDANTIC_V2: + if PYDANTIC_V2 or hasattr(model, "model_dump"): return model.model_dump( + mode=mode, exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, diff --git a/src/openai/_models.py b/src/openai/_models.py index 42551b769a..6cb469e21d 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -37,6 +37,7 @@ PropertyInfo, is_list, is_given, + json_safe, lru_cache, is_mapping, parse_date, @@ -279,8 +280,8 @@ def model_dump( Returns: A dictionary representation of the model. 
""" - if mode != "python": - raise ValueError("mode is only supported in Pydantic v2") + if mode not in {"json", "python"}: + raise ValueError("mode must be either 'json' or 'python'") if round_trip != False: raise ValueError("round_trip is only supported in Pydantic v2") if warnings != True: @@ -289,7 +290,7 @@ def model_dump( raise ValueError("context is only supported in Pydantic v2") if serialize_as_any != False: raise ValueError("serialize_as_any is only supported in Pydantic v2") - return super().dict( # pyright: ignore[reportDeprecated] + dumped = super().dict( # pyright: ignore[reportDeprecated] include=include, exclude=exclude, by_alias=by_alias, @@ -298,6 +299,8 @@ def model_dump( exclude_none=exclude_none, ) + return cast(dict[str, Any], json_safe(dumped)) if mode == "json" else dumped + @override def model_dump_json( self, diff --git a/src/openai/_utils/__init__.py b/src/openai/_utils/__init__.py index 3efe66c8e8..a7cff3c091 100644 --- a/src/openai/_utils/__init__.py +++ b/src/openai/_utils/__init__.py @@ -6,6 +6,7 @@ is_list as is_list, is_given as is_given, is_tuple as is_tuple, + json_safe as json_safe, lru_cache as lru_cache, is_mapping as is_mapping, is_tuple_t as is_tuple_t, diff --git a/src/openai/_utils/_transform.py b/src/openai/_utils/_transform.py index 7e9663d369..d7c05345d1 100644 --- a/src/openai/_utils/_transform.py +++ b/src/openai/_utils/_transform.py @@ -191,7 +191,7 @@ def _transform_recursive( return data if isinstance(data, pydantic.BaseModel): - return model_dump(data, exclude_unset=True) + return model_dump(data, exclude_unset=True, mode="json") annotated_type = _get_annotated_type(annotation) if annotated_type is None: @@ -329,7 +329,7 @@ async def _async_transform_recursive( return data if isinstance(data, pydantic.BaseModel): - return model_dump(data, exclude_unset=True) + return model_dump(data, exclude_unset=True, mode="json") annotated_type = _get_annotated_type(annotation) if annotated_type is None: diff --git a/src/openai/_utils/_utils.py b/src/openai/_utils/_utils.py index 0bba17caad..e5811bba42 100644 --- a/src/openai/_utils/_utils.py +++ b/src/openai/_utils/_utils.py @@ -16,6 +16,7 @@ overload, ) from pathlib import Path +from datetime import date, datetime from typing_extensions import TypeGuard import sniffio @@ -395,3 +396,19 @@ def lru_cache(*, maxsize: int | None = 128) -> Callable[[CallableT], CallableT]: maxsize=maxsize, ) return cast(Any, wrapper) # type: ignore[no-any-return] + + +def json_safe(data: object) -> object: + """Translates a mapping / sequence recursively in the same fashion + as `pydantic` v2's `model_dump(mode="json")`. 
+ """ + if is_mapping(data): + return {json_safe(key): json_safe(value) for key, value in data.items()} + + if is_iterable(data) and not isinstance(data, (str, bytes, bytearray)): + return [json_safe(item) for item in data] + + if isinstance(data, (datetime, date)): + return data.isoformat() + + return data diff --git a/tests/test_models.py b/tests/test_models.py index 117a90020e..84dbce6914 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -520,19 +520,15 @@ class Model(BaseModel): assert m3.to_dict(exclude_none=True) == {} assert m3.to_dict(exclude_defaults=True) == {} - if PYDANTIC_V2: - - class Model2(BaseModel): - created_at: datetime + class Model2(BaseModel): + created_at: datetime - time_str = "2024-03-21T11:39:01.275859" - m4 = Model2.construct(created_at=time_str) - assert m4.to_dict(mode="python") == {"created_at": datetime.fromisoformat(time_str)} - assert m4.to_dict(mode="json") == {"created_at": time_str} - else: - with pytest.raises(ValueError, match="mode is only supported in Pydantic v2"): - m.to_dict(mode="json") + time_str = "2024-03-21T11:39:01.275859" + m4 = Model2.construct(created_at=time_str) + assert m4.to_dict(mode="python") == {"created_at": datetime.fromisoformat(time_str)} + assert m4.to_dict(mode="json") == {"created_at": time_str} + if not PYDANTIC_V2: with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): m.to_dict(warnings=False) @@ -558,9 +554,6 @@ class Model(BaseModel): assert m3.model_dump(exclude_none=True) == {} if not PYDANTIC_V2: - with pytest.raises(ValueError, match="mode is only supported in Pydantic v2"): - m.model_dump(mode="json") - with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"): m.model_dump(round_trip=True) diff --git a/tests/test_transform.py b/tests/test_transform.py index 1eb6cde9d6..8c6aba6448 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -177,17 +177,32 @@ class DateDict(TypedDict, total=False): foo: Annotated[date, PropertyInfo(format="iso8601")] +class DatetimeModel(BaseModel): + foo: datetime + + +class DateModel(BaseModel): + foo: Optional[date] + + @parametrize @pytest.mark.asyncio async def test_iso8601_format(use_async: bool) -> None: dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") + tz = "Z" if PYDANTIC_V2 else "+00:00" assert await transform({"foo": dt}, DatetimeDict, use_async) == {"foo": "2023-02-23T14:16:36.337692+00:00"} # type: ignore[comparison-overlap] + assert await transform(DatetimeModel(foo=dt), Any, use_async) == {"foo": "2023-02-23T14:16:36.337692" + tz} # type: ignore[comparison-overlap] dt = dt.replace(tzinfo=None) assert await transform({"foo": dt}, DatetimeDict, use_async) == {"foo": "2023-02-23T14:16:36.337692"} # type: ignore[comparison-overlap] + assert await transform(DatetimeModel(foo=dt), Any, use_async) == {"foo": "2023-02-23T14:16:36.337692"} # type: ignore[comparison-overlap] assert await transform({"foo": None}, DateDict, use_async) == {"foo": None} # type: ignore[comparison-overlap] + assert await transform(DateModel(foo=None), Any, use_async) == {"foo": None} # type: ignore assert await transform({"foo": date.fromisoformat("2023-02-23")}, DateDict, use_async) == {"foo": "2023-02-23"} # type: ignore[comparison-overlap] + assert await transform(DateModel(foo=date.fromisoformat("2023-02-23")), DateDict, use_async) == { + "foo": "2023-02-23" + } # type: ignore[comparison-overlap] @parametrize From 1fa56d073f7eb02128b12625b1cddd4ff7b583bd Mon Sep 17 00:00:00 2001 From: 
"stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 18:00:25 +0000 Subject: [PATCH 105/192] feat(project): drop support for Python 3.7 (#1845) 3.7 has been EOL for over a year and accounts for a small number of downloads --- README.md | 4 ++-- pyproject.toml | 5 ++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 8873dff24a..34ae1dacea 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ [![PyPI version](https://img.shields.io/pypi/v/openai.svg)](https://pypi.org/project/openai/) -The OpenAI Python library provides convenient access to the OpenAI REST API from any Python 3.7+ +The OpenAI Python library provides convenient access to the OpenAI REST API from any Python 3.8+ application. The library includes type definitions for all request params and response fields, and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx). @@ -520,7 +520,7 @@ print(openai.__version__) ## Requirements -Python 3.7 or higher. +Python 3.8 or higher. ## Contributing diff --git a/pyproject.toml b/pyproject.toml index affa761971..31cdc0a93a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,11 +16,10 @@ dependencies = [ "sniffio", "cached-property; python_version < '3.8'", ] -requires-python = ">= 3.7.1" +requires-python = ">= 3.8" classifiers = [ "Typing :: Typed", "Intended Audience :: Developers", - "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", @@ -139,7 +138,7 @@ filterwarnings = [ # there are a couple of flags that are still disabled by # default in strict mode as they are experimental and niche. typeCheckingMode = "strict" -pythonVersion = "3.7" +pythonVersion = "3.8" exclude = [ "_dev", From d8ec8ffd1e3dc91c66595abb56577b7ff0e1dbfe Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 22:43:25 +0000 Subject: [PATCH 106/192] feat(api): add support for predicted outputs (#1847) --- .stats.yml | 2 +- api.md | 3 +- src/openai/resources/audio/speech.py | 8 +- src/openai/resources/audio/transcriptions.py | 4 +- src/openai/resources/audio/translations.py | 4 +- src/openai/resources/beta/assistants.py | 40 +++--- src/openai/resources/beta/threads/messages.py | 8 +- .../resources/beta/threads/runs/runs.py | 56 ++++---- .../resources/beta/threads/runs/steps.py | 16 +-- src/openai/resources/beta/threads/threads.py | 36 ++--- .../beta/vector_stores/file_batches.py | 8 +- .../resources/beta/vector_stores/files.py | 8 +- .../beta/vector_stores/vector_stores.py | 8 +- src/openai/resources/chat/completions.py | 125 +++++++++++------- src/openai/resources/completions.py | 60 ++++----- src/openai/resources/embeddings.py | 12 +- src/openai/resources/files.py | 69 ++++++++-- src/openai/resources/fine_tuning/jobs/jobs.py | 4 +- src/openai/resources/images.py | 12 +- src/openai/resources/moderations.py | 4 +- src/openai/resources/uploads/uploads.py | 4 +- .../types/audio/speech_create_params.py | 4 +- .../audio/transcription_create_params.py | 2 +- .../types/audio/translation_create_params.py | 2 +- src/openai/types/beta/assistant.py | 8 +- .../types/beta/assistant_create_params.py | 8 +- .../types/beta/assistant_list_params.py | 2 +- .../types/beta/assistant_update_params.py | 8 +- src/openai/types/beta/file_search_tool.py | 4 +- .../types/beta/file_search_tool_param.py | 4 +- 
.../beta/thread_create_and_run_params.py | 6 +- .../types/beta/threads/message_list_params.py | 2 +- src/openai/types/beta/threads/run.py | 6 +- .../types/beta/threads/run_create_params.py | 8 +- .../types/beta/threads/run_list_params.py | 2 +- .../beta/threads/runs/step_list_params.py | 4 +- .../beta/threads/runs/step_retrieve_params.py | 2 +- .../types/beta/vector_store_list_params.py | 2 +- .../file_batch_list_files_params.py | 2 +- .../beta/vector_stores/file_list_params.py | 2 +- src/openai/types/chat/__init__.py | 3 + ...hat_completion_content_part_image_param.py | 2 +- ...hat_completion_prediction_content_param.py | 25 ++++ .../types/chat/completion_create_params.py | 23 ++-- src/openai/types/completion_create_params.py | 10 +- src/openai/types/completion_usage.py | 14 ++ src/openai/types/embedding_create_params.py | 6 +- src/openai/types/file_list_params.py | 23 +++- .../types/fine_tuning/job_create_params.py | 2 +- .../types/image_create_variation_params.py | 2 +- src/openai/types/image_edit_params.py | 2 +- src/openai/types/image_generate_params.py | 2 +- src/openai/types/moderation_create_params.py | 2 +- tests/api_resources/chat/test_completions.py | 16 +++ tests/api_resources/test_files.py | 24 ++-- 55 files changed, 447 insertions(+), 278 deletions(-) create mode 100644 src/openai/types/chat/chat_completion_prediction_content_param.py diff --git a/.stats.yml b/.stats.yml index 39413df445..f368bc881d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-7b0a5d715d94f75ac7795bd4d2175a0e3243af9b935a86c273f371e45583140f.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2f8ca92b9b1879fd535b685e4767338413fcd533d42f3baac13a9c41da3fce35.yml diff --git a/api.md b/api.md index 4600adf77a..3e88faab91 100644 --- a/api.md +++ b/api.md @@ -54,6 +54,7 @@ from openai.types.chat import ( ChatCompletionMessageToolCall, ChatCompletionModality, ChatCompletionNamedToolChoice, + ChatCompletionPredictionContent, ChatCompletionRole, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, @@ -93,7 +94,7 @@ Methods: - client.files.create(\*\*params) -> FileObject - client.files.retrieve(file_id) -> FileObject -- client.files.list(\*\*params) -> SyncPage[FileObject] +- client.files.list(\*\*params) -> SyncCursorPage[FileObject] - client.files.delete(file_id) -> FileDeleted - client.files.content(file_id) -> HttpxBinaryResponseContent - client.files.retrieve_content(file_id) -> str diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py index 6085ae8afe..09faaddda6 100644 --- a/src/openai/resources/audio/speech.py +++ b/src/openai/resources/audio/speech.py @@ -70,13 +70,13 @@ def create( input: The text to generate audio for. The maximum length is 4096 characters. model: - One of the available [TTS models](https://platform.openai.com/docs/models/tts): + One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1` or `tts-1-hd` voice: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the - [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options). + [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). response_format: The format to audio in. 
Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`. @@ -154,13 +154,13 @@ async def create( input: The text to generate audio for. The maximum length is 4096 characters. model: - One of the available [TTS models](https://platform.openai.com/docs/models/tts): + One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1` or `tts-1-hd` voice: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the - [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options). + [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`. diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py index ccff507a41..9fad66ed35 100644 --- a/src/openai/resources/audio/transcriptions.py +++ b/src/openai/resources/audio/transcriptions.py @@ -82,7 +82,7 @@ def create( prompt: An optional text to guide the model's style or continue a previous audio segment. The - [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) should match the audio language. response_format: The format of the output, in one of these options: `json`, `text`, `srt`, @@ -194,7 +194,7 @@ async def create( prompt: An optional text to guide the model's style or continue a previous audio segment. The - [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) should match the audio language. response_format: The format of the output, in one of these options: `json`, `text`, `srt`, diff --git a/src/openai/resources/audio/translations.py b/src/openai/resources/audio/translations.py index 27475f1a59..feaeea6e09 100644 --- a/src/openai/resources/audio/translations.py +++ b/src/openai/resources/audio/translations.py @@ -74,7 +74,7 @@ def create( prompt: An optional text to guide the model's style or continue a previous audio segment. The - [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) should be in English. response_format: The format of the output, in one of these options: `json`, `text`, `srt`, @@ -171,7 +171,7 @@ async def create( prompt: An optional text to guide the model's style or continue a previous audio segment. The - [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) should be in English. response_format: The format of the output, in one of these options: `json`, `text`, `srt`, diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index 2ebef183b6..7df212f155 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -79,8 +79,8 @@ def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. 
description: The description of the assistant. The maximum length is 512 characters. @@ -95,8 +95,8 @@ def create( name: The name of the assistant. The maximum length is 256 characters. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -239,14 +239,14 @@ def update( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. name: The name of the assistant. The maximum length is 256 characters. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -344,8 +344,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. @@ -465,8 +465,8 @@ async def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. description: The description of the assistant. The maximum length is 512 characters. @@ -481,8 +481,8 @@ async def create( name: The name of the assistant. The maximum length is 256 characters. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -625,14 +625,14 @@ async def update( model: ID of the model to use. 
You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. name: The name of the assistant. The maximum length is 256 characters. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -730,8 +730,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. diff --git a/src/openai/resources/beta/threads/messages.py b/src/openai/resources/beta/threads/messages.py index 9e6ae8811a..3c25449664 100644 --- a/src/openai/resources/beta/threads/messages.py +++ b/src/openai/resources/beta/threads/messages.py @@ -218,8 +218,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. @@ -492,8 +492,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index 68efaf1782..5c97af0e2e 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -111,7 +111,7 @@ def create( to fetch the file search result content. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. additional_instructions: Appends additional instructions at the end of the instructions for the run. 
This @@ -147,12 +147,12 @@ def create( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -255,7 +255,7 @@ def create( to fetch the file search result content. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. additional_instructions: Appends additional instructions at the end of the instructions for the run. This @@ -291,12 +291,12 @@ def create( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -395,7 +395,7 @@ def create( to fetch the file search result content. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. additional_instructions: Appends additional instructions at the end of the instructions for the run. This @@ -431,12 +431,12 @@ def create( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -663,8 +663,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. @@ -959,7 +959,7 @@ async def create( to fetch the file search result content. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. additional_instructions: Appends additional instructions at the end of the instructions for the run. This @@ -995,12 +995,12 @@ async def create( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1103,7 +1103,7 @@ async def create( to fetch the file search result content. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. additional_instructions: Appends additional instructions at the end of the instructions for the run. This @@ -1139,12 +1139,12 @@ async def create( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1243,7 +1243,7 @@ async def create( to fetch the file search result content. 
See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. additional_instructions: Appends additional instructions at the end of the instructions for the run. This @@ -1279,12 +1279,12 @@ async def create( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1511,8 +1511,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. diff --git a/src/openai/resources/beta/threads/runs/steps.py b/src/openai/resources/beta/threads/runs/steps.py index 5d6d55f9d9..9bd91e39e0 100644 --- a/src/openai/resources/beta/threads/runs/steps.py +++ b/src/openai/resources/beta/threads/runs/steps.py @@ -68,7 +68,7 @@ def retrieve( to fetch the file search result content. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. extra_headers: Send extra headers @@ -126,15 +126,15 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. include: A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. limit: A limit on the number of objects to be returned. 
Limit can range between 1 and @@ -222,7 +222,7 @@ async def retrieve( to fetch the file search result content. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. extra_headers: Send extra headers @@ -280,15 +280,15 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. include: A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. limit: A limit on the number of objects to be returned. Limit can range between 1 and diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index 17f5c6970b..728d375aa6 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -317,12 +317,12 @@ def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -451,12 +451,12 @@ def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -581,12 +581,12 @@ def create_and_run( assistant will be used. 
parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -971,12 +971,12 @@ async def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1105,12 +1105,12 @@ async def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1235,12 +1235,12 @@ async def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured diff --git a/src/openai/resources/beta/vector_stores/file_batches.py b/src/openai/resources/beta/vector_stores/file_batches.py index 34fcd8c61b..2d4cec3ce8 100644 --- a/src/openai/resources/beta/vector_stores/file_batches.py +++ b/src/openai/resources/beta/vector_stores/file_batches.py @@ -201,8 +201,8 @@ def list_files( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. @@ -422,8 +422,8 @@ def list_files( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. diff --git a/src/openai/resources/beta/vector_stores/files.py b/src/openai/resources/beta/vector_stores/files.py index e96b492ac0..d633985e0d 100644 --- a/src/openai/resources/beta/vector_stores/files.py +++ b/src/openai/resources/beta/vector_stores/files.py @@ -162,8 +162,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. @@ -384,8 +384,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. diff --git a/src/openai/resources/beta/vector_stores/vector_stores.py b/src/openai/resources/beta/vector_stores/vector_stores.py index d69add7b26..61a2eadc7b 100644 --- a/src/openai/resources/beta/vector_stores/vector_stores.py +++ b/src/openai/resources/beta/vector_stores/vector_stores.py @@ -251,8 +251,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. 
limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. @@ -529,8 +529,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index 03919aab2f..84e6cf9b72 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -31,6 +31,7 @@ from ...types.chat.chat_completion_audio_param import ChatCompletionAudioParam from ...types.chat.chat_completion_message_param import ChatCompletionMessageParam from ...types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam +from ...types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam from ...types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam __all__ = ["Completions", "AsyncCompletions"] @@ -74,6 +75,7 @@ def create( modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, @@ -111,7 +113,7 @@ def create( [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API. audio: Parameters for audio output. Required when audio output is requested with @@ -122,7 +124,7 @@ def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) function_call: Deprecated in favor of `tool_choice`. @@ -183,19 +185,22 @@ def create( choices. Keep `n` as `1` to minimize costs. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + prediction: Static predicted output content, such as the content of a text file that is + being regenerated. + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -282,7 +287,7 @@ def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -313,6 +318,7 @@ def create( modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, @@ -349,7 +355,7 @@ def create( [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be @@ -367,7 +373,7 @@ def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) function_call: Deprecated in favor of `tool_choice`. @@ -428,19 +434,22 @@ def create( choices. Keep `n` as `1` to minimize costs. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + prediction: Static predicted output content, such as the content of a text file that is + being regenerated. + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -520,7 +529,7 @@ def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -551,6 +560,7 @@ def create( modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, @@ -587,7 +597,7 @@ def create( [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be @@ -605,7 +615,7 @@ def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) function_call: Deprecated in favor of `tool_choice`. @@ -666,19 +676,22 @@ def create( choices. Keep `n` as `1` to minimize costs. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + prediction: Static predicted output content, such as the content of a text file that is + being regenerated. + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -758,7 +771,7 @@ def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -788,6 +801,7 @@ def create( modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, @@ -827,6 +841,7 @@ def create( "modalities": modalities, "n": n, "parallel_tool_calls": parallel_tool_calls, + "prediction": prediction, "presence_penalty": presence_penalty, "response_format": response_format, "seed": seed, @@ -891,6 +906,7 @@ async def create( modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, @@ -928,7 +944,7 @@ async def create( [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API. audio: Parameters for audio output. Required when audio output is requested with @@ -939,7 +955,7 @@ async def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) function_call: Deprecated in favor of `tool_choice`. @@ -1000,19 +1016,22 @@ async def create( choices. Keep `n` as `1` to minimize costs. 
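The new `prediction` parameter carries Predicted Outputs: a static guess at most of the response that the API can verify rather than regenerate. A minimal sketch, assuming the parameter accepts the `{"type": "content", "content": ...}` shape suggested by the `ChatCompletionPredictionContentParam` type added in this change; the model name and file contents are placeholders:

import openai

client = openai.OpenAI()

existing_code = 'def greet(name):\n    print("Hello, " + name)\n'

completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "user", "content": "Rename the function to `say_hello` and return the full file."},
        {"role": "user", "content": existing_code},
    ],
    # Most of the regenerated file should match the original, so pass it as the prediction.
    prediction={"type": "content", "content": existing_code},
)
print(completion.choices[0].message.content)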
parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + prediction: Static predicted output content, such as the content of a text file that is + being regenerated. + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1099,7 +1118,7 @@ async def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -1130,6 +1149,7 @@ async def create( modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, @@ -1166,7 +1186,7 @@ async def create( [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be @@ -1184,7 +1204,7 @@ async def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) function_call: Deprecated in favor of `tool_choice`. @@ -1245,19 +1265,22 @@ async def create( choices. Keep `n` as `1` to minimize costs. 
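`parallel_tool_calls` controls whether the model may emit several tool calls in one turn. A minimal sketch of turning it off, with a placeholder function tool and model name:

import openai

client = openai.OpenAI()

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
]

completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What's the weather in Paris and in Berlin?"}],
    tools=tools,
    # With parallel function calling disabled, the model returns at most one tool call per response.
    parallel_tool_calls=False,
)
print(completion.choices[0].message.tool_calls)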
parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + prediction: Static predicted output content, such as the content of a text file that is + being regenerated. + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1337,7 +1360,7 @@ async def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -1368,6 +1391,7 @@ async def create( modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, @@ -1404,7 +1428,7 @@ async def create( [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be @@ -1422,7 +1446,7 @@ async def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) function_call: Deprecated in favor of `tool_choice`. @@ -1483,19 +1507,22 @@ async def create( choices. Keep `n` as `1` to minimize costs. 
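The `response_format` docstrings above refer to Structured Outputs via `{"type": "json_schema", ...}`. A minimal sketch, assuming a model that supports Structured Outputs; the schema name and fields are placeholders:

import openai

client = openai.OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What is 3 + 11?"}],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "math_answer",
            "strict": True,
            "schema": {
                "type": "object",
                "properties": {"answer": {"type": "number"}},
                "required": ["answer"],
                "additionalProperties": False,
            },
        },
    },
)
print(completion.choices[0].message.content)  # e.g. '{"answer": 14}'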
parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + prediction: Static predicted output content, such as the content of a text file that is + being regenerated. + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1575,7 +1602,7 @@ async def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -1605,6 +1632,7 @@ async def create( modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, @@ -1644,6 +1672,7 @@ async def create( "modalities": modalities, "n": n, "parallel_tool_calls": parallel_tool_calls, + "prediction": prediction, "presence_penalty": presence_penalty, "response_format": response_format, "seed": seed, diff --git a/src/openai/resources/completions.py b/src/openai/resources/completions.py index 7f5a3fc4ff..7e95f79607 100644 --- a/src/openai/resources/completions.py +++ b/src/openai/resources/completions.py @@ -82,8 +82,8 @@ def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. @@ -108,7 +108,7 @@ def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) logit_bias: Modify the likelihood of specified tokens appearing in the completion. @@ -148,7 +148,7 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return @@ -187,7 +187,7 @@ def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -235,8 +235,8 @@ def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. @@ -268,7 +268,7 @@ def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) logit_bias: Modify the likelihood of specified tokens appearing in the completion. @@ -308,7 +308,7 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return @@ -340,7 +340,7 @@ def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -388,8 +388,8 @@ def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. 
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. @@ -421,7 +421,7 @@ def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) logit_bias: Modify the likelihood of specified tokens appearing in the completion. @@ -461,7 +461,7 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return @@ -493,7 +493,7 @@ def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -624,8 +624,8 @@ async def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. @@ -650,7 +650,7 @@ async def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) logit_bias: Modify the likelihood of specified tokens appearing in the completion. @@ -690,7 +690,7 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return @@ -729,7 +729,7 @@ async def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
extra_headers: Send extra headers @@ -777,8 +777,8 @@ async def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. @@ -810,7 +810,7 @@ async def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) logit_bias: Modify the likelihood of specified tokens appearing in the completion. @@ -850,7 +850,7 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return @@ -882,7 +882,7 @@ async def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -930,8 +930,8 @@ async def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. @@ -963,7 +963,7 @@ async def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) logit_bias: Modify the likelihood of specified tokens appearing in the completion. @@ -1003,7 +1003,7 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return @@ -1035,7 +1035,7 @@ async def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers diff --git a/src/openai/resources/embeddings.py b/src/openai/resources/embeddings.py index 3a2763904b..2197c4d280 100644 --- a/src/openai/resources/embeddings.py +++ b/src/openai/resources/embeddings.py @@ -74,8 +74,8 @@ def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. dimensions: The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. @@ -85,7 +85,7 @@ def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -164,8 +164,8 @@ async def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. dimensions: The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. @@ -175,7 +175,7 @@ async def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
extra_headers: Send extra headers diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py index ee668e9bc2..977b9b3c48 100644 --- a/src/openai/resources/files.py +++ b/src/openai/resources/files.py @@ -4,6 +4,7 @@ import typing_extensions from typing import Mapping, cast +from typing_extensions import Literal import httpx @@ -26,7 +27,7 @@ to_custom_streamed_response_wrapper, async_to_custom_streamed_response_wrapper, ) -from ..pagination import SyncPage, AsyncPage +from ..pagination import SyncCursorPage, AsyncCursorPage from .._base_client import AsyncPaginator, make_request_options from ..types.file_object import FileObject from ..types.file_deleted import FileDeleted @@ -168,6 +169,9 @@ def retrieve( def list( self, *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, purpose: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -175,11 +179,23 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SyncPage[FileObject]: - """ - Returns a list of files that belong to the user's organization. + ) -> SyncCursorPage[FileObject]: + """Returns a list of files. Args: + after: A cursor for use in pagination. + + `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 10,000, and the default is 10,000. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + purpose: Only return files with the given purpose. extra_headers: Send extra headers @@ -192,13 +208,21 @@ def list( """ return self._get_api_list( "/files", - page=SyncPage[FileObject], + page=SyncCursorPage[FileObject], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, - query=maybe_transform({"purpose": purpose}, file_list_params.FileListParams), + query=maybe_transform( + { + "after": after, + "limit": limit, + "order": order, + "purpose": purpose, + }, + file_list_params.FileListParams, + ), ), model=FileObject, ) @@ -438,6 +462,9 @@ async def retrieve( def list( self, *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, purpose: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -445,11 +472,23 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncPaginator[FileObject, AsyncPage[FileObject]]: - """ - Returns a list of files that belong to the user's organization. + ) -> AsyncPaginator[FileObject, AsyncCursorPage[FileObject]]: + """Returns a list of files. Args: + after: A cursor for use in pagination. 
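With the switch to cursor pagination, `files.list` now accepts `after`, `limit`, and `order` in addition to `purpose`. A minimal sketch, assuming an OPENAI_API_KEY in the environment; iterating the returned page follows the cursor automatically:

import openai

client = openai.OpenAI()

# Newest files first, 100 per request; the SDK fetches subsequent pages as needed.
for file in client.files.list(purpose="fine-tune", limit=100, order="desc"):
    print(file.id, file.filename)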
+ + `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 10,000, and the default is 10,000. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + purpose: Only return files with the given purpose. extra_headers: Send extra headers @@ -462,13 +501,21 @@ def list( """ return self._get_api_list( "/files", - page=AsyncPage[FileObject], + page=AsyncCursorPage[FileObject], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, - query=maybe_transform({"purpose": purpose}, file_list_params.FileListParams), + query=maybe_transform( + { + "after": after, + "limit": limit, + "order": order, + "purpose": purpose, + }, + file_list_params.FileListParams, + ), ), model=FileObject, ) diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py index 7eb0c5dbfc..4024bf79f3 100644 --- a/src/openai/resources/fine_tuning/jobs/jobs.py +++ b/src/openai/resources/fine_tuning/jobs/jobs.py @@ -85,7 +85,7 @@ def create( Args: model: The name of the model to fine-tune. You can select one of the - [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). + [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). training_file: The ID of an uploaded file that contains training data. @@ -376,7 +376,7 @@ async def create( Args: model: The name of the model to fine-tune. You can select one of the - [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). + [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). training_file: The ID of an uploaded file that contains training data. diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py index e9629d48fd..2fbc077dd9 100644 --- a/src/openai/resources/images.py +++ b/src/openai/resources/images.py @@ -84,7 +84,7 @@ def create_variation( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -165,7 +165,7 @@ def edit( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -251,7 +251,7 @@ def generate( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -341,7 +341,7 @@ async def create_variation( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -422,7 +422,7 @@ async def edit( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -508,7 +508,7 @@ async def generate( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers diff --git a/src/openai/resources/moderations.py b/src/openai/resources/moderations.py index 8b73da57b2..ce80bb7d55 100644 --- a/src/openai/resources/moderations.py +++ b/src/openai/resources/moderations.py @@ -68,7 +68,7 @@ def create( model: The content moderation model you would like to use. Learn more in [the moderation guide](https://platform.openai.com/docs/guides/moderation), and learn about available models - [here](https://platform.openai.com/docs/models/moderation). + [here](https://platform.openai.com/docs/models#moderation). extra_headers: Send extra headers @@ -138,7 +138,7 @@ async def create( model: The content moderation model you would like to use. Learn more in [the moderation guide](https://platform.openai.com/docs/guides/moderation), and learn about available models - [here](https://platform.openai.com/docs/models/moderation). + [here](https://platform.openai.com/docs/models#moderation). extra_headers: Send extra headers diff --git a/src/openai/resources/uploads/uploads.py b/src/openai/resources/uploads/uploads.py index dadd01d342..2384716bdd 100644 --- a/src/openai/resources/uploads/uploads.py +++ b/src/openai/resources/uploads/uploads.py @@ -85,7 +85,7 @@ def create( For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case: - - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files) + - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files) For guidance on the proper filename extensions for each purpose, please follow the documentation on @@ -279,7 +279,7 @@ async def create( For certain `purpose`s, the correct `mime_type` must be specified. 
Please refer to documentation for the supported MIME types for your use case: - - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files) + - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files) For guidance on the proper filename extensions for each purpose, please follow the documentation on diff --git a/src/openai/types/audio/speech_create_params.py b/src/openai/types/audio/speech_create_params.py index dff66e49c7..a60d000708 100644 --- a/src/openai/types/audio/speech_create_params.py +++ b/src/openai/types/audio/speech_create_params.py @@ -16,7 +16,7 @@ class SpeechCreateParams(TypedDict, total=False): model: Required[Union[str, SpeechModel]] """ - One of the available [TTS models](https://platform.openai.com/docs/models/tts): + One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1` or `tts-1-hd` """ @@ -25,7 +25,7 @@ class SpeechCreateParams(TypedDict, total=False): Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the - [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options). + [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). """ response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] diff --git a/src/openai/types/audio/transcription_create_params.py b/src/openai/types/audio/transcription_create_params.py index 5ac2bb91e5..88805affbd 100644 --- a/src/openai/types/audio/transcription_create_params.py +++ b/src/openai/types/audio/transcription_create_params.py @@ -38,7 +38,7 @@ class TranscriptionCreateParams(TypedDict, total=False): """An optional text to guide the model's style or continue a previous audio segment. - The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) should match the audio language. """ diff --git a/src/openai/types/audio/translation_create_params.py b/src/openai/types/audio/translation_create_params.py index 6859ed9d30..62f85b8757 100644 --- a/src/openai/types/audio/translation_create_params.py +++ b/src/openai/types/audio/translation_create_params.py @@ -30,7 +30,7 @@ class TranslationCreateParams(TypedDict, total=False): """An optional text to guide the model's style or continue a previous audio segment. - The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) should be in English. """ diff --git a/src/openai/types/beta/assistant.py b/src/openai/types/beta/assistant.py index ea97de440f..3c8b8e403b 100644 --- a/src/openai/types/beta/assistant.py +++ b/src/openai/types/beta/assistant.py @@ -65,8 +65,8 @@ class Assistant(BaseModel): You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. """ name: Optional[str] = None @@ -85,8 +85,8 @@ class Assistant(BaseModel): response_format: Optional[AssistantResponseFormatOption] = None """Specifies the format that the model must output. 
- Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index e11f842f05..568b223ce7 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -26,8 +26,8 @@ class AssistantCreateParams(TypedDict, total=False): You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. """ description: Optional[str] @@ -53,8 +53,8 @@ class AssistantCreateParams(TypedDict, total=False): response_format: Optional[AssistantResponseFormatOptionParam] """Specifies the format that the model must output. - Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured diff --git a/src/openai/types/beta/assistant_list_params.py b/src/openai/types/beta/assistant_list_params.py index f54f63120b..834ffbcaf8 100644 --- a/src/openai/types/beta/assistant_list_params.py +++ b/src/openai/types/beta/assistant_list_params.py @@ -21,7 +21,7 @@ class AssistantListParams(TypedDict, total=False): """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your + you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py index c4598df507..9a66e41ab3 100644 --- a/src/openai/types/beta/assistant_update_params.py +++ b/src/openai/types/beta/assistant_update_params.py @@ -35,8 +35,8 @@ class AssistantUpdateParams(TypedDict, total=False): You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. """ name: Optional[str] @@ -45,8 +45,8 @@ class AssistantUpdateParams(TypedDict, total=False): response_format: Optional[AssistantResponseFormatOptionParam] """Specifies the format that the model must output. 
- Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured diff --git a/src/openai/types/beta/file_search_tool.py b/src/openai/types/beta/file_search_tool.py index aee6593e89..89fc16c04c 100644 --- a/src/openai/types/beta/file_search_tool.py +++ b/src/openai/types/beta/file_search_tool.py @@ -31,7 +31,7 @@ class FileSearch(BaseModel): Note that the file search tool may output fewer than `max_num_results` results. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. """ @@ -42,7 +42,7 @@ class FileSearch(BaseModel): score_threshold of 0. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. """ diff --git a/src/openai/types/beta/file_search_tool_param.py b/src/openai/types/beta/file_search_tool_param.py index 5ce91207ba..c73d0af79d 100644 --- a/src/openai/types/beta/file_search_tool_param.py +++ b/src/openai/types/beta/file_search_tool_param.py @@ -30,7 +30,7 @@ class FileSearch(TypedDict, total=False): Note that the file search tool may output fewer than `max_num_results` results. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. """ @@ -41,7 +41,7 @@ class FileSearch(TypedDict, total=False): score_threshold of 0. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. """ diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index 64ee6a8710..8310ba12f4 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -86,15 +86,15 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): parallel_tool_calls: bool """ Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. """ response_format: Optional[AssistantResponseFormatOptionParam] """Specifies the format that the model must output. 
- Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured diff --git a/src/openai/types/beta/threads/message_list_params.py b/src/openai/types/beta/threads/message_list_params.py index 18c2442fb5..a7c22a66fb 100644 --- a/src/openai/types/beta/threads/message_list_params.py +++ b/src/openai/types/beta/threads/message_list_params.py @@ -21,7 +21,7 @@ class MessageListParams(TypedDict, total=False): """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your + you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py index e8f2b74dee..ad32135b7d 100644 --- a/src/openai/types/beta/threads/run.py +++ b/src/openai/types/beta/threads/run.py @@ -154,7 +154,7 @@ class Run(BaseModel): parallel_tool_calls: bool """ Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. """ @@ -167,8 +167,8 @@ class Run(BaseModel): response_format: Optional[AssistantResponseFormatOption] = None """Specifies the format that the model must output. - Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index 9767b142e1..88dc39645e 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -41,7 +41,7 @@ class RunCreateParamsBase(TypedDict, total=False): search result content. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. """ @@ -99,15 +99,15 @@ class RunCreateParamsBase(TypedDict, total=False): parallel_tool_calls: bool """ Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. """ response_format: Optional[AssistantResponseFormatOptionParam] """Specifies the format that the model must output. 
- Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured diff --git a/src/openai/types/beta/threads/run_list_params.py b/src/openai/types/beta/threads/run_list_params.py index 1e32bca4b4..fbea54f6f2 100644 --- a/src/openai/types/beta/threads/run_list_params.py +++ b/src/openai/types/beta/threads/run_list_params.py @@ -21,7 +21,7 @@ class RunListParams(TypedDict, total=False): """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your + you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ diff --git a/src/openai/types/beta/threads/runs/step_list_params.py b/src/openai/types/beta/threads/runs/step_list_params.py index 3931bd7e0c..a6be771d9f 100644 --- a/src/openai/types/beta/threads/runs/step_list_params.py +++ b/src/openai/types/beta/threads/runs/step_list_params.py @@ -26,7 +26,7 @@ class StepListParams(TypedDict, total=False): """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your + you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ @@ -39,7 +39,7 @@ class StepListParams(TypedDict, total=False): search result content. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. """ diff --git a/src/openai/types/beta/threads/runs/step_retrieve_params.py b/src/openai/types/beta/threads/runs/step_retrieve_params.py index 22c1c049f4..ecbb72edbd 100644 --- a/src/openai/types/beta/threads/runs/step_retrieve_params.py +++ b/src/openai/types/beta/threads/runs/step_retrieve_params.py @@ -23,6 +23,6 @@ class StepRetrieveParams(TypedDict, total=False): search result content. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. """ diff --git a/src/openai/types/beta/vector_store_list_params.py b/src/openai/types/beta/vector_store_list_params.py index f39f67266d..e26ff90a85 100644 --- a/src/openai/types/beta/vector_store_list_params.py +++ b/src/openai/types/beta/vector_store_list_params.py @@ -21,7 +21,7 @@ class VectorStoreListParams(TypedDict, total=False): """A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your + you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ diff --git a/src/openai/types/beta/vector_stores/file_batch_list_files_params.py b/src/openai/types/beta/vector_stores/file_batch_list_files_params.py index 24dee7d5a5..2a0a6c6aa7 100644 --- a/src/openai/types/beta/vector_stores/file_batch_list_files_params.py +++ b/src/openai/types/beta/vector_stores/file_batch_list_files_params.py @@ -23,7 +23,7 @@ class FileBatchListFilesParams(TypedDict, total=False): """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your + you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ diff --git a/src/openai/types/beta/vector_stores/file_list_params.py b/src/openai/types/beta/vector_stores/file_list_params.py index 23dd7f0d94..867b5fb3bb 100644 --- a/src/openai/types/beta/vector_stores/file_list_params.py +++ b/src/openai/types/beta/vector_stores/file_list_params.py @@ -21,7 +21,7 @@ class FileListParams(TypedDict, total=False): """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your + you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py index b85365ecb1..ef562a4b94 100644 --- a/src/openai/types/chat/__init__.py +++ b/src/openai/types/chat/__init__.py @@ -37,6 +37,9 @@ from .chat_completion_content_part_image_param import ( ChatCompletionContentPartImageParam as ChatCompletionContentPartImageParam, ) +from .chat_completion_prediction_content_param import ( + ChatCompletionPredictionContentParam as ChatCompletionPredictionContentParam, +) from .chat_completion_tool_choice_option_param import ( ChatCompletionToolChoiceOptionParam as ChatCompletionToolChoiceOptionParam, ) diff --git a/src/openai/types/chat/chat_completion_content_part_image_param.py b/src/openai/types/chat/chat_completion_content_part_image_param.py index b1a186aa6d..9d407324d0 100644 --- a/src/openai/types/chat/chat_completion_content_part_image_param.py +++ b/src/openai/types/chat/chat_completion_content_part_image_param.py @@ -15,7 +15,7 @@ class ImageURL(TypedDict, total=False): """Specifies the detail level of the image. Learn more in the - [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). + [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). """ diff --git a/src/openai/types/chat/chat_completion_prediction_content_param.py b/src/openai/types/chat/chat_completion_prediction_content_param.py new file mode 100644 index 0000000000..c44e6e3653 --- /dev/null +++ b/src/openai/types/chat/chat_completion_prediction_content_param.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Union, Iterable +from typing_extensions import Literal, Required, TypedDict + +from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam + +__all__ = ["ChatCompletionPredictionContentParam"] + + +class ChatCompletionPredictionContentParam(TypedDict, total=False): + content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] + """ + The content that should be matched when generating a model response. If + generated tokens would match this content, the entire model response can be + returned much more quickly. + """ + + type: Required[Literal["content"]] + """The type of the predicted content you want to provide. + + This type is currently always `content`. + """ diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index af6a47c219..e838858314 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -13,6 +13,7 @@ from ..shared_params.function_parameters import FunctionParameters from ..shared_params.response_format_text import ResponseFormatText from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam +from .chat_completion_prediction_content_param import ChatCompletionPredictionContentParam from .chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam from ..shared_params.response_format_json_object import ResponseFormatJSONObject from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema @@ -43,7 +44,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): """ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API. """ @@ -60,7 +61,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) """ function_call: FunctionCall @@ -148,25 +149,31 @@ class CompletionCreateParamsBase(TypedDict, total=False): parallel_tool_calls: bool """ Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. """ + prediction: Optional[ChatCompletionPredictionContentParam] + """ + Static predicted output content, such as the content of a text file that is + being regenerated. + """ + presence_penalty: Optional[float] """Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
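The `prediction` parameter and its `ChatCompletionPredictionContentParam` type introduced above enable Predicted Outputs for chat completions. A minimal sketch of a call using the new parameter, assuming an `OPENAI_API_KEY` environment variable and a model that supports the feature; the file contents shown are placeholders:

```python
from openai import OpenAI

client = OpenAI()

# Placeholder content that the model response is expected to largely reproduce.
existing_code = "class User:\n    first_name: str\n    last_name: str\n"

completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {
            "role": "user",
            "content": "Rename `first_name` to `given_name` and return the full class.",
        },
        {"role": "user", "content": existing_code},
    ],
    # Static predicted output content; tokens that match it can be returned much faster.
    prediction={"type": "content", "content": existing_code},
)

print(completion.choices[0].message.content)
# usage.completion_tokens_details gains accepted_prediction_tokens and
# rejected_prediction_tokens elsewhere in this patch.
print(completion.usage)
```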
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) """ response_format: ResponseFormat """An object specifying the format that the model must output. - Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -276,7 +283,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). """ diff --git a/src/openai/types/completion_create_params.py b/src/openai/types/completion_create_params.py index 6c112b3902..fdb1680d26 100644 --- a/src/openai/types/completion_create_params.py +++ b/src/openai/types/completion_create_params.py @@ -17,8 +17,8 @@ class CompletionCreateParamsBase(TypedDict, total=False): You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. """ prompt: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None]] @@ -53,7 +53,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) """ logit_bias: Optional[Dict[str, int]] @@ -106,7 +106,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) """ seed: Optional[int] @@ -156,7 +156,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
""" diff --git a/src/openai/types/completion_usage.py b/src/openai/types/completion_usage.py index fe112833e0..d8c4e84cf7 100644 --- a/src/openai/types/completion_usage.py +++ b/src/openai/types/completion_usage.py @@ -8,12 +8,26 @@ class CompletionTokensDetails(BaseModel): + accepted_prediction_tokens: Optional[int] = None + """ + When using Predicted Outputs, the number of tokens in the prediction that + appeared in the completion. + """ + audio_tokens: Optional[int] = None """Audio input tokens generated by the model.""" reasoning_tokens: Optional[int] = None """Tokens generated by the model for reasoning.""" + rejected_prediction_tokens: Optional[int] = None + """ + When using Predicted Outputs, the number of tokens in the prediction that did + not appear in the completion. However, like reasoning tokens, these tokens are + still counted in the total completion tokens for purposes of billing, output, + and context window limits. + """ + class PromptTokensDetails(BaseModel): audio_tokens: Optional[int] = None diff --git a/src/openai/types/embedding_create_params.py b/src/openai/types/embedding_create_params.py index 1548cdbd77..1385762885 100644 --- a/src/openai/types/embedding_create_params.py +++ b/src/openai/types/embedding_create_params.py @@ -28,8 +28,8 @@ class EmbeddingCreateParams(TypedDict, total=False): You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. """ dimensions: int @@ -48,5 +48,5 @@ class EmbeddingCreateParams(TypedDict, total=False): """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). """ diff --git a/src/openai/types/file_list_params.py b/src/openai/types/file_list_params.py index 212eca13c0..058d874c29 100644 --- a/src/openai/types/file_list_params.py +++ b/src/openai/types/file_list_params.py @@ -2,11 +2,32 @@ from __future__ import annotations -from typing_extensions import TypedDict +from typing_extensions import Literal, TypedDict __all__ = ["FileListParams"] class FileListParams(TypedDict, total=False): + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 10,000, and the default is 10,000. + """ + + order: Literal["asc", "desc"] + """Sort order by the `created_at` timestamp of the objects. + + `asc` for ascending order and `desc` for descending order. + """ + purpose: str """Only return files with the given purpose.""" diff --git a/src/openai/types/fine_tuning/job_create_params.py b/src/openai/types/fine_tuning/job_create_params.py index 8f5ea86274..8814229b2e 100644 --- a/src/openai/types/fine_tuning/job_create_params.py +++ b/src/openai/types/fine_tuning/job_create_params.py @@ -13,7 +13,7 @@ class JobCreateParams(TypedDict, total=False): """The name of the model to fine-tune. 
You can select one of the - [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). + [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). """ training_file: Required[str] diff --git a/src/openai/types/image_create_variation_params.py b/src/openai/types/image_create_variation_params.py index d6ecf0f1ae..d20f672912 100644 --- a/src/openai/types/image_create_variation_params.py +++ b/src/openai/types/image_create_variation_params.py @@ -47,5 +47,5 @@ class ImageCreateVariationParams(TypedDict, total=False): """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). """ diff --git a/src/openai/types/image_edit_params.py b/src/openai/types/image_edit_params.py index a596a8692b..1cb10611f3 100644 --- a/src/openai/types/image_edit_params.py +++ b/src/openai/types/image_edit_params.py @@ -58,5 +58,5 @@ class ImageEditParams(TypedDict, total=False): """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). """ diff --git a/src/openai/types/image_generate_params.py b/src/openai/types/image_generate_params.py index 307adeb3da..c88c45f518 100644 --- a/src/openai/types/image_generate_params.py +++ b/src/openai/types/image_generate_params.py @@ -61,5 +61,5 @@ class ImageGenerateParams(TypedDict, total=False): """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). """ diff --git a/src/openai/types/moderation_create_params.py b/src/openai/types/moderation_create_params.py index 3193fd9c2d..3ea2f3cd88 100644 --- a/src/openai/types/moderation_create_params.py +++ b/src/openai/types/moderation_create_params.py @@ -25,5 +25,5 @@ class ModerationCreateParams(TypedDict, total=False): Learn more in [the moderation guide](https://platform.openai.com/docs/guides/moderation), and learn about available models - [here](https://platform.openai.com/docs/models/moderation). + [here](https://platform.openai.com/docs/models#moderation). 
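The `after`, `limit`, and `order` parameters added to `FileListParams` above make `files.list` cursor-paginated. A rough sketch of paging through files manually with the new parameters, assuming a configured client; names and counts are illustrative:

```python
from openai import OpenAI

client = OpenAI()

# Newest files first, up to 100 per page (the new `limit` and `order` parameters).
first_page = client.files.list(limit=100, order="desc")
for file in first_page.data:
    print(file.id, file.filename)

# Manual cursoring: pass the last ID seen as `after` to fetch the following page.
if first_page.data:
    next_page = client.files.list(limit=100, order="desc", after=first_page.data[-1].id)
    print(len(next_page.data), "more files")
```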
""" diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index a341e78f7e..4359993e9c 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -64,6 +64,10 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: modalities=["text", "audio"], n=1, parallel_tool_calls=True, + prediction={ + "content": "string", + "type": "content", + }, presence_penalty=-2, response_format={"type": "text"}, seed=-9007199254740991, @@ -192,6 +196,10 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: modalities=["text", "audio"], n=1, parallel_tool_calls=True, + prediction={ + "content": "string", + "type": "content", + }, presence_penalty=-2, response_format={"type": "text"}, seed=-9007199254740991, @@ -322,6 +330,10 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn modalities=["text", "audio"], n=1, parallel_tool_calls=True, + prediction={ + "content": "string", + "type": "content", + }, presence_penalty=-2, response_format={"type": "text"}, seed=-9007199254740991, @@ -450,6 +462,10 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn modalities=["text", "audio"], n=1, parallel_tool_calls=True, + prediction={ + "content": "string", + "type": "content", + }, presence_penalty=-2, response_format={"type": "text"}, seed=-9007199254740991, diff --git a/tests/api_resources/test_files.py b/tests/api_resources/test_files.py index 725e55c193..62d2b88ebf 100644 --- a/tests/api_resources/test_files.py +++ b/tests/api_resources/test_files.py @@ -13,7 +13,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.types import FileObject, FileDeleted -from openai.pagination import SyncPage, AsyncPage +from openai.pagination import SyncCursorPage, AsyncCursorPage # pyright: reportDeprecated=false @@ -98,14 +98,17 @@ def test_path_params_retrieve(self, client: OpenAI) -> None: @parametrize def test_method_list(self, client: OpenAI) -> None: file = client.files.list() - assert_matches_type(SyncPage[FileObject], file, path=["response"]) + assert_matches_type(SyncCursorPage[FileObject], file, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: file = client.files.list( + after="after", + limit=0, + order="asc", purpose="purpose", ) - assert_matches_type(SyncPage[FileObject], file, path=["response"]) + assert_matches_type(SyncCursorPage[FileObject], file, path=["response"]) @parametrize def test_raw_response_list(self, client: OpenAI) -> None: @@ -114,7 +117,7 @@ def test_raw_response_list(self, client: OpenAI) -> None: assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() - assert_matches_type(SyncPage[FileObject], file, path=["response"]) + assert_matches_type(SyncCursorPage[FileObject], file, path=["response"]) @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: @@ -123,7 +126,7 @@ def test_streaming_response_list(self, client: OpenAI) -> None: assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() - assert_matches_type(SyncPage[FileObject], file, path=["response"]) + assert_matches_type(SyncCursorPage[FileObject], file, path=["response"]) assert cast(Any, response.is_closed) is True @@ -334,14 +337,17 @@ async def test_path_params_retrieve(self, async_client: 
AsyncOpenAI) -> None: @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: file = await async_client.files.list() - assert_matches_type(AsyncPage[FileObject], file, path=["response"]) + assert_matches_type(AsyncCursorPage[FileObject], file, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: file = await async_client.files.list( + after="after", + limit=0, + order="asc", purpose="purpose", ) - assert_matches_type(AsyncPage[FileObject], file, path=["response"]) + assert_matches_type(AsyncCursorPage[FileObject], file, path=["response"]) @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: @@ -350,7 +356,7 @@ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() - assert_matches_type(AsyncPage[FileObject], file, path=["response"]) + assert_matches_type(AsyncCursorPage[FileObject], file, path=["response"]) @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: @@ -359,7 +365,7 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = await response.parse() - assert_matches_type(AsyncPage[FileObject], file, path=["response"]) + assert_matches_type(AsyncCursorPage[FileObject], file, path=["response"]) assert cast(Any, response.is_closed) is True From c837da879d167d0abfa4bfcbdb85ac64ab1265e3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 6 Nov 2024 15:54:29 +0000 Subject: [PATCH 107/192] chore(tests): adjust retry timeout values (#1851) --- tests/test_client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_client.py b/tests/test_client.py index ff07ec393b..912ea1316c 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -698,7 +698,7 @@ class Model(BaseModel): [3, "", 0.5], [2, "", 0.5 * 2.0], [1, "", 0.5 * 4.0], - [-1100, "", 7.8], # test large number potentially overflowing + [-1100, "", 8], # test large number potentially overflowing ], ) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) @@ -1564,7 +1564,7 @@ class Model(BaseModel): [3, "", 0.5], [2, "", 0.5 * 2.0], [1, "", 0.5 * 4.0], - [-1100, "", 7.8], # test large number potentially overflowing + [-1100, "", 8], # test large number potentially overflowing ], ) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) From 3a5c381e787c62da66b72e5c9416ba902b092c56 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 22:39:39 +0000 Subject: [PATCH 108/192] docs: move comments in example snippets (#1860) --- README.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 34ae1dacea..906dc37d39 100644 --- a/README.md +++ b/README.md @@ -26,8 +26,7 @@ import os from openai import OpenAI client = OpenAI( - # This is the default and can be omitted - api_key=os.environ.get("OPENAI_API_KEY"), + api_key=os.environ.get("OPENAI_API_KEY"), # This is the default and can be omitted ) chat_completion = client.chat.completions.create( @@ -56,8 +55,7 @@ import asyncio from openai import AsyncOpenAI client = AsyncOpenAI( - # This is the default and can be omitted - 
api_key=os.environ.get("OPENAI_API_KEY"), + api_key=os.environ.get("OPENAI_API_KEY"), # This is the default and can be omitted ) From 0010288fa67e1c82df48ec8b8f5edd59d420c458 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 22:59:22 +0000 Subject: [PATCH 109/192] docs: bump models in example snippets to gpt-4o (#1861) --- README.md | 22 +++++++++++----------- tests/test_client.py | 8 ++++---- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 906dc37d39..7f026a768b 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ chat_completion = client.chat.completions.create( "content": "Say this is a test", } ], - model="gpt-3.5-turbo", + model="gpt-4o", ) ``` @@ -67,7 +67,7 @@ async def main() -> None: "content": "Say this is a test", } ], - model="gpt-3.5-turbo", + model="gpt-4o", ) @@ -92,7 +92,7 @@ stream = client.chat.completions.create( "content": "Say this is a test", } ], - model="gpt-3.5-turbo", + model="gpt-4o", stream=True, ) for chat_completion in stream: @@ -113,7 +113,7 @@ stream = await client.chat.completions.create( "content": "Say this is a test", } ], - model="gpt-3.5-turbo", + model="gpt-4o", stream=True, ) async for chat_completion in stream: @@ -216,7 +216,7 @@ completion = client.chat.completions.create( "content": "Can you generate an example json object describing a fruit?", } ], - model="gpt-3.5-turbo-1106", + model="gpt-4o", response_format={"type": "json_object"}, ) ``` @@ -256,7 +256,7 @@ client = OpenAI() try: client.fine_tuning.jobs.create( - model="gpt-3.5-turbo", + model="gpt-4o", training_file="file-abc123", ) except openai.APIConnectionError as e: @@ -305,10 +305,10 @@ client.with_options(max_retries=5).chat.completions.create( messages=[ { "role": "user", - "content": "How can I get the name of the current day in Node.js?", + "content": "How can I get the name of the current day in JavaScript?", } ], - model="gpt-3.5-turbo", + model="gpt-4o", ) ``` @@ -339,7 +339,7 @@ client.with_options(timeout=5.0).chat.completions.create( "content": "How can I list all files in a directory using Python?", } ], - model="gpt-3.5-turbo", + model="gpt-4o", ) ``` @@ -384,7 +384,7 @@ response = client.chat.completions.with_raw_response.create( "role": "user", "content": "Say this is a test", }], - model="gpt-3.5-turbo", + model="gpt-4o", ) print(response.headers.get('X-My-Header')) @@ -417,7 +417,7 @@ with client.chat.completions.with_streaming_response.create( "content": "Say this is a test", } ], - model="gpt-3.5-turbo", + model="gpt-4o", ) as response: print(response.headers.get("X-My-Header")) diff --git a/tests/test_client.py b/tests/test_client.py index 912ea1316c..7ea2ab38d1 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -727,7 +727,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> No "content": "Say this is a test", } ], - model="gpt-3.5-turbo", + model="gpt-4o", ), ), cast_to=httpx.Response, @@ -753,7 +753,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> Non "content": "Say this is a test", } ], - model="gpt-3.5-turbo", + model="gpt-4o", ), ), cast_to=httpx.Response, @@ -1594,7 +1594,7 @@ async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) "content": "Say this is a test", } ], - model="gpt-3.5-turbo", + model="gpt-4o", ), ), cast_to=httpx.Response, @@ -1620,7 +1620,7 @@ async def test_retrying_status_errors_doesnt_leak(self, respx_mock: 
MockRouter) "content": "Say this is a test", } ], - model="gpt-3.5-turbo", + model="gpt-4o", ), ), cast_to=httpx.Response, From 09ee4e7cfeeb7ca4cf58deed4475c409563fab9e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 12:11:55 +0000 Subject: [PATCH 110/192] fix: don't use dicts as iterables in transform (#1865) --- src/openai/_utils/_transform.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/openai/_utils/_transform.py b/src/openai/_utils/_transform.py index d7c05345d1..a6b62cad0c 100644 --- a/src/openai/_utils/_transform.py +++ b/src/openai/_utils/_transform.py @@ -316,6 +316,11 @@ async def _async_transform_recursive( # Iterable[T] or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) ): + # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually + # intended as an iterable, so we don't transform it. + if isinstance(data, dict): + return cast(object, data) + inner_type = extract_type_arg(stripped_type, 0) return [await _async_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data] From 7080d1da45ca114c703b790ec0bed766f6ffaa60 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 10:29:16 +0000 Subject: [PATCH 111/192] chore(tests): limit array example length (#1870) --- .../audio/test_transcriptions.py | 4 +- tests/api_resources/beta/test_assistants.py | 20 +- tests/api_resources/beta/test_threads.py | 572 ++---------------- .../api_resources/beta/test_vector_stores.py | 4 +- .../beta/threads/test_messages.py | 24 +- tests/api_resources/beta/threads/test_runs.py | 420 ++----------- tests/api_resources/chat/test_completions.py | 88 +-- tests/api_resources/fine_tuning/test_jobs.py | 44 +- tests/api_resources/test_uploads.py | 20 +- 9 files changed, 118 insertions(+), 1078 deletions(-) diff --git a/tests/api_resources/audio/test_transcriptions.py b/tests/api_resources/audio/test_transcriptions.py index 3db013b079..bcb75b9d68 100644 --- a/tests/api_resources/audio/test_transcriptions.py +++ b/tests/api_resources/audio/test_transcriptions.py @@ -34,7 +34,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: prompt="prompt", response_format="json", temperature=0, - timestamp_granularities=["word", "segment"], + timestamp_granularities=["word"], ) assert_matches_type(TranscriptionCreateResponse, transcription, path=["response"]) @@ -85,7 +85,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> prompt="prompt", response_format="json", temperature=0, - timestamp_granularities=["word", "segment"], + timestamp_granularities=["word"], ) assert_matches_type(TranscriptionCreateResponse, transcription, path=["response"]) diff --git a/tests/api_resources/beta/test_assistants.py b/tests/api_resources/beta/test_assistants.py index 642935cdaf..d9944448b7 100644 --- a/tests/api_resources/beta/test_assistants.py +++ b/tests/api_resources/beta/test_assistants.py @@ -39,19 +39,19 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: response_format="auto", temperature=1, tool_resources={ - "code_interpreter": {"file_ids": ["string", "string", "string"]}, + "code_interpreter": {"file_ids": ["string"]}, "file_search": { "vector_store_ids": ["string"], "vector_stores": [ { "chunking_strategy": {"type": "auto"}, - "file_ids": ["string", "string", "string"], + "file_ids": 
["string"], "metadata": {}, } ], }, }, - tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], + tools=[{"type": "code_interpreter"}], top_p=1, ) assert_matches_type(Assistant, assistant, path=["response"]) @@ -137,10 +137,10 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: response_format="auto", temperature=1, tool_resources={ - "code_interpreter": {"file_ids": ["string", "string", "string"]}, + "code_interpreter": {"file_ids": ["string"]}, "file_search": {"vector_store_ids": ["string"]}, }, - tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], + tools=[{"type": "code_interpreter"}], top_p=1, ) assert_matches_type(Assistant, assistant, path=["response"]) @@ -271,19 +271,19 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> response_format="auto", temperature=1, tool_resources={ - "code_interpreter": {"file_ids": ["string", "string", "string"]}, + "code_interpreter": {"file_ids": ["string"]}, "file_search": { "vector_store_ids": ["string"], "vector_stores": [ { "chunking_strategy": {"type": "auto"}, - "file_ids": ["string", "string", "string"], + "file_ids": ["string"], "metadata": {}, } ], }, }, - tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], + tools=[{"type": "code_interpreter"}], top_p=1, ) assert_matches_type(Assistant, assistant, path=["response"]) @@ -369,10 +369,10 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> response_format="auto", temperature=1, tool_resources={ - "code_interpreter": {"file_ids": ["string", "string", "string"]}, + "code_interpreter": {"file_ids": ["string"]}, "file_search": {"vector_store_ids": ["string"]}, }, - tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], + tools=[{"type": "code_interpreter"}], top_p=1, ) assert_matches_type(Assistant, assistant, path=["response"]) diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py index 6fb36199a4..72c5cc0f19 100644 --- a/tests/api_resources/beta/test_threads.py +++ b/tests/api_resources/beta/test_threads.py @@ -36,103 +36,21 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "attachments": [ { "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - 
}, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, + "tools": [{"type": "code_interpreter"}], + } ], "metadata": {}, - }, + } ], metadata={}, tool_resources={ - "code_interpreter": {"file_ids": ["string", "string", "string"]}, + "code_interpreter": {"file_ids": ["string"]}, "file_search": { "vector_store_ids": ["string"], "vector_stores": [ { "chunking_strategy": {"type": "auto"}, - "file_ids": ["string", "string", "string"], + "file_ids": ["string"], "metadata": {}, } ], @@ -212,7 +130,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: thread_id="thread_id", metadata={}, tool_resources={ - "code_interpreter": {"file_ids": ["string", "string", "string"]}, + "code_interpreter": {"file_ids": ["string"]}, "file_search": {"vector_store_ids": ["string"]}, }, ) @@ -315,103 +233,21 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) "attachments": [ { "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, + "tools": [{"type": "code_interpreter"}], + } ], "metadata": {}, - }, + } ], "metadata": {}, "tool_resources": { - "code_interpreter": {"file_ids": ["string", "string", "string"]}, + "code_interpreter": {"file_ids": ["string"]}, "file_search": { "vector_store_ids": ["string"], "vector_stores": [ { "chunking_strategy": {"type": "auto"}, - "file_ids": ["string", "string", "string"], + "file_ids": ["string"], "metadata": {}, } ], @@ -420,10 +256,10 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) }, tool_choice="none", tool_resources={ - "code_interpreter": {"file_ids": ["string", "string", "string"]}, + "code_interpreter": {"file_ids": ["string"]}, "file_search": {"vector_store_ids": ["string"]}, }, - tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], + tools=[{"type": "code_interpreter"}], top_p=1, 
truncation_strategy={ "type": "auto", @@ -485,103 +321,21 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) "attachments": [ { "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, + "tools": [{"type": "code_interpreter"}], + } ], "metadata": {}, - }, + } ], "metadata": {}, "tool_resources": { - "code_interpreter": {"file_ids": ["string", "string", "string"]}, + "code_interpreter": {"file_ids": ["string"]}, "file_search": { "vector_store_ids": ["string"], "vector_stores": [ { "chunking_strategy": {"type": "auto"}, - "file_ids": ["string", "string", "string"], + "file_ids": ["string"], "metadata": {}, } ], @@ -590,10 +344,10 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) }, tool_choice="none", tool_resources={ - "code_interpreter": {"file_ids": ["string", "string", "string"]}, + "code_interpreter": {"file_ids": ["string"]}, "file_search": {"vector_store_ids": ["string"]}, }, - tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], + tools=[{"type": "code_interpreter"}], top_p=1, truncation_strategy={ "type": "auto", @@ -646,103 +400,21 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "attachments": [ { "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": 
"file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, + "tools": [{"type": "code_interpreter"}], + } ], "metadata": {}, - }, + } ], metadata={}, tool_resources={ - "code_interpreter": {"file_ids": ["string", "string", "string"]}, + "code_interpreter": {"file_ids": ["string"]}, "file_search": { "vector_store_ids": ["string"], "vector_stores": [ { "chunking_strategy": {"type": "auto"}, - "file_ids": ["string", "string", "string"], + "file_ids": ["string"], "metadata": {}, } ], @@ -822,7 +494,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> thread_id="thread_id", metadata={}, tool_resources={ - "code_interpreter": {"file_ids": ["string", "string", "string"]}, + "code_interpreter": {"file_ids": ["string"]}, "file_search": {"vector_store_ids": ["string"]}, }, ) @@ -925,103 +597,21 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie "attachments": [ { "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, + "tools": [{"type": "code_interpreter"}], + } ], "metadata": {}, - }, + } ], "metadata": {}, "tool_resources": { - "code_interpreter": {"file_ids": ["string", "string", "string"]}, + "code_interpreter": {"file_ids": ["string"]}, "file_search": { "vector_store_ids": ["string"], "vector_stores": [ { "chunking_strategy": {"type": "auto"}, - "file_ids": ["string", "string", "string"], + "file_ids": ["string"], "metadata": {}, } ], @@ -1030,10 +620,10 @@ async def 
test_method_create_and_run_with_all_params_overload_1(self, async_clie }, tool_choice="none", tool_resources={ - "code_interpreter": {"file_ids": ["string", "string", "string"]}, + "code_interpreter": {"file_ids": ["string"]}, "file_search": {"vector_store_ids": ["string"]}, }, - tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], + tools=[{"type": "code_interpreter"}], top_p=1, truncation_strategy={ "type": "auto", @@ -1095,103 +685,21 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie "attachments": [ { "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, + "tools": [{"type": "code_interpreter"}], + } ], "metadata": {}, - }, + } ], "metadata": {}, "tool_resources": { - "code_interpreter": {"file_ids": ["string", "string", "string"]}, + "code_interpreter": {"file_ids": ["string"]}, "file_search": { "vector_store_ids": ["string"], "vector_stores": [ { "chunking_strategy": {"type": "auto"}, - "file_ids": ["string", "string", "string"], + "file_ids": ["string"], "metadata": {}, } ], @@ -1200,10 +708,10 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie }, tool_choice="none", tool_resources={ - "code_interpreter": {"file_ids": ["string", "string", "string"]}, + "code_interpreter": {"file_ids": ["string"]}, "file_search": {"vector_store_ids": ["string"]}, }, - tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], + tools=[{"type": "code_interpreter"}], top_p=1, truncation_strategy={ "type": "auto", diff --git a/tests/api_resources/beta/test_vector_stores.py b/tests/api_resources/beta/test_vector_stores.py index 6f0c4d2144..162241a13d 100644 --- a/tests/api_resources/beta/test_vector_stores.py +++ b/tests/api_resources/beta/test_vector_stores.py @@ -34,7 +34,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "anchor": "last_active_at", "days": 1, }, - file_ids=["string", "string", "string"], + file_ids=["string"], metadata={}, name="name", ) @@ -239,7 +239,7 @@ async def 
test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "anchor": "last_active_at", "days": 1, }, - file_ids=["string", "string", "string"], + file_ids=["string"], metadata={}, name="name", ) diff --git a/tests/api_resources/beta/threads/test_messages.py b/tests/api_resources/beta/threads/test_messages.py index edd5f77a32..1d50c73e92 100644 --- a/tests/api_resources/beta/threads/test_messages.py +++ b/tests/api_resources/beta/threads/test_messages.py @@ -39,16 +39,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: attachments=[ { "file_id": "file_id", - "tools": [{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], - }, - { - "file_id": "file_id", - "tools": [{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], - }, - { - "file_id": "file_id", - "tools": [{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], - }, + "tools": [{"type": "code_interpreter"}], + } ], metadata={}, ) @@ -316,16 +308,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> attachments=[ { "file_id": "file_id", - "tools": [{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], - }, - { - "file_id": "file_id", - "tools": [{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], - }, - { - "file_id": "file_id", - "tools": [{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], - }, + "tools": [{"type": "code_interpreter"}], + } ], metadata={}, ) diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py index cb0718ede7..ecce003a85 100644 --- a/tests/api_resources/beta/threads/test_runs.py +++ b/tests/api_resources/beta/threads/test_runs.py @@ -42,93 +42,11 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "attachments": [ { "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, + "tools": [{"type": "code_interpreter"}], + } ], "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - 
"metadata": {}, - }, + } ], instructions="instructions", max_completion_tokens=256, @@ -140,7 +58,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: stream=False, temperature=1, tool_choice="none", - tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], + tools=[{"type": "code_interpreter"}], top_p=1, truncation_strategy={ "type": "auto", @@ -207,93 +125,11 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "attachments": [ { "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, + "tools": [{"type": "code_interpreter"}], + } ], "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, + } ], instructions="instructions", max_completion_tokens=256, @@ -304,7 +140,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: response_format="auto", temperature=1, tool_choice="none", - tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], + tools=[{"type": "code_interpreter"}], top_p=1, truncation_strategy={ "type": "auto", @@ -556,7 +392,7 @@ def test_method_submit_tool_outputs_overload_1(self, client: OpenAI) -> None: run = client.beta.threads.runs.submit_tool_outputs( run_id="run_id", thread_id="thread_id", - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) assert_matches_type(Run, run, path=["response"]) @@ -569,15 +405,7 @@ def test_method_submit_tool_outputs_with_all_params_overload_1(self, client: Ope { "output": "output", "tool_call_id": "tool_call_id", - }, - { - "output": "output", - "tool_call_id": "tool_call_id", - }, - { - "output": "output", - "tool_call_id": "tool_call_id", - }, + } ], stream=False, ) @@ -588,7 +416,7 @@ def test_raw_response_submit_tool_outputs_overload_1(self, client: OpenAI) -> No response = client.beta.threads.runs.with_raw_response.submit_tool_outputs( run_id="run_id", thread_id="thread_id", - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) assert response.is_closed is True @@ -601,7 +429,7 @@ def test_streaming_response_submit_tool_outputs_overload_1(self, client: OpenAI) with 
client.beta.threads.runs.with_streaming_response.submit_tool_outputs( run_id="run_id", thread_id="thread_id", - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -617,14 +445,14 @@ def test_path_params_submit_tool_outputs_overload_1(self, client: OpenAI) -> Non client.beta.threads.runs.with_raw_response.submit_tool_outputs( run_id="run_id", thread_id="", - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): client.beta.threads.runs.with_raw_response.submit_tool_outputs( run_id="", thread_id="thread_id", - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) @parametrize @@ -633,7 +461,7 @@ def test_method_submit_tool_outputs_overload_2(self, client: OpenAI) -> None: run_id="run_id", thread_id="thread_id", stream=True, - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) run_stream.response.close() @@ -643,7 +471,7 @@ def test_raw_response_submit_tool_outputs_overload_2(self, client: OpenAI) -> No run_id="run_id", thread_id="thread_id", stream=True, - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -656,7 +484,7 @@ def test_streaming_response_submit_tool_outputs_overload_2(self, client: OpenAI) run_id="run_id", thread_id="thread_id", stream=True, - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -673,7 +501,7 @@ def test_path_params_submit_tool_outputs_overload_2(self, client: OpenAI) -> Non run_id="run_id", thread_id="", stream=True, - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): @@ -681,7 +509,7 @@ def test_path_params_submit_tool_outputs_overload_2(self, client: OpenAI) -> Non run_id="", thread_id="thread_id", stream=True, - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) @@ -710,93 +538,11 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "attachments": [ { "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, + "tools": [{"type": "code_interpreter"}], + } ], "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": 
"code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, + } ], instructions="instructions", max_completion_tokens=256, @@ -808,7 +554,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn stream=False, temperature=1, tool_choice="none", - tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], + tools=[{"type": "code_interpreter"}], top_p=1, truncation_strategy={ "type": "auto", @@ -875,93 +621,11 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "attachments": [ { "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, + "tools": [{"type": "code_interpreter"}], + } ], "metadata": {}, - }, - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - { - "file_id": "file_id", - "tools": [ - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - {"type": "code_interpreter"}, - ], - }, - ], - "metadata": {}, - }, + } ], instructions="instructions", max_completion_tokens=256, @@ -972,7 +636,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn response_format="auto", temperature=1, tool_choice="none", - tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], + tools=[{"type": "code_interpreter"}], top_p=1, truncation_strategy={ "type": "auto", @@ -1224,7 +888,7 @@ async def test_method_submit_tool_outputs_overload_1(self, async_client: AsyncOp run = await async_client.beta.threads.runs.submit_tool_outputs( run_id="run_id", thread_id="thread_id", - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) assert_matches_type(Run, run, path=["response"]) @@ -1237,15 +901,7 @@ async def test_method_submit_tool_outputs_with_all_params_overload_1(self, async { "output": "output", "tool_call_id": "tool_call_id", - }, - { - "output": "output", - "tool_call_id": "tool_call_id", - }, - { - "output": "output", - "tool_call_id": "tool_call_id", - }, + } ], stream=False, ) @@ -1256,7 +912,7 @@ async def test_raw_response_submit_tool_outputs_overload_1(self, async_client: A response = await 
async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( run_id="run_id", thread_id="thread_id", - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) assert response.is_closed is True @@ -1269,7 +925,7 @@ async def test_streaming_response_submit_tool_outputs_overload_1(self, async_cli async with async_client.beta.threads.runs.with_streaming_response.submit_tool_outputs( run_id="run_id", thread_id="thread_id", - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -1285,14 +941,14 @@ async def test_path_params_submit_tool_outputs_overload_1(self, async_client: As await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( run_id="run_id", thread_id="", - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( run_id="", thread_id="thread_id", - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) @parametrize @@ -1301,7 +957,7 @@ async def test_method_submit_tool_outputs_overload_2(self, async_client: AsyncOp run_id="run_id", thread_id="thread_id", stream=True, - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) await run_stream.response.aclose() @@ -1311,7 +967,7 @@ async def test_raw_response_submit_tool_outputs_overload_2(self, async_client: A run_id="run_id", thread_id="thread_id", stream=True, - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -1324,7 +980,7 @@ async def test_streaming_response_submit_tool_outputs_overload_2(self, async_cli run_id="run_id", thread_id="thread_id", stream=True, - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -1341,7 +997,7 @@ async def test_path_params_submit_tool_outputs_overload_2(self, async_client: As run_id="run_id", thread_id="", stream=True, - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): @@ -1349,5 +1005,5 @@ async def test_path_params_submit_tool_outputs_overload_2(self, async_client: As run_id="", thread_id="thread_id", stream=True, - tool_outputs=[{}, {}, {}], + tool_outputs=[{}], ) diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 4359993e9c..d2e786cfe0 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -61,7 +61,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: max_completion_tokens=0, max_tokens=0, metadata={"foo": "string"}, - modalities=["text", "audio"], + modalities=["text"], n=1, parallel_tool_calls=True, prediction={ @@ -87,25 +87,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "strict": True, }, "type": "function", - }, - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - "strict": True, - }, - "type": "function", - }, - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - "strict": True, - }, - "type": "function", - }, + } ], top_logprobs=0, top_p=1, @@ -193,7 +175,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> 
None: max_completion_tokens=0, max_tokens=0, metadata={"foo": "string"}, - modalities=["text", "audio"], + modalities=["text"], n=1, parallel_tool_calls=True, prediction={ @@ -218,25 +200,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "strict": True, }, "type": "function", - }, - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - "strict": True, - }, - "type": "function", - }, - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - "strict": True, - }, - "type": "function", - }, + } ], top_logprobs=0, top_p=1, @@ -327,7 +291,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn max_completion_tokens=0, max_tokens=0, metadata={"foo": "string"}, - modalities=["text", "audio"], + modalities=["text"], n=1, parallel_tool_calls=True, prediction={ @@ -353,25 +317,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "strict": True, }, "type": "function", - }, - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - "strict": True, - }, - "type": "function", - }, - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - "strict": True, - }, - "type": "function", - }, + } ], top_logprobs=0, top_p=1, @@ -459,7 +405,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn max_completion_tokens=0, max_tokens=0, metadata={"foo": "string"}, - modalities=["text", "audio"], + modalities=["text"], n=1, parallel_tool_calls=True, prediction={ @@ -484,25 +430,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "strict": True, }, "type": "function", - }, - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - "strict": True, - }, - "type": "function", - }, - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - "strict": True, - }, - "type": "function", - }, + } ], top_logprobs=0, top_p=1, diff --git a/tests/api_resources/fine_tuning/test_jobs.py b/tests/api_resources/fine_tuning/test_jobs.py index 018ed82764..ad218bcb36 100644 --- a/tests/api_resources/fine_tuning/test_jobs.py +++ b/tests/api_resources/fine_tuning/test_jobs.py @@ -46,27 +46,9 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "project": "my-wandb-project", "entity": "entity", "name": "name", - "tags": ["custom-tag", "custom-tag", "custom-tag"], + "tags": ["custom-tag"], }, - }, - { - "type": "wandb", - "wandb": { - "project": "my-wandb-project", - "entity": "entity", - "name": "name", - "tags": ["custom-tag", "custom-tag", "custom-tag"], - }, - }, - { - "type": "wandb", - "wandb": { - "project": "my-wandb-project", - "entity": "entity", - "name": "name", - "tags": ["custom-tag", "custom-tag", "custom-tag"], - }, - }, + } ], seed=42, suffix="x", @@ -285,27 +267,9 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "project": "my-wandb-project", "entity": "entity", "name": "name", - "tags": ["custom-tag", "custom-tag", "custom-tag"], - }, - }, - { - "type": "wandb", - "wandb": { - "project": "my-wandb-project", - "entity": "entity", - "name": "name", - "tags": ["custom-tag", "custom-tag", "custom-tag"], - }, - }, - { - "type": "wandb", - "wandb": { - "project": "my-wandb-project", - "entity": "entity", - "name": "name", - "tags": ["custom-tag", 
"custom-tag", "custom-tag"], + "tags": ["custom-tag"], }, - }, + } ], seed=42, suffix="x", diff --git a/tests/api_resources/test_uploads.py b/tests/api_resources/test_uploads.py index cb62df6b51..a14c4f8da2 100644 --- a/tests/api_resources/test_uploads.py +++ b/tests/api_resources/test_uploads.py @@ -99,7 +99,7 @@ def test_path_params_cancel(self, client: OpenAI) -> None: def test_method_complete(self, client: OpenAI) -> None: upload = client.uploads.complete( upload_id="upload_abc123", - part_ids=["string", "string", "string"], + part_ids=["string"], ) assert_matches_type(Upload, upload, path=["response"]) @@ -107,7 +107,7 @@ def test_method_complete(self, client: OpenAI) -> None: def test_method_complete_with_all_params(self, client: OpenAI) -> None: upload = client.uploads.complete( upload_id="upload_abc123", - part_ids=["string", "string", "string"], + part_ids=["string"], md5="md5", ) assert_matches_type(Upload, upload, path=["response"]) @@ -116,7 +116,7 @@ def test_method_complete_with_all_params(self, client: OpenAI) -> None: def test_raw_response_complete(self, client: OpenAI) -> None: response = client.uploads.with_raw_response.complete( upload_id="upload_abc123", - part_ids=["string", "string", "string"], + part_ids=["string"], ) assert response.is_closed is True @@ -128,7 +128,7 @@ def test_raw_response_complete(self, client: OpenAI) -> None: def test_streaming_response_complete(self, client: OpenAI) -> None: with client.uploads.with_streaming_response.complete( upload_id="upload_abc123", - part_ids=["string", "string", "string"], + part_ids=["string"], ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -143,7 +143,7 @@ def test_path_params_complete(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"): client.uploads.with_raw_response.complete( upload_id="", - part_ids=["string", "string", "string"], + part_ids=["string"], ) @@ -232,7 +232,7 @@ async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None: async def test_method_complete(self, async_client: AsyncOpenAI) -> None: upload = await async_client.uploads.complete( upload_id="upload_abc123", - part_ids=["string", "string", "string"], + part_ids=["string"], ) assert_matches_type(Upload, upload, path=["response"]) @@ -240,7 +240,7 @@ async def test_method_complete(self, async_client: AsyncOpenAI) -> None: async def test_method_complete_with_all_params(self, async_client: AsyncOpenAI) -> None: upload = await async_client.uploads.complete( upload_id="upload_abc123", - part_ids=["string", "string", "string"], + part_ids=["string"], md5="md5", ) assert_matches_type(Upload, upload, path=["response"]) @@ -249,7 +249,7 @@ async def test_method_complete_with_all_params(self, async_client: AsyncOpenAI) async def test_raw_response_complete(self, async_client: AsyncOpenAI) -> None: response = await async_client.uploads.with_raw_response.complete( upload_id="upload_abc123", - part_ids=["string", "string", "string"], + part_ids=["string"], ) assert response.is_closed is True @@ -261,7 +261,7 @@ async def test_raw_response_complete(self, async_client: AsyncOpenAI) -> None: async def test_streaming_response_complete(self, async_client: AsyncOpenAI) -> None: async with async_client.uploads.with_streaming_response.complete( upload_id="upload_abc123", - part_ids=["string", "string", "string"], + part_ids=["string"], ) as response: assert not response.is_closed assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -276,5 +276,5 @@ async def test_path_params_complete(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"): await async_client.uploads.with_raw_response.complete( upload_id="", - part_ids=["string", "string", "string"], + part_ids=["string"], ) From 3370cc86e7f80d5fb88c2180b37e36653ea2fce5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 10:42:24 +0000 Subject: [PATCH 112/192] chore(internal): spec update (#1873) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index f368bc881d..fdef8d2744 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2f8ca92b9b1879fd535b685e4767338413fcd533d42f3baac13a9c41da3fce35.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-fb9db2d2c1f0d6b39d8ee042db5d5c59acba6ad1daf47c18792c1f5fb24b3401.yml From eaba61e6f62e05cd92aaa5bce9e94a5071834f2c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 12:46:29 +0000 Subject: [PATCH 113/192] chore(internal): minor test changes (#1874) --- pyproject.toml | 1 + requirements-dev.lock | 1 + src/openai/_utils/_sync.py | 90 +++++++++++++++++--------------------- tests/test_client.py | 38 ++++++++++++++++ 4 files changed, 80 insertions(+), 50 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 31cdc0a93a..67c7070a12 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,6 +55,7 @@ dev-dependencies = [ "dirty-equals>=0.6.0", "importlib-metadata>=6.7.0", "rich>=13.7.1", + "nest_asyncio==1.6.0" ] [tool.rye.scripts] diff --git a/requirements-dev.lock b/requirements-dev.lock index 64600eb215..930a286174 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -51,6 +51,7 @@ mdurl==0.1.2 mypy==1.13.0 mypy-extensions==1.0.0 # via mypy +nest-asyncio==1.6.0 nodeenv==1.8.0 # via pyright nox==2023.4.22 diff --git a/src/openai/_utils/_sync.py b/src/openai/_utils/_sync.py index d0d810337e..8b3aaf2b5d 100644 --- a/src/openai/_utils/_sync.py +++ b/src/openai/_utils/_sync.py @@ -1,56 +1,62 @@ from __future__ import annotations +import sys +import asyncio import functools -from typing import TypeVar, Callable, Awaitable +import contextvars +from typing import Any, TypeVar, Callable, Awaitable from typing_extensions import ParamSpec -import anyio -import anyio.to_thread - -from ._reflection import function_has_argument - T_Retval = TypeVar("T_Retval") T_ParamSpec = ParamSpec("T_ParamSpec") -# copied from `asyncer`, https://github.com/tiangolo/asyncer -def asyncify( - function: Callable[T_ParamSpec, T_Retval], - *, - cancellable: bool = False, - limiter: anyio.CapacityLimiter | None = None, -) -> Callable[T_ParamSpec, Awaitable[T_Retval]]: +if sys.version_info >= (3, 9): + to_thread = asyncio.to_thread +else: + # backport of https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread + # for Python 3.8 support + async def to_thread( + func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs + ) -> Any: + """Asynchronously run function *func* in a separate thread. + + Any *args and **kwargs supplied for this function are directly passed + to *func*. 
Also, the current :class:`contextvars.Context` is propagated, + allowing context variables from the main thread to be accessed in the + separate thread. + + Returns a coroutine that can be awaited to get the eventual result of *func*. + """ + loop = asyncio.events.get_running_loop() + ctx = contextvars.copy_context() + func_call = functools.partial(ctx.run, func, *args, **kwargs) + return await loop.run_in_executor(None, func_call) + + +# inspired by `asyncer`, https://github.com/tiangolo/asyncer +def asyncify(function: Callable[T_ParamSpec, T_Retval]) -> Callable[T_ParamSpec, Awaitable[T_Retval]]: """ Take a blocking function and create an async one that receives the same - positional and keyword arguments, and that when called, calls the original function - in a worker thread using `anyio.to_thread.run_sync()`. Internally, - `asyncer.asyncify()` uses the same `anyio.to_thread.run_sync()`, but it supports - keyword arguments additional to positional arguments and it adds better support for - autocompletion and inline errors for the arguments of the function called and the - return value. - - If the `cancellable` option is enabled and the task waiting for its completion is - cancelled, the thread will still run its course but its return value (or any raised - exception) will be ignored. + positional and keyword arguments. For python version 3.9 and above, it uses + asyncio.to_thread to run the function in a separate thread. For python version + 3.8, it uses locally defined copy of the asyncio.to_thread function which was + introduced in python 3.9. - Use it like this: + Usage: - ```Python - def do_work(arg1, arg2, kwarg1="", kwarg2="") -> str: - # Do work - return "Some result" + ```python + def blocking_func(arg1, arg2, kwarg1=None): + # blocking code + return result - result = await to_thread.asyncify(do_work)("spam", "ham", kwarg1="a", kwarg2="b") - print(result) + result = asyncify(blocking_function)(arg1, arg2, kwarg1=value1) ``` ## Arguments `function`: a blocking regular callable (e.g. a function) - `cancellable`: `True` to allow cancellation of the operation - `limiter`: capacity limiter to use to limit the total amount of threads running - (if omitted, the default limiter is used) ## Return @@ -60,22 +66,6 @@ def do_work(arg1, arg2, kwarg1="", kwarg2="") -> str: """ async def wrapper(*args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs) -> T_Retval: - partial_f = functools.partial(function, *args, **kwargs) - - # In `v4.1.0` anyio added the `abandon_on_cancel` argument and deprecated the old - # `cancellable` argument, so we need to use the new `abandon_on_cancel` to avoid - # surfacing deprecation warnings. 
- if function_has_argument(anyio.to_thread.run_sync, "abandon_on_cancel"): - return await anyio.to_thread.run_sync( - partial_f, - abandon_on_cancel=cancellable, - limiter=limiter, - ) - - return await anyio.to_thread.run_sync( - partial_f, - cancellable=cancellable, - limiter=limiter, - ) + return await to_thread(function, *args, **kwargs) return wrapper diff --git a/tests/test_client.py b/tests/test_client.py index 7ea2ab38d1..7caa8cb319 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -4,11 +4,14 @@ import gc import os +import sys import json import asyncio import inspect +import subprocess import tracemalloc from typing import Any, Union, cast +from textwrap import dedent from unittest import mock from typing_extensions import Literal @@ -1766,3 +1769,38 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: ) as response: assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success + + def test_get_platform(self) -> None: + # A previous implementation of asyncify could leave threads unterminated when + # used with nest_asyncio. + # + # Since nest_asyncio.apply() is global and cannot be un-applied, this + # test is run in a separate process to avoid affecting other tests. + test_code = dedent(""" + import asyncio + import nest_asyncio + import threading + + from openai._utils import asyncify + from openai._base_client import get_platform + + async def test_main() -> None: + result = await asyncify(get_platform)() + print(result) + for thread in threading.enumerate(): + print(thread.name) + + nest_asyncio.apply() + asyncio.run(test_main()) + """) + with subprocess.Popen( + [sys.executable, "-c", test_code], + text=True, + ) as process: + try: + process.wait(2) + if process.returncode: + raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code") + except subprocess.TimeoutExpired as e: + process.kill() + raise AssertionError("calling get_platform using asyncify resulted in a hung process") from e From 8c22ab186784e47656b002740d96879bbdc2e970 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 20 Nov 2024 18:17:44 +0000 Subject: [PATCH 114/192] feat(api): add gpt-4o-2024-11-20 model (#1877) --- .stats.yml | 2 +- src/openai/resources/batches.py | 4 ++-- src/openai/resources/files.py | 4 ++-- src/openai/types/batch_create_params.py | 2 +- src/openai/types/chat/chat_completion_audio_param.py | 5 +++-- src/openai/types/chat_model.py | 1 + 6 files changed, 10 insertions(+), 8 deletions(-) diff --git a/.stats.yml b/.stats.yml index fdef8d2744..4827e5388f 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-fb9db2d2c1f0d6b39d8ee042db5d5c59acba6ad1daf47c18792c1f5fb24b3401.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-aa9b01fc0c17eb0cbc200533fc20d6a49c5e764ceaf8049e08b294532be6e9ff.yml diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py index ee62faf774..d359c84360 100644 --- a/src/openai/resources/batches.py +++ b/src/openai/resources/batches.py @@ -78,7 +78,7 @@ def create( Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. 
The file can contain up to 50,000 - requests, and can be up to 100 MB in size. + requests, and can be up to 200 MB in size. metadata: Optional custom metadata for the batch. @@ -283,7 +283,7 @@ async def create( Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 - requests, and can be up to 100 MB in size. + requests, and can be up to 200 MB in size. metadata: Optional custom metadata for the batch. diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py index 977b9b3c48..f86917c61d 100644 --- a/src/openai/resources/files.py +++ b/src/openai/resources/files.py @@ -85,7 +85,7 @@ def create( [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) models. - The Batch API only supports `.jsonl` files up to 100 MB in size. The input also + The Batch API only supports `.jsonl` files up to 200 MB in size. The input also has a specific required [format](https://platform.openai.com/docs/api-reference/batch/request-input). @@ -378,7 +378,7 @@ async def create( [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) models. - The Batch API only supports `.jsonl` files up to 100 MB in size. The input also + The Batch API only supports `.jsonl` files up to 200 MB in size. The input also has a specific required [format](https://platform.openai.com/docs/api-reference/batch/request-input). diff --git a/src/openai/types/batch_create_params.py b/src/openai/types/batch_create_params.py index 55517d285b..b30c4d4658 100644 --- a/src/openai/types/batch_create_params.py +++ b/src/openai/types/batch_create_params.py @@ -32,7 +32,7 @@ class BatchCreateParams(TypedDict, total=False): Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 - requests, and can be up to 100 MB in size. + requests, and can be up to 200 MB in size. """ metadata: Optional[Dict[str, str]] diff --git a/src/openai/types/chat/chat_completion_audio_param.py b/src/openai/types/chat/chat_completion_audio_param.py index b92326d294..1e20a52b41 100644 --- a/src/openai/types/chat/chat_completion_audio_param.py +++ b/src/openai/types/chat/chat_completion_audio_param.py @@ -17,6 +17,7 @@ class ChatCompletionAudioParam(TypedDict, total=False): voice: Required[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] """The voice the model uses to respond. - Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, - `shimmer`, and `verse`. + Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also + supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices + are less expressive). 
""" diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py index b801aa0914..3567a3ba65 100644 --- a/src/openai/types/chat_model.py +++ b/src/openai/types/chat_model.py @@ -10,6 +10,7 @@ "o1-mini", "o1-mini-2024-09-12", "gpt-4o", + "gpt-4o-2024-11-20", "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", "gpt-4o-realtime-preview", From 5ca18ebc9a20d1b04a24e8bcc5407401daaff377 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 22 Nov 2024 11:22:30 +0000 Subject: [PATCH 115/192] fix(pydantic-v1): avoid runtime error for assistants streaming (#1885) --- src/openai/_compat.py | 3 ++- tests/test_models.py | 8 ++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/openai/_compat.py b/src/openai/_compat.py index 4794129c4d..df173f85e4 100644 --- a/src/openai/_compat.py +++ b/src/openai/_compat.py @@ -145,7 +145,8 @@ def model_dump( exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, - warnings=warnings, + # warnings are not supported in Pydantic v1 + warnings=warnings if PYDANTIC_V2 else True, ) return cast( "dict[str, Any]", diff --git a/tests/test_models.py b/tests/test_models.py index 84dbce6914..d2884bcbfa 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -561,6 +561,14 @@ class Model(BaseModel): m.model_dump(warnings=False) +def test_compat_method_no_error_for_warnings() -> None: + class Model(BaseModel): + foo: Optional[str] + + m = Model(foo="hello") + assert isinstance(model_dump(m, warnings=False), dict) + + def test_to_json() -> None: class Model(BaseModel): foo: Optional[str] = Field(alias="FOO", default=None) From ec71a9b30a3509b8374e1f20f98db3a9f1680f07 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 22 Nov 2024 16:06:42 +0000 Subject: [PATCH 116/192] docs: add info log level to readme (#1887) --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 7f026a768b..e37a3ce415 100644 --- a/README.md +++ b/README.md @@ -353,12 +353,14 @@ Note that requests that time out are [retried twice by default](#retries). We use the standard library [`logging`](https://docs.python.org/3/library/logging.html) module. -You can enable logging by setting the environment variable `OPENAI_LOG` to `debug`. +You can enable logging by setting the environment variable `OPENAI_LOG` to `info`. ```shell -$ export OPENAI_LOG=debug +$ export OPENAI_LOG=info ``` +Or to `debug` for more verbose logging. + ### How to tell whether `None` means `null` or missing In an API response, a field may be explicitly `null`, or missing entirely; in either case, its value is `None` in this library. 
You can differentiate the two cases with `.model_fields_set`: From b8155157f01dc34077eaa1f8af7fd064e5f18c4c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 12:24:06 +0000 Subject: [PATCH 117/192] chore: remove now unused `cached-property` dep (#1891) --- pyproject.toml | 1 - src/openai/_compat.py | 5 +---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 67c7070a12..4e2467bb60 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,7 +14,6 @@ dependencies = [ "anyio>=3.5.0, <5", "distro>=1.7.0, <2", "sniffio", - "cached-property; python_version < '3.8'", ] requires-python = ">= 3.8" classifiers = [ diff --git a/src/openai/_compat.py b/src/openai/_compat.py index df173f85e4..92d9ee61ee 100644 --- a/src/openai/_compat.py +++ b/src/openai/_compat.py @@ -214,9 +214,6 @@ def __set_name__(self, owner: type[Any], name: str) -> None: ... # __set__ is not defined at runtime, but @cached_property is designed to be settable def __set__(self, instance: object, value: _T) -> None: ... else: - try: - from functools import cached_property as cached_property - except ImportError: - from cached_property import cached_property as cached_property + from functools import cached_property as cached_property typed_cached_property = cached_property From 34c5dd9ea5aed42a343f548b8101b47fbbeab337 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 27 Nov 2024 14:03:19 +0000 Subject: [PATCH 118/192] chore(internal): exclude mypy from running on tests (#1899) --- mypy.ini | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mypy.ini b/mypy.ini index a4517a002d..215128e026 100644 --- a/mypy.ini +++ b/mypy.ini @@ -5,7 +5,10 @@ show_error_codes = True # Exclude _files.py because mypy isn't smart enough to apply # the correct type narrowing and as this is an internal module # it's fine to just use Pyright. -exclude = ^(src/openai/_files\.py|_dev/.*\.py)$ +# +# We also exclude our `tests` as mypy doesn't always infer +# types correctly and Pyright will still catch any type errors. +exclude = ^(src/openai/_files\.py|_dev/.*\.py|tests/.*)$ strict_equality = True implicit_reexport = True From 5c4f2ab840e9a32d28eed6ac8d9860d4d011b8bd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 28 Nov 2024 16:16:48 +0000 Subject: [PATCH 119/192] fix(client): compat with new httpx 0.28.0 release (#1904) --- src/openai/_base_client.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index e1d4849ae2..8bdad99feb 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -793,6 +793,7 @@ def __init__( custom_query: Mapping[str, object] | None = None, _strict_response_validation: bool, ) -> None: + kwargs: dict[str, Any] = {} if limits is not None: warnings.warn( "The `connection_pool_limits` argument is deprecated. The `http_client` argument should be passed instead", @@ -805,6 +806,7 @@ def __init__( limits = DEFAULT_CONNECTION_LIMITS if transport is not None: + kwargs["transport"] = transport warnings.warn( "The `transport` argument is deprecated. 
The `http_client` argument should be passed instead", category=DeprecationWarning, @@ -814,6 +816,7 @@ def __init__( raise ValueError("The `http_client` argument is mutually exclusive with `transport`") if proxies is not None: + kwargs["proxies"] = proxies warnings.warn( "The `proxies` argument is deprecated. The `http_client` argument should be passed instead", category=DeprecationWarning, @@ -857,10 +860,9 @@ def __init__( base_url=base_url, # cast to a valid type because mypy doesn't understand our type narrowing timeout=cast(Timeout, timeout), - proxies=proxies, - transport=transport, limits=limits, follow_redirects=True, + **kwargs, # type: ignore ) def is_closed(self) -> bool: @@ -1374,6 +1376,7 @@ def __init__( custom_headers: Mapping[str, str] | None = None, custom_query: Mapping[str, object] | None = None, ) -> None: + kwargs: dict[str, Any] = {} if limits is not None: warnings.warn( "The `connection_pool_limits` argument is deprecated. The `http_client` argument should be passed instead", @@ -1386,6 +1389,7 @@ def __init__( limits = DEFAULT_CONNECTION_LIMITS if transport is not None: + kwargs["transport"] = transport warnings.warn( "The `transport` argument is deprecated. The `http_client` argument should be passed instead", category=DeprecationWarning, @@ -1395,6 +1399,7 @@ def __init__( raise ValueError("The `http_client` argument is mutually exclusive with `transport`") if proxies is not None: + kwargs["proxies"] = proxies warnings.warn( "The `proxies` argument is deprecated. The `http_client` argument should be passed instead", category=DeprecationWarning, @@ -1438,10 +1443,9 @@ def __init__( base_url=base_url, # cast to a valid type because mypy doesn't understand our type narrowing timeout=cast(Timeout, timeout), - proxies=proxies, - transport=transport, limits=limits, follow_redirects=True, + **kwargs, # type: ignore ) def is_closed(self) -> bool: From b2364a1f31d4ea4fd3b5c8a37a6d8073137826d4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 20:43:48 +0000 Subject: [PATCH 120/192] chore(internal): bump pyright (#1917) --- requirements-dev.lock | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index 930a286174..5bc4fb6d7c 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -68,7 +68,7 @@ pydantic-core==2.23.4 # via pydantic pygments==2.18.0 # via rich -pyright==1.1.380 +pyright==1.1.389 pytest==8.3.3 # via pytest-asyncio pytest-asyncio==0.24.0 @@ -97,6 +97,7 @@ typing-extensions==4.12.2 # via openai # via pydantic # via pydantic-core + # via pyright virtualenv==20.24.5 # via nox zipp==3.17.0 From adbc236c8cc0ebee188c2e86e8f0afa1818567f2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 4 Dec 2024 02:15:40 +0000 Subject: [PATCH 121/192] chore: make the `Omit` type public (#1919) --- src/openai/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/openai/__init__.py b/src/openai/__init__.py index 1ef8f659a6..5df415547a 100644 --- a/src/openai/__init__.py +++ b/src/openai/__init__.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from . 
import types -from ._types import NOT_GIVEN, NoneType, NotGiven, Transport, ProxiesTypes +from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes from ._utils import file_from_path from ._client import Client, OpenAI, Stream, Timeout, Transport, AsyncClient, AsyncOpenAI, AsyncStream, RequestOptions from ._models import BaseModel @@ -36,6 +36,7 @@ "ProxiesTypes", "NotGiven", "NOT_GIVEN", + "Omit", "OpenAIError", "APIError", "APIStatusError", From 2d448693693fedb2b60b227afcba08b70e2a714f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 4 Dec 2024 20:20:39 +0000 Subject: [PATCH 122/192] chore: bump openapi url (#1922) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 4827e5388f..19920c8be8 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-aa9b01fc0c17eb0cbc200533fc20d6a49c5e764ceaf8049e08b294532be6e9ff.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-d702cba829ceda336f44d0eb89ce61dba353849a40f0193e7007439345daf1bb.yml From 5903dd08653b91fe6e541f9034cf7c9be19e325e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 4 Dec 2024 20:53:44 +0000 Subject: [PATCH 123/192] feat(api): updates (#1924) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 19920c8be8..3cc042fe0a 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-d702cba829ceda336f44d0eb89ce61dba353849a40f0193e7007439345daf1bb.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2e0e0678be19d1118fd796af291822075e40538dba326611e177e9f3dc245a53.yml From 13b0075dfb911aaaf23353b15929e04aadeedf74 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2024 14:37:03 +0000 Subject: [PATCH 124/192] chore(internal): bump pydantic dependency (#1929) --- requirements-dev.lock | 4 ++-- requirements.lock | 4 ++-- src/openai/_types.py | 6 ++---- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index 5bc4fb6d7c..fb7d64c474 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -62,9 +62,9 @@ platformdirs==3.11.0 # via virtualenv pluggy==1.5.0 # via pytest -pydantic==2.9.2 +pydantic==2.10.3 # via openai -pydantic-core==2.23.4 +pydantic-core==2.27.1 # via pydantic pygments==2.18.0 # via rich diff --git a/requirements.lock b/requirements.lock index 191186945d..c4d8923a70 100644 --- a/requirements.lock +++ b/requirements.lock @@ -30,9 +30,9 @@ httpx==0.25.2 idna==3.4 # via anyio # via httpx -pydantic==2.9.2 +pydantic==2.10.3 # via openai -pydantic-core==2.23.4 +pydantic-core==2.27.1 # via pydantic sniffio==1.3.0 # via anyio diff --git a/src/openai/_types.py b/src/openai/_types.py index c8f4d5a922..a5cf207aa3 100644 --- a/src/openai/_types.py +++ b/src/openai/_types.py @@ -194,10 +194,8 @@ def get(self, __key: str) -> str | None: ... 
StrBytesIntFloat = Union[str, bytes, int, float] # Note: copied from Pydantic -# https://github.com/pydantic/pydantic/blob/32ea570bf96e84234d2992e1ddf40ab8a565925a/pydantic/main.py#L49 -IncEx: TypeAlias = Union[ - Set[int], Set[str], Mapping[int, Union["IncEx", Literal[True]]], Mapping[str, Union["IncEx", Literal[True]]] -] +# https://github.com/pydantic/pydantic/blob/6f31f8f68ef011f84357330186f603ff295312fd/pydantic/main.py#L79 +IncEx: TypeAlias = Union[Set[int], Set[str], Mapping[int, Union["IncEx", bool]], Mapping[str, Union["IncEx", bool]]] PostParser = Callable[[Any], Any] From b5f43243c307cbeb5e41f8cbafdd0854a89d6fb5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2024 18:18:18 +0000 Subject: [PATCH 125/192] docs(readme): fix http client proxies example (#1932) --- README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index e37a3ce415..d26a031a98 100644 --- a/README.md +++ b/README.md @@ -468,18 +468,19 @@ can also get all the extra fields on the Pydantic model as a dict with You can directly override the [httpx client](https://www.python-httpx.org/api/#client) to customize it for your use case, including: -- Support for proxies -- Custom transports +- Support for [proxies](https://www.python-httpx.org/advanced/proxies/) +- Custom [transports](https://www.python-httpx.org/advanced/transports/) - Additional [advanced](https://www.python-httpx.org/advanced/clients/) functionality ```python +import httpx from openai import OpenAI, DefaultHttpxClient client = OpenAI( # Or use the `OPENAI_BASE_URL` env var base_url="http://my.test.server.example.com:8083/", http_client=DefaultHttpxClient( - proxies="http://my.test.proxy.example.com/", + proxy="http://my.test.proxy.example.com/", transport=httpx.HTTPTransport(local_address="0.0.0.0"), ), ) From 850142e668ff7a4bbc74ac5b249768d1cc67389f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 12:13:43 +0000 Subject: [PATCH 126/192] chore(internal): bump pyright (#1939) --- requirements-dev.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index fb7d64c474..e187358330 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -68,7 +68,7 @@ pydantic-core==2.27.1 # via pydantic pygments==2.18.0 # via rich -pyright==1.1.389 +pyright==1.1.390 pytest==8.3.3 # via pytest-asyncio pytest-asyncio==0.24.0 From 11be09a15474f76a49d38ad49f70a27c3731e950 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 12:40:45 +0000 Subject: [PATCH 127/192] chore(internal): add support for TypeAliasType (#1942) --- pyproject.toml | 2 +- src/openai/_legacy_response.py | 20 ++++++++++---------- src/openai/_models.py | 3 +++ src/openai/_response.py | 20 ++++++++++---------- src/openai/_utils/__init__.py | 1 + src/openai/_utils/_typing.py | 31 ++++++++++++++++++++++++++++++- tests/test_models.py | 18 +++++++++++++++++- tests/utils.py | 4 ++++ 8 files changed, 76 insertions(+), 23 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 4e2467bb60..95c9bb0246 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ authors = [ dependencies = [ "httpx>=0.23.0, <1", "pydantic>=1.9.0, <3", - "typing-extensions>=4.7, <5", + "typing-extensions>=4.10, <5", "anyio>=3.5.0, <5", 
"distro>=1.7.0, <2", "sniffio", diff --git a/src/openai/_legacy_response.py b/src/openai/_legacy_response.py index 83a76fe448..b4c8891cfc 100644 --- a/src/openai/_legacy_response.py +++ b/src/openai/_legacy_response.py @@ -24,7 +24,7 @@ import pydantic from ._types import NoneType -from ._utils import is_given, extract_type_arg, is_annotated_type +from ._utils import is_given, extract_type_arg, is_annotated_type, is_type_alias_type from ._models import BaseModel, is_basemodel from ._constants import RAW_RESPONSE_HEADER from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type @@ -192,9 +192,15 @@ def elapsed(self) -> datetime.timedelta: return self.http_response.elapsed def _parse(self, *, to: type[_T] | None = None) -> R | _T: + cast_to = to if to is not None else self._cast_to + + # unwrap `TypeAlias('Name', T)` -> `T` + if is_type_alias_type(cast_to): + cast_to = cast_to.__value__ # type: ignore[unreachable] + # unwrap `Annotated[T, ...]` -> `T` - if to and is_annotated_type(to): - to = extract_type_arg(to, 0) + if cast_to and is_annotated_type(cast_to): + cast_to = extract_type_arg(cast_to, 0) if self._stream: if to: @@ -230,18 +236,12 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: return cast( R, stream_cls( - cast_to=self._cast_to, + cast_to=cast_to, response=self.http_response, client=cast(Any, self._client), ), ) - cast_to = to if to is not None else self._cast_to - - # unwrap `Annotated[T, ...]` -> `T` - if is_annotated_type(cast_to): - cast_to = extract_type_arg(cast_to, 0) - if cast_to is NoneType: return cast(R, None) diff --git a/src/openai/_models.py b/src/openai/_models.py index 6cb469e21d..7a547ce5c4 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -46,6 +46,7 @@ strip_not_given, extract_type_arg, is_annotated_type, + is_type_alias_type, strip_annotated_type, ) from ._compat import ( @@ -428,6 +429,8 @@ def construct_type(*, value: object, type_: object) -> object: # we allow `object` as the input type because otherwise, passing things like # `Literal['value']` will be reported as a type error by type checkers type_ = cast("type[object]", type_) + if is_type_alias_type(type_): + type_ = type_.__value__ # type: ignore[unreachable] # unwrap `Annotated[T, ...]` -> `T` if is_annotated_type(type_): diff --git a/src/openai/_response.py b/src/openai/_response.py index 2c23edf00b..37ec61cc3f 100644 --- a/src/openai/_response.py +++ b/src/openai/_response.py @@ -25,7 +25,7 @@ import pydantic from ._types import NoneType -from ._utils import is_given, extract_type_arg, is_annotated_type, extract_type_var_from_base +from ._utils import is_given, extract_type_arg, is_annotated_type, is_type_alias_type, extract_type_var_from_base from ._models import BaseModel, is_basemodel from ._constants import RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type @@ -126,9 +126,15 @@ def __repr__(self) -> str: ) def _parse(self, *, to: type[_T] | None = None) -> R | _T: + cast_to = to if to is not None else self._cast_to + + # unwrap `TypeAlias('Name', T)` -> `T` + if is_type_alias_type(cast_to): + cast_to = cast_to.__value__ # type: ignore[unreachable] + # unwrap `Annotated[T, ...]` -> `T` - if to and is_annotated_type(to): - to = extract_type_arg(to, 0) + if cast_to and is_annotated_type(cast_to): + cast_to = extract_type_arg(cast_to, 0) if self._is_sse_stream: if to: @@ -164,18 +170,12 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: 
return cast( R, stream_cls( - cast_to=self._cast_to, + cast_to=cast_to, response=self.http_response, client=cast(Any, self._client), ), ) - cast_to = to if to is not None else self._cast_to - - # unwrap `Annotated[T, ...]` -> `T` - if is_annotated_type(cast_to): - cast_to = extract_type_arg(cast_to, 0) - if cast_to is NoneType: return cast(R, None) diff --git a/src/openai/_utils/__init__.py b/src/openai/_utils/__init__.py index a7cff3c091..d4fda26f3c 100644 --- a/src/openai/_utils/__init__.py +++ b/src/openai/_utils/__init__.py @@ -39,6 +39,7 @@ is_iterable_type as is_iterable_type, is_required_type as is_required_type, is_annotated_type as is_annotated_type, + is_type_alias_type as is_type_alias_type, strip_annotated_type as strip_annotated_type, extract_type_var_from_base as extract_type_var_from_base, ) diff --git a/src/openai/_utils/_typing.py b/src/openai/_utils/_typing.py index c036991f04..278749b147 100644 --- a/src/openai/_utils/_typing.py +++ b/src/openai/_utils/_typing.py @@ -1,8 +1,17 @@ from __future__ import annotations +import sys +import typing +import typing_extensions from typing import Any, TypeVar, Iterable, cast from collections import abc as _c_abc -from typing_extensions import Required, Annotated, get_args, get_origin +from typing_extensions import ( + TypeIs, + Required, + Annotated, + get_args, + get_origin, +) from .._types import InheritsGeneric from .._compat import is_union as _is_union @@ -36,6 +45,26 @@ def is_typevar(typ: type) -> bool: return type(typ) == TypeVar # type: ignore +_TYPE_ALIAS_TYPES: tuple[type[typing_extensions.TypeAliasType], ...] = (typing_extensions.TypeAliasType,) +if sys.version_info >= (3, 12): + _TYPE_ALIAS_TYPES = (*_TYPE_ALIAS_TYPES, typing.TypeAliasType) + + +def is_type_alias_type(tp: Any, /) -> TypeIs[typing_extensions.TypeAliasType]: + """Return whether the provided argument is an instance of `TypeAliasType`. + + ```python + type Int = int + is_type_alias_type(Int) + # > True + Str = TypeAliasType("Str", str) + is_type_alias_type(Str) + # > True + ``` + """ + return isinstance(tp, _TYPE_ALIAS_TYPES) + + # Extracts T from Annotated[T, ...] 
or from Required[Annotated[T, ...]] def strip_annotated_type(typ: type) -> type: if is_required_type(typ) or is_annotated_type(typ): diff --git a/tests/test_models.py b/tests/test_models.py index d2884bcbfa..19a71f13ba 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -1,7 +1,7 @@ import json from typing import Any, Dict, List, Union, Optional, cast from datetime import datetime, timezone -from typing_extensions import Literal, Annotated +from typing_extensions import Literal, Annotated, TypeAliasType import pytest import pydantic @@ -828,3 +828,19 @@ class B(BaseModel): # if the discriminator details object stays the same between invocations then # we hit the cache assert UnionType.__discriminator__ is discriminator + + +@pytest.mark.skipif(not PYDANTIC_V2, reason="TypeAliasType is not supported in Pydantic v1") +def test_type_alias_type() -> None: + Alias = TypeAliasType("Alias", str) + + class Model(BaseModel): + alias: Alias + union: Union[int, Alias] + + m = construct_type(value={"alias": "foo", "union": "bar"}, type_=Model) + assert isinstance(m, Model) + assert isinstance(m.alias, str) + assert m.alias == "foo" + assert isinstance(m.union, str) + assert m.union == "bar" diff --git a/tests/utils.py b/tests/utils.py index 165f4e5bfd..bb2f861218 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -16,6 +16,7 @@ is_union_type, extract_type_arg, is_annotated_type, + is_type_alias_type, ) from openai._compat import PYDANTIC_V2, field_outer_type, get_model_fields from openai._models import BaseModel @@ -51,6 +52,9 @@ def assert_matches_type( path: list[str], allow_none: bool = False, ) -> None: + if is_type_alias_type(type_): + type_ = type_.__value__ + # unwrap `Annotated[T, ...]` -> `T` if is_annotated_type(type_): type_ = extract_type_arg(type_, 0) From 22ef350affa00be57251f722e196b0137475fbe1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 13 Dec 2024 11:17:39 +0000 Subject: [PATCH 128/192] chore(internal): remove some duplicated imports (#1946) --- src/openai/resources/beta/beta.py | 20 +++++++++---------- src/openai/resources/beta/threads/threads.py | 17 ++++++++-------- .../resources/fine_tuning/fine_tuning.py | 5 ++--- 3 files changed, 19 insertions(+), 23 deletions(-) diff --git a/src/openai/resources/beta/beta.py b/src/openai/resources/beta/beta.py index 78ea0e017f..642ba664ba 100644 --- a/src/openai/resources/beta/beta.py +++ b/src/openai/resources/beta/beta.py @@ -2,14 +2,6 @@ from __future__ import annotations -from .threads import ( - Threads, - AsyncThreads, - ThreadsWithRawResponse, - AsyncThreadsWithRawResponse, - ThreadsWithStreamingResponse, - AsyncThreadsWithStreamingResponse, -) from ..._compat import cached_property from .assistants import ( Assistants, @@ -20,7 +12,15 @@ AsyncAssistantsWithStreamingResponse, ) from ..._resource import SyncAPIResource, AsyncAPIResource -from .vector_stores import ( +from .threads.threads import ( + Threads, + AsyncThreads, + ThreadsWithRawResponse, + AsyncThreadsWithRawResponse, + ThreadsWithStreamingResponse, + AsyncThreadsWithStreamingResponse, +) +from .vector_stores.vector_stores import ( VectorStores, AsyncVectorStores, VectorStoresWithRawResponse, @@ -28,8 +28,6 @@ VectorStoresWithStreamingResponse, AsyncVectorStoresWithStreamingResponse, ) -from .threads.threads import Threads, AsyncThreads -from .vector_stores.vector_stores import VectorStores, AsyncVectorStores __all__ = ["Beta", "AsyncBeta"] diff --git 
a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index 728d375aa6..6d76a70232 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -8,14 +8,6 @@ import httpx from .... import _legacy_response -from .runs import ( - Runs, - AsyncRuns, - RunsWithRawResponse, - AsyncRunsWithRawResponse, - RunsWithStreamingResponse, - AsyncRunsWithStreamingResponse, -) from .messages import ( Messages, AsyncMessages, @@ -30,7 +22,14 @@ maybe_transform, async_maybe_transform, ) -from .runs.runs import Runs, AsyncRuns +from .runs.runs import ( + Runs, + AsyncRuns, + RunsWithRawResponse, + AsyncRunsWithRawResponse, + RunsWithStreamingResponse, + AsyncRunsWithStreamingResponse, +) from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper diff --git a/src/openai/resources/fine_tuning/fine_tuning.py b/src/openai/resources/fine_tuning/fine_tuning.py index c386de3c2a..d2bce87c48 100644 --- a/src/openai/resources/fine_tuning/fine_tuning.py +++ b/src/openai/resources/fine_tuning/fine_tuning.py @@ -2,7 +2,8 @@ from __future__ import annotations -from .jobs import ( +from ..._compat import cached_property +from .jobs.jobs import ( Jobs, AsyncJobs, JobsWithRawResponse, @@ -10,8 +11,6 @@ JobsWithStreamingResponse, AsyncJobsWithStreamingResponse, ) -from ..._compat import cached_property -from .jobs.jobs import Jobs, AsyncJobs from ..._resource import SyncAPIResource, AsyncAPIResource __all__ = ["FineTuning", "AsyncFineTuning"] From 06cac421b9c3f27ccf6efb247bb24059ac4af78a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 13 Dec 2024 14:01:59 +0000 Subject: [PATCH 129/192] chore(internal): updated imports (#1948) --- src/openai/_client.py | 212 +++++++++++++++++++++--------------------- 1 file changed, 104 insertions(+), 108 deletions(-) diff --git a/src/openai/_client.py b/src/openai/_client.py index d3ee6cf0f1..5419e88f06 100644 --- a/src/openai/_client.py +++ b/src/openai/_client.py @@ -8,7 +8,7 @@ import httpx -from . import resources, _exceptions +from . 
import _exceptions from ._qs import Querystring from ._types import ( NOT_GIVEN, @@ -25,6 +25,7 @@ get_async_library, ) from ._version import __version__ +from .resources import files, images, models, batches, embeddings, completions, moderations from ._streaming import Stream as Stream, AsyncStream as AsyncStream from ._exceptions import OpenAIError, APIStatusError from ._base_client import ( @@ -32,33 +33,28 @@ SyncAPIClient, AsyncAPIClient, ) +from .resources.beta import beta +from .resources.chat import chat +from .resources.audio import audio +from .resources.uploads import uploads +from .resources.fine_tuning import fine_tuning -__all__ = [ - "Timeout", - "Transport", - "ProxiesTypes", - "RequestOptions", - "resources", - "OpenAI", - "AsyncOpenAI", - "Client", - "AsyncClient", -] +__all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "OpenAI", "AsyncOpenAI", "Client", "AsyncClient"] class OpenAI(SyncAPIClient): - completions: resources.Completions - chat: resources.Chat - embeddings: resources.Embeddings - files: resources.Files - images: resources.Images - audio: resources.Audio - moderations: resources.Moderations - models: resources.Models - fine_tuning: resources.FineTuning - beta: resources.Beta - batches: resources.Batches - uploads: resources.Uploads + completions: completions.Completions + chat: chat.Chat + embeddings: embeddings.Embeddings + files: files.Files + images: images.Images + audio: audio.Audio + moderations: moderations.Moderations + models: models.Models + fine_tuning: fine_tuning.FineTuning + beta: beta.Beta + batches: batches.Batches + uploads: uploads.Uploads with_raw_response: OpenAIWithRawResponse with_streaming_response: OpenAIWithStreamedResponse @@ -133,18 +129,18 @@ def __init__( self._default_stream_cls = Stream - self.completions = resources.Completions(self) - self.chat = resources.Chat(self) - self.embeddings = resources.Embeddings(self) - self.files = resources.Files(self) - self.images = resources.Images(self) - self.audio = resources.Audio(self) - self.moderations = resources.Moderations(self) - self.models = resources.Models(self) - self.fine_tuning = resources.FineTuning(self) - self.beta = resources.Beta(self) - self.batches = resources.Batches(self) - self.uploads = resources.Uploads(self) + self.completions = completions.Completions(self) + self.chat = chat.Chat(self) + self.embeddings = embeddings.Embeddings(self) + self.files = files.Files(self) + self.images = images.Images(self) + self.audio = audio.Audio(self) + self.moderations = moderations.Moderations(self) + self.models = models.Models(self) + self.fine_tuning = fine_tuning.FineTuning(self) + self.beta = beta.Beta(self) + self.batches = batches.Batches(self) + self.uploads = uploads.Uploads(self) self.with_raw_response = OpenAIWithRawResponse(self) self.with_streaming_response = OpenAIWithStreamedResponse(self) @@ -261,18 +257,18 @@ def _make_status_error( class AsyncOpenAI(AsyncAPIClient): - completions: resources.AsyncCompletions - chat: resources.AsyncChat - embeddings: resources.AsyncEmbeddings - files: resources.AsyncFiles - images: resources.AsyncImages - audio: resources.AsyncAudio - moderations: resources.AsyncModerations - models: resources.AsyncModels - fine_tuning: resources.AsyncFineTuning - beta: resources.AsyncBeta - batches: resources.AsyncBatches - uploads: resources.AsyncUploads + completions: completions.AsyncCompletions + chat: chat.AsyncChat + embeddings: embeddings.AsyncEmbeddings + files: files.AsyncFiles + images: images.AsyncImages + audio: 
audio.AsyncAudio + moderations: moderations.AsyncModerations + models: models.AsyncModels + fine_tuning: fine_tuning.AsyncFineTuning + beta: beta.AsyncBeta + batches: batches.AsyncBatches + uploads: uploads.AsyncUploads with_raw_response: AsyncOpenAIWithRawResponse with_streaming_response: AsyncOpenAIWithStreamedResponse @@ -347,18 +343,18 @@ def __init__( self._default_stream_cls = AsyncStream - self.completions = resources.AsyncCompletions(self) - self.chat = resources.AsyncChat(self) - self.embeddings = resources.AsyncEmbeddings(self) - self.files = resources.AsyncFiles(self) - self.images = resources.AsyncImages(self) - self.audio = resources.AsyncAudio(self) - self.moderations = resources.AsyncModerations(self) - self.models = resources.AsyncModels(self) - self.fine_tuning = resources.AsyncFineTuning(self) - self.beta = resources.AsyncBeta(self) - self.batches = resources.AsyncBatches(self) - self.uploads = resources.AsyncUploads(self) + self.completions = completions.AsyncCompletions(self) + self.chat = chat.AsyncChat(self) + self.embeddings = embeddings.AsyncEmbeddings(self) + self.files = files.AsyncFiles(self) + self.images = images.AsyncImages(self) + self.audio = audio.AsyncAudio(self) + self.moderations = moderations.AsyncModerations(self) + self.models = models.AsyncModels(self) + self.fine_tuning = fine_tuning.AsyncFineTuning(self) + self.beta = beta.AsyncBeta(self) + self.batches = batches.AsyncBatches(self) + self.uploads = uploads.AsyncUploads(self) self.with_raw_response = AsyncOpenAIWithRawResponse(self) self.with_streaming_response = AsyncOpenAIWithStreamedResponse(self) @@ -476,66 +472,66 @@ def _make_status_error( class OpenAIWithRawResponse: def __init__(self, client: OpenAI) -> None: - self.completions = resources.CompletionsWithRawResponse(client.completions) - self.chat = resources.ChatWithRawResponse(client.chat) - self.embeddings = resources.EmbeddingsWithRawResponse(client.embeddings) - self.files = resources.FilesWithRawResponse(client.files) - self.images = resources.ImagesWithRawResponse(client.images) - self.audio = resources.AudioWithRawResponse(client.audio) - self.moderations = resources.ModerationsWithRawResponse(client.moderations) - self.models = resources.ModelsWithRawResponse(client.models) - self.fine_tuning = resources.FineTuningWithRawResponse(client.fine_tuning) - self.beta = resources.BetaWithRawResponse(client.beta) - self.batches = resources.BatchesWithRawResponse(client.batches) - self.uploads = resources.UploadsWithRawResponse(client.uploads) + self.completions = completions.CompletionsWithRawResponse(client.completions) + self.chat = chat.ChatWithRawResponse(client.chat) + self.embeddings = embeddings.EmbeddingsWithRawResponse(client.embeddings) + self.files = files.FilesWithRawResponse(client.files) + self.images = images.ImagesWithRawResponse(client.images) + self.audio = audio.AudioWithRawResponse(client.audio) + self.moderations = moderations.ModerationsWithRawResponse(client.moderations) + self.models = models.ModelsWithRawResponse(client.models) + self.fine_tuning = fine_tuning.FineTuningWithRawResponse(client.fine_tuning) + self.beta = beta.BetaWithRawResponse(client.beta) + self.batches = batches.BatchesWithRawResponse(client.batches) + self.uploads = uploads.UploadsWithRawResponse(client.uploads) class AsyncOpenAIWithRawResponse: def __init__(self, client: AsyncOpenAI) -> None: - self.completions = resources.AsyncCompletionsWithRawResponse(client.completions) - self.chat = resources.AsyncChatWithRawResponse(client.chat) - 
self.embeddings = resources.AsyncEmbeddingsWithRawResponse(client.embeddings) - self.files = resources.AsyncFilesWithRawResponse(client.files) - self.images = resources.AsyncImagesWithRawResponse(client.images) - self.audio = resources.AsyncAudioWithRawResponse(client.audio) - self.moderations = resources.AsyncModerationsWithRawResponse(client.moderations) - self.models = resources.AsyncModelsWithRawResponse(client.models) - self.fine_tuning = resources.AsyncFineTuningWithRawResponse(client.fine_tuning) - self.beta = resources.AsyncBetaWithRawResponse(client.beta) - self.batches = resources.AsyncBatchesWithRawResponse(client.batches) - self.uploads = resources.AsyncUploadsWithRawResponse(client.uploads) + self.completions = completions.AsyncCompletionsWithRawResponse(client.completions) + self.chat = chat.AsyncChatWithRawResponse(client.chat) + self.embeddings = embeddings.AsyncEmbeddingsWithRawResponse(client.embeddings) + self.files = files.AsyncFilesWithRawResponse(client.files) + self.images = images.AsyncImagesWithRawResponse(client.images) + self.audio = audio.AsyncAudioWithRawResponse(client.audio) + self.moderations = moderations.AsyncModerationsWithRawResponse(client.moderations) + self.models = models.AsyncModelsWithRawResponse(client.models) + self.fine_tuning = fine_tuning.AsyncFineTuningWithRawResponse(client.fine_tuning) + self.beta = beta.AsyncBetaWithRawResponse(client.beta) + self.batches = batches.AsyncBatchesWithRawResponse(client.batches) + self.uploads = uploads.AsyncUploadsWithRawResponse(client.uploads) class OpenAIWithStreamedResponse: def __init__(self, client: OpenAI) -> None: - self.completions = resources.CompletionsWithStreamingResponse(client.completions) - self.chat = resources.ChatWithStreamingResponse(client.chat) - self.embeddings = resources.EmbeddingsWithStreamingResponse(client.embeddings) - self.files = resources.FilesWithStreamingResponse(client.files) - self.images = resources.ImagesWithStreamingResponse(client.images) - self.audio = resources.AudioWithStreamingResponse(client.audio) - self.moderations = resources.ModerationsWithStreamingResponse(client.moderations) - self.models = resources.ModelsWithStreamingResponse(client.models) - self.fine_tuning = resources.FineTuningWithStreamingResponse(client.fine_tuning) - self.beta = resources.BetaWithStreamingResponse(client.beta) - self.batches = resources.BatchesWithStreamingResponse(client.batches) - self.uploads = resources.UploadsWithStreamingResponse(client.uploads) + self.completions = completions.CompletionsWithStreamingResponse(client.completions) + self.chat = chat.ChatWithStreamingResponse(client.chat) + self.embeddings = embeddings.EmbeddingsWithStreamingResponse(client.embeddings) + self.files = files.FilesWithStreamingResponse(client.files) + self.images = images.ImagesWithStreamingResponse(client.images) + self.audio = audio.AudioWithStreamingResponse(client.audio) + self.moderations = moderations.ModerationsWithStreamingResponse(client.moderations) + self.models = models.ModelsWithStreamingResponse(client.models) + self.fine_tuning = fine_tuning.FineTuningWithStreamingResponse(client.fine_tuning) + self.beta = beta.BetaWithStreamingResponse(client.beta) + self.batches = batches.BatchesWithStreamingResponse(client.batches) + self.uploads = uploads.UploadsWithStreamingResponse(client.uploads) class AsyncOpenAIWithStreamedResponse: def __init__(self, client: AsyncOpenAI) -> None: - self.completions = resources.AsyncCompletionsWithStreamingResponse(client.completions) - self.chat = 
resources.AsyncChatWithStreamingResponse(client.chat) - self.embeddings = resources.AsyncEmbeddingsWithStreamingResponse(client.embeddings) - self.files = resources.AsyncFilesWithStreamingResponse(client.files) - self.images = resources.AsyncImagesWithStreamingResponse(client.images) - self.audio = resources.AsyncAudioWithStreamingResponse(client.audio) - self.moderations = resources.AsyncModerationsWithStreamingResponse(client.moderations) - self.models = resources.AsyncModelsWithStreamingResponse(client.models) - self.fine_tuning = resources.AsyncFineTuningWithStreamingResponse(client.fine_tuning) - self.beta = resources.AsyncBetaWithStreamingResponse(client.beta) - self.batches = resources.AsyncBatchesWithStreamingResponse(client.batches) - self.uploads = resources.AsyncUploadsWithStreamingResponse(client.uploads) + self.completions = completions.AsyncCompletionsWithStreamingResponse(client.completions) + self.chat = chat.AsyncChatWithStreamingResponse(client.chat) + self.embeddings = embeddings.AsyncEmbeddingsWithStreamingResponse(client.embeddings) + self.files = files.AsyncFilesWithStreamingResponse(client.files) + self.images = images.AsyncImagesWithStreamingResponse(client.images) + self.audio = audio.AsyncAudioWithStreamingResponse(client.audio) + self.moderations = moderations.AsyncModerationsWithStreamingResponse(client.moderations) + self.models = models.AsyncModelsWithStreamingResponse(client.models) + self.fine_tuning = fine_tuning.AsyncFineTuningWithStreamingResponse(client.fine_tuning) + self.beta = beta.AsyncBetaWithStreamingResponse(client.beta) + self.batches = batches.AsyncBatchesWithStreamingResponse(client.batches) + self.uploads = uploads.AsyncUploadsWithStreamingResponse(client.uploads) Client = OpenAI From ceff94d86ae1f28d476453837d7196b48f1f2048 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 15:53:17 +0000 Subject: [PATCH 130/192] docs(readme): example snippet for client context manager (#1953) --- README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/README.md b/README.md index d26a031a98..d8eb0aebf5 100644 --- a/README.md +++ b/README.md @@ -496,6 +496,16 @@ client.with_options(http_client=DefaultHttpxClient(...)) By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting. +```py +from openai import OpenAI + +with OpenAI() as client: + # make requests here + ... 
+ +# HTTP client is now closed +``` + ## Versioning This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions: From bce89988872ec7ffda25526026e5dd38017e0094 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 17:26:44 +0000 Subject: [PATCH 131/192] chore(internal): fix some typos (#1955) --- tests/test_client.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_client.py b/tests/test_client.py index 7caa8cb319..7751e7d463 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -349,11 +349,11 @@ def test_default_query_option(self) -> None: FinalRequestOptions( method="get", url="/foo", - params={"foo": "baz", "query_param": "overriden"}, + params={"foo": "baz", "query_param": "overridden"}, ) ) url = httpx.URL(request.url) - assert dict(url.params) == {"foo": "baz", "query_param": "overriden"} + assert dict(url.params) == {"foo": "baz", "query_param": "overridden"} def test_request_extra_json(self) -> None: request = self.client._build_request( @@ -1201,11 +1201,11 @@ def test_default_query_option(self) -> None: FinalRequestOptions( method="get", url="/foo", - params={"foo": "baz", "query_param": "overriden"}, + params={"foo": "baz", "query_param": "overridden"}, ) ) url = httpx.URL(request.url) - assert dict(url.params) == {"foo": "baz", "query_param": "overriden"} + assert dict(url.params) == {"foo": "baz", "query_param": "overridden"} def test_request_extra_json(self) -> None: request = self.client._build_request( From b3143fd4baf16d10641e3dfc4ea701a0c3142a3f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 17:56:47 +0000 Subject: [PATCH 132/192] feat(api): new o1 and GPT-4o models + preference fine-tuning (#1956) learn more here: https://platform.openai.com/docs/changelog --- .stats.yml | 4 +- api.md | 16 + src/openai/resources/beta/beta.py | 32 ++ .../resources/beta/realtime/__init__.py | 33 ++ .../resources/beta/realtime/realtime.py | 102 ++++++ .../resources/beta/realtime/sessions.py | 337 ++++++++++++++++++ src/openai/resources/chat/completions.py | 240 ++++++++----- src/openai/resources/fine_tuning/jobs/jobs.py | 22 +- src/openai/types/beta/realtime/__init__.py | 6 + .../beta/realtime/session_create_params.py | 149 ++++++++ .../beta/realtime/session_create_response.py | 150 ++++++++ src/openai/types/chat/__init__.py | 4 + ...chat_completion_developer_message_param.py | 25 ++ .../chat/chat_completion_message_param.py | 2 + .../chat/chat_completion_reasoning_effort.py | 7 + .../types/chat/completion_create_params.py | 34 +- src/openai/types/chat_model.py | 7 +- .../types/fine_tuning/fine_tuning_job.py | 106 +++++- .../fine_tuning/fine_tuning_job_event.py | 13 + .../types/fine_tuning/job_create_params.py | 94 ++++- tests/api_resources/beta/realtime/__init__.py | 1 + .../beta/realtime/test_sessions.py | 146 ++++++++ tests/api_resources/chat/test_completions.py | 36 +- tests/api_resources/fine_tuning/test_jobs.py | 36 ++ tests/test_client.py | 16 +- 25 files changed, 1471 insertions(+), 147 deletions(-) create mode 100644 src/openai/resources/beta/realtime/__init__.py create mode 100644 src/openai/resources/beta/realtime/realtime.py create mode 100644 src/openai/resources/beta/realtime/sessions.py create mode 100644 src/openai/types/beta/realtime/__init__.py create mode 
100644 src/openai/types/beta/realtime/session_create_params.py create mode 100644 src/openai/types/beta/realtime/session_create_response.py create mode 100644 src/openai/types/chat/chat_completion_developer_message_param.py create mode 100644 src/openai/types/chat/chat_completion_reasoning_effort.py create mode 100644 tests/api_resources/beta/realtime/__init__.py create mode 100644 tests/api_resources/beta/realtime/test_sessions.py diff --git a/.stats.yml b/.stats.yml index 3cc042fe0a..e3a0040a5a 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2e0e0678be19d1118fd796af291822075e40538dba326611e177e9f3dc245a53.yml +configured_endpoints: 69 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-779ea2754025daf5e18eb8ceb203ec321692636bc3a999338556a479178efa6c.yml diff --git a/api.md b/api.md index 3e88faab91..f51089745d 100644 --- a/api.md +++ b/api.md @@ -47,6 +47,7 @@ from openai.types.chat import ( ChatCompletionContentPartInputAudio, ChatCompletionContentPartRefusal, ChatCompletionContentPartText, + ChatCompletionDeveloperMessageParam, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, ChatCompletionMessage, @@ -55,6 +56,7 @@ from openai.types.chat import ( ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionPredictionContent, + ChatCompletionReasoningEffort, ChatCompletionRole, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, @@ -234,6 +236,20 @@ Methods: # Beta +## Realtime + +### Sessions + +Types: + +```python +from openai.types.beta.realtime import Session, SessionCreateResponse +``` + +Methods: + +- client.beta.realtime.sessions.create(\*\*params) -> SessionCreateResponse + ## VectorStores Types: diff --git a/src/openai/resources/beta/beta.py b/src/openai/resources/beta/beta.py index 642ba664ba..42ea9b88e5 100644 --- a/src/openai/resources/beta/beta.py +++ b/src/openai/resources/beta/beta.py @@ -20,6 +20,14 @@ ThreadsWithStreamingResponse, AsyncThreadsWithStreamingResponse, ) +from .realtime.realtime import ( + Realtime, + AsyncRealtime, + RealtimeWithRawResponse, + AsyncRealtimeWithRawResponse, + RealtimeWithStreamingResponse, + AsyncRealtimeWithStreamingResponse, +) from .vector_stores.vector_stores import ( VectorStores, AsyncVectorStores, @@ -33,6 +41,10 @@ class Beta(SyncAPIResource): + @cached_property + def realtime(self) -> Realtime: + return Realtime(self._client) + @cached_property def vector_stores(self) -> VectorStores: return VectorStores(self._client) @@ -66,6 +78,10 @@ def with_streaming_response(self) -> BetaWithStreamingResponse: class AsyncBeta(AsyncAPIResource): + @cached_property + def realtime(self) -> AsyncRealtime: + return AsyncRealtime(self._client) + @cached_property def vector_stores(self) -> AsyncVectorStores: return AsyncVectorStores(self._client) @@ -102,6 +118,10 @@ class BetaWithRawResponse: def __init__(self, beta: Beta) -> None: self._beta = beta + @cached_property + def realtime(self) -> RealtimeWithRawResponse: + return RealtimeWithRawResponse(self._beta.realtime) + @cached_property def vector_stores(self) -> VectorStoresWithRawResponse: return VectorStoresWithRawResponse(self._beta.vector_stores) @@ -119,6 +139,10 @@ class AsyncBetaWithRawResponse: def __init__(self, beta: AsyncBeta) -> None: self._beta = beta + @cached_property + def realtime(self) -> AsyncRealtimeWithRawResponse: + return AsyncRealtimeWithRawResponse(self._beta.realtime) + 
@cached_property def vector_stores(self) -> AsyncVectorStoresWithRawResponse: return AsyncVectorStoresWithRawResponse(self._beta.vector_stores) @@ -136,6 +160,10 @@ class BetaWithStreamingResponse: def __init__(self, beta: Beta) -> None: self._beta = beta + @cached_property + def realtime(self) -> RealtimeWithStreamingResponse: + return RealtimeWithStreamingResponse(self._beta.realtime) + @cached_property def vector_stores(self) -> VectorStoresWithStreamingResponse: return VectorStoresWithStreamingResponse(self._beta.vector_stores) @@ -153,6 +181,10 @@ class AsyncBetaWithStreamingResponse: def __init__(self, beta: AsyncBeta) -> None: self._beta = beta + @cached_property + def realtime(self) -> AsyncRealtimeWithStreamingResponse: + return AsyncRealtimeWithStreamingResponse(self._beta.realtime) + @cached_property def vector_stores(self) -> AsyncVectorStoresWithStreamingResponse: return AsyncVectorStoresWithStreamingResponse(self._beta.vector_stores) diff --git a/src/openai/resources/beta/realtime/__init__.py b/src/openai/resources/beta/realtime/__init__.py new file mode 100644 index 0000000000..474434e6e1 --- /dev/null +++ b/src/openai/resources/beta/realtime/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .realtime import ( + Realtime, + AsyncRealtime, + RealtimeWithRawResponse, + AsyncRealtimeWithRawResponse, + RealtimeWithStreamingResponse, + AsyncRealtimeWithStreamingResponse, +) +from .sessions import ( + Sessions, + AsyncSessions, + SessionsWithRawResponse, + AsyncSessionsWithRawResponse, + SessionsWithStreamingResponse, + AsyncSessionsWithStreamingResponse, +) + +__all__ = [ + "Sessions", + "AsyncSessions", + "SessionsWithRawResponse", + "AsyncSessionsWithRawResponse", + "SessionsWithStreamingResponse", + "AsyncSessionsWithStreamingResponse", + "Realtime", + "AsyncRealtime", + "RealtimeWithRawResponse", + "AsyncRealtimeWithRawResponse", + "RealtimeWithStreamingResponse", + "AsyncRealtimeWithStreamingResponse", +] diff --git a/src/openai/resources/beta/realtime/realtime.py b/src/openai/resources/beta/realtime/realtime.py new file mode 100644 index 0000000000..e57e0be503 --- /dev/null +++ b/src/openai/resources/beta/realtime/realtime.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .sessions import ( + Sessions, + AsyncSessions, + SessionsWithRawResponse, + AsyncSessionsWithRawResponse, + SessionsWithStreamingResponse, + AsyncSessionsWithStreamingResponse, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["Realtime", "AsyncRealtime"] + + +class Realtime(SyncAPIResource): + @cached_property + def sessions(self) -> Sessions: + return Sessions(self._client) + + @cached_property + def with_raw_response(self) -> RealtimeWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return RealtimeWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> RealtimeWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return RealtimeWithStreamingResponse(self) + + +class AsyncRealtime(AsyncAPIResource): + @cached_property + def sessions(self) -> AsyncSessions: + return AsyncSessions(self._client) + + @cached_property + def with_raw_response(self) -> AsyncRealtimeWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncRealtimeWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncRealtimeWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncRealtimeWithStreamingResponse(self) + + +class RealtimeWithRawResponse: + def __init__(self, realtime: Realtime) -> None: + self._realtime = realtime + + @cached_property + def sessions(self) -> SessionsWithRawResponse: + return SessionsWithRawResponse(self._realtime.sessions) + + +class AsyncRealtimeWithRawResponse: + def __init__(self, realtime: AsyncRealtime) -> None: + self._realtime = realtime + + @cached_property + def sessions(self) -> AsyncSessionsWithRawResponse: + return AsyncSessionsWithRawResponse(self._realtime.sessions) + + +class RealtimeWithStreamingResponse: + def __init__(self, realtime: Realtime) -> None: + self._realtime = realtime + + @cached_property + def sessions(self) -> SessionsWithStreamingResponse: + return SessionsWithStreamingResponse(self._realtime.sessions) + + +class AsyncRealtimeWithStreamingResponse: + def __init__(self, realtime: AsyncRealtime) -> None: + self._realtime = realtime + + @cached_property + def sessions(self) -> AsyncSessionsWithStreamingResponse: + return AsyncSessionsWithStreamingResponse(self._realtime.sessions) diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py new file mode 100644 index 0000000000..1d1ee701e5 --- /dev/null +++ b/src/openai/resources/beta/realtime/sessions.py @@ -0,0 +1,337 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal + +import httpx + +from .... import _legacy_response +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import ( + maybe_transform, + async_maybe_transform, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ...._base_client import make_request_options +from ....types.beta.realtime import session_create_params +from ....types.beta.realtime.session_create_response import SessionCreateResponse + +__all__ = ["Sessions", "AsyncSessions"] + + +class Sessions(SyncAPIResource): + @cached_property + def with_raw_response(self) -> SessionsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return SessionsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> SessionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return SessionsWithStreamingResponse(self) + + def create( + self, + *, + model: Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ], + input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, + input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN, + instructions: str | NotGiven = NOT_GIVEN, + max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN, + modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + tool_choice: str | NotGiven = NOT_GIVEN, + tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN, + turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN, + voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SessionCreateResponse: + """ + Create an ephemeral API token for use in client-side applications with the + Realtime API. Can be configured with the same session parameters as the + `session.update` client event. + + It responds with a session object, plus a `client_secret` key which contains a + usable ephemeral API token that can be used to authenticate browser clients for + the Realtime API. + + Args: + model: The Realtime model used for this session. + + input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + + input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to + `null` to turn off once on. Input audio transcription is not native to the + model, since the model consumes audio directly. Transcription runs + asynchronously through Whisper and should be treated as rough guidance rather + than the representation understood by the model. + + instructions: The default system instructions (i.e. system message) prepended to model calls. + This field allows the client to guide the model on desired responses. The model + can be instructed on response content and format, (e.g. "be extremely succinct", + "act friendly", "here are examples of good responses") and on audio behavior + (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The + instructions are not guaranteed to be followed by the model, but they provide + guidance to the model on the desired behavior. 
+ + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + + max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + + modalities: The set of modalities the model can respond with. To disable audio, set this to + ["text"]. + + output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + + temperature: Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + + tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify + a function. + + tools: Tools (functions) available to the model. + + turn_detection: Configuration for turn detection. Can be set to `null` to turn off. Server VAD + means that the model will detect the start and end of speech based on audio + volume and respond at the end of user speech. + + voice: The voice the model uses to respond. Voice cannot be changed during the session + once the model has responded with audio at least once. Current voice options are + `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return self._post( + "/realtime/sessions", + body=maybe_transform( + { + "model": model, + "input_audio_format": input_audio_format, + "input_audio_transcription": input_audio_transcription, + "instructions": instructions, + "max_response_output_tokens": max_response_output_tokens, + "modalities": modalities, + "output_audio_format": output_audio_format, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "turn_detection": turn_detection, + "voice": voice, + }, + session_create_params.SessionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=SessionCreateResponse, + ) + + +class AsyncSessions(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncSessionsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncSessionsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncSessionsWithStreamingResponse(self) + + async def create( + self, + *, + model: Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ], + input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, + input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN, + instructions: str | NotGiven = NOT_GIVEN, + max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN, + modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + tool_choice: str | NotGiven = NOT_GIVEN, + tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN, + turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN, + voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SessionCreateResponse: + """ + Create an ephemeral API token for use in client-side applications with the + Realtime API. Can be configured with the same session parameters as the + `session.update` client event. + + It responds with a session object, plus a `client_secret` key which contains a + usable ephemeral API token that can be used to authenticate browser clients for + the Realtime API. + + Args: + model: The Realtime model used for this session. + + input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + + input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to + `null` to turn off once on. Input audio transcription is not native to the + model, since the model consumes audio directly. Transcription runs + asynchronously through Whisper and should be treated as rough guidance rather + than the representation understood by the model. + + instructions: The default system instructions (i.e. system message) prepended to model calls. + This field allows the client to guide the model on desired responses. The model + can be instructed on response content and format, (e.g. "be extremely succinct", + "act friendly", "here are examples of good responses") and on audio behavior + (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The + instructions are not guaranteed to be followed by the model, but they provide + guidance to the model on the desired behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + + max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. 
Defaults to `inf`. + + modalities: The set of modalities the model can respond with. To disable audio, set this to + ["text"]. + + output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + + temperature: Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + + tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify + a function. + + tools: Tools (functions) available to the model. + + turn_detection: Configuration for turn detection. Can be set to `null` to turn off. Server VAD + means that the model will detect the start and end of speech based on audio + volume and respond at the end of user speech. + + voice: The voice the model uses to respond. Voice cannot be changed during the session + once the model has responded with audio at least once. Current voice options are + `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return await self._post( + "/realtime/sessions", + body=await async_maybe_transform( + { + "model": model, + "input_audio_format": input_audio_format, + "input_audio_transcription": input_audio_transcription, + "instructions": instructions, + "max_response_output_tokens": max_response_output_tokens, + "modalities": modalities, + "output_audio_format": output_audio_format, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "turn_detection": turn_detection, + "voice": voice, + }, + session_create_params.SessionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=SessionCreateResponse, + ) + + +class SessionsWithRawResponse: + def __init__(self, sessions: Sessions) -> None: + self._sessions = sessions + + self.create = _legacy_response.to_raw_response_wrapper( + sessions.create, + ) + + +class AsyncSessionsWithRawResponse: + def __init__(self, sessions: AsyncSessions) -> None: + self._sessions = sessions + + self.create = _legacy_response.async_to_raw_response_wrapper( + sessions.create, + ) + + +class SessionsWithStreamingResponse: + def __init__(self, sessions: Sessions) -> None: + self._sessions = sessions + + self.create = to_streamed_response_wrapper( + sessions.create, + ) + + +class AsyncSessionsWithStreamingResponse: + def __init__(self, sessions: AsyncSessions) -> None: + self._sessions = sessions + + self.create = async_to_streamed_response_wrapper( + sessions.create, + ) diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index 84e6cf9b72..f13b8c0b45 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -20,6 +20,7 @@ from ..._streaming import Stream, AsyncStream from ...types.chat import ( ChatCompletionAudioParam, + ChatCompletionReasoningEffort, completion_create_params, ) from ..._base_client import make_request_options @@ -30,6 +31,7 @@ from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam from ...types.chat.chat_completion_audio_param import ChatCompletionAudioParam from ...types.chat.chat_completion_message_param import ChatCompletionMessageParam +from 
...types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort from ...types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam from ...types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam from ...types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam @@ -77,6 +79,7 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, @@ -104,6 +107,12 @@ def create( [vision](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio) guides. + Parameter support can differ depending on the model used to generate the + response, particularly for newer reasoning models. Parameters that are only + supported for reasoning models are noted below. For the current state of + unsupported parameters in reasoning models, + [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). + Args: messages: A list of messages comprising the conversation so far. Depending on the [model](https://platform.openai.com/docs/models) you use, different message @@ -124,16 +133,18 @@ def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) - function_call: Deprecated in favor of `tool_choice`. - Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via `{"name": "my_function"}` forces the model to call that + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + `none` is the default when no functions are present. `auto` is the default if functions are present. @@ -195,13 +206,14 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + reasoning_effort: **o1 models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. - response_format: An object specifying the format that the model must output. 
Compatible with - [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and - all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + response_format: An object specifying the format that the model must output. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more @@ -257,9 +269,8 @@ def create( temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - We generally recommend altering this or `top_p` but not both. + focused and deterministic. We generally recommend altering this or `top_p` but + not both. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can @@ -320,6 +331,7 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, @@ -346,6 +358,12 @@ def create( [vision](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio) guides. + Parameter support can differ depending on the model used to generate the + response, particularly for newer reasoning models. Parameters that are only + supported for reasoning models are noted below. For the current state of + unsupported parameters in reasoning models, + [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). + Args: messages: A list of messages comprising the conversation so far. Depending on the [model](https://platform.openai.com/docs/models) you use, different message @@ -373,16 +391,18 @@ def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) - function_call: Deprecated in favor of `tool_choice`. - Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via `{"name": "my_function"}` forces the model to call that + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + `none` is the default when no functions are present. `auto` is the default if functions are present. @@ -444,13 +464,14 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + reasoning_effort: **o1 models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. - response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and - all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + response_format: An object specifying the format that the model must output. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more @@ -499,9 +520,8 @@ def create( temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - We generally recommend altering this or `top_p` but not both. + focused and deterministic. We generally recommend altering this or `top_p` but + not both. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can @@ -562,6 +582,7 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, @@ -588,6 +609,12 @@ def create( [vision](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio) guides. + Parameter support can differ depending on the model used to generate the + response, particularly for newer reasoning models. Parameters that are only + supported for reasoning models are noted below. For the current state of + unsupported parameters in reasoning models, + [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). + Args: messages: A list of messages comprising the conversation so far. Depending on the [model](https://platform.openai.com/docs/models) you use, different message @@ -615,16 +642,18 @@ def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) - function_call: Deprecated in favor of `tool_choice`. - Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via `{"name": "my_function"}` forces the model to call that + Controls which (if any) function is called by the model. 
+ + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + `none` is the default when no functions are present. `auto` is the default if functions are present. @@ -686,13 +715,14 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + reasoning_effort: **o1 models only** - response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and - all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + + response_format: An object specifying the format that the model must output. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more @@ -741,9 +771,8 @@ def create( temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - We generally recommend altering this or `top_p` but not both. + focused and deterministic. We generally recommend altering this or `top_p` but + not both. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can @@ -803,6 +832,7 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, @@ -843,6 +873,7 @@ def create( "parallel_tool_calls": parallel_tool_calls, "prediction": prediction, "presence_penalty": presence_penalty, + "reasoning_effort": reasoning_effort, "response_format": response_format, "seed": seed, "service_tier": service_tier, @@ -908,6 +939,7 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, @@ -935,6 +967,12 @@ async def create( [vision](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio) guides. 
+ Parameter support can differ depending on the model used to generate the + response, particularly for newer reasoning models. Parameters that are only + supported for reasoning models are noted below. For the current state of + unsupported parameters in reasoning models, + [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). + Args: messages: A list of messages comprising the conversation so far. Depending on the [model](https://platform.openai.com/docs/models) you use, different message @@ -955,16 +993,18 @@ async def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) - function_call: Deprecated in favor of `tool_choice`. - Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via `{"name": "my_function"}` forces the model to call that + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + `none` is the default when no functions are present. `auto` is the default if functions are present. @@ -1026,13 +1066,14 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + reasoning_effort: **o1 models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. - response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and - all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + response_format: An object specifying the format that the model must output. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more @@ -1088,9 +1129,8 @@ async def create( temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - We generally recommend altering this or `top_p` but not both. + focused and deterministic. We generally recommend altering this or `top_p` but + not both. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. 
`auto` means the model can @@ -1151,6 +1191,7 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, @@ -1177,6 +1218,12 @@ async def create( [vision](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio) guides. + Parameter support can differ depending on the model used to generate the + response, particularly for newer reasoning models. Parameters that are only + supported for reasoning models are noted below. For the current state of + unsupported parameters in reasoning models, + [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). + Args: messages: A list of messages comprising the conversation so far. Depending on the [model](https://platform.openai.com/docs/models) you use, different message @@ -1204,16 +1251,18 @@ async def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) - function_call: Deprecated in favor of `tool_choice`. - Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via `{"name": "my_function"}` forces the model to call that + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + `none` is the default when no functions are present. `auto` is the default if functions are present. @@ -1275,13 +1324,14 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + reasoning_effort: **o1 models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. - response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and - all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + response_format: An object specifying the format that the model must output. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. 
Learn more @@ -1330,9 +1380,8 @@ async def create( temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - We generally recommend altering this or `top_p` but not both. + focused and deterministic. We generally recommend altering this or `top_p` but + not both. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can @@ -1393,6 +1442,7 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, @@ -1419,6 +1469,12 @@ async def create( [vision](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio) guides. + Parameter support can differ depending on the model used to generate the + response, particularly for newer reasoning models. Parameters that are only + supported for reasoning models are noted below. For the current state of + unsupported parameters in reasoning models, + [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). + Args: messages: A list of messages comprising the conversation so far. Depending on the [model](https://platform.openai.com/docs/models) you use, different message @@ -1446,16 +1502,18 @@ async def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) - function_call: Deprecated in favor of `tool_choice`. - Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via `{"name": "my_function"}` forces the model to call that + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + `none` is the default when no functions are present. `auto` is the default if functions are present. @@ -1517,13 +1575,14 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + reasoning_effort: **o1 models only** - response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and - all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. 
+ Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + + response_format: An object specifying the format that the model must output. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more @@ -1572,9 +1631,8 @@ async def create( temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - We generally recommend altering this or `top_p` but not both. + focused and deterministic. We generally recommend altering this or `top_p` but + not both. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can @@ -1634,6 +1692,7 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, @@ -1674,6 +1733,7 @@ async def create( "parallel_tool_calls": parallel_tool_calls, "prediction": prediction, "presence_penalty": presence_penalty, + "reasoning_effort": reasoning_effort, "response_format": response_format, "seed": seed, "service_tier": service_tier, diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py index 4024bf79f3..21773fbf96 100644 --- a/src/openai/resources/fine_tuning/jobs/jobs.py +++ b/src/openai/resources/fine_tuning/jobs/jobs.py @@ -64,6 +64,7 @@ def create( training_file: str, hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN, + method: job_create_params.Method | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, validation_file: Optional[str] | NotGiven = NOT_GIVEN, @@ -96,17 +97,22 @@ def create( your file with the purpose `fine-tune`. The contents of the file should differ depending on if the model uses the - [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + format, or if the fine-tuning method uses the + [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) format. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. - hyperparameters: The hyperparameters used for the fine-tuning job. + hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated + in favor of `method`, and should be passed in under the `method` parameter. integrations: A list of integrations to enable for your fine-tuning job. + method: The method used for fine-tuning. 
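As a rough sketch of the new `method` parameter described above, a supervised job might be created like this; the file ID is a placeholder for a file already uploaded with purpose `fine-tune`, and the hyperparameter values are illustrative only:

    from openai import OpenAI

    client = OpenAI()

    job = client.fine_tuning.jobs.create(
        model="gpt-4o-mini",
        training_file="file-abc123",  # placeholder ID of an uploaded fine-tune file
        method={
            "type": "supervised",
            "supervised": {"hyperparameters": {"n_epochs": "auto"}},
        },
    )
    print(job.id, job.status)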
+ seed: The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. If a seed is not specified, one will be generated for you. @@ -146,6 +152,7 @@ def create( "training_file": training_file, "hyperparameters": hyperparameters, "integrations": integrations, + "method": method, "seed": seed, "suffix": suffix, "validation_file": validation_file, @@ -355,6 +362,7 @@ async def create( training_file: str, hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN, + method: job_create_params.Method | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, validation_file: Optional[str] | NotGiven = NOT_GIVEN, @@ -387,17 +395,22 @@ async def create( your file with the purpose `fine-tune`. The contents of the file should differ depending on if the model uses the - [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + format, or if the fine-tuning method uses the + [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) format. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. - hyperparameters: The hyperparameters used for the fine-tuning job. + hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated + in favor of `method`, and should be passed in under the `method` parameter. integrations: A list of integrations to enable for your fine-tuning job. + method: The method used for fine-tuning. + seed: The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. If a seed is not specified, one will be generated for you. @@ -437,6 +450,7 @@ async def create( "training_file": training_file, "hyperparameters": hyperparameters, "integrations": integrations, + "method": method, "seed": seed, "suffix": suffix, "validation_file": validation_file, diff --git a/src/openai/types/beta/realtime/__init__.py b/src/openai/types/beta/realtime/__init__.py new file mode 100644 index 0000000000..1c5246db7a --- /dev/null +++ b/src/openai/types/beta/realtime/__init__.py @@ -0,0 +1,6 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .session_create_params import SessionCreateParams as SessionCreateParams +from .session_create_response import SessionCreateResponse as SessionCreateResponse diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py new file mode 100644 index 0000000000..f56f2c5c22 --- /dev/null +++ b/src/openai/types/beta/realtime/session_create_params.py @@ -0,0 +1,149 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
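The session configuration fields defined in this new file are consumed by `client.beta.realtime.sessions.create`, which the tests later in this patch exercise; a minimal sketch, assuming an `OPENAI_API_KEY` with Realtime access and illustrative option values:

    from openai import OpenAI

    client = OpenAI()

    session = client.beta.realtime.sessions.create(
        model="gpt-4o-realtime-preview",
        modalities=["text", "audio"],
        voice="alloy",
    )
    # The ephemeral client secret is what a browser or mobile client would use
    # to authenticate its connection to the Realtime API.
    if session.client_secret is not None:
        print(session.client_secret.value, session.client_secret.expires_at)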
+ +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["SessionCreateParams", "InputAudioTranscription", "Tool", "TurnDetection"] + + +class SessionCreateParams(TypedDict, total=False): + model: Required[ + Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + ] + """The Realtime model used for this session.""" + + input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] + """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + input_audio_transcription: InputAudioTranscription + """ + Configuration for input audio transcription, defaults to off and can be set to + `null` to turn off once on. Input audio transcription is not native to the + model, since the model consumes audio directly. Transcription runs + asynchronously through Whisper and should be treated as rough guidance rather + than the representation understood by the model. + """ + + instructions: str + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + """ + + max_response_output_tokens: Union[int, Literal["inf"]] + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + modalities: List[Literal["text", "audio"]] + """The set of modalities the model can respond with. + + To disable audio, set this to ["text"]. + """ + + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] + """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + temperature: float + """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" + + tool_choice: str + """How the model chooses tools. + + Options are `auto`, `none`, `required`, or specify a function. + """ + + tools: Iterable[Tool] + """Tools (functions) available to the model.""" + + turn_detection: TurnDetection + """Configuration for turn detection. + + Can be set to `null` to turn off. Server VAD means that the model will detect + the start and end of speech based on audio volume and respond at the end of user + speech. + """ + + voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo` `sage`, `shimmer` and `verse`. 
+ """ + + +class InputAudioTranscription(TypedDict, total=False): + model: str + """ + The model to use for transcription, `whisper-1` is the only currently supported + model. + """ + + +class Tool(TypedDict, total=False): + description: str + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). + """ + + name: str + """The name of the function.""" + + parameters: object + """Parameters of the function in JSON Schema.""" + + type: Literal["function"] + """The type of the tool, i.e. `function`.""" + + +class TurnDetection(TypedDict, total=False): + create_response: bool + """Whether or not to automatically generate a response when VAD is enabled. + + `true` by default. + """ + + prefix_padding_ms: int + """Amount of audio to include before the VAD detected speech (in milliseconds). + + Defaults to 300ms. + """ + + silence_duration_ms: int + """Duration of silence to detect speech stop (in milliseconds). + + Defaults to 500ms. With shorter values the model will respond more quickly, but + may jump in on short pauses from the user. + """ + + threshold: float + """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. + + A higher threshold will require louder audio to activate the model, and thus + might perform better in noisy environments. + """ + + type: str + """Type of turn detection, only `server_vad` is currently supported.""" diff --git a/src/openai/types/beta/realtime/session_create_response.py b/src/openai/types/beta/realtime/session_create_response.py new file mode 100644 index 0000000000..31f591b261 --- /dev/null +++ b/src/openai/types/beta/realtime/session_create_response.py @@ -0,0 +1,150 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["SessionCreateResponse", "ClientSecret", "InputAudioTranscription", "Tool", "TurnDetection"] + + +class ClientSecret(BaseModel): + expires_at: Optional[int] = None + """Timestamp for when the token expires. + + Currently, all tokens expire after one minute. + """ + + value: Optional[str] = None + """ + Ephemeral key usable in client environments to authenticate connections to the + Realtime API. Use this in client-side environments rather than a standard API + token, which should only be used server-side. + """ + + +class InputAudioTranscription(BaseModel): + model: Optional[str] = None + """ + The model to use for transcription, `whisper-1` is the only currently supported + model. + """ + + +class Tool(BaseModel): + description: Optional[str] = None + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). + """ + + name: Optional[str] = None + """The name of the function.""" + + parameters: Optional[object] = None + """Parameters of the function in JSON Schema.""" + + type: Optional[Literal["function"]] = None + """The type of the tool, i.e. `function`.""" + + +class TurnDetection(BaseModel): + prefix_padding_ms: Optional[int] = None + """Amount of audio to include before the VAD detected speech (in milliseconds). + + Defaults to 300ms. + """ + + silence_duration_ms: Optional[int] = None + """Duration of silence to detect speech stop (in milliseconds). + + Defaults to 500ms. 
With shorter values the model will respond more quickly, but + may jump in on short pauses from the user. + """ + + threshold: Optional[float] = None + """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. + + A higher threshold will require louder audio to activate the model, and thus + might perform better in noisy environments. + """ + + type: Optional[str] = None + """Type of turn detection, only `server_vad` is currently supported.""" + + +class SessionCreateResponse(BaseModel): + client_secret: Optional[ClientSecret] = None + """Ephemeral key returned by the API.""" + + input_audio_format: Optional[str] = None + """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + input_audio_transcription: Optional[InputAudioTranscription] = None + """ + Configuration for input audio transcription, defaults to off and can be set to + `null` to turn off once on. Input audio transcription is not native to the + model, since the model consumes audio directly. Transcription runs + asynchronously through Whisper and should be treated as rough guidance rather + than the representation understood by the model. + """ + + instructions: Optional[str] = None + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + """ + + max_response_output_tokens: Union[int, Literal["inf"], None] = None + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + modalities: Optional[List[Literal["text", "audio"]]] = None + """The set of modalities the model can respond with. + + To disable audio, set this to ["text"]. + """ + + output_audio_format: Optional[str] = None + """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + temperature: Optional[float] = None + """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" + + tool_choice: Optional[str] = None + """How the model chooses tools. + + Options are `auto`, `none`, `required`, or specify a function. + """ + + tools: Optional[List[Tool]] = None + """Tools (functions) available to the model.""" + + turn_detection: Optional[TurnDetection] = None + """Configuration for turn detection. + + Can be set to `null` to turn off. Server VAD means that the model will detect + the start and end of speech based on audio volume and respond at the end of user + speech. + """ + + voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo` `sage`, `shimmer` and `verse`. 
+ """ diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py index ef562a4b94..962dc51da0 100644 --- a/src/openai/types/chat/__init__.py +++ b/src/openai/types/chat/__init__.py @@ -13,6 +13,7 @@ from .chat_completion_audio_param import ChatCompletionAudioParam as ChatCompletionAudioParam from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob +from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort as ChatCompletionReasoningEffort from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam @@ -28,6 +29,9 @@ from .chat_completion_content_part_text_param import ( ChatCompletionContentPartTextParam as ChatCompletionContentPartTextParam, ) +from .chat_completion_developer_message_param import ( + ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, +) from .chat_completion_message_tool_call_param import ( ChatCompletionMessageToolCallParam as ChatCompletionMessageToolCallParam, ) diff --git a/src/openai/types/chat/chat_completion_developer_message_param.py b/src/openai/types/chat/chat_completion_developer_message_param.py new file mode 100644 index 0000000000..01e4fdb654 --- /dev/null +++ b/src/openai/types/chat/chat_completion_developer_message_param.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Iterable +from typing_extensions import Literal, Required, TypedDict + +from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam + +__all__ = ["ChatCompletionDeveloperMessageParam"] + + +class ChatCompletionDeveloperMessageParam(TypedDict, total=False): + content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] + """The contents of the developer message.""" + + role: Required[Literal["developer"]] + """The role of the messages author, in this case `developer`.""" + + name: str + """An optional name for the participant. + + Provides the model information to differentiate between participants of the same + role. 
+ """ diff --git a/src/openai/types/chat/chat_completion_message_param.py b/src/openai/types/chat/chat_completion_message_param.py index ec65d94cae..942da24304 100644 --- a/src/openai/types/chat/chat_completion_message_param.py +++ b/src/openai/types/chat/chat_completion_message_param.py @@ -10,10 +10,12 @@ from .chat_completion_system_message_param import ChatCompletionSystemMessageParam from .chat_completion_function_message_param import ChatCompletionFunctionMessageParam from .chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam +from .chat_completion_developer_message_param import ChatCompletionDeveloperMessageParam __all__ = ["ChatCompletionMessageParam"] ChatCompletionMessageParam: TypeAlias = Union[ + ChatCompletionDeveloperMessageParam, ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam, ChatCompletionAssistantMessageParam, diff --git a/src/openai/types/chat/chat_completion_reasoning_effort.py b/src/openai/types/chat/chat_completion_reasoning_effort.py new file mode 100644 index 0000000000..9e7946974a --- /dev/null +++ b/src/openai/types/chat/chat_completion_reasoning_effort.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["ChatCompletionReasoningEffort"] + +ChatCompletionReasoningEffort: TypeAlias = Literal["low", "medium", "high"] diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index e838858314..f168ddea6e 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -10,6 +10,7 @@ from .chat_completion_tool_param import ChatCompletionToolParam from .chat_completion_audio_param import ChatCompletionAudioParam from .chat_completion_message_param import ChatCompletionMessageParam +from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort from ..shared_params.function_parameters import FunctionParameters from ..shared_params.response_format_text import ResponseFormatText from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam @@ -60,19 +61,21 @@ class CompletionCreateParamsBase(TypedDict, total=False): Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) """ function_call: FunctionCall """Deprecated in favor of `tool_choice`. - Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via `{"name": "my_function"}` forces the model to call that + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + `none` is the default when no functions are present. `auto` is the default if functions are present. 
""" @@ -164,18 +167,20 @@ class CompletionCreateParamsBase(TypedDict, total=False): Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + """ + + reasoning_effort: ChatCompletionReasoningEffort + """**o1 models only** - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. """ response_format: ResponseFormat """An object specifying the format that the model must output. - Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and - all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the @@ -237,9 +242,8 @@ class CompletionCreateParamsBase(TypedDict, total=False): """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like - 0.2 will make it more focused and deterministic. - - We generally recommend altering this or `top_p` but not both. + 0.2 will make it more focused and deterministic. We generally recommend altering + this or `top_p` but not both. """ tool_choice: ChatCompletionToolChoiceOptionParam diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py index 3567a3ba65..e1ac464320 100644 --- a/src/openai/types/chat_model.py +++ b/src/openai/types/chat_model.py @@ -5,6 +5,8 @@ __all__ = ["ChatModel"] ChatModel: TypeAlias = Literal[ + "o1", + "o1-2024-12-17", "o1-preview", "o1-preview-2024-09-12", "o1-mini", @@ -13,10 +15,11 @@ "gpt-4o-2024-11-20", "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", - "gpt-4o-realtime-preview", - "gpt-4o-realtime-preview-2024-10-01", "gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01", + "gpt-4o-audio-preview-2024-12-17", + "gpt-4o-mini-audio-preview", + "gpt-4o-mini-audio-preview-2024-12-17", "chatgpt-4o-latest", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", diff --git a/src/openai/types/fine_tuning/fine_tuning_job.py b/src/openai/types/fine_tuning/fine_tuning_job.py index 7ac8792787..f5a11c2107 100644 --- a/src/openai/types/fine_tuning/fine_tuning_job.py +++ b/src/openai/types/fine_tuning/fine_tuning_job.py @@ -6,7 +6,16 @@ from ..._models import BaseModel from .fine_tuning_job_wandb_integration_object import FineTuningJobWandbIntegrationObject -__all__ = ["FineTuningJob", "Error", "Hyperparameters"] +__all__ = [ + "FineTuningJob", + "Error", + "Hyperparameters", + "Method", + "MethodDpo", + "MethodDpoHyperparameters", + "MethodSupervised", + "MethodSupervisedHyperparameters", +] class Error(BaseModel): @@ -24,15 +33,96 @@ class Error(BaseModel): class Hyperparameters(BaseModel): - n_epochs: Union[Literal["auto"], int] + batch_size: Union[Literal["auto"], int, None] = None + """Number of examples in each batch. + + A larger batch size means that model parameters are updated less frequently, but + with lower variance. 
+ """ + + learning_rate_multiplier: Union[Literal["auto"], float, None] = None + """Scaling factor for the learning rate. + + A smaller learning rate may be useful to avoid overfitting. + """ + + n_epochs: Union[Literal["auto"], int, None] = None + """The number of epochs to train the model for. + + An epoch refers to one full cycle through the training dataset. + """ + + +class MethodDpoHyperparameters(BaseModel): + batch_size: Union[Literal["auto"], int, None] = None + """Number of examples in each batch. + + A larger batch size means that model parameters are updated less frequently, but + with lower variance. + """ + + beta: Union[Literal["auto"], float, None] = None + """The beta value for the DPO method. + + A higher beta value will increase the weight of the penalty between the policy + and reference model. + """ + + learning_rate_multiplier: Union[Literal["auto"], float, None] = None + """Scaling factor for the learning rate. + + A smaller learning rate may be useful to avoid overfitting. + """ + + n_epochs: Union[Literal["auto"], int, None] = None """The number of epochs to train the model for. - An epoch refers to one full cycle through the training dataset. "auto" decides - the optimal number of epochs based on the size of the dataset. If setting the - number manually, we support any number between 1 and 50 epochs. + An epoch refers to one full cycle through the training dataset. """ +class MethodDpo(BaseModel): + hyperparameters: Optional[MethodDpoHyperparameters] = None + """The hyperparameters used for the fine-tuning job.""" + + +class MethodSupervisedHyperparameters(BaseModel): + batch_size: Union[Literal["auto"], int, None] = None + """Number of examples in each batch. + + A larger batch size means that model parameters are updated less frequently, but + with lower variance. + """ + + learning_rate_multiplier: Union[Literal["auto"], float, None] = None + """Scaling factor for the learning rate. + + A smaller learning rate may be useful to avoid overfitting. + """ + + n_epochs: Union[Literal["auto"], int, None] = None + """The number of epochs to train the model for. + + An epoch refers to one full cycle through the training dataset. + """ + + +class MethodSupervised(BaseModel): + hyperparameters: Optional[MethodSupervisedHyperparameters] = None + """The hyperparameters used for the fine-tuning job.""" + + +class Method(BaseModel): + dpo: Optional[MethodDpo] = None + """Configuration for the DPO fine-tuning method.""" + + supervised: Optional[MethodSupervised] = None + """Configuration for the supervised fine-tuning method.""" + + type: Optional[Literal["supervised", "dpo"]] = None + """The type of method. Is either `supervised` or `dpo`.""" + + class FineTuningJob(BaseModel): id: str """The object identifier, which can be referenced in the API endpoints.""" @@ -61,8 +151,7 @@ class FineTuningJob(BaseModel): hyperparameters: Hyperparameters """The hyperparameters used for the fine-tuning job. - See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) - for more details. + This value will only be returned when running `supervised` jobs. 
""" model: str @@ -118,3 +207,6 @@ class FineTuningJob(BaseModel): integrations: Optional[List[FineTuningJobWandbIntegrationObject]] = None """A list of integrations to enable for this fine-tuning job.""" + + method: Optional[Method] = None + """The method used for fine-tuning.""" diff --git a/src/openai/types/fine_tuning/fine_tuning_job_event.py b/src/openai/types/fine_tuning/fine_tuning_job_event.py index 2d204bb980..1d728bd765 100644 --- a/src/openai/types/fine_tuning/fine_tuning_job_event.py +++ b/src/openai/types/fine_tuning/fine_tuning_job_event.py @@ -1,5 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +import builtins +from typing import Optional from typing_extensions import Literal from ..._models import BaseModel @@ -9,11 +11,22 @@ class FineTuningJobEvent(BaseModel): id: str + """The object identifier.""" created_at: int + """The Unix timestamp (in seconds) for when the fine-tuning job was created.""" level: Literal["info", "warn", "error"] + """The log level of the event.""" message: str + """The message of the event.""" object: Literal["fine_tuning.job.event"] + """The object type, which is always "fine_tuning.job.event".""" + + data: Optional[builtins.object] = None + """The data associated with the event.""" + + type: Optional[Literal["message", "metrics"]] = None + """The type of event.""" diff --git a/src/openai/types/fine_tuning/job_create_params.py b/src/openai/types/fine_tuning/job_create_params.py index 8814229b2e..09c3f8571c 100644 --- a/src/openai/types/fine_tuning/job_create_params.py +++ b/src/openai/types/fine_tuning/job_create_params.py @@ -5,7 +5,17 @@ from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict -__all__ = ["JobCreateParams", "Hyperparameters", "Integration", "IntegrationWandb"] +__all__ = [ + "JobCreateParams", + "Hyperparameters", + "Integration", + "IntegrationWandb", + "Method", + "MethodDpo", + "MethodDpoHyperparameters", + "MethodSupervised", + "MethodSupervisedHyperparameters", +] class JobCreateParams(TypedDict, total=False): @@ -26,8 +36,10 @@ class JobCreateParams(TypedDict, total=False): your file with the purpose `fine-tune`. The contents of the file should differ depending on if the model uses the - [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + format, or if the fine-tuning method uses the + [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) format. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) @@ -35,11 +47,17 @@ class JobCreateParams(TypedDict, total=False): """ hyperparameters: Hyperparameters - """The hyperparameters used for the fine-tuning job.""" + """ + The hyperparameters used for the fine-tuning job. This value is now deprecated + in favor of `method`, and should be passed in under the `method` parameter. + """ integrations: Optional[Iterable[Integration]] """A list of integrations to enable for your fine-tuning job.""" + method: Method + """The method used for fine-tuning.""" + seed: Optional[int] """The seed controls the reproducibility of the job. 
@@ -134,3 +152,73 @@ class Integration(TypedDict, total=False): can set an explicit display name for your run, add tags to your run, and set a default entity (team, username, etc) to be associated with your run. """ + + +class MethodDpoHyperparameters(TypedDict, total=False): + batch_size: Union[Literal["auto"], int] + """Number of examples in each batch. + + A larger batch size means that model parameters are updated less frequently, but + with lower variance. + """ + + beta: Union[Literal["auto"], float] + """The beta value for the DPO method. + + A higher beta value will increase the weight of the penalty between the policy + and reference model. + """ + + learning_rate_multiplier: Union[Literal["auto"], float] + """Scaling factor for the learning rate. + + A smaller learning rate may be useful to avoid overfitting. + """ + + n_epochs: Union[Literal["auto"], int] + """The number of epochs to train the model for. + + An epoch refers to one full cycle through the training dataset. + """ + + +class MethodDpo(TypedDict, total=False): + hyperparameters: MethodDpoHyperparameters + """The hyperparameters used for the fine-tuning job.""" + + +class MethodSupervisedHyperparameters(TypedDict, total=False): + batch_size: Union[Literal["auto"], int] + """Number of examples in each batch. + + A larger batch size means that model parameters are updated less frequently, but + with lower variance. + """ + + learning_rate_multiplier: Union[Literal["auto"], float] + """Scaling factor for the learning rate. + + A smaller learning rate may be useful to avoid overfitting. + """ + + n_epochs: Union[Literal["auto"], int] + """The number of epochs to train the model for. + + An epoch refers to one full cycle through the training dataset. + """ + + +class MethodSupervised(TypedDict, total=False): + hyperparameters: MethodSupervisedHyperparameters + """The hyperparameters used for the fine-tuning job.""" + + +class Method(TypedDict, total=False): + dpo: MethodDpo + """Configuration for the DPO fine-tuning method.""" + + supervised: MethodSupervised + """Configuration for the supervised fine-tuning method.""" + + type: Literal["supervised", "dpo"] + """The type of method. Is either `supervised` or `dpo`.""" diff --git a/tests/api_resources/beta/realtime/__init__.py b/tests/api_resources/beta/realtime/__init__.py new file mode 100644 index 0000000000..fd8019a9a1 --- /dev/null +++ b/tests/api_resources/beta/realtime/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/beta/realtime/test_sessions.py b/tests/api_resources/beta/realtime/test_sessions.py new file mode 100644 index 0000000000..65bfa27572 --- /dev/null +++ b/tests/api_resources/beta/realtime/test_sessions.py @@ -0,0 +1,146 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from openai import OpenAI, AsyncOpenAI +from tests.utils import assert_matches_type +from openai.types.beta.realtime import SessionCreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010/") + + +class TestSessions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + def test_method_create(self, client: OpenAI) -> None: + session = client.beta.realtime.sessions.create( + model="gpt-4o-realtime-preview", + ) + assert_matches_type(SessionCreateResponse, session, path=["response"]) + + @parametrize + def test_method_create_with_all_params(self, client: OpenAI) -> None: + session = client.beta.realtime.sessions.create( + model="gpt-4o-realtime-preview", + input_audio_format="pcm16", + input_audio_transcription={"model": "model"}, + instructions="instructions", + max_response_output_tokens=0, + modalities=["text"], + output_audio_format="pcm16", + temperature=0, + tool_choice="tool_choice", + tools=[ + { + "description": "description", + "name": "name", + "parameters": {}, + "type": "function", + } + ], + turn_detection={ + "create_response": True, + "prefix_padding_ms": 0, + "silence_duration_ms": 0, + "threshold": 0, + "type": "type", + }, + voice="alloy", + ) + assert_matches_type(SessionCreateResponse, session, path=["response"]) + + @parametrize + def test_raw_response_create(self, client: OpenAI) -> None: + response = client.beta.realtime.sessions.with_raw_response.create( + model="gpt-4o-realtime-preview", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + session = response.parse() + assert_matches_type(SessionCreateResponse, session, path=["response"]) + + @parametrize + def test_streaming_response_create(self, client: OpenAI) -> None: + with client.beta.realtime.sessions.with_streaming_response.create( + model="gpt-4o-realtime-preview", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + session = response.parse() + assert_matches_type(SessionCreateResponse, session, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncSessions: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + async def test_method_create(self, async_client: AsyncOpenAI) -> None: + session = await async_client.beta.realtime.sessions.create( + model="gpt-4o-realtime-preview", + ) + assert_matches_type(SessionCreateResponse, session, path=["response"]) + + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: + session = await async_client.beta.realtime.sessions.create( + model="gpt-4o-realtime-preview", + input_audio_format="pcm16", + input_audio_transcription={"model": "model"}, + instructions="instructions", + max_response_output_tokens=0, + modalities=["text"], + output_audio_format="pcm16", + temperature=0, + tool_choice="tool_choice", + tools=[ + { + "description": "description", + "name": "name", + "parameters": {}, + "type": "function", + } + ], + turn_detection={ + "create_response": True, + "prefix_padding_ms": 0, + "silence_duration_ms": 0, + "threshold": 0, + "type": "type", + }, + voice="alloy", + ) + assert_matches_type(SessionCreateResponse, session, path=["response"]) + + @parametrize + async 
def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: + response = await async_client.beta.realtime.sessions.with_raw_response.create( + model="gpt-4o-realtime-preview", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + session = response.parse() + assert_matches_type(SessionCreateResponse, session, path=["response"]) + + @parametrize + async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: + async with async_client.beta.realtime.sessions.with_streaming_response.create( + model="gpt-4o-realtime-preview", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + session = await response.parse() + assert_matches_type(SessionCreateResponse, session, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index d2e786cfe0..523fcc6ed9 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -25,7 +25,7 @@ def test_method_create_overload_1(self, client: OpenAI) -> None: messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -38,7 +38,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: messages=[ { "content": "string", - "role": "system", + "role": "developer", "name": "name", } ], @@ -69,6 +69,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "type": "content", }, presence_penalty=-2, + reasoning_effort="low", response_format={"type": "text"}, seed=-9007199254740991, service_tier="auto", @@ -101,7 +102,7 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None: messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -118,7 +119,7 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None: messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -137,7 +138,7 @@ def test_method_create_overload_2(self, client: OpenAI) -> None: messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -151,7 +152,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: messages=[ { "content": "string", - "role": "system", + "role": "developer", "name": "name", } ], @@ -183,6 +184,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "type": "content", }, presence_penalty=-2, + reasoning_effort="low", response_format={"type": "text"}, seed=-9007199254740991, service_tier="auto", @@ -214,7 +216,7 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None: messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -231,7 +233,7 @@ def test_streaming_response_create_overload_2(self, client: OpenAI) -> None: messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -255,7 +257,7 @@ async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -268,7 +270,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn messages=[ { "content": "string", - "role": "system", + "role": "developer", 
"name": "name", } ], @@ -299,6 +301,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "type": "content", }, presence_penalty=-2, + reasoning_effort="low", response_format={"type": "text"}, seed=-9007199254740991, service_tier="auto", @@ -331,7 +334,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) - messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -348,7 +351,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpe messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -367,7 +370,7 @@ async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -381,7 +384,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn messages=[ { "content": "string", - "role": "system", + "role": "developer", "name": "name", } ], @@ -413,6 +416,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "type": "content", }, presence_penalty=-2, + reasoning_effort="low", response_format={"type": "text"}, seed=-9007199254740991, service_tier="auto", @@ -444,7 +448,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) - messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -461,7 +465,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncOpe messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", diff --git a/tests/api_resources/fine_tuning/test_jobs.py b/tests/api_resources/fine_tuning/test_jobs.py index ad218bcb36..050edba367 100644 --- a/tests/api_resources/fine_tuning/test_jobs.py +++ b/tests/api_resources/fine_tuning/test_jobs.py @@ -50,6 +50,24 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: }, } ], + method={ + "dpo": { + "hyperparameters": { + "batch_size": "auto", + "beta": "auto", + "learning_rate_multiplier": "auto", + "n_epochs": "auto", + } + }, + "supervised": { + "hyperparameters": { + "batch_size": "auto", + "learning_rate_multiplier": "auto", + "n_epochs": "auto", + } + }, + "type": "supervised", + }, seed=42, suffix="x", validation_file="file-abc123", @@ -271,6 +289,24 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> }, } ], + method={ + "dpo": { + "hyperparameters": { + "batch_size": "auto", + "beta": "auto", + "learning_rate_multiplier": "auto", + "n_epochs": "auto", + } + }, + "supervised": { + "hyperparameters": { + "batch_size": "auto", + "learning_rate_multiplier": "auto", + "n_epochs": "auto", + } + }, + "type": "supervised", + }, seed=42, suffix="x", validation_file="file-abc123", diff --git a/tests/test_client.py b/tests/test_client.py index 7751e7d463..e0d23403b1 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -795,7 +795,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -827,7 +827,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -859,7 +859,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: messages=[ { "content": "string", - "role": "system", + 
"role": "developer", } ], model="gpt-4o", @@ -891,7 +891,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -1663,7 +1663,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -1696,7 +1696,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -1729,7 +1729,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", @@ -1762,7 +1762,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: messages=[ { "content": "string", - "role": "system", + "role": "developer", } ], model="gpt-4o", From 8141095bfa8713700efdbc6242214b1b19f55d5c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 18:05:17 +0000 Subject: [PATCH 133/192] feat: add Realtime API support (#1958) More information on the Realtime API can be found here: https://platform.openai.com/docs/guides/realtime --- .stats.yml | 2 +- api.md | 51 ++ pyproject.toml | 3 +- requirements-dev.lock | 2 + requirements.lock | 2 + src/openai/_client.py | 26 + .../resources/beta/realtime/realtime.py | 852 ++++++++++++++++++ src/openai/types/__init__.py | 1 + src/openai/types/beta/realtime/__init__.py | 74 ++ .../realtime/conversation_created_event.py | 27 + .../types/beta/realtime/conversation_item.py | 61 ++ .../realtime/conversation_item_content.py | 28 + .../conversation_item_content_param.py | 27 + .../conversation_item_create_event.py | 28 + .../conversation_item_create_event_param.py | 28 + .../conversation_item_created_event.py | 25 + .../conversation_item_delete_event.py | 19 + .../conversation_item_delete_event_param.py | 18 + .../conversation_item_deleted_event.py | 18 + ...put_audio_transcription_completed_event.py | 26 + ..._input_audio_transcription_failed_event.py | 39 + .../beta/realtime/conversation_item_param.py | 62 ++ .../conversation_item_truncate_event.py | 32 + .../conversation_item_truncate_event_param.py | 31 + .../conversation_item_truncated_event.py | 24 + src/openai/types/beta/realtime/error_event.py | 36 + .../input_audio_buffer_append_event.py | 23 + .../input_audio_buffer_append_event_param.py | 22 + .../input_audio_buffer_clear_event.py | 16 + .../input_audio_buffer_clear_event_param.py | 15 + .../input_audio_buffer_cleared_event.py | 15 + .../input_audio_buffer_commit_event.py | 16 + .../input_audio_buffer_commit_event_param.py | 15 + .../input_audio_buffer_committed_event.py | 21 + ...input_audio_buffer_speech_started_event.py | 26 + ...input_audio_buffer_speech_stopped_event.py | 25 + .../realtime/rate_limits_updated_event.py | 33 + .../beta/realtime/realtime_client_event.py | 32 + .../realtime/realtime_client_event_param.py | 30 + .../beta/realtime/realtime_connect_params.py | 11 + .../types/beta/realtime/realtime_response.py | 42 + .../beta/realtime/realtime_response_status.py | 39 + .../beta/realtime/realtime_response_usage.py | 52 ++ .../beta/realtime/realtime_server_event.py | 72 ++ .../realtime/response_audio_delta_event.py | 30 + .../realtime/response_audio_done_event.py | 27 + .../response_audio_transcript_delta_event.py | 30 + .../response_audio_transcript_done_event.py | 30 + 
.../beta/realtime/response_cancel_event.py | 22 + .../realtime/response_cancel_event_param.py | 21 + .../response_content_part_added_event.py | 45 + .../response_content_part_done_event.py | 45 + .../beta/realtime/response_create_event.py | 115 +++ .../realtime/response_create_event_param.py | 116 +++ .../beta/realtime/response_created_event.py | 19 + .../beta/realtime/response_done_event.py | 19 + ...nse_function_call_arguments_delta_event.py | 30 + ...onse_function_call_arguments_done_event.py | 30 + .../response_output_item_added_event.py | 25 + .../response_output_item_done_event.py | 25 + .../realtime/response_text_delta_event.py | 30 + .../beta/realtime/response_text_done_event.py | 30 + src/openai/types/beta/realtime/session.py | 148 +++ .../beta/realtime/session_created_event.py | 19 + .../beta/realtime/session_update_event.py | 158 ++++ .../realtime/session_update_event_param.py | 166 ++++ .../beta/realtime/session_updated_event.py | 19 + .../types/websocket_connection_options.py | 36 + tests/api_resources/beta/test_realtime.py | 17 + 69 files changed, 3297 insertions(+), 2 deletions(-) create mode 100644 src/openai/types/beta/realtime/conversation_created_event.py create mode 100644 src/openai/types/beta/realtime/conversation_item.py create mode 100644 src/openai/types/beta/realtime/conversation_item_content.py create mode 100644 src/openai/types/beta/realtime/conversation_item_content_param.py create mode 100644 src/openai/types/beta/realtime/conversation_item_create_event.py create mode 100644 src/openai/types/beta/realtime/conversation_item_create_event_param.py create mode 100644 src/openai/types/beta/realtime/conversation_item_created_event.py create mode 100644 src/openai/types/beta/realtime/conversation_item_delete_event.py create mode 100644 src/openai/types/beta/realtime/conversation_item_delete_event_param.py create mode 100644 src/openai/types/beta/realtime/conversation_item_deleted_event.py create mode 100644 src/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py create mode 100644 src/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py create mode 100644 src/openai/types/beta/realtime/conversation_item_param.py create mode 100644 src/openai/types/beta/realtime/conversation_item_truncate_event.py create mode 100644 src/openai/types/beta/realtime/conversation_item_truncate_event_param.py create mode 100644 src/openai/types/beta/realtime/conversation_item_truncated_event.py create mode 100644 src/openai/types/beta/realtime/error_event.py create mode 100644 src/openai/types/beta/realtime/input_audio_buffer_append_event.py create mode 100644 src/openai/types/beta/realtime/input_audio_buffer_append_event_param.py create mode 100644 src/openai/types/beta/realtime/input_audio_buffer_clear_event.py create mode 100644 src/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py create mode 100644 src/openai/types/beta/realtime/input_audio_buffer_cleared_event.py create mode 100644 src/openai/types/beta/realtime/input_audio_buffer_commit_event.py create mode 100644 src/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py create mode 100644 src/openai/types/beta/realtime/input_audio_buffer_committed_event.py create mode 100644 src/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py create mode 100644 src/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py create mode 100644 src/openai/types/beta/realtime/rate_limits_updated_event.py create mode 
100644 src/openai/types/beta/realtime/realtime_client_event.py create mode 100644 src/openai/types/beta/realtime/realtime_client_event_param.py create mode 100644 src/openai/types/beta/realtime/realtime_connect_params.py create mode 100644 src/openai/types/beta/realtime/realtime_response.py create mode 100644 src/openai/types/beta/realtime/realtime_response_status.py create mode 100644 src/openai/types/beta/realtime/realtime_response_usage.py create mode 100644 src/openai/types/beta/realtime/realtime_server_event.py create mode 100644 src/openai/types/beta/realtime/response_audio_delta_event.py create mode 100644 src/openai/types/beta/realtime/response_audio_done_event.py create mode 100644 src/openai/types/beta/realtime/response_audio_transcript_delta_event.py create mode 100644 src/openai/types/beta/realtime/response_audio_transcript_done_event.py create mode 100644 src/openai/types/beta/realtime/response_cancel_event.py create mode 100644 src/openai/types/beta/realtime/response_cancel_event_param.py create mode 100644 src/openai/types/beta/realtime/response_content_part_added_event.py create mode 100644 src/openai/types/beta/realtime/response_content_part_done_event.py create mode 100644 src/openai/types/beta/realtime/response_create_event.py create mode 100644 src/openai/types/beta/realtime/response_create_event_param.py create mode 100644 src/openai/types/beta/realtime/response_created_event.py create mode 100644 src/openai/types/beta/realtime/response_done_event.py create mode 100644 src/openai/types/beta/realtime/response_function_call_arguments_delta_event.py create mode 100644 src/openai/types/beta/realtime/response_function_call_arguments_done_event.py create mode 100644 src/openai/types/beta/realtime/response_output_item_added_event.py create mode 100644 src/openai/types/beta/realtime/response_output_item_done_event.py create mode 100644 src/openai/types/beta/realtime/response_text_delta_event.py create mode 100644 src/openai/types/beta/realtime/response_text_done_event.py create mode 100644 src/openai/types/beta/realtime/session.py create mode 100644 src/openai/types/beta/realtime/session_created_event.py create mode 100644 src/openai/types/beta/realtime/session_update_event.py create mode 100644 src/openai/types/beta/realtime/session_update_event_param.py create mode 100644 src/openai/types/beta/realtime/session_updated_event.py create mode 100644 src/openai/types/websocket_connection_options.py create mode 100644 tests/api_resources/beta/test_realtime.py diff --git a/.stats.yml b/.stats.yml index e3a0040a5a..12219ccaa1 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-779ea2754025daf5e18eb8ceb203ec321692636bc3a999338556a479178efa6c.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0d64ca9e45f51b4279f87b205eeb3a3576df98407698ce053f2e2302c1c08df1.yml diff --git a/api.md b/api.md index f51089745d..cf3b01cf90 100644 --- a/api.md +++ b/api.md @@ -238,6 +238,57 @@ Methods: ## Realtime +Types: + +```python +from openai.types.beta.realtime import ( + ConversationCreatedEvent, + ConversationItem, + ConversationItemContent, + ConversationItemCreateEvent, + ConversationItemCreatedEvent, + ConversationItemDeleteEvent, + ConversationItemDeletedEvent, + ConversationItemInputAudioTranscriptionCompletedEvent, + ConversationItemInputAudioTranscriptionFailedEvent, + ConversationItemTruncateEvent, + ConversationItemTruncatedEvent, + 
ErrorEvent, + InputAudioBufferAppendEvent, + InputAudioBufferClearEvent, + InputAudioBufferClearedEvent, + InputAudioBufferCommitEvent, + InputAudioBufferCommittedEvent, + InputAudioBufferSpeechStartedEvent, + InputAudioBufferSpeechStoppedEvent, + RateLimitsUpdatedEvent, + RealtimeClientEvent, + RealtimeResponse, + RealtimeResponseStatus, + RealtimeResponseUsage, + RealtimeServerEvent, + ResponseAudioDeltaEvent, + ResponseAudioDoneEvent, + ResponseAudioTranscriptDeltaEvent, + ResponseAudioTranscriptDoneEvent, + ResponseCancelEvent, + ResponseContentPartAddedEvent, + ResponseContentPartDoneEvent, + ResponseCreateEvent, + ResponseCreatedEvent, + ResponseDoneEvent, + ResponseFunctionCallArgumentsDeltaEvent, + ResponseFunctionCallArgumentsDoneEvent, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, + ResponseTextDeltaEvent, + ResponseTextDoneEvent, + SessionCreatedEvent, + SessionUpdateEvent, + SessionUpdatedEvent, +) +``` + ### Sessions Types: diff --git a/pyproject.toml b/pyproject.toml index 95c9bb0246..c5c3799356 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,7 +37,8 @@ classifiers = [ Homepage = "https://github.com/openai/openai-python" Repository = "https://github.com/openai/openai-python" - +[project.optional-dependencies] +realtime = ["websockets >= 13, < 15"] [tool.rye] managed = true diff --git a/requirements-dev.lock b/requirements-dev.lock index e187358330..45cd4c23ef 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -100,5 +100,7 @@ typing-extensions==4.12.2 # via pyright virtualenv==20.24.5 # via nox +websockets==14.1 + # via openai zipp==3.17.0 # via importlib-metadata diff --git a/requirements.lock b/requirements.lock index c4d8923a70..0eea0124ed 100644 --- a/requirements.lock +++ b/requirements.lock @@ -43,3 +43,5 @@ typing-extensions==4.12.2 # via openai # via pydantic # via pydantic-core +websockets==14.1 + # via openai diff --git a/src/openai/_client.py b/src/openai/_client.py index 5419e88f06..c784694f20 100644 --- a/src/openai/_client.py +++ b/src/openai/_client.py @@ -63,6 +63,14 @@ class OpenAI(SyncAPIClient): organization: str | None project: str | None + websocket_base_url: str | httpx.URL | None + """Base URL for WebSocket connections. + + If not specified, the default base URL will be used, with 'wss://' replacing the + 'http://' or 'https://' scheme. 
For example: 'http://example.com/' becomes + 'wss://example.com' + """ + def __init__( self, *, @@ -70,6 +78,7 @@ def __init__( organization: str | None = None, project: str | None = None, base_url: str | httpx.URL | None = None, + websocket_base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, @@ -111,6 +120,8 @@ def __init__( project = os.environ.get("OPENAI_PROJECT_ID") self.project = project + self.websocket_base_url = websocket_base_url + if base_url is None: base_url = os.environ.get("OPENAI_BASE_URL") if base_url is None: @@ -172,6 +183,7 @@ def copy( api_key: str | None = None, organization: str | None = None, project: str | None = None, + websocket_base_url: str | httpx.URL | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, http_client: httpx.Client | None = None, @@ -208,6 +220,7 @@ def copy( api_key=api_key or self.api_key, organization=organization or self.organization, project=project or self.project, + websocket_base_url=websocket_base_url or self.websocket_base_url, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, http_client=http_client, @@ -277,6 +290,14 @@ class AsyncOpenAI(AsyncAPIClient): organization: str | None project: str | None + websocket_base_url: str | httpx.URL | None + """Base URL for WebSocket connections. + + If not specified, the default base URL will be used, with 'wss://' replacing the + 'http://' or 'https://' scheme. For example: 'http://example.com/' becomes + 'wss://example.com' + """ + def __init__( self, *, @@ -284,6 +305,7 @@ def __init__( organization: str | None = None, project: str | None = None, base_url: str | httpx.URL | None = None, + websocket_base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, @@ -325,6 +347,8 @@ def __init__( project = os.environ.get("OPENAI_PROJECT_ID") self.project = project + self.websocket_base_url = websocket_base_url + if base_url is None: base_url = os.environ.get("OPENAI_BASE_URL") if base_url is None: @@ -386,6 +410,7 @@ def copy( api_key: str | None = None, organization: str | None = None, project: str | None = None, + websocket_base_url: str | httpx.URL | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, http_client: httpx.AsyncClient | None = None, @@ -422,6 +447,7 @@ def copy( api_key=api_key or self.api_key, organization=organization or self.organization, project=project or self.project, + websocket_base_url=websocket_base_url or self.websocket_base_url, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, http_client=http_client, diff --git a/src/openai/resources/beta/realtime/realtime.py b/src/openai/resources/beta/realtime/realtime.py index e57e0be503..c79fd46217 100644 --- a/src/openai/resources/beta/realtime/realtime.py +++ b/src/openai/resources/beta/realtime/realtime.py @@ -2,6 +2,15 @@ from __future__ import annotations +import json +import logging +from types import TracebackType +from typing import TYPE_CHECKING, Any, Iterator, cast +from typing_extensions import AsyncIterator + +import httpx +from pydantic import BaseModel + from .sessions import ( Sessions, AsyncSessions, @@ -10,11 
+19,34 @@ SessionsWithStreamingResponse, AsyncSessionsWithStreamingResponse, ) +from ...._types import NOT_GIVEN, Query, Headers, NotGiven +from ...._utils import ( + maybe_transform, + strip_not_given, + async_maybe_transform, +) from ...._compat import cached_property +from ...._models import construct_type_unchecked from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._exceptions import OpenAIError +from ...._base_client import _merge_mappings +from ....types.beta.realtime import session_update_event_param, response_create_event_param +from ....types.websocket_connection_options import WebsocketConnectionOptions +from ....types.beta.realtime.realtime_client_event import RealtimeClientEvent +from ....types.beta.realtime.realtime_server_event import RealtimeServerEvent +from ....types.beta.realtime.conversation_item_param import ConversationItemParam +from ....types.beta.realtime.realtime_client_event_param import RealtimeClientEventParam + +if TYPE_CHECKING: + from websockets.sync.client import ClientConnection as WebsocketConnection + from websockets.asyncio.client import ClientConnection as AsyncWebsocketConnection + + from ...._client import OpenAI, AsyncOpenAI __all__ = ["Realtime", "AsyncRealtime"] +log: logging.Logger = logging.getLogger(__name__) + class Realtime(SyncAPIResource): @cached_property @@ -40,6 +72,33 @@ def with_streaming_response(self) -> RealtimeWithStreamingResponse: """ return RealtimeWithStreamingResponse(self) + def connect( + self, + *, + model: str, + extra_query: Query = {}, + extra_headers: Headers = {}, + websocket_connection_options: WebsocketConnectionOptions = {}, + ) -> RealtimeConnectionManager: + """ + The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling. + + Some notable benefits of the API include: + + - Native speech-to-speech: Skipping an intermediate text format means low latency and nuanced output. + - Natural, steerable voices: The models have natural inflection and can laugh, whisper, and adhere to tone direction. + - Simultaneous multimodal output: Text is useful for moderation; faster-than-realtime audio ensures stable playback. + + The Realtime API is a stateful, event-based API that communicates over a WebSocket. + """ + return RealtimeConnectionManager( + client=self._client, + extra_query=extra_query, + extra_headers=extra_headers, + websocket_connection_options=websocket_connection_options, + model=model, + ) + class AsyncRealtime(AsyncAPIResource): @cached_property @@ -65,6 +124,33 @@ def with_streaming_response(self) -> AsyncRealtimeWithStreamingResponse: """ return AsyncRealtimeWithStreamingResponse(self) + def connect( + self, + *, + model: str, + extra_query: Query = {}, + extra_headers: Headers = {}, + websocket_connection_options: WebsocketConnectionOptions = {}, + ) -> AsyncRealtimeConnectionManager: + """ + The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling. + + Some notable benefits of the API include: + + - Native speech-to-speech: Skipping an intermediate text format means low latency and nuanced output. + - Natural, steerable voices: The models have natural inflection and can laugh, whisper, and adhere to tone direction. + - Simultaneous multimodal output: Text is useful for moderation; faster-than-realtime audio ensures stable playback. 
+ + The Realtime API is a stateful, event-based API that communicates over a WebSocket. + """ + return AsyncRealtimeConnectionManager( + client=self._client, + extra_query=extra_query, + extra_headers=extra_headers, + websocket_connection_options=websocket_connection_options, + model=model, + ) + class RealtimeWithRawResponse: def __init__(self, realtime: Realtime) -> None: @@ -100,3 +186,769 @@ def __init__(self, realtime: AsyncRealtime) -> None: @cached_property def sessions(self) -> AsyncSessionsWithStreamingResponse: return AsyncSessionsWithStreamingResponse(self._realtime.sessions) + + +class AsyncRealtimeConnection: + """Represents a live websocket connection to the Realtime API""" + + session: AsyncRealtimeSessionResource + response: AsyncRealtimeResponseResource + conversation: AsyncRealtimeConversationResource + input_audio_buffer: AsyncRealtimeInputAudioBufferResource + + _connection: AsyncWebsocketConnection + + def __init__(self, connection: AsyncWebsocketConnection) -> None: + self._connection = connection + + self.session = AsyncRealtimeSessionResource(self) + self.response = AsyncRealtimeResponseResource(self) + self.conversation = AsyncRealtimeConversationResource(self) + self.input_audio_buffer = AsyncRealtimeInputAudioBufferResource(self) + + async def __aiter__(self) -> AsyncIterator[RealtimeServerEvent]: + """ + An infinite-iterator that will continue to yield events until + the connection is closed. + """ + from websockets.exceptions import ConnectionClosedOK + + try: + while True: + yield await self.recv() + except ConnectionClosedOK: + return + + async def recv(self) -> RealtimeServerEvent: + """ + Receive the next message from the connection and parses it into a `RealtimeServerEvent` object. + + Canceling this method is safe. There's no risk of losing data. + """ + return self.parse_event(await self.recv_bytes()) + + async def recv_bytes(self) -> bytes: + """Receive the next message from the connection as raw bytes. + + Canceling this method is safe. There's no risk of losing data. + + If you want to parse the message into a `RealtimeServerEvent` object like `.recv()` does, + then you can call `.parse_event(data)`. + """ + message = await self._connection.recv(decode=False) + log.debug(f"Received websocket message: %s", message) + if not isinstance(message, bytes): + # passing `decode=False` should always result in us getting `bytes` back + raise TypeError(f"Expected `.recv(decode=False)` to return `bytes` but got {type(message)}") + + return message + + async def send(self, event: RealtimeClientEvent | RealtimeClientEventParam) -> None: + data = ( + event.to_json(use_api_names=True, exclude_defaults=True, exclude_unset=True) + if isinstance(event, BaseModel) + else json.dumps(await async_maybe_transform(event, RealtimeClientEventParam)) + ) + await self._connection.send(data) + + async def close(self, *, code: int = 1000, reason: str = "") -> None: + await self._connection.close(code=code, reason=reason) + + def parse_event(self, data: str | bytes) -> RealtimeServerEvent: + """ + Converts a raw `str` or `bytes` message into a `RealtimeServerEvent` object. + + This is helpful if you're using `.recv_bytes()`. + """ + return cast( + RealtimeServerEvent, construct_type_unchecked(value=json.loads(data), type_=cast(Any, RealtimeServerEvent)) + ) + + +class AsyncRealtimeConnectionManager: + """ + Context manager over a `AsyncRealtimeConnection` that is returned by `beta.realtime.connect()` + + This context manager ensures that the connection will be closed when it exits. 
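For readers skimming this patch, here is a minimal sketch of how the async connection manager added in this commit is meant to be used end to end. The model name, the session fields, and the specific event types handled below are illustrative assumptions layered on top of the methods in this diff, not excerpts from it; the optional `openai[realtime]` dependency added elsewhere in this commit is required.

```py
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()  # API key is read from the OPENAI_API_KEY environment variable

    # Assumed model name for illustration only.
    async with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
        # Session fields are illustrative; see session_update_event_param for the full shape.
        await connection.session.update(session={"modalities": ["text"]})

        await connection.conversation.item.create(
            item={
                "type": "message",
                "role": "user",
                "content": [{"type": "input_text", "text": "Say hello!"}],
            }
        )
        await connection.response.create()

        async for event in connection:
            if event.type == "response.text.delta":
                print(event.delta, end="", flush=True)
            elif event.type == "response.done":
                print()
                break


asyncio.run(main())
```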
+ + --- + + Note that if your application doesn't work well with the context manager approach then you + can call the `.enter()` method directly to initiate a connection. + + **Warning**: You must remember to close the connection with `.close()`. + + ```py + connection = await client.beta.realtime.connect(...).enter() + # ... + await connection.close() + ``` + """ + + def __init__( + self, + *, + client: AsyncOpenAI, + model: str, + extra_query: Query, + extra_headers: Headers, + websocket_connection_options: WebsocketConnectionOptions, + ) -> None: + self.__client = client + self.__model = model + self.__connection: AsyncRealtimeConnection | None = None + self.__extra_query = extra_query + self.__extra_headers = extra_headers + self.__websocket_connection_options = websocket_connection_options + + async def __aenter__(self) -> AsyncRealtimeConnection: + """ + 👋 If your application doesn't work well with the context manager approach then you + can call this method directly to initiate a connection. + + **Warning**: You must remember to close the connection with `.close()`. + + ```py + connection = await client.beta.realtime.connect(...).enter() + # ... + await connection.close() + ``` + """ + try: + from websockets.asyncio.client import connect + except ImportError as exc: + raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc + + url = self._prepare_url().copy_with( + params={ + **self.__client.base_url.params, + "model": self.__model, + **self.__extra_query, + }, + ) + log.debug("Connecting to %s", url) + if self.__websocket_connection_options: + log.debug("Connection options: %s", self.__websocket_connection_options) + + self.__connection = AsyncRealtimeConnection( + await connect( + str(url), + user_agent_header=self.__client.user_agent, + additional_headers=_merge_mappings( + { + **self.__client.auth_headers, + "OpenAI-Beta": "realtime=v1", + }, + self.__extra_headers, + ), + **self.__websocket_connection_options, + ) + ) + + return self.__connection + + enter = __aenter__ + + def _prepare_url(http://github.com/self) -> httpx.URL: + if self.__client.websocket_base_url is not None: + base_url = httpx.URL(self.__client.websocket_base_url) + else: + base_url = self.__client._base_url.copy_with(scheme="wss") + + merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime" + return base_url.copy_with(raw_path=merge_raw_path) + + async def __aexit__( + self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None + ) -> None: + if self.__connection is not None: + await self.__connection.close() + + +class RealtimeConnection: + """Represents a live websocket connection to the Realtime API""" + + session: RealtimeSessionResource + response: RealtimeResponseResource + conversation: RealtimeConversationResource + input_audio_buffer: RealtimeInputAudioBufferResource + + _connection: WebsocketConnection + + def __init__(self, connection: WebsocketConnection) -> None: + self._connection = connection + + self.session = RealtimeSessionResource(self) + self.response = RealtimeResponseResource(self) + self.conversation = RealtimeConversationResource(self) + self.input_audio_buffer = RealtimeInputAudioBufferResource(self) + + def __iter__(self) -> Iterator[RealtimeServerEvent]: + """ + An infinite-iterator that will continue to yield events until + the connection is closed. 
+ """ + from websockets.exceptions import ConnectionClosedOK + + try: + while True: + yield self.recv() + except ConnectionClosedOK: + return + + def recv(self) -> RealtimeServerEvent: + """ + Receive the next message from the connection and parses it into a `RealtimeServerEvent` object. + + Canceling this method is safe. There's no risk of losing data. + """ + return self.parse_event(self.recv_bytes()) + + def recv_bytes(self) -> bytes: + """Receive the next message from the connection as raw bytes. + + Canceling this method is safe. There's no risk of losing data. + + If you want to parse the message into a `RealtimeServerEvent` object like `.recv()` does, + then you can call `.parse_event(data)`. + """ + message = self._connection.recv(decode=False) + log.debug(f"Received websocket message: %s", message) + if not isinstance(message, bytes): + # passing `decode=False` should always result in us getting `bytes` back + raise TypeError(f"Expected `.recv(decode=False)` to return `bytes` but got {type(message)}") + + return message + + def send(self, event: RealtimeClientEvent | RealtimeClientEventParam) -> None: + data = ( + event.to_json(use_api_names=True, exclude_defaults=True, exclude_unset=True) + if isinstance(event, BaseModel) + else json.dumps(maybe_transform(event, RealtimeClientEventParam)) + ) + self._connection.send(data) + + def close(self, *, code: int = 1000, reason: str = "") -> None: + self._connection.close(code=code, reason=reason) + + def parse_event(self, data: str | bytes) -> RealtimeServerEvent: + """ + Converts a raw `str` or `bytes` message into a `RealtimeServerEvent` object. + + This is helpful if you're using `.recv_bytes()`. + """ + return cast( + RealtimeServerEvent, construct_type_unchecked(value=json.loads(data), type_=cast(Any, RealtimeServerEvent)) + ) + + +class RealtimeConnectionManager: + """ + Context manager over a `RealtimeConnection` that is returned by `beta.realtime.connect()` + + This context manager ensures that the connection will be closed when it exits. + + --- + + Note that if your application doesn't work well with the context manager approach then you + can call the `.enter()` method directly to initiate a connection. + + **Warning**: You must remember to close the connection with `.close()`. + + ```py + connection = client.beta.realtime.connect(...).enter() + # ... + connection.close() + ``` + """ + + def __init__( + self, + *, + client: OpenAI, + model: str, + extra_query: Query, + extra_headers: Headers, + websocket_connection_options: WebsocketConnectionOptions, + ) -> None: + self.__client = client + self.__model = model + self.__connection: RealtimeConnection | None = None + self.__extra_query = extra_query + self.__extra_headers = extra_headers + self.__websocket_connection_options = websocket_connection_options + + def __enter__(self) -> RealtimeConnection: + """ + 👋 If your application doesn't work well with the context manager approach then you + can call this method directly to initiate a connection. + + **Warning**: You must remember to close the connection with `.close()`. + + ```py + connection = client.beta.realtime.connect(...).enter() + # ... 
+ connection.close() + ``` + """ + try: + from websockets.sync.client import connect + except ImportError as exc: + raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc + + url = self._prepare_url().copy_with( + params={ + **self.__client.base_url.params, + "model": self.__model, + **self.__extra_query, + }, + ) + log.debug("Connecting to %s", url) + if self.__websocket_connection_options: + log.debug("Connection options: %s", self.__websocket_connection_options) + + self.__connection = RealtimeConnection( + connect( + str(url), + user_agent_header=self.__client.user_agent, + additional_headers=_merge_mappings( + { + **self.__client.auth_headers, + "OpenAI-Beta": "realtime=v1", + }, + self.__extra_headers, + ), + **self.__websocket_connection_options, + ) + ) + + return self.__connection + + enter = __enter__ + + def _prepare_url(http://github.com/self) -> httpx.URL: + if self.__client.websocket_base_url is not None: + base_url = httpx.URL(self.__client.websocket_base_url) + else: + base_url = self.__client._base_url.copy_with(scheme="wss") + + merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime" + return base_url.copy_with(raw_path=merge_raw_path) + + def __exit__( + self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None + ) -> None: + if self.__connection is not None: + self.__connection.close() + + +class BaseRealtimeConnectionResource: + def __init__(self, connection: RealtimeConnection) -> None: + self._connection = connection + + +class RealtimeSessionResource(BaseRealtimeConnectionResource): + def update(self, *, session: session_update_event_param.Session, event_id: str | NotGiven = NOT_GIVEN) -> None: + """Send this event to update the session’s default configuration. + + The client may + send this event at any time to update the session configuration, and any + field may be updated at any time, except for "voice". The server will respond + with a `session.updated` event that shows the full effective configuration. + Only fields that are present are updated, thus the correct way to clear a + field like "instructions" is to pass an empty string. + """ + self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "session.update", "session": session, "event_id": event_id}), + ) + ) + + +class RealtimeResponseResource(BaseRealtimeConnectionResource): + def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | NotGiven = NOT_GIVEN) -> None: + """Send this event to cancel an in-progress response. + + The server will respond + with a `response.cancelled` event or an error if there is no response to + cancel. + """ + self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "response.cancel", "event_id": event_id, "response_id": response_id}), + ) + ) + + def create( + self, + *, + event_id: str | NotGiven = NOT_GIVEN, + response: response_create_event_param.Response | NotGiven = NOT_GIVEN, + ) -> None: + """ + This event instructs the server to create a Response, which means triggering + model inference. When in Server VAD mode, the server will create Responses + automatically. + + A Response will include at least one Item, and may have two, in which case + the second will be a function call. These Items will be appended to the + conversation history. 
+ + The server will respond with a `response.created` event, events for Items + and content created, and finally a `response.done` event to indicate the + Response is complete. + + The `response.create` event includes inference configuration like + `instructions`, and `temperature`. These fields will override the Session's + configuration for this Response only. + """ + self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "response.create", "event_id": event_id, "response": response}), + ) + ) + + +class RealtimeConversationResource(BaseRealtimeConnectionResource): + @cached_property + def item(self) -> RealtimeConversationItemResource: + return RealtimeConversationItemResource(self._connection) + + +class RealtimeConversationItemResource(BaseRealtimeConnectionResource): + def delete(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None: + """Send this event when you want to remove any item from the conversation + history. + + The server will respond with a `conversation.item.deleted` event, + unless the item does not exist in the conversation history, in which case the + server will respond with an error. + """ + self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "conversation.item.delete", "item_id": item_id, "event_id": event_id}), + ) + ) + + def create( + self, + *, + item: ConversationItemParam, + event_id: str | NotGiven = NOT_GIVEN, + previous_item_id: str | NotGiven = NOT_GIVEN, + ) -> None: + """ + Add a new Item to the Conversation's context, including messages, function + calls, and function call responses. This event can be used both to populate a + "history" of the conversation and to add new items mid-stream, but has the + current limitation that it cannot populate assistant audio messages. + + If successful, the server will respond with a `conversation.item.created` + event, otherwise an `error` event will be sent. + """ + self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given( + { + "type": "conversation.item.create", + "item": item, + "event_id": event_id, + "previous_item_id": previous_item_id, + } + ), + ) + ) + + def truncate( + self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | NotGiven = NOT_GIVEN + ) -> None: + """Send this event to truncate a previous assistant message’s audio. + + The server + will produce audio faster than realtime, so this event is useful when the user + interrupts to truncate audio that has already been sent to the client but not + yet played. This will synchronize the server's understanding of the audio with + the client's playback. + + Truncating audio will delete the server-side text transcript to ensure there + is not text in the context that hasn't been heard by the user. + + If successful, the server will respond with a `conversation.item.truncated` + event. + """ + self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given( + { + "type": "conversation.item.truncate", + "audio_end_ms": audio_end_ms, + "content_index": content_index, + "item_id": item_id, + "event_id": event_id, + } + ), + ) + ) + + +class RealtimeInputAudioBufferResource(BaseRealtimeConnectionResource): + def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None: + """Send this event to clear the audio bytes in the buffer. + + The server will + respond with an `input_audio_buffer.cleared` event. 
+ """ + self._connection.send( + cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id})) + ) + + def commit(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None: + """ + Send this event to commit the user input audio buffer, which will create a + new user message item in the conversation. This event will produce an error + if the input audio buffer is empty. When in Server VAD mode, the client does + not need to send this event, the server will commit the audio buffer + automatically. + + Committing the input audio buffer will trigger input audio transcription + (if enabled in session configuration), but it will not create a response + from the model. The server will respond with an `input_audio_buffer.committed` + event. + """ + self._connection.send( + cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id})) + ) + + def append(self, *, audio: str, event_id: str | NotGiven = NOT_GIVEN) -> None: + """Send this event to append audio bytes to the input audio buffer. + + The audio + buffer is temporary storage you can write to and later commit. In Server VAD + mode, the audio buffer is used to detect speech and the server will decide + when to commit. When Server VAD is disabled, you must commit the audio buffer + manually. + + The client may choose how much audio to place in each event up to a maximum + of 15 MiB, for example streaming smaller chunks from the client may allow the + VAD to be more responsive. Unlike made other client events, the server will + not send a confirmation response to this event. + """ + self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "input_audio_buffer.append", "audio": audio, "event_id": event_id}), + ) + ) + + +class BaseAsyncRealtimeConnectionResource: + def __init__(self, connection: AsyncRealtimeConnection) -> None: + self._connection = connection + + +class AsyncRealtimeSessionResource(BaseAsyncRealtimeConnectionResource): + async def update( + self, *, session: session_update_event_param.Session, event_id: str | NotGiven = NOT_GIVEN + ) -> None: + """Send this event to update the session’s default configuration. + + The client may + send this event at any time to update the session configuration, and any + field may be updated at any time, except for "voice". The server will respond + with a `session.updated` event that shows the full effective configuration. + Only fields that are present are updated, thus the correct way to clear a + field like "instructions" is to pass an empty string. + """ + await self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "session.update", "session": session, "event_id": event_id}), + ) + ) + + +class AsyncRealtimeResponseResource(BaseAsyncRealtimeConnectionResource): + async def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | NotGiven = NOT_GIVEN) -> None: + """Send this event to cancel an in-progress response. + + The server will respond + with a `response.cancelled` event or an error if there is no response to + cancel. 
+ """ + await self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "response.cancel", "event_id": event_id, "response_id": response_id}), + ) + ) + + async def create( + self, + *, + event_id: str | NotGiven = NOT_GIVEN, + response: response_create_event_param.Response | NotGiven = NOT_GIVEN, + ) -> None: + """ + This event instructs the server to create a Response, which means triggering + model inference. When in Server VAD mode, the server will create Responses + automatically. + + A Response will include at least one Item, and may have two, in which case + the second will be a function call. These Items will be appended to the + conversation history. + + The server will respond with a `response.created` event, events for Items + and content created, and finally a `response.done` event to indicate the + Response is complete. + + The `response.create` event includes inference configuration like + `instructions`, and `temperature`. These fields will override the Session's + configuration for this Response only. + """ + await self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "response.create", "event_id": event_id, "response": response}), + ) + ) + + +class AsyncRealtimeConversationResource(BaseAsyncRealtimeConnectionResource): + @cached_property + def item(self) -> AsyncRealtimeConversationItemResource: + return AsyncRealtimeConversationItemResource(self._connection) + + +class AsyncRealtimeConversationItemResource(BaseAsyncRealtimeConnectionResource): + async def delete(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None: + """Send this event when you want to remove any item from the conversation + history. + + The server will respond with a `conversation.item.deleted` event, + unless the item does not exist in the conversation history, in which case the + server will respond with an error. + """ + await self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "conversation.item.delete", "item_id": item_id, "event_id": event_id}), + ) + ) + + async def create( + self, + *, + item: ConversationItemParam, + event_id: str | NotGiven = NOT_GIVEN, + previous_item_id: str | NotGiven = NOT_GIVEN, + ) -> None: + """ + Add a new Item to the Conversation's context, including messages, function + calls, and function call responses. This event can be used both to populate a + "history" of the conversation and to add new items mid-stream, but has the + current limitation that it cannot populate assistant audio messages. + + If successful, the server will respond with a `conversation.item.created` + event, otherwise an `error` event will be sent. + """ + await self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given( + { + "type": "conversation.item.create", + "item": item, + "event_id": event_id, + "previous_item_id": previous_item_id, + } + ), + ) + ) + + async def truncate( + self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | NotGiven = NOT_GIVEN + ) -> None: + """Send this event to truncate a previous assistant message’s audio. + + The server + will produce audio faster than realtime, so this event is useful when the user + interrupts to truncate audio that has already been sent to the client but not + yet played. This will synchronize the server's understanding of the audio with + the client's playback. 
+ + Truncating audio will delete the server-side text transcript to ensure there + is not text in the context that hasn't been heard by the user. + + If successful, the server will respond with a `conversation.item.truncated` + event. + """ + await self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given( + { + "type": "conversation.item.truncate", + "audio_end_ms": audio_end_ms, + "content_index": content_index, + "item_id": item_id, + "event_id": event_id, + } + ), + ) + ) + + +class AsyncRealtimeInputAudioBufferResource(BaseAsyncRealtimeConnectionResource): + async def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None: + """Send this event to clear the audio bytes in the buffer. + + The server will + respond with an `input_audio_buffer.cleared` event. + """ + await self._connection.send( + cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id})) + ) + + async def commit(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None: + """ + Send this event to commit the user input audio buffer, which will create a + new user message item in the conversation. This event will produce an error + if the input audio buffer is empty. When in Server VAD mode, the client does + not need to send this event, the server will commit the audio buffer + automatically. + + Committing the input audio buffer will trigger input audio transcription + (if enabled in session configuration), but it will not create a response + from the model. The server will respond with an `input_audio_buffer.committed` + event. + """ + await self._connection.send( + cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id})) + ) + + async def append(self, *, audio: str, event_id: str | NotGiven = NOT_GIVEN) -> None: + """Send this event to append audio bytes to the input audio buffer. + + The audio + buffer is temporary storage you can write to and later commit. In Server VAD + mode, the audio buffer is used to detect speech and the server will decide + when to commit. When Server VAD is disabled, you must commit the audio buffer + manually. + + The client may choose how much audio to place in each event up to a maximum + of 15 MiB, for example streaming smaller chunks from the client may allow the + VAD to be more responsive. Unlike made other client events, the server will + not send a confirmation response to this event. 
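To make the input audio buffer flow described above concrete, a short hedged sketch follows. The audio source (`pcm_chunks`) and its format are assumptions, audio capture is out of scope, and base64 encoding of the raw bytes is assumed to match the base64 convention used for `input_audio` content elsewhere in this patch.

```py
import base64
from typing import AsyncIterator

from openai.resources.beta.realtime.realtime import AsyncRealtimeConnection


async def stream_user_audio(connection: AsyncRealtimeConnection, pcm_chunks: AsyncIterator[bytes]) -> None:
    # `pcm_chunks` is assumed to yield raw audio frames captured elsewhere.
    async for chunk in pcm_chunks:
        await connection.input_audio_buffer.append(
            audio=base64.b64encode(chunk).decode("ascii")
        )

    # With server VAD disabled the buffer must be committed manually; committing
    # creates a user message item but does not by itself trigger a model response.
    await connection.input_audio_buffer.commit()
    await connection.response.create()
```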
+ """ + await self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "input_audio_buffer.append", "audio": audio, "event_id": event_id}), + ) + ) diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index 7677be01b2..72950f2491 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -47,6 +47,7 @@ from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse from .moderation_create_response import ModerationCreateResponse as ModerationCreateResponse from .moderation_text_input_param import ModerationTextInputParam as ModerationTextInputParam +from .websocket_connection_options import WebsocketConnectionOptions as WebsocketConnectionOptions from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam diff --git a/src/openai/types/beta/realtime/__init__.py b/src/openai/types/beta/realtime/__init__.py index 1c5246db7a..372d4ec19d 100644 --- a/src/openai/types/beta/realtime/__init__.py +++ b/src/openai/types/beta/realtime/__init__.py @@ -2,5 +2,79 @@ from __future__ import annotations +from .session import Session as Session +from .error_event import ErrorEvent as ErrorEvent +from .conversation_item import ConversationItem as ConversationItem +from .realtime_response import RealtimeResponse as RealtimeResponse +from .response_done_event import ResponseDoneEvent as ResponseDoneEvent +from .session_update_event import SessionUpdateEvent as SessionUpdateEvent +from .realtime_client_event import RealtimeClientEvent as RealtimeClientEvent +from .realtime_server_event import RealtimeServerEvent as RealtimeServerEvent +from .response_cancel_event import ResponseCancelEvent as ResponseCancelEvent +from .response_create_event import ResponseCreateEvent as ResponseCreateEvent from .session_create_params import SessionCreateParams as SessionCreateParams +from .session_created_event import SessionCreatedEvent as SessionCreatedEvent +from .session_updated_event import SessionUpdatedEvent as SessionUpdatedEvent +from .response_created_event import ResponseCreatedEvent as ResponseCreatedEvent +from .conversation_item_param import ConversationItemParam as ConversationItemParam +from .realtime_connect_params import RealtimeConnectParams as RealtimeConnectParams +from .realtime_response_usage import RealtimeResponseUsage as RealtimeResponseUsage from .session_create_response import SessionCreateResponse as SessionCreateResponse +from .realtime_response_status import RealtimeResponseStatus as RealtimeResponseStatus +from .response_text_done_event import ResponseTextDoneEvent as ResponseTextDoneEvent +from .conversation_item_content import ConversationItemContent as ConversationItemContent +from .rate_limits_updated_event import RateLimitsUpdatedEvent as RateLimitsUpdatedEvent +from .response_audio_done_event import ResponseAudioDoneEvent as ResponseAudioDoneEvent +from .response_text_delta_event import ResponseTextDeltaEvent as ResponseTextDeltaEvent +from .conversation_created_event import ConversationCreatedEvent as ConversationCreatedEvent +from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudioDeltaEvent +from .session_update_event_param import SessionUpdateEventParam as SessionUpdateEventParam +from .realtime_client_event_param 
import RealtimeClientEventParam as RealtimeClientEventParam +from .response_cancel_event_param import ResponseCancelEventParam as ResponseCancelEventParam +from .response_create_event_param import ResponseCreateEventParam as ResponseCreateEventParam +from .conversation_item_create_event import ConversationItemCreateEvent as ConversationItemCreateEvent +from .conversation_item_delete_event import ConversationItemDeleteEvent as ConversationItemDeleteEvent +from .input_audio_buffer_clear_event import InputAudioBufferClearEvent as InputAudioBufferClearEvent +from .conversation_item_content_param import ConversationItemContentParam as ConversationItemContentParam +from .conversation_item_created_event import ConversationItemCreatedEvent as ConversationItemCreatedEvent +from .conversation_item_deleted_event import ConversationItemDeletedEvent as ConversationItemDeletedEvent +from .input_audio_buffer_append_event import InputAudioBufferAppendEvent as InputAudioBufferAppendEvent +from .input_audio_buffer_commit_event import InputAudioBufferCommitEvent as InputAudioBufferCommitEvent +from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent +from .conversation_item_truncate_event import ConversationItemTruncateEvent as ConversationItemTruncateEvent +from .input_audio_buffer_cleared_event import InputAudioBufferClearedEvent as InputAudioBufferClearedEvent +from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent +from .response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent +from .conversation_item_truncated_event import ConversationItemTruncatedEvent as ConversationItemTruncatedEvent +from .response_content_part_added_event import ResponseContentPartAddedEvent as ResponseContentPartAddedEvent +from .input_audio_buffer_committed_event import InputAudioBufferCommittedEvent as InputAudioBufferCommittedEvent +from .conversation_item_create_event_param import ConversationItemCreateEventParam as ConversationItemCreateEventParam +from .conversation_item_delete_event_param import ConversationItemDeleteEventParam as ConversationItemDeleteEventParam +from .input_audio_buffer_clear_event_param import InputAudioBufferClearEventParam as InputAudioBufferClearEventParam +from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent +from .input_audio_buffer_append_event_param import InputAudioBufferAppendEventParam as InputAudioBufferAppendEventParam +from .input_audio_buffer_commit_event_param import InputAudioBufferCommitEventParam as InputAudioBufferCommitEventParam +from .response_audio_transcript_delta_event import ( + ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, +) +from .conversation_item_truncate_event_param import ( + ConversationItemTruncateEventParam as ConversationItemTruncateEventParam, +) +from .input_audio_buffer_speech_started_event import ( + InputAudioBufferSpeechStartedEvent as InputAudioBufferSpeechStartedEvent, +) +from .input_audio_buffer_speech_stopped_event import ( + InputAudioBufferSpeechStoppedEvent as InputAudioBufferSpeechStoppedEvent, +) +from .response_function_call_arguments_done_event import ( + ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, +) +from .response_function_call_arguments_delta_event import ( + ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, +) +from 
.conversation_item_input_audio_transcription_failed_event import ( + ConversationItemInputAudioTranscriptionFailedEvent as ConversationItemInputAudioTranscriptionFailedEvent, +) +from .conversation_item_input_audio_transcription_completed_event import ( + ConversationItemInputAudioTranscriptionCompletedEvent as ConversationItemInputAudioTranscriptionCompletedEvent, +) diff --git a/src/openai/types/beta/realtime/conversation_created_event.py b/src/openai/types/beta/realtime/conversation_created_event.py new file mode 100644 index 0000000000..4ba0540867 --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_created_event.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConversationCreatedEvent", "Conversation"] + + +class Conversation(BaseModel): + id: Optional[str] = None + """The unique ID of the conversation.""" + + object: Optional[Literal["realtime.conversation"]] = None + """The object type, must be `realtime.conversation`.""" + + +class ConversationCreatedEvent(BaseModel): + conversation: Conversation + """The conversation resource.""" + + event_id: str + """The unique ID of the server event.""" + + type: Literal["conversation.created"] + """The event type, must be `conversation.created`.""" diff --git a/src/openai/types/beta/realtime/conversation_item.py b/src/openai/types/beta/realtime/conversation_item.py new file mode 100644 index 0000000000..4edf6c4d5f --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item.py @@ -0,0 +1,61 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ...._models import BaseModel +from .conversation_item_content import ConversationItemContent + +__all__ = ["ConversationItem"] + + +class ConversationItem(BaseModel): + id: Optional[str] = None + """ + The unique ID of the item, this can be generated by the client to help manage + server-side context, but is not required because the server will generate one if + not provided. + """ + + arguments: Optional[str] = None + """The arguments of the function call (for `function_call` items).""" + + call_id: Optional[str] = None + """ + The ID of the function call (for `function_call` and `function_call_output` + items). If passed on a `function_call_output` item, the server will check that a + `function_call` item with the same ID exists in the conversation history. + """ + + content: Optional[List[ConversationItemContent]] = None + """The content of the message, applicable for `message` items. + + - Message items of role `system` support only `input_text` content + - Message items of role `user` support `input_text` and `input_audio` content + - Message items of role `assistant` support `text` content. + """ + + name: Optional[str] = None + """The name of the function being called (for `function_call` items).""" + + object: Optional[Literal["realtime.item"]] = None + """Identifier for the API object being returned - always `realtime.item`.""" + + output: Optional[str] = None + """The output of the function call (for `function_call_output` items).""" + + role: Optional[Literal["user", "assistant", "system"]] = None + """ + The role of the message sender (`user`, `assistant`, `system`), only applicable + for `message` items. 
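Given the `function_call` / `function_call_output` item fields described here, a hedged sketch of returning a tool result back to the model over an open connection; the `call_id` handling and the JSON payload are hypothetical placeholders, while the field names come from the item types in this commit.

```py
from openai.resources.beta.realtime.realtime import AsyncRealtimeConnection


async def send_function_result(connection: AsyncRealtimeConnection, call_id: str) -> None:
    # `call_id` must echo the id from the model's `function_call` item; the output
    # value here is a hypothetical stringified result of the application's function.
    await connection.conversation.item.create(
        item={
            "type": "function_call_output",
            "call_id": call_id,
            "output": '{"temperature_c": 21}',
        }
    )
    # Ask the model to continue now that the function output is in the conversation.
    await connection.response.create()
```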
+ """ + + status: Optional[Literal["completed", "incomplete"]] = None + """The status of the item (`completed`, `incomplete`). + + These have no effect on the conversation, but are accepted for consistency with + the `conversation.item.created` event. + """ + + type: Optional[Literal["message", "function_call", "function_call_output"]] = None + """The type of the item (`message`, `function_call`, `function_call_output`).""" diff --git a/src/openai/types/beta/realtime/conversation_item_content.py b/src/openai/types/beta/realtime/conversation_item_content.py new file mode 100644 index 0000000000..b854aa0e0f --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item_content.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConversationItemContent"] + + +class ConversationItemContent(BaseModel): + id: Optional[str] = None + """ + ID of a previous conversation item (like a model response), used for + `item_reference` content types. + """ + + audio: Optional[str] = None + """Base64-encoded audio bytes, used for `input_audio` content type.""" + + text: Optional[str] = None + """The text content, used for `input_text` and `text` content types.""" + + transcript: Optional[str] = None + """The transcript of the audio, used for `input_audio` content type.""" + + type: Optional[Literal["input_text", "input_audio", "item_reference", "text"]] = None + """The content type (`input_text`, `input_audio`, `item_reference`, `text`).""" diff --git a/src/openai/types/beta/realtime/conversation_item_content_param.py b/src/openai/types/beta/realtime/conversation_item_content_param.py new file mode 100644 index 0000000000..b354d78971 --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item_content_param.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["ConversationItemContentParam"] + + +class ConversationItemContentParam(TypedDict, total=False): + id: str + """ + ID of a previous conversation item (like a model response), used for + `item_reference` content types. + """ + + audio: str + """Base64-encoded audio bytes, used for `input_audio` content type.""" + + text: str + """The text content, used for `input_text` and `text` content types.""" + + transcript: str + """The transcript of the audio, used for `input_audio` content type.""" + + type: Literal["input_text", "input_audio", "item_reference", "text"] + """The content type (`input_text`, `input_audio`, `item_reference`, `text`).""" diff --git a/src/openai/types/beta/realtime/conversation_item_create_event.py b/src/openai/types/beta/realtime/conversation_item_create_event.py new file mode 100644 index 0000000000..50d309675b --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item_create_event.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ConversationItemCreateEvent"] + + +class ConversationItemCreateEvent(BaseModel): + item: ConversationItem + """The item to add to the conversation.""" + + type: Literal["conversation.item.create"] + """The event type, must be `conversation.item.create`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" + + previous_item_id: Optional[str] = None + """The ID of the preceding item after which the new item will be inserted. + + If not set, the new item will be appended to the end of the conversation. If + set, it allows an item to be inserted mid-conversation. If the ID cannot be + found, an error will be returned and the item will not be added. + """ diff --git a/src/openai/types/beta/realtime/conversation_item_create_event_param.py b/src/openai/types/beta/realtime/conversation_item_create_event_param.py new file mode 100644 index 0000000000..b8c8bbc251 --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item_create_event_param.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from .conversation_item_param import ConversationItemParam + +__all__ = ["ConversationItemCreateEventParam"] + + +class ConversationItemCreateEventParam(TypedDict, total=False): + item: Required[ConversationItemParam] + """The item to add to the conversation.""" + + type: Required[Literal["conversation.item.create"]] + """The event type, must be `conversation.item.create`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" + + previous_item_id: str + """The ID of the preceding item after which the new item will be inserted. + + If not set, the new item will be appended to the end of the conversation. If + set, it allows an item to be inserted mid-conversation. If the ID cannot be + found, an error will be returned and the item will not be added. + """ diff --git a/src/openai/types/beta/realtime/conversation_item_created_event.py b/src/openai/types/beta/realtime/conversation_item_created_event.py new file mode 100644 index 0000000000..2f20388246 --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item_created_event.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ConversationItemCreatedEvent"] + + +class ConversationItemCreatedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item: ConversationItem + """The item to add to the conversation.""" + + previous_item_id: str + """ + The ID of the preceding item in the Conversation context, allows the client to + understand the order of the conversation. + """ + + type: Literal["conversation.item.created"] + """The event type, must be `conversation.item.created`.""" diff --git a/src/openai/types/beta/realtime/conversation_item_delete_event.py b/src/openai/types/beta/realtime/conversation_item_delete_event.py new file mode 100644 index 0000000000..02ca8250ce --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item_delete_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConversationItemDeleteEvent"] + + +class ConversationItemDeleteEvent(BaseModel): + item_id: str + """The ID of the item to delete.""" + + type: Literal["conversation.item.delete"] + """The event type, must be `conversation.item.delete`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/src/openai/types/beta/realtime/conversation_item_delete_event_param.py b/src/openai/types/beta/realtime/conversation_item_delete_event_param.py new file mode 100644 index 0000000000..c3f88d6627 --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item_delete_event_param.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ConversationItemDeleteEventParam"] + + +class ConversationItemDeleteEventParam(TypedDict, total=False): + item_id: Required[str] + """The ID of the item to delete.""" + + type: Required[Literal["conversation.item.delete"]] + """The event type, must be `conversation.item.delete`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/src/openai/types/beta/realtime/conversation_item_deleted_event.py b/src/openai/types/beta/realtime/conversation_item_deleted_event.py new file mode 100644 index 0000000000..a35a97817a --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item_deleted_event.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConversationItemDeletedEvent"] + + +class ConversationItemDeletedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item that was deleted.""" + + type: Literal["conversation.item.deleted"] + """The event type, must be `conversation.item.deleted`.""" diff --git a/src/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py b/src/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py new file mode 100644 index 0000000000..ded79cc0f7 --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConversationItemInputAudioTranscriptionCompletedEvent"] + + +class ConversationItemInputAudioTranscriptionCompletedEvent(BaseModel): + content_index: int + """The index of the content part containing the audio.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the user message item containing the audio.""" + + transcript: str + """The transcribed text.""" + + type: Literal["conversation.item.input_audio_transcription.completed"] + """ + The event type, must be `conversation.item.input_audio_transcription.completed`. 
+ """ diff --git a/src/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py b/src/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py new file mode 100644 index 0000000000..cecac93e64 --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConversationItemInputAudioTranscriptionFailedEvent", "Error"] + + +class Error(BaseModel): + code: Optional[str] = None + """Error code, if any.""" + + message: Optional[str] = None + """A human-readable error message.""" + + param: Optional[str] = None + """Parameter related to the error, if any.""" + + type: Optional[str] = None + """The type of error.""" + + +class ConversationItemInputAudioTranscriptionFailedEvent(BaseModel): + content_index: int + """The index of the content part containing the audio.""" + + error: Error + """Details of the transcription error.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the user message item.""" + + type: Literal["conversation.item.input_audio_transcription.failed"] + """The event type, must be `conversation.item.input_audio_transcription.failed`.""" diff --git a/src/openai/types/beta/realtime/conversation_item_param.py b/src/openai/types/beta/realtime/conversation_item_param.py new file mode 100644 index 0000000000..ac0f8431e5 --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item_param.py @@ -0,0 +1,62 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, TypedDict + +from .conversation_item_content_param import ConversationItemContentParam + +__all__ = ["ConversationItemParam"] + + +class ConversationItemParam(TypedDict, total=False): + id: str + """ + The unique ID of the item, this can be generated by the client to help manage + server-side context, but is not required because the server will generate one if + not provided. + """ + + arguments: str + """The arguments of the function call (for `function_call` items).""" + + call_id: str + """ + The ID of the function call (for `function_call` and `function_call_output` + items). If passed on a `function_call_output` item, the server will check that a + `function_call` item with the same ID exists in the conversation history. + """ + + content: Iterable[ConversationItemContentParam] + """The content of the message, applicable for `message` items. + + - Message items of role `system` support only `input_text` content + - Message items of role `user` support `input_text` and `input_audio` content + - Message items of role `assistant` support `text` content. + """ + + name: str + """The name of the function being called (for `function_call` items).""" + + object: Literal["realtime.item"] + """Identifier for the API object being returned - always `realtime.item`.""" + + output: str + """The output of the function call (for `function_call_output` items).""" + + role: Literal["user", "assistant", "system"] + """ + The role of the message sender (`user`, `assistant`, `system`), only applicable + for `message` items. 
+ """ + + status: Literal["completed", "incomplete"] + """The status of the item (`completed`, `incomplete`). + + These have no effect on the conversation, but are accepted for consistency with + the `conversation.item.created` event. + """ + + type: Literal["message", "function_call", "function_call_output"] + """The type of the item (`message`, `function_call`, `function_call_output`).""" diff --git a/src/openai/types/beta/realtime/conversation_item_truncate_event.py b/src/openai/types/beta/realtime/conversation_item_truncate_event.py new file mode 100644 index 0000000000..cb336bba2c --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item_truncate_event.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConversationItemTruncateEvent"] + + +class ConversationItemTruncateEvent(BaseModel): + audio_end_ms: int + """Inclusive duration up to which audio is truncated, in milliseconds. + + If the audio_end_ms is greater than the actual audio duration, the server will + respond with an error. + """ + + content_index: int + """The index of the content part to truncate. Set this to 0.""" + + item_id: str + """The ID of the assistant message item to truncate. + + Only assistant message items can be truncated. + """ + + type: Literal["conversation.item.truncate"] + """The event type, must be `conversation.item.truncate`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/src/openai/types/beta/realtime/conversation_item_truncate_event_param.py b/src/openai/types/beta/realtime/conversation_item_truncate_event_param.py new file mode 100644 index 0000000000..d3ad1e1e25 --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item_truncate_event_param.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ConversationItemTruncateEventParam"] + + +class ConversationItemTruncateEventParam(TypedDict, total=False): + audio_end_ms: Required[int] + """Inclusive duration up to which audio is truncated, in milliseconds. + + If the audio_end_ms is greater than the actual audio duration, the server will + respond with an error. + """ + + content_index: Required[int] + """The index of the content part to truncate. Set this to 0.""" + + item_id: Required[str] + """The ID of the assistant message item to truncate. + + Only assistant message items can be truncated. + """ + + type: Required[Literal["conversation.item.truncate"]] + """The event type, must be `conversation.item.truncate`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/src/openai/types/beta/realtime/conversation_item_truncated_event.py b/src/openai/types/beta/realtime/conversation_item_truncated_event.py new file mode 100644 index 0000000000..36368fa28f --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item_truncated_event.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConversationItemTruncatedEvent"] + + +class ConversationItemTruncatedEvent(BaseModel): + audio_end_ms: int + """The duration up to which the audio was truncated, in milliseconds.""" + + content_index: int + """The index of the content part that was truncated.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the assistant message item that was truncated.""" + + type: Literal["conversation.item.truncated"] + """The event type, must be `conversation.item.truncated`.""" diff --git a/src/openai/types/beta/realtime/error_event.py b/src/openai/types/beta/realtime/error_event.py new file mode 100644 index 0000000000..e020fc3848 --- /dev/null +++ b/src/openai/types/beta/realtime/error_event.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ErrorEvent", "Error"] + + +class Error(BaseModel): + message: str + """A human-readable error message.""" + + type: str + """The type of error (e.g., "invalid_request_error", "server_error").""" + + code: Optional[str] = None + """Error code, if any.""" + + event_id: Optional[str] = None + """The event_id of the client event that caused the error, if applicable.""" + + param: Optional[str] = None + """Parameter related to the error, if any.""" + + +class ErrorEvent(BaseModel): + error: Error + """Details of the error.""" + + event_id: str + """The unique ID of the server event.""" + + type: Literal["error"] + """The event type, must be `error`.""" diff --git a/src/openai/types/beta/realtime/input_audio_buffer_append_event.py b/src/openai/types/beta/realtime/input_audio_buffer_append_event.py new file mode 100644 index 0000000000..a253a6488c --- /dev/null +++ b/src/openai/types/beta/realtime/input_audio_buffer_append_event.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["InputAudioBufferAppendEvent"] + + +class InputAudioBufferAppendEvent(BaseModel): + audio: str + """Base64-encoded audio bytes. + + This must be in the format specified by the `input_audio_format` field in the + session configuration. + """ + + type: Literal["input_audio_buffer.append"] + """The event type, must be `input_audio_buffer.append`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/src/openai/types/beta/realtime/input_audio_buffer_append_event_param.py b/src/openai/types/beta/realtime/input_audio_buffer_append_event_param.py new file mode 100644 index 0000000000..3ad0bc737d --- /dev/null +++ b/src/openai/types/beta/realtime/input_audio_buffer_append_event_param.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["InputAudioBufferAppendEventParam"] + + +class InputAudioBufferAppendEventParam(TypedDict, total=False): + audio: Required[str] + """Base64-encoded audio bytes. + + This must be in the format specified by the `input_audio_format` field in the + session configuration. 
+ """ + + type: Required[Literal["input_audio_buffer.append"]] + """The event type, must be `input_audio_buffer.append`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/src/openai/types/beta/realtime/input_audio_buffer_clear_event.py b/src/openai/types/beta/realtime/input_audio_buffer_clear_event.py new file mode 100644 index 0000000000..b0624d34df --- /dev/null +++ b/src/openai/types/beta/realtime/input_audio_buffer_clear_event.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["InputAudioBufferClearEvent"] + + +class InputAudioBufferClearEvent(BaseModel): + type: Literal["input_audio_buffer.clear"] + """The event type, must be `input_audio_buffer.clear`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/src/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py b/src/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py new file mode 100644 index 0000000000..2bd6bc5a02 --- /dev/null +++ b/src/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["InputAudioBufferClearEventParam"] + + +class InputAudioBufferClearEventParam(TypedDict, total=False): + type: Required[Literal["input_audio_buffer.clear"]] + """The event type, must be `input_audio_buffer.clear`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/src/openai/types/beta/realtime/input_audio_buffer_cleared_event.py b/src/openai/types/beta/realtime/input_audio_buffer_cleared_event.py new file mode 100644 index 0000000000..632e1b94bc --- /dev/null +++ b/src/openai/types/beta/realtime/input_audio_buffer_cleared_event.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["InputAudioBufferClearedEvent"] + + +class InputAudioBufferClearedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + type: Literal["input_audio_buffer.cleared"] + """The event type, must be `input_audio_buffer.cleared`.""" diff --git a/src/openai/types/beta/realtime/input_audio_buffer_commit_event.py b/src/openai/types/beta/realtime/input_audio_buffer_commit_event.py new file mode 100644 index 0000000000..7b6f5e46b7 --- /dev/null +++ b/src/openai/types/beta/realtime/input_audio_buffer_commit_event.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["InputAudioBufferCommitEvent"] + + +class InputAudioBufferCommitEvent(BaseModel): + type: Literal["input_audio_buffer.commit"] + """The event type, must be `input_audio_buffer.commit`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/src/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py b/src/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py new file mode 100644 index 0000000000..c9c927ab98 --- /dev/null +++ b/src/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["InputAudioBufferCommitEventParam"] + + +class InputAudioBufferCommitEventParam(TypedDict, total=False): + type: Required[Literal["input_audio_buffer.commit"]] + """The event type, must be `input_audio_buffer.commit`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/src/openai/types/beta/realtime/input_audio_buffer_committed_event.py b/src/openai/types/beta/realtime/input_audio_buffer_committed_event.py new file mode 100644 index 0000000000..3071eff357 --- /dev/null +++ b/src/openai/types/beta/realtime/input_audio_buffer_committed_event.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["InputAudioBufferCommittedEvent"] + + +class InputAudioBufferCommittedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the user message item that will be created.""" + + previous_item_id: str + """The ID of the preceding item after which the new item will be inserted.""" + + type: Literal["input_audio_buffer.committed"] + """The event type, must be `input_audio_buffer.committed`.""" diff --git a/src/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py b/src/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py new file mode 100644 index 0000000000..4f3ab082c4 --- /dev/null +++ b/src/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["InputAudioBufferSpeechStartedEvent"] + + +class InputAudioBufferSpeechStartedEvent(BaseModel): + audio_start_ms: int + """ + Milliseconds from the start of all audio written to the buffer during the + session when speech was first detected. This will correspond to the beginning of + audio sent to the model, and thus includes the `prefix_padding_ms` configured in + the Session. 
+ """ + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the user message item that will be created when speech stops.""" + + type: Literal["input_audio_buffer.speech_started"] + """The event type, must be `input_audio_buffer.speech_started`.""" diff --git a/src/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py b/src/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py new file mode 100644 index 0000000000..40568170f2 --- /dev/null +++ b/src/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["InputAudioBufferSpeechStoppedEvent"] + + +class InputAudioBufferSpeechStoppedEvent(BaseModel): + audio_end_ms: int + """Milliseconds since the session started when speech stopped. + + This will correspond to the end of audio sent to the model, and thus includes + the `min_silence_duration_ms` configured in the Session. + """ + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the user message item that will be created.""" + + type: Literal["input_audio_buffer.speech_stopped"] + """The event type, must be `input_audio_buffer.speech_stopped`.""" diff --git a/src/openai/types/beta/realtime/rate_limits_updated_event.py b/src/openai/types/beta/realtime/rate_limits_updated_event.py new file mode 100644 index 0000000000..7e12283c46 --- /dev/null +++ b/src/openai/types/beta/realtime/rate_limits_updated_event.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["RateLimitsUpdatedEvent", "RateLimit"] + + +class RateLimit(BaseModel): + limit: Optional[int] = None + """The maximum allowed value for the rate limit.""" + + name: Optional[Literal["requests", "tokens"]] = None + """The name of the rate limit (`requests`, `tokens`).""" + + remaining: Optional[int] = None + """The remaining value before the limit is reached.""" + + reset_seconds: Optional[float] = None + """Seconds until the rate limit resets.""" + + +class RateLimitsUpdatedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + rate_limits: List[RateLimit] + """List of rate limit information.""" + + type: Literal["rate_limits.updated"] + """The event type, must be `rate_limits.updated`.""" diff --git a/src/openai/types/beta/realtime/realtime_client_event.py b/src/openai/types/beta/realtime/realtime_client_event.py new file mode 100644 index 0000000000..0769184cd0 --- /dev/null +++ b/src/openai/types/beta/realtime/realtime_client_event.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ...._utils import PropertyInfo +from .session_update_event import SessionUpdateEvent +from .response_cancel_event import ResponseCancelEvent +from .response_create_event import ResponseCreateEvent +from .conversation_item_create_event import ConversationItemCreateEvent +from .conversation_item_delete_event import ConversationItemDeleteEvent +from .input_audio_buffer_clear_event import InputAudioBufferClearEvent +from .input_audio_buffer_append_event import InputAudioBufferAppendEvent +from .input_audio_buffer_commit_event import InputAudioBufferCommitEvent +from .conversation_item_truncate_event import ConversationItemTruncateEvent + +__all__ = ["RealtimeClientEvent"] + +RealtimeClientEvent: TypeAlias = Annotated[ + Union[ + SessionUpdateEvent, + InputAudioBufferAppendEvent, + InputAudioBufferCommitEvent, + InputAudioBufferClearEvent, + ConversationItemCreateEvent, + ConversationItemTruncateEvent, + ConversationItemDeleteEvent, + ResponseCreateEvent, + ResponseCancelEvent, + ], + PropertyInfo(discriminator="type"), +] diff --git a/src/openai/types/beta/realtime/realtime_client_event_param.py b/src/openai/types/beta/realtime/realtime_client_event_param.py new file mode 100644 index 0000000000..4020892c33 --- /dev/null +++ b/src/openai/types/beta/realtime/realtime_client_event_param.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from .session_update_event_param import SessionUpdateEventParam +from .response_cancel_event_param import ResponseCancelEventParam +from .response_create_event_param import ResponseCreateEventParam +from .conversation_item_create_event_param import ConversationItemCreateEventParam +from .conversation_item_delete_event_param import ConversationItemDeleteEventParam +from .input_audio_buffer_clear_event_param import InputAudioBufferClearEventParam +from .input_audio_buffer_append_event_param import InputAudioBufferAppendEventParam +from .input_audio_buffer_commit_event_param import InputAudioBufferCommitEventParam +from .conversation_item_truncate_event_param import ConversationItemTruncateEventParam + +__all__ = ["RealtimeClientEventParam"] + +RealtimeClientEventParam: TypeAlias = Union[ + SessionUpdateEventParam, + InputAudioBufferAppendEventParam, + InputAudioBufferCommitEventParam, + InputAudioBufferClearEventParam, + ConversationItemCreateEventParam, + ConversationItemTruncateEventParam, + ConversationItemDeleteEventParam, + ResponseCreateEventParam, + ResponseCancelEventParam, +] diff --git a/src/openai/types/beta/realtime/realtime_connect_params.py b/src/openai/types/beta/realtime/realtime_connect_params.py new file mode 100644 index 0000000000..76474f3de4 --- /dev/null +++ b/src/openai/types/beta/realtime/realtime_connect_params.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
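# Illustrative sketch (not part of the generated diff above): the
# RealtimeClientEventParam union added in this patch lets helpers accept any
# client event as a plain TypedDict. The JSON serializer below is an
# assumption for illustration, not how the SDK itself frames websocket messages.
import json

from openai.types.beta.realtime.realtime_client_event_param import RealtimeClientEventParam


def encode_client_event(event: RealtimeClientEventParam) -> str:
    """Serialize a client event dict to JSON text."""
    return json.dumps(event)


frame = encode_client_event({"type": "input_audio_buffer.clear"})
print(frame)  # -> {"type": "input_audio_buffer.clear"}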
+ +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["RealtimeConnectParams"] + + +class RealtimeConnectParams(TypedDict, total=False): + model: Required[str] diff --git a/src/openai/types/beta/realtime/realtime_response.py b/src/openai/types/beta/realtime/realtime_response.py new file mode 100644 index 0000000000..3e1b1406c0 --- /dev/null +++ b/src/openai/types/beta/realtime/realtime_response.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ...._models import BaseModel +from .conversation_item import ConversationItem +from .realtime_response_usage import RealtimeResponseUsage +from .realtime_response_status import RealtimeResponseStatus + +__all__ = ["RealtimeResponse"] + + +class RealtimeResponse(BaseModel): + id: Optional[str] = None + """The unique ID of the response.""" + + metadata: Optional[object] = None + """Developer-provided string key-value pairs associated with this response.""" + + object: Optional[Literal["realtime.response"]] = None + """The object type, must be `realtime.response`.""" + + output: Optional[List[ConversationItem]] = None + """The list of output items generated by the response.""" + + status: Optional[Literal["completed", "cancelled", "failed", "incomplete"]] = None + """ + The final status of the response (`completed`, `cancelled`, `failed`, or + `incomplete`). + """ + + status_details: Optional[RealtimeResponseStatus] = None + """Additional details about the status.""" + + usage: Optional[RealtimeResponseUsage] = None + """Usage statistics for the Response, this will correspond to billing. + + A Realtime API session will maintain a conversation context and append new Items + to the Conversation, thus output from previous turns (text and audio tokens) + will become the input for later turns. + """ diff --git a/src/openai/types/beta/realtime/realtime_response_status.py b/src/openai/types/beta/realtime/realtime_response_status.py new file mode 100644 index 0000000000..7189cd58a1 --- /dev/null +++ b/src/openai/types/beta/realtime/realtime_response_status.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["RealtimeResponseStatus", "Error"] + + +class Error(BaseModel): + code: Optional[str] = None + """Error code, if any.""" + + type: Optional[str] = None + """The type of error.""" + + +class RealtimeResponseStatus(BaseModel): + error: Optional[Error] = None + """ + A description of the error that caused the response to fail, populated when the + `status` is `failed`. + """ + + reason: Optional[Literal["turn_detected", "client_cancelled", "max_output_tokens", "content_filter"]] = None + """The reason the Response did not complete. + + For a `cancelled` Response, one of `turn_detected` (the server VAD detected a + new start of speech) or `client_cancelled` (the client sent a cancel event). For + an `incomplete` Response, one of `max_output_tokens` or `content_filter` (the + server-side safety filter activated and cut off the response). + """ + + type: Optional[Literal["completed", "cancelled", "incomplete", "failed"]] = None + """ + The type of error that caused the response to fail, corresponding with the + `status` field (`completed`, `cancelled`, `incomplete`, `failed`). 
+ """ diff --git a/src/openai/types/beta/realtime/realtime_response_usage.py b/src/openai/types/beta/realtime/realtime_response_usage.py new file mode 100644 index 0000000000..7ca822e25e --- /dev/null +++ b/src/openai/types/beta/realtime/realtime_response_usage.py @@ -0,0 +1,52 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["RealtimeResponseUsage", "InputTokenDetails", "OutputTokenDetails"] + + +class InputTokenDetails(BaseModel): + audio_tokens: Optional[int] = None + """The number of audio tokens used in the Response.""" + + cached_tokens: Optional[int] = None + """The number of cached tokens used in the Response.""" + + text_tokens: Optional[int] = None + """The number of text tokens used in the Response.""" + + +class OutputTokenDetails(BaseModel): + audio_tokens: Optional[int] = None + """The number of audio tokens used in the Response.""" + + text_tokens: Optional[int] = None + """The number of text tokens used in the Response.""" + + +class RealtimeResponseUsage(BaseModel): + input_token_details: Optional[InputTokenDetails] = None + """Details about the input tokens used in the Response.""" + + input_tokens: Optional[int] = None + """ + The number of input tokens used in the Response, including text and audio + tokens. + """ + + output_token_details: Optional[OutputTokenDetails] = None + """Details about the output tokens used in the Response.""" + + output_tokens: Optional[int] = None + """ + The number of output tokens sent in the Response, including text and audio + tokens. + """ + + total_tokens: Optional[int] = None + """ + The total number of tokens in the Response including input and output text and + audio tokens. + """ diff --git a/src/openai/types/beta/realtime/realtime_server_event.py b/src/openai/types/beta/realtime/realtime_server_event.py new file mode 100644 index 0000000000..5f8ed55b13 --- /dev/null +++ b/src/openai/types/beta/realtime/realtime_server_event.py @@ -0,0 +1,72 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ...._utils import PropertyInfo +from .error_event import ErrorEvent +from .response_done_event import ResponseDoneEvent +from .session_created_event import SessionCreatedEvent +from .session_updated_event import SessionUpdatedEvent +from .response_created_event import ResponseCreatedEvent +from .response_text_done_event import ResponseTextDoneEvent +from .rate_limits_updated_event import RateLimitsUpdatedEvent +from .response_audio_done_event import ResponseAudioDoneEvent +from .response_text_delta_event import ResponseTextDeltaEvent +from .conversation_created_event import ConversationCreatedEvent +from .response_audio_delta_event import ResponseAudioDeltaEvent +from .conversation_item_created_event import ConversationItemCreatedEvent +from .conversation_item_deleted_event import ConversationItemDeletedEvent +from .response_output_item_done_event import ResponseOutputItemDoneEvent +from .input_audio_buffer_cleared_event import InputAudioBufferClearedEvent +from .response_content_part_done_event import ResponseContentPartDoneEvent +from .response_output_item_added_event import ResponseOutputItemAddedEvent +from .conversation_item_truncated_event import ConversationItemTruncatedEvent +from .response_content_part_added_event import ResponseContentPartAddedEvent +from .input_audio_buffer_committed_event import InputAudioBufferCommittedEvent +from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent +from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent +from .input_audio_buffer_speech_started_event import InputAudioBufferSpeechStartedEvent +from .input_audio_buffer_speech_stopped_event import InputAudioBufferSpeechStoppedEvent +from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent +from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent +from .conversation_item_input_audio_transcription_failed_event import ConversationItemInputAudioTranscriptionFailedEvent +from .conversation_item_input_audio_transcription_completed_event import ( + ConversationItemInputAudioTranscriptionCompletedEvent, +) + +__all__ = ["RealtimeServerEvent"] + +RealtimeServerEvent: TypeAlias = Annotated[ + Union[ + ErrorEvent, + SessionCreatedEvent, + SessionUpdatedEvent, + ConversationCreatedEvent, + InputAudioBufferCommittedEvent, + InputAudioBufferClearedEvent, + InputAudioBufferSpeechStartedEvent, + InputAudioBufferSpeechStoppedEvent, + ConversationItemCreatedEvent, + ConversationItemInputAudioTranscriptionCompletedEvent, + ConversationItemInputAudioTranscriptionFailedEvent, + ConversationItemTruncatedEvent, + ConversationItemDeletedEvent, + ResponseCreatedEvent, + ResponseDoneEvent, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, + ResponseContentPartAddedEvent, + ResponseContentPartDoneEvent, + ResponseTextDeltaEvent, + ResponseTextDoneEvent, + ResponseAudioTranscriptDeltaEvent, + ResponseAudioTranscriptDoneEvent, + ResponseAudioDeltaEvent, + ResponseAudioDoneEvent, + ResponseFunctionCallArgumentsDeltaEvent, + ResponseFunctionCallArgumentsDoneEvent, + RateLimitsUpdatedEvent, + ], + PropertyInfo(discriminator="type"), +] diff --git a/src/openai/types/beta/realtime/response_audio_delta_event.py b/src/openai/types/beta/realtime/response_audio_delta_event.py new file mode 100644 index 0000000000..8e0128d942 --- /dev/null +++ 
b/src/openai/types/beta/realtime/response_audio_delta_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseAudioDeltaEvent"] + + +class ResponseAudioDeltaEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + delta: str + """Base64-encoded audio data delta.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.audio.delta"] + """The event type, must be `response.audio.delta`.""" diff --git a/src/openai/types/beta/realtime/response_audio_done_event.py b/src/openai/types/beta/realtime/response_audio_done_event.py new file mode 100644 index 0000000000..68e78bc778 --- /dev/null +++ b/src/openai/types/beta/realtime/response_audio_done_event.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseAudioDoneEvent"] + + +class ResponseAudioDoneEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.audio.done"] + """The event type, must be `response.audio.done`.""" diff --git a/src/openai/types/beta/realtime/response_audio_transcript_delta_event.py b/src/openai/types/beta/realtime/response_audio_transcript_delta_event.py new file mode 100644 index 0000000000..3609948d10 --- /dev/null +++ b/src/openai/types/beta/realtime/response_audio_transcript_delta_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseAudioTranscriptDeltaEvent"] + + +class ResponseAudioTranscriptDeltaEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + delta: str + """The transcript delta.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.audio_transcript.delta"] + """The event type, must be `response.audio_transcript.delta`.""" diff --git a/src/openai/types/beta/realtime/response_audio_transcript_done_event.py b/src/openai/types/beta/realtime/response_audio_transcript_done_event.py new file mode 100644 index 0000000000..4e4436a95f --- /dev/null +++ b/src/openai/types/beta/realtime/response_audio_transcript_done_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
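# Illustrative sketch (not part of the generated diff above): collecting the
# streamed audio carried by the delta/done events in this patch. Buffering the
# whole clip in memory is a simplification for the example.
import base64
from collections import defaultdict
from typing import DefaultDict

from openai.types.beta.realtime.response_audio_delta_event import ResponseAudioDeltaEvent
from openai.types.beta.realtime.response_audio_done_event import ResponseAudioDoneEvent

# Raw audio bytes keyed by item_id, appended to as deltas arrive.
audio_buffers: DefaultDict[str, bytearray] = defaultdict(bytearray)


def on_audio_event(event: object) -> None:
    if isinstance(event, ResponseAudioDeltaEvent):
        audio_buffers[event.item_id] += base64.b64decode(event.delta)
    elif isinstance(event, ResponseAudioDoneEvent):
        clip = bytes(audio_buffers.pop(event.item_id, b""))
        print(f"item {event.item_id}: {len(clip)} bytes of audio ready to play")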
+ +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseAudioTranscriptDoneEvent"] + + +class ResponseAudioTranscriptDoneEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + transcript: str + """The final transcript of the audio.""" + + type: Literal["response.audio_transcript.done"] + """The event type, must be `response.audio_transcript.done`.""" diff --git a/src/openai/types/beta/realtime/response_cancel_event.py b/src/openai/types/beta/realtime/response_cancel_event.py new file mode 100644 index 0000000000..c5ff991e9a --- /dev/null +++ b/src/openai/types/beta/realtime/response_cancel_event.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseCancelEvent"] + + +class ResponseCancelEvent(BaseModel): + type: Literal["response.cancel"] + """The event type, must be `response.cancel`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" + + response_id: Optional[str] = None + """ + A specific response ID to cancel - if not provided, will cancel an in-progress + response in the default conversation. + """ diff --git a/src/openai/types/beta/realtime/response_cancel_event_param.py b/src/openai/types/beta/realtime/response_cancel_event_param.py new file mode 100644 index 0000000000..f33740730a --- /dev/null +++ b/src/openai/types/beta/realtime/response_cancel_event_param.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseCancelEventParam"] + + +class ResponseCancelEventParam(TypedDict, total=False): + type: Required[Literal["response.cancel"]] + """The event type, must be `response.cancel`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" + + response_id: str + """ + A specific response ID to cancel - if not provided, will cancel an in-progress + response in the default conversation. + """ diff --git a/src/openai/types/beta/realtime/response_content_part_added_event.py b/src/openai/types/beta/realtime/response_content_part_added_event.py new file mode 100644 index 0000000000..45c8f20f97 --- /dev/null +++ b/src/openai/types/beta/realtime/response_content_part_added_event.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
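# Illustrative sketch (not part of the generated diff above): cancelling an
# in-progress response with the TypedDict added in this patch. The response_id
# is a placeholder; omitting it cancels the default conversation's in-progress
# response instead.
from openai.types.beta.realtime.response_cancel_event_param import ResponseCancelEventParam

cancel_default: ResponseCancelEventParam = {"type": "response.cancel"}

cancel_specific: ResponseCancelEventParam = {
    "type": "response.cancel",
    "response_id": "resp_abc123",
}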
+ +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseContentPartAddedEvent", "Part"] + + +class Part(BaseModel): + audio: Optional[str] = None + """Base64-encoded audio data (if type is "audio").""" + + text: Optional[str] = None + """The text content (if type is "text").""" + + transcript: Optional[str] = None + """The transcript of the audio (if type is "audio").""" + + type: Optional[Literal["text", "audio"]] = None + """The content type ("text", "audio").""" + + +class ResponseContentPartAddedEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item to which the content part was added.""" + + output_index: int + """The index of the output item in the response.""" + + part: Part + """The content part that was added.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.content_part.added"] + """The event type, must be `response.content_part.added`.""" diff --git a/src/openai/types/beta/realtime/response_content_part_done_event.py b/src/openai/types/beta/realtime/response_content_part_done_event.py new file mode 100644 index 0000000000..3d16116106 --- /dev/null +++ b/src/openai/types/beta/realtime/response_content_part_done_event.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseContentPartDoneEvent", "Part"] + + +class Part(BaseModel): + audio: Optional[str] = None + """Base64-encoded audio data (if type is "audio").""" + + text: Optional[str] = None + """The text content (if type is "text").""" + + transcript: Optional[str] = None + """The transcript of the audio (if type is "audio").""" + + type: Optional[Literal["text", "audio"]] = None + """The content type ("text", "audio").""" + + +class ResponseContentPartDoneEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + part: Part + """The content part that is done.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.content_part.done"] + """The event type, must be `response.content_part.done`.""" diff --git a/src/openai/types/beta/realtime/response_create_event.py b/src/openai/types/beta/realtime/response_create_event.py new file mode 100644 index 0000000000..00ba1e5dad --- /dev/null +++ b/src/openai/types/beta/realtime/response_create_event.py @@ -0,0 +1,115 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal + +from ...._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ResponseCreateEvent", "Response", "ResponseTool"] + + +class ResponseTool(BaseModel): + description: Optional[str] = None + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). 
+ """ + + name: Optional[str] = None + """The name of the function.""" + + parameters: Optional[object] = None + """Parameters of the function in JSON Schema.""" + + type: Optional[Literal["function"]] = None + """The type of the tool, i.e. `function`.""" + + +class Response(BaseModel): + conversation: Union[str, Literal["auto", "none"], None] = None + """Controls which conversation the response is added to. + + Currently supports `auto` and `none`, with `auto` as the default value. The + `auto` value means that the contents of the response will be added to the + default conversation. Set this to `none` to create an out-of-band response which + will not add items to default conversation. + """ + + input: Optional[List[ConversationItem]] = None + """Input items to include in the prompt for the model. + + Creates a new context for this response, without including the default + conversation. Can include references to items from the default conversation. + """ + + instructions: Optional[str] = None + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + """ + + max_response_output_tokens: Union[int, Literal["inf"], None] = None + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + metadata: Optional[object] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format. Keys can be a maximum of 64 characters long and values can be + a maximum of 512 characters long. + """ + + modalities: Optional[List[Literal["text", "audio"]]] = None + """The set of modalities the model can respond with. + + To disable audio, set this to ["text"]. + """ + + output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None + """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + temperature: Optional[float] = None + """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" + + tool_choice: Optional[str] = None + """How the model chooses tools. + + Options are `auto`, `none`, `required`, or specify a function. + """ + + tools: Optional[List[ResponseTool]] = None + """Tools (functions) available to the model.""" + + voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo` `sage`, `shimmer` and `verse`. 
+ """ + + +class ResponseCreateEvent(BaseModel): + type: Literal["response.create"] + """The event type, must be `response.create`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" + + response: Optional[Response] = None + """Create a new Realtime response with these parameters""" diff --git a/src/openai/types/beta/realtime/response_create_event_param.py b/src/openai/types/beta/realtime/response_create_event_param.py new file mode 100644 index 0000000000..7c92b32df1 --- /dev/null +++ b/src/openai/types/beta/realtime/response_create_event_param.py @@ -0,0 +1,116 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypedDict + +from .conversation_item_param import ConversationItemParam + +__all__ = ["ResponseCreateEventParam", "Response", "ResponseTool"] + + +class ResponseTool(TypedDict, total=False): + description: str + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). + """ + + name: str + """The name of the function.""" + + parameters: object + """Parameters of the function in JSON Schema.""" + + type: Literal["function"] + """The type of the tool, i.e. `function`.""" + + +class Response(TypedDict, total=False): + conversation: Union[str, Literal["auto", "none"]] + """Controls which conversation the response is added to. + + Currently supports `auto` and `none`, with `auto` as the default value. The + `auto` value means that the contents of the response will be added to the + default conversation. Set this to `none` to create an out-of-band response which + will not add items to default conversation. + """ + + input: Iterable[ConversationItemParam] + """Input items to include in the prompt for the model. + + Creates a new context for this response, without including the default + conversation. Can include references to items from the default conversation. + """ + + instructions: str + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + """ + + max_response_output_tokens: Union[int, Literal["inf"]] + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + metadata: Optional[object] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format. Keys can be a maximum of 64 characters long and values can be + a maximum of 512 characters long. 
+ """ + + modalities: List[Literal["text", "audio"]] + """The set of modalities the model can respond with. + + To disable audio, set this to ["text"]. + """ + + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] + """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + temperature: float + """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" + + tool_choice: str + """How the model chooses tools. + + Options are `auto`, `none`, `required`, or specify a function. + """ + + tools: Iterable[ResponseTool] + """Tools (functions) available to the model.""" + + voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo` `sage`, `shimmer` and `verse`. + """ + + +class ResponseCreateEventParam(TypedDict, total=False): + type: Required[Literal["response.create"]] + """The event type, must be `response.create`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" + + response: Response + """Create a new Realtime response with these parameters""" diff --git a/src/openai/types/beta/realtime/response_created_event.py b/src/openai/types/beta/realtime/response_created_event.py new file mode 100644 index 0000000000..a4990cf095 --- /dev/null +++ b/src/openai/types/beta/realtime/response_created_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel +from .realtime_response import RealtimeResponse + +__all__ = ["ResponseCreatedEvent"] + + +class ResponseCreatedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + response: RealtimeResponse + """The response resource.""" + + type: Literal["response.created"] + """The event type, must be `response.created`.""" diff --git a/src/openai/types/beta/realtime/response_done_event.py b/src/openai/types/beta/realtime/response_done_event.py new file mode 100644 index 0000000000..9e655184b6 --- /dev/null +++ b/src/openai/types/beta/realtime/response_done_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel +from .realtime_response import RealtimeResponse + +__all__ = ["ResponseDoneEvent"] + + +class ResponseDoneEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + response: RealtimeResponse + """The response resource.""" + + type: Literal["response.done"] + """The event type, must be `response.done`.""" diff --git a/src/openai/types/beta/realtime/response_function_call_arguments_delta_event.py b/src/openai/types/beta/realtime/response_function_call_arguments_delta_event.py new file mode 100644 index 0000000000..cdbb64e658 --- /dev/null +++ b/src/openai/types/beta/realtime/response_function_call_arguments_delta_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseFunctionCallArgumentsDeltaEvent"] + + +class ResponseFunctionCallArgumentsDeltaEvent(BaseModel): + call_id: str + """The ID of the function call.""" + + delta: str + """The arguments delta as a JSON string.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the function call item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.function_call_arguments.delta"] + """The event type, must be `response.function_call_arguments.delta`.""" diff --git a/src/openai/types/beta/realtime/response_function_call_arguments_done_event.py b/src/openai/types/beta/realtime/response_function_call_arguments_done_event.py new file mode 100644 index 0000000000..0a5db53323 --- /dev/null +++ b/src/openai/types/beta/realtime/response_function_call_arguments_done_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseFunctionCallArgumentsDoneEvent"] + + +class ResponseFunctionCallArgumentsDoneEvent(BaseModel): + arguments: str + """The final arguments as a JSON string.""" + + call_id: str + """The ID of the function call.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the function call item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.function_call_arguments.done"] + """The event type, must be `response.function_call_arguments.done`.""" diff --git a/src/openai/types/beta/realtime/response_output_item_added_event.py b/src/openai/types/beta/realtime/response_output_item_added_event.py new file mode 100644 index 0000000000..c89bfdc3be --- /dev/null +++ b/src/openai/types/beta/realtime/response_output_item_added_event.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ResponseOutputItemAddedEvent"] + + +class ResponseOutputItemAddedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item: ConversationItem + """The item to add to the conversation.""" + + output_index: int + """The index of the output item in the Response.""" + + response_id: str + """The ID of the Response to which the item belongs.""" + + type: Literal["response.output_item.added"] + """The event type, must be `response.output_item.added`.""" diff --git a/src/openai/types/beta/realtime/response_output_item_done_event.py b/src/openai/types/beta/realtime/response_output_item_done_event.py new file mode 100644 index 0000000000..b5910e22aa --- /dev/null +++ b/src/openai/types/beta/realtime/response_output_item_done_event.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
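# Illustrative sketch (not part of the generated diff above): turning a
# completed function call into a function_call_output item using the types in
# this patch. The dispatch table is a stand-in for real tools, and the function
# name must come from the matching function_call item (e.g. from
# response.output_item.added), since the arguments.done event only carries IDs.
import json
from typing import Any, Callable, Dict

from openai.types.beta.realtime.conversation_item_param import ConversationItemParam
from openai.types.beta.realtime.response_function_call_arguments_done_event import (
    ResponseFunctionCallArgumentsDoneEvent,
)

TOOLS: Dict[str, Callable[..., Any]] = {
    "get_weather": lambda city: {"city": city, "forecast": "sunny"},  # placeholder tool
}


def to_function_output(
    event: ResponseFunctionCallArgumentsDoneEvent, name: str
) -> ConversationItemParam:
    """Run the named tool on the final arguments and wrap its result as an item."""
    result = TOOLS[name](**json.loads(event.arguments))
    return {
        "type": "function_call_output",
        "call_id": event.call_id,
        "output": json.dumps(result),
    }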
+ +from typing_extensions import Literal + +from ...._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ResponseOutputItemDoneEvent"] + + +class ResponseOutputItemDoneEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item: ConversationItem + """The item to add to the conversation.""" + + output_index: int + """The index of the output item in the Response.""" + + response_id: str + """The ID of the Response to which the item belongs.""" + + type: Literal["response.output_item.done"] + """The event type, must be `response.output_item.done`.""" diff --git a/src/openai/types/beta/realtime/response_text_delta_event.py b/src/openai/types/beta/realtime/response_text_delta_event.py new file mode 100644 index 0000000000..c463b3c3d0 --- /dev/null +++ b/src/openai/types/beta/realtime/response_text_delta_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseTextDeltaEvent"] + + +class ResponseTextDeltaEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + delta: str + """The text delta.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.text.delta"] + """The event type, must be `response.text.delta`.""" diff --git a/src/openai/types/beta/realtime/response_text_done_event.py b/src/openai/types/beta/realtime/response_text_done_event.py new file mode 100644 index 0000000000..020ff41d58 --- /dev/null +++ b/src/openai/types/beta/realtime/response_text_done_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseTextDoneEvent"] + + +class ResponseTextDoneEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + text: str + """The final text content.""" + + type: Literal["response.text.done"] + """The event type, must be `response.text.done`.""" diff --git a/src/openai/types/beta/realtime/session.py b/src/openai/types/beta/realtime/session.py new file mode 100644 index 0000000000..09cdbb02bc --- /dev/null +++ b/src/openai/types/beta/realtime/session.py @@ -0,0 +1,148 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["Session", "InputAudioTranscription", "Tool", "TurnDetection"] + + +class InputAudioTranscription(BaseModel): + model: Optional[str] = None + """ + The model to use for transcription, `whisper-1` is the only currently supported + model. + """ + + +class Tool(BaseModel): + description: Optional[str] = None + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). 
+ """ + + name: Optional[str] = None + """The name of the function.""" + + parameters: Optional[object] = None + """Parameters of the function in JSON Schema.""" + + type: Optional[Literal["function"]] = None + """The type of the tool, i.e. `function`.""" + + +class TurnDetection(BaseModel): + prefix_padding_ms: Optional[int] = None + """Amount of audio to include before the VAD detected speech (in milliseconds). + + Defaults to 300ms. + """ + + silence_duration_ms: Optional[int] = None + """Duration of silence to detect speech stop (in milliseconds). + + Defaults to 500ms. With shorter values the model will respond more quickly, but + may jump in on short pauses from the user. + """ + + threshold: Optional[float] = None + """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. + + A higher threshold will require louder audio to activate the model, and thus + might perform better in noisy environments. + """ + + type: Optional[Literal["server_vad"]] = None + """Type of turn detection, only `server_vad` is currently supported.""" + + +class Session(BaseModel): + id: Optional[str] = None + """Unique identifier for the session object.""" + + input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None + """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + input_audio_transcription: Optional[InputAudioTranscription] = None + """ + Configuration for input audio transcription, defaults to off and can be set to + `null` to turn off once on. Input audio transcription is not native to the + model, since the model consumes audio directly. Transcription runs + asynchronously through Whisper and should be treated as rough guidance rather + than the representation understood by the model. + """ + + instructions: Optional[str] = None + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + """ + + max_response_output_tokens: Union[int, Literal["inf"], None] = None + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + modalities: Optional[List[Literal["text", "audio"]]] = None + """The set of modalities the model can respond with. + + To disable audio, set this to ["text"]. + """ + + model: Union[ + str, + Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ], + None, + ] = None + """The Realtime model used for this session.""" + + output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None + """The format of output audio. 
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + temperature: Optional[float] = None + """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" + + tool_choice: Optional[str] = None + """How the model chooses tools. + + Options are `auto`, `none`, `required`, or specify a function. + """ + + tools: Optional[List[Tool]] = None + """Tools (functions) available to the model.""" + + turn_detection: Optional[TurnDetection] = None + """Configuration for turn detection. + + Can be set to `null` to turn off. Server VAD means that the model will detect + the start and end of speech based on audio volume and respond at the end of user + speech. + """ + + voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo` `sage`, `shimmer` and `verse`. + """ diff --git a/src/openai/types/beta/realtime/session_created_event.py b/src/openai/types/beta/realtime/session_created_event.py new file mode 100644 index 0000000000..baf6af388b --- /dev/null +++ b/src/openai/types/beta/realtime/session_created_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .session import Session +from ...._models import BaseModel + +__all__ = ["SessionCreatedEvent"] + + +class SessionCreatedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + session: Session + """Realtime session object configuration.""" + + type: Literal["session.created"] + """The event type, must be `session.created`.""" diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py new file mode 100644 index 0000000000..c04220aa25 --- /dev/null +++ b/src/openai/types/beta/realtime/session_update_event.py @@ -0,0 +1,158 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["SessionUpdateEvent", "Session", "SessionInputAudioTranscription", "SessionTool", "SessionTurnDetection"] + + +class SessionInputAudioTranscription(BaseModel): + model: Optional[str] = None + """ + The model to use for transcription, `whisper-1` is the only currently supported + model. + """ + + +class SessionTool(BaseModel): + description: Optional[str] = None + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). + """ + + name: Optional[str] = None + """The name of the function.""" + + parameters: Optional[object] = None + """Parameters of the function in JSON Schema.""" + + type: Optional[Literal["function"]] = None + """The type of the tool, i.e. `function`.""" + + +class SessionTurnDetection(BaseModel): + create_response: Optional[bool] = None + """Whether or not to automatically generate a response when VAD is enabled. + + `true` by default. + """ + + prefix_padding_ms: Optional[int] = None + """Amount of audio to include before the VAD detected speech (in milliseconds). + + Defaults to 300ms. + """ + + silence_duration_ms: Optional[int] = None + """Duration of silence to detect speech stop (in milliseconds). + + Defaults to 500ms. 
With shorter values the model will respond more quickly, but + may jump in on short pauses from the user. + """ + + threshold: Optional[float] = None + """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. + + A higher threshold will require louder audio to activate the model, and thus + might perform better in noisy environments. + """ + + type: Optional[str] = None + """Type of turn detection, only `server_vad` is currently supported.""" + + +class Session(BaseModel): + model: Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + """The Realtime model used for this session.""" + + input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None + """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + input_audio_transcription: Optional[SessionInputAudioTranscription] = None + """ + Configuration for input audio transcription, defaults to off and can be set to + `null` to turn off once on. Input audio transcription is not native to the + model, since the model consumes audio directly. Transcription runs + asynchronously through Whisper and should be treated as rough guidance rather + than the representation understood by the model. + """ + + instructions: Optional[str] = None + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + """ + + max_response_output_tokens: Union[int, Literal["inf"], None] = None + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + modalities: Optional[List[Literal["text", "audio"]]] = None + """The set of modalities the model can respond with. + + To disable audio, set this to ["text"]. + """ + + output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None + """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + temperature: Optional[float] = None + """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" + + tool_choice: Optional[str] = None + """How the model chooses tools. + + Options are `auto`, `none`, `required`, or specify a function. + """ + + tools: Optional[List[SessionTool]] = None + """Tools (functions) available to the model.""" + + turn_detection: Optional[SessionTurnDetection] = None + """Configuration for turn detection. + + Can be set to `null` to turn off. Server VAD means that the model will detect + the start and end of speech based on audio volume and respond at the end of user + speech. 
+ """ + + voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo` `sage`, `shimmer` and `verse`. + """ + + +class SessionUpdateEvent(BaseModel): + session: Session + """Realtime session object configuration.""" + + type: Literal["session.update"] + """The event type, must be `session.update`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py new file mode 100644 index 0000000000..aa06069b04 --- /dev/null +++ b/src/openai/types/beta/realtime/session_update_event_param.py @@ -0,0 +1,166 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal, Required, TypedDict + +__all__ = [ + "SessionUpdateEventParam", + "Session", + "SessionInputAudioTranscription", + "SessionTool", + "SessionTurnDetection", +] + + +class SessionInputAudioTranscription(TypedDict, total=False): + model: str + """ + The model to use for transcription, `whisper-1` is the only currently supported + model. + """ + + +class SessionTool(TypedDict, total=False): + description: str + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). + """ + + name: str + """The name of the function.""" + + parameters: object + """Parameters of the function in JSON Schema.""" + + type: Literal["function"] + """The type of the tool, i.e. `function`.""" + + +class SessionTurnDetection(TypedDict, total=False): + create_response: bool + """Whether or not to automatically generate a response when VAD is enabled. + + `true` by default. + """ + + prefix_padding_ms: int + """Amount of audio to include before the VAD detected speech (in milliseconds). + + Defaults to 300ms. + """ + + silence_duration_ms: int + """Duration of silence to detect speech stop (in milliseconds). + + Defaults to 500ms. With shorter values the model will respond more quickly, but + may jump in on short pauses from the user. + """ + + threshold: float + """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. + + A higher threshold will require louder audio to activate the model, and thus + might perform better in noisy environments. + """ + + type: str + """Type of turn detection, only `server_vad` is currently supported.""" + + +class Session(TypedDict, total=False): + model: Required[ + Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + ] + """The Realtime model used for this session.""" + + input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] + """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + input_audio_transcription: SessionInputAudioTranscription + """ + Configuration for input audio transcription, defaults to off and can be set to + `null` to turn off once on. Input audio transcription is not native to the + model, since the model consumes audio directly. 
Transcription runs + asynchronously through Whisper and should be treated as rough guidance rather + than the representation understood by the model. + """ + + instructions: str + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + """ + + max_response_output_tokens: Union[int, Literal["inf"]] + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + modalities: List[Literal["text", "audio"]] + """The set of modalities the model can respond with. + + To disable audio, set this to ["text"]. + """ + + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] + """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + temperature: float + """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" + + tool_choice: str + """How the model chooses tools. + + Options are `auto`, `none`, `required`, or specify a function. + """ + + tools: Iterable[SessionTool] + """Tools (functions) available to the model.""" + + turn_detection: SessionTurnDetection + """Configuration for turn detection. + + Can be set to `null` to turn off. Server VAD means that the model will detect + the start and end of speech based on audio volume and respond at the end of user + speech. + """ + + voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo` `sage`, `shimmer` and `verse`. + """ + + +class SessionUpdateEventParam(TypedDict, total=False): + session: Required[Session] + """Realtime session object configuration.""" + + type: Required[Literal["session.update"]] + """The event type, must be `session.update`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/src/openai/types/beta/realtime/session_updated_event.py b/src/openai/types/beta/realtime/session_updated_event.py new file mode 100644 index 0000000000..b9b6488eb3 --- /dev/null +++ b/src/openai/types/beta/realtime/session_updated_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from .session import Session +from ...._models import BaseModel + +__all__ = ["SessionUpdatedEvent"] + + +class SessionUpdatedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + session: Session + """Realtime session object configuration.""" + + type: Literal["session.updated"] + """The event type, must be `session.updated`.""" diff --git a/src/openai/types/websocket_connection_options.py b/src/openai/types/websocket_connection_options.py new file mode 100644 index 0000000000..40fd24ab03 --- /dev/null +++ b/src/openai/types/websocket_connection_options.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing_extensions import Sequence, TypedDict + +if TYPE_CHECKING: + from websockets import Subprotocol + from websockets.extensions import ClientExtensionFactory + + +class WebsocketConnectionOptions(TypedDict, total=False): + """Websocket connection options copied from `websockets`. + + For example: https://websockets.readthedocs.io/en/stable/reference/asyncio/client.html#websockets.asyncio.client.connect + """ + + extensions: Sequence[ClientExtensionFactory] | None + """List of supported extensions, in order in which they should be negotiated and run.""" + + subprotocols: Sequence[Subprotocol] | None + """List of supported subprotocols, in order of decreasing preference.""" + + compression: str | None + """The “permessage-deflate” extension is enabled by default. Set compression to None to disable it. See the [compression guide](https://websockets.readthedocs.io/en/stable/topics/compression.html) for details.""" + + # limits + max_size: int | None + """Maximum size of incoming messages in bytes. None disables the limit.""" + + max_queue: int | None | tuple[int | None, int | None] + """High-water mark of the buffer where frames are received. It defaults to 16 frames. The low-water mark defaults to max_queue // 4. You may pass a (high, low) tuple to set the high-water and low-water marks. If you want to disable flow control entirely, you may set it to None, although that’s a bad idea.""" + + write_limit: int | tuple[int, int | None] + """High-water mark of write buffer in bytes. It is passed to set_write_buffer_limits(). It defaults to 32 KiB. You may pass a (high, low) tuple to set the high-water and low-water marks.""" diff --git a/tests/api_resources/beta/test_realtime.py b/tests/api_resources/beta/test_realtime.py new file mode 100644 index 0000000000..537017ffd3 --- /dev/null +++ b/tests/api_resources/beta/test_realtime.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os + +import pytest + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010/") + + +class TestRealtime: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + +class TestAsyncRealtime: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) From 7a9e01aafcd3ed85f17a6323298cbfed3c621ffd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Dec 2024 22:17:00 +0000 Subject: [PATCH 134/192] chore(realtime): update docstrings (#1964) --- .stats.yml | 2 +- src/openai/types/beta/realtime/conversation_item_content.py | 5 +++-- .../types/beta/realtime/conversation_item_content_param.py | 5 +++-- src/openai/types/beta/realtime/response_create_event.py | 3 ++- .../types/beta/realtime/response_create_event_param.py | 3 ++- 5 files changed, 11 insertions(+), 7 deletions(-) diff --git a/.stats.yml b/.stats.yml index 12219ccaa1..1a7a7a5269 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0d64ca9e45f51b4279f87b205eeb3a3576df98407698ce053f2e2302c1c08df1.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-a39aca84ed97ebafb707ebd5221e2787c5a42ff3d98f2ffaea8a0dcd84cbcbcb.yml diff --git a/src/openai/types/beta/realtime/conversation_item_content.py b/src/openai/types/beta/realtime/conversation_item_content.py index b854aa0e0f..ab40a4a1a7 100644 --- a/src/openai/types/beta/realtime/conversation_item_content.py +++ b/src/openai/types/beta/realtime/conversation_item_content.py @@ -11,8 +11,9 @@ class ConversationItemContent(BaseModel): id: Optional[str] = None """ - ID of a previous conversation item (like a model response), used for - `item_reference` content types. + ID of a previous conversation item to reference (for `item_reference` content + types in `response.create` events). These can reference both client and server + created items. """ audio: Optional[str] = None diff --git a/src/openai/types/beta/realtime/conversation_item_content_param.py b/src/openai/types/beta/realtime/conversation_item_content_param.py index b354d78971..7a3a92a39d 100644 --- a/src/openai/types/beta/realtime/conversation_item_content_param.py +++ b/src/openai/types/beta/realtime/conversation_item_content_param.py @@ -10,8 +10,9 @@ class ConversationItemContentParam(TypedDict, total=False): id: str """ - ID of a previous conversation item (like a model response), used for - `item_reference` content types. + ID of a previous conversation item to reference (for `item_reference` content + types in `response.create` events). These can reference both client and server + created items. """ audio: str diff --git a/src/openai/types/beta/realtime/response_create_event.py b/src/openai/types/beta/realtime/response_create_event.py index 00ba1e5dad..e4e5e7c68f 100644 --- a/src/openai/types/beta/realtime/response_create_event.py +++ b/src/openai/types/beta/realtime/response_create_event.py @@ -89,7 +89,8 @@ class Response(BaseModel): tool_choice: Optional[str] = None """How the model chooses tools. - Options are `auto`, `none`, `required`, or specify a function. + Options are `auto`, `none`, `required`, or specify a function, like + `{"type": "function", "function": {"name": "my_function"}}`. 
""" tools: Optional[List[ResponseTool]] = None diff --git a/src/openai/types/beta/realtime/response_create_event_param.py b/src/openai/types/beta/realtime/response_create_event_param.py index 7c92b32df1..7a4b5f086a 100644 --- a/src/openai/types/beta/realtime/response_create_event_param.py +++ b/src/openai/types/beta/realtime/response_create_event_param.py @@ -90,7 +90,8 @@ class Response(TypedDict, total=False): tool_choice: str """How the model chooses tools. - Options are `auto`, `none`, `required`, or specify a function. + Options are `auto`, `none`, `required`, or specify a function, like + `{"type": "function", "function": {"name": "my_function"}}`. """ tools: Iterable[ResponseTool] From bf69f0ad3e515ed2ceaab650594eab8d64ce037f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 2 Jan 2025 01:44:32 +0000 Subject: [PATCH 135/192] chore: bump license year (#1981) --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 621a6becfb..f011417af6 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2024 OpenAI + Copyright 2025 OpenAI Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. From 78e811acd69493da29271f6ba619488ee1d31421 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 3 Jan 2025 22:57:16 +0000 Subject: [PATCH 136/192] chore(api): bump spec version (#1985) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 1a7a7a5269..1ac7a94471 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-a39aca84ed97ebafb707ebd5221e2787c5a42ff3d98f2ffaea8a0dcd84cbcbcb.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-02200a58ed631064b6419711da99fefd6e97bdbbeb577a80a1a6e0c8dbcb18f5.yml From 928a1d29e1be9f17b357b42146d5826e6be20d1c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 6 Jan 2025 17:15:50 +0000 Subject: [PATCH 137/192] chore: add missing isclass check (#1988) --- src/openai/_models.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/openai/_models.py b/src/openai/_models.py index 7a547ce5c4..d56ea1d9e5 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -488,7 +488,11 @@ def construct_type(*, value: object, type_: object) -> object: _, items_type = get_args(type_) # Dict[_, items_type] return {key: construct_type(value=item, type_=items_type) for key, item in value.items()} - if not is_literal_type(type_) and (issubclass(origin, BaseModel) or issubclass(origin, GenericModel)): + if ( + not is_literal_type(type_) + and inspect.isclass(origin) + and (issubclass(origin, BaseModel) or issubclass(origin, GenericModel)) + ): if is_list(value): return [cast(Any, type_).construct(**entry) if is_mapping(entry) else entry for entry in value] From 759505d863c0c0fd4bba22c2de51ed060be4f157 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 7 Jan 2025 09:22:54 +0000 Subject: [PATCH 138/192] chore(internal): bump httpx dependency (#1990) --- pyproject.toml | 2 +- 
requirements-dev.lock | 5 ++--- requirements.lock | 3 +-- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c5c3799356..ad30166f35 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,7 +55,7 @@ dev-dependencies = [ "dirty-equals>=0.6.0", "importlib-metadata>=6.7.0", "rich>=13.7.1", - "nest_asyncio==1.6.0" + "nest_asyncio==1.6.0", ] [tool.rye.scripts] diff --git a/requirements-dev.lock b/requirements-dev.lock index 45cd4c23ef..280c93d374 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -35,7 +35,7 @@ h11==0.14.0 # via httpcore httpcore==1.0.2 # via httpx -httpx==0.25.2 +httpx==0.28.1 # via openai # via respx idna==3.4 @@ -76,7 +76,7 @@ python-dateutil==2.8.2 # via time-machine pytz==2023.3.post1 # via dirty-equals -respx==0.20.2 +respx==0.22.0 rich==13.7.1 ruff==0.6.9 setuptools==68.2.2 @@ -85,7 +85,6 @@ six==1.16.0 # via python-dateutil sniffio==1.3.0 # via anyio - # via httpx # via openai time-machine==2.9.0 tomli==2.0.2 diff --git a/requirements.lock b/requirements.lock index 0eea0124ed..5dcc368039 100644 --- a/requirements.lock +++ b/requirements.lock @@ -25,7 +25,7 @@ h11==0.14.0 # via httpcore httpcore==1.0.2 # via httpx -httpx==0.25.2 +httpx==0.28.1 # via openai idna==3.4 # via anyio @@ -36,7 +36,6 @@ pydantic-core==2.27.1 # via pydantic sniffio==1.3.0 # via anyio - # via httpx # via openai typing-extensions==4.12.2 # via anyio From f64bec697a6154ca3c67e50d5c0c0c3838683686 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 7 Jan 2025 20:09:36 +0000 Subject: [PATCH 139/192] fix(client): only call .close() when needed (#1992) --- src/openai/_base_client.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 8bdad99feb..e9516fc6cd 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -768,6 +768,9 @@ def __init__(self, **kwargs: Any) -> None: class SyncHttpxClientWrapper(DefaultHttpxClient): def __del__(self) -> None: + if self.is_closed: + return + try: self.close() except Exception: @@ -1350,6 +1353,9 @@ def __init__(self, **kwargs: Any) -> None: class AsyncHttpxClientWrapper(DefaultAsyncHttpxClient): def __del__(self) -> None: + if self.is_closed: + return + try: # TODO(someday): support non asyncio runtimes here asyncio.get_running_loop().create_task(self.aclose()) From 3d92ba98cec181e72059f5800882647964220a33 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 8 Jan 2025 13:45:55 +0000 Subject: [PATCH 140/192] docs: fix typos (#1996) --- README.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index d8eb0aebf5..46e71ce427 100644 --- a/README.md +++ b/README.md @@ -270,7 +270,7 @@ except openai.APIStatusError as e: print(e.response) ``` -Error codes are as followed: +Error codes are as follows: | Status Code | Error Type | | ----------- | -------------------------- | @@ -394,7 +394,7 @@ completion = response.parse() # get the object that `chat.completions.create()` print(completion) ``` -These methods return an [`LegacyAPIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_legacy_response.py) object. This is a legacy class as we're changing it slightly in the next major version. +These methods return a [`LegagcyAPIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_legacy_response.py) object. 
This is a legacy class as we're changing it slightly in the next major version. For the sync client this will mostly be the same with the exception of `content` & `text` will be methods instead of properties. In the @@ -438,8 +438,7 @@ If you need to access undocumented endpoints, params, or response properties, th #### Undocumented endpoints To make requests to undocumented endpoints, you can make requests using `client.get`, `client.post`, and other -http verbs. Options on the client will be respected (such as retries) will be respected when making this -request. +http verbs. Options on the client will be respected (such as retries) when making this request. ```py import httpx From e7b908ce906e12d55ed5c1fa3b341b085f11a4e6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 8 Jan 2025 14:08:24 +0000 Subject: [PATCH 141/192] docs: more typo fixes (#1998) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 46e71ce427..ea8a23e74c 100644 --- a/README.md +++ b/README.md @@ -394,7 +394,7 @@ completion = response.parse() # get the object that `chat.completions.create()` print(completion) ``` -These methods return a [`LegagcyAPIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_legacy_response.py) object. This is a legacy class as we're changing it slightly in the next major version. +These methods return a [`LegacyAPIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_legacy_response.py) object. This is a legacy class as we're changing it slightly in the next major version. For the sync client this will mostly be the same with the exception of `content` & `text` will be methods instead of properties. In the From 20569b5f0e0ea0ff032270758983fcf44737187f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 8 Jan 2025 15:22:41 +0000 Subject: [PATCH 142/192] docs(readme): fix misplaced period (#1999) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ea8a23e74c..b4924be8e6 100644 --- a/README.md +++ b/README.md @@ -510,7 +510,7 @@ with OpenAI() as client: This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions: 1. Changes that only affect static types, without breaking runtime behavior. -2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals)_. +2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals.)_ 3. Changes that we do not expect to impact the vast majority of users in practice. We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. 
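The README passages revised in the docs patches above describe two escape hatches: the `.with_raw_response` prefix for inspecting headers alongside the parsed object, and `client.post` for undocumented endpoints. A minimal sketch of both, assuming `OPENAI_API_KEY` is set in the environment and using a hypothetical `/foo` path and the `gpt-4o` model purely for illustration:

```py
import httpx
from openai import OpenAI

client = OpenAI()  # API key is read from the OPENAI_API_KEY environment variable

# Raw-response access: the prefix wraps the parsed object in a LegacyAPIResponse,
# so HTTP headers stay available next to the usual ChatCompletion.
response = client.chat.completions.with_raw_response.create(
    model="gpt-4o",  # assumed model; any chat-capable model works
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.headers.get("x-request-id"))
completion = response.parse()  # the object chat.completions.create() would have returned
print(completion.choices[0].message.content)

# Undocumented endpoints: client options such as retries still apply.
raw = client.post(
    "/foo",  # hypothetical path, not a real API route
    cast_to=httpx.Response,
    body={"my_param": True},
)
print(raw.headers.get("x-foo"))
```

Both calls go through the same client configuration, so retries and timeouts set on `OpenAI()` apply to each.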
From 2684cd150ad8545d2013f414ab1693dc3b347ef8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 8 Jan 2025 16:41:34 +0000 Subject: [PATCH 143/192] chore(internal): spec update (#2000) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 1ac7a94471..9600edae3b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-02200a58ed631064b6419711da99fefd6e97bdbbeb577a80a1a6e0c8dbcb18f5.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b5b0e2c794b012919701c3fd43286af10fa25d33ceb8a881bec2636028f446e0.yml From 2a9d5fb4c617bc86d250df7596cf21e174a35b6f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 9 Jan 2025 10:56:47 +0000 Subject: [PATCH 144/192] fix: correctly handle deserialising `cls` fields (#2002) --- src/openai/_models.py | 8 ++++---- tests/test_models.py | 10 ++++++++++ 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/src/openai/_models.py b/src/openai/_models.py index d56ea1d9e5..9a918aabf3 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -179,14 +179,14 @@ def __str__(self) -> str: @classmethod @override def construct( # pyright: ignore[reportIncompatibleMethodOverride] - cls: Type[ModelT], + __cls: Type[ModelT], _fields_set: set[str] | None = None, **values: object, ) -> ModelT: - m = cls.__new__(cls) + m = __cls.__new__(__cls) fields_values: dict[str, object] = {} - config = get_model_config(cls) + config = get_model_config(__cls) populate_by_name = ( config.allow_population_by_field_name if isinstance(config, _ConfigProtocol) @@ -196,7 +196,7 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride] if _fields_set is None: _fields_set = set() - model_fields = get_model_fields(cls) + model_fields = get_model_fields(__cls) for name, field in model_fields.items(): key = field.alias if key is None or (key not in values and populate_by_name): diff --git a/tests/test_models.py b/tests/test_models.py index 19a71f13ba..30b17e3ac0 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -844,3 +844,13 @@ class Model(BaseModel): assert m.alias == "foo" assert isinstance(m.union, str) assert m.union == "bar" + + +@pytest.mark.skipif(not PYDANTIC_V2, reason="TypeAliasType is not supported in Pydantic v1") +def test_field_named_cls() -> None: + class Model(BaseModel): + cls: str + + m = construct_type(value={"cls": "foo"}, type_=Model) + assert isinstance(m, Model) + assert isinstance(m.cls, str) From e233bd4edd4dd868e4a8eafd0ab64c56a536cca0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 16:20:02 +0000 Subject: [PATCH 145/192] chore(internal): streaming refactors (#2012) --- src/openai/_streaming.py | 66 +++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 34 deletions(-) diff --git a/src/openai/_streaming.py b/src/openai/_streaming.py index 3a5c9571a1..7aa7b62f6b 100644 --- a/src/openai/_streaming.py +++ b/src/openai/_streaming.py @@ -59,23 +59,22 @@ def __stream__(self) -> Iterator[_T]: if sse.data.startswith("[DONE]"): break - if sse.event is None: - data = sse.json() - if is_mapping(data) and data.get("error"): - message = None - error = data.get("error") - if is_mapping(error): - message = 
error.get("message") - if not message or not isinstance(message, str): - message = "An error occurred during streaming" - - raise APIError( - message=message, - request=self.response.request, - body=data["error"], - ) - - yield process_data(data=data, cast_to=cast_to, response=response) + data = sse.json() + if is_mapping(data) and data.get("error"): + message = None + error = data.get("error") + if is_mapping(error): + message = error.get("message") + if not message or not isinstance(message, str): + message = "An error occurred during streaming" + + raise APIError( + message=message, + request=self.response.request, + body=data["error"], + ) + + yield process_data(data=data, cast_to=cast_to, response=response) # Ensure the entire stream is consumed for _sse in iterator: @@ -142,23 +141,22 @@ async def __stream__(self) -> AsyncIterator[_T]: if sse.data.startswith("[DONE]"): break - if sse.event is None: - data = sse.json() - if is_mapping(data) and data.get("error"): - message = None - error = data.get("error") - if is_mapping(error): - message = error.get("message") - if not message or not isinstance(message, str): - message = "An error occurred during streaming" - - raise APIError( - message=message, - request=self.response.request, - body=data["error"], - ) - - yield process_data(data=data, cast_to=cast_to, response=response) + data = sse.json() + if is_mapping(data) and data.get("error"): + message = None + error = data.get("error") + if is_mapping(error): + message = error.get("message") + if not message or not isinstance(message, str): + message = "An error occurred during streaming" + + raise APIError( + message=message, + request=self.response.request, + body=data["error"], + ) + + yield process_data(data=data, cast_to=cast_to, response=response) # Ensure the entire stream is consumed async for _sse in iterator: From b3e167a1fd9173fbaa497f0f6bc4a8500586fc59 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 14 Jan 2025 11:51:43 +0000 Subject: [PATCH 146/192] chore(internal): update deps (#2015) --- mypy.ini | 2 +- requirements-dev.lock | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mypy.ini b/mypy.ini index 215128e026..91d06cb38f 100644 --- a/mypy.ini +++ b/mypy.ini @@ -41,7 +41,7 @@ cache_fine_grained = True # ``` # Changing this codegen to make mypy happy would increase complexity # and would not be worth it. 
-disable_error_code = func-returns-value +disable_error_code = func-returns-value,overload-cannot-match # https://github.com/python/mypy/issues/12162 [mypy.overrides] diff --git a/requirements-dev.lock b/requirements-dev.lock index 280c93d374..7cbffdca00 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -48,7 +48,7 @@ markdown-it-py==3.0.0 # via rich mdurl==0.1.2 # via markdown-it-py -mypy==1.13.0 +mypy==1.14.1 mypy-extensions==1.0.0 # via mypy nest-asyncio==1.6.0 @@ -68,7 +68,7 @@ pydantic-core==2.27.1 # via pydantic pygments==2.18.0 # via rich -pyright==1.1.390 +pyright==1.1.391 pytest==8.3.3 # via pytest-asyncio pytest-asyncio==0.24.0 From 21ff2e6f1d635e19ae5a64c1f0e9ffb0b733e647 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 14 Jan 2025 11:59:51 +0000 Subject: [PATCH 147/192] fix(types): correct type for vector store chunking strategy (#2017) --- api.md | 2 +- src/openai/types/beta/__init__.py | 3 +++ .../types/beta/file_chunking_strategy_param.py | 4 ++-- ...static_file_chunking_strategy_object_param.py | 16 ++++++++++++++++ 4 files changed, 22 insertions(+), 3 deletions(-) create mode 100644 src/openai/types/beta/static_file_chunking_strategy_object_param.py diff --git a/api.md b/api.md index cf3b01cf90..d290906766 100644 --- a/api.md +++ b/api.md @@ -313,7 +313,7 @@ from openai.types.beta import ( OtherFileChunkingStrategyObject, StaticFileChunkingStrategy, StaticFileChunkingStrategyObject, - StaticFileChunkingStrategyParam, + StaticFileChunkingStrategyObjectParam, VectorStore, VectorStoreDeleted, ) diff --git a/src/openai/types/beta/__init__.py b/src/openai/types/beta/__init__.py index 7f76fed0cd..b9ea792bfa 100644 --- a/src/openai/types/beta/__init__.py +++ b/src/openai/types/beta/__init__.py @@ -43,3 +43,6 @@ from .assistant_response_format_option_param import ( AssistantResponseFormatOptionParam as AssistantResponseFormatOptionParam, ) +from .static_file_chunking_strategy_object_param import ( + StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, +) diff --git a/src/openai/types/beta/file_chunking_strategy_param.py b/src/openai/types/beta/file_chunking_strategy_param.py index 46383358e5..25d94286d8 100644 --- a/src/openai/types/beta/file_chunking_strategy_param.py +++ b/src/openai/types/beta/file_chunking_strategy_param.py @@ -6,8 +6,8 @@ from typing_extensions import TypeAlias from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam -from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam +from .static_file_chunking_strategy_object_param import StaticFileChunkingStrategyObjectParam __all__ = ["FileChunkingStrategyParam"] -FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyParam] +FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyObjectParam] diff --git a/src/openai/types/beta/static_file_chunking_strategy_object_param.py b/src/openai/types/beta/static_file_chunking_strategy_object_param.py new file mode 100644 index 0000000000..0cdf35c0df --- /dev/null +++ b/src/openai/types/beta/static_file_chunking_strategy_object_param.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam + +__all__ = ["StaticFileChunkingStrategyObjectParam"] + + +class StaticFileChunkingStrategyObjectParam(TypedDict, total=False): + static: Required[StaticFileChunkingStrategyParam] + + type: Required[Literal["static"]] + """Always `static`.""" From 2c73a8b1d43fc23921cf239d234a3b1a0d74a38d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 15:47:35 +0000 Subject: [PATCH 148/192] chore(internal): bump pyright dependency (#2021) --- requirements-dev.lock | 2 +- src/openai/_legacy_response.py | 12 ++++++++++-- src/openai/_response.py | 8 +++++++- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index 7cbffdca00..c011de1f4a 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -68,7 +68,7 @@ pydantic-core==2.27.1 # via pydantic pygments==2.18.0 # via rich -pyright==1.1.391 +pyright==1.1.392.post0 pytest==8.3.3 # via pytest-asyncio pytest-asyncio==0.24.0 diff --git a/src/openai/_legacy_response.py b/src/openai/_legacy_response.py index b4c8891cfc..5b34227783 100644 --- a/src/openai/_legacy_response.py +++ b/src/openai/_legacy_response.py @@ -266,7 +266,9 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: if origin == LegacyAPIResponse: raise RuntimeError("Unexpected state - cast_to is `APIResponse`") - if inspect.isclass(origin) and issubclass(origin, httpx.Response): + if inspect.isclass( + origin # pyright: ignore[reportUnknownArgumentType] + ) and issubclass(origin, httpx.Response): # Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response # and pass that class to our request functions. We cannot change the variance to be either # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct @@ -276,7 +278,13 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`") return cast(R, response) - if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel): + if ( + inspect.isclass( + origin # pyright: ignore[reportUnknownArgumentType] + ) + and not issubclass(origin, BaseModel) + and issubclass(origin, pydantic.BaseModel) + ): raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`") if ( diff --git a/src/openai/_response.py b/src/openai/_response.py index 37ec61cc3f..fe61e4e0ca 100644 --- a/src/openai/_response.py +++ b/src/openai/_response.py @@ -214,7 +214,13 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`") return cast(R, response) - if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel): + if ( + inspect.isclass( + origin # pyright: ignore[reportUnknownArgumentType] + ) + and not issubclass(origin, BaseModel) + and issubclass(origin, pydantic.BaseModel) + ): raise TypeError("Pydantic models must subclass our base model type, e.g. 
`from openai import BaseModel`") if ( From 1fdaea45a4c63b28a3cca1ddee0c0bffe17a8e07 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 04:09:27 +0000 Subject: [PATCH 149/192] fix: flush stream response when done event is sent (#2036) --- requirements-dev.lock | 2 +- requirements.lock | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index c011de1f4a..593091cb04 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -99,7 +99,7 @@ typing-extensions==4.12.2 # via pyright virtualenv==20.24.5 # via nox -websockets==14.1 +websockets==14.2 # via openai zipp==3.17.0 # via importlib-metadata diff --git a/requirements.lock b/requirements.lock index 5dcc368039..749d24f2cd 100644 --- a/requirements.lock +++ b/requirements.lock @@ -42,5 +42,5 @@ typing-extensions==4.12.2 # via openai # via pydantic # via pydantic-core -websockets==14.1 +websockets==14.2 # via openai From 818de7b9114d0b8a90c36026ee6a14fafada1499 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 10:13:38 +0000 Subject: [PATCH 150/192] docs(raw responses): fix duplicate `the` (#2039) --- src/openai/resources/audio/audio.py | 4 ++-- src/openai/resources/audio/speech.py | 4 ++-- src/openai/resources/audio/transcriptions.py | 4 ++-- src/openai/resources/audio/translations.py | 4 ++-- src/openai/resources/batches.py | 4 ++-- src/openai/resources/beta/assistants.py | 4 ++-- src/openai/resources/beta/beta.py | 4 ++-- src/openai/resources/beta/realtime/realtime.py | 4 ++-- src/openai/resources/beta/realtime/sessions.py | 4 ++-- src/openai/resources/beta/threads/messages.py | 4 ++-- src/openai/resources/beta/threads/runs/runs.py | 4 ++-- src/openai/resources/beta/threads/runs/steps.py | 4 ++-- src/openai/resources/beta/threads/threads.py | 4 ++-- src/openai/resources/beta/vector_stores/file_batches.py | 4 ++-- src/openai/resources/beta/vector_stores/files.py | 4 ++-- src/openai/resources/beta/vector_stores/vector_stores.py | 4 ++-- src/openai/resources/chat/chat.py | 4 ++-- src/openai/resources/chat/completions.py | 4 ++-- src/openai/resources/completions.py | 4 ++-- src/openai/resources/embeddings.py | 4 ++-- src/openai/resources/files.py | 4 ++-- src/openai/resources/fine_tuning/fine_tuning.py | 4 ++-- src/openai/resources/fine_tuning/jobs/checkpoints.py | 4 ++-- src/openai/resources/fine_tuning/jobs/jobs.py | 4 ++-- src/openai/resources/images.py | 4 ++-- src/openai/resources/models.py | 4 ++-- src/openai/resources/moderations.py | 4 ++-- src/openai/resources/uploads/parts.py | 4 ++-- src/openai/resources/uploads/uploads.py | 4 ++-- 29 files changed, 58 insertions(+), 58 deletions(-) diff --git a/src/openai/resources/audio/audio.py b/src/openai/resources/audio/audio.py index 18bd7b812c..383b7073bf 100644 --- a/src/openai/resources/audio/audio.py +++ b/src/openai/resources/audio/audio.py @@ -48,7 +48,7 @@ def speech(self) -> Speech: @cached_property def with_raw_response(self) -> AudioWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -81,7 +81,7 @@ def speech(self) -> AsyncSpeech: @cached_property def with_raw_response(self) -> AsyncAudioWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py index 09faaddda6..805a8c19c9 100644 --- a/src/openai/resources/audio/speech.py +++ b/src/openai/resources/audio/speech.py @@ -32,7 +32,7 @@ class Speech(SyncAPIResource): @cached_property def with_raw_response(self) -> SpeechWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -116,7 +116,7 @@ class AsyncSpeech(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncSpeechWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py index 9fad66ed35..6a09825e59 100644 --- a/src/openai/resources/audio/transcriptions.py +++ b/src/openai/resources/audio/transcriptions.py @@ -32,7 +32,7 @@ class Transcriptions(SyncAPIResource): @cached_property def with_raw_response(self) -> TranscriptionsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -144,7 +144,7 @@ class AsyncTranscriptions(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncTranscriptionsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/audio/translations.py b/src/openai/resources/audio/translations.py index feaeea6e09..77e5c2a543 100644 --- a/src/openai/resources/audio/translations.py +++ b/src/openai/resources/audio/translations.py @@ -31,7 +31,7 @@ class Translations(SyncAPIResource): @cached_property def with_raw_response(self) -> TranslationsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -128,7 +128,7 @@ class AsyncTranslations(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncTranslationsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py index d359c84360..a496645a42 100644 --- a/src/openai/resources/batches.py +++ b/src/openai/resources/batches.py @@ -28,7 +28,7 @@ class Batches(SyncAPIResource): @cached_property def with_raw_response(self) -> BatchesWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -233,7 +233,7 @@ class AsyncBatches(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncBatchesWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index 7df212f155..2f2482b648 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -36,7 +36,7 @@ class Assistants(SyncAPIResource): @cached_property def with_raw_response(self) -> AssistantsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -422,7 +422,7 @@ class AsyncAssistants(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncAssistantsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/beta/beta.py b/src/openai/resources/beta/beta.py index 42ea9b88e5..5946985519 100644 --- a/src/openai/resources/beta/beta.py +++ b/src/openai/resources/beta/beta.py @@ -60,7 +60,7 @@ def threads(self) -> Threads: @cached_property def with_raw_response(self) -> BetaWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -97,7 +97,7 @@ def threads(self) -> AsyncThreads: @cached_property def with_raw_response(self) -> AsyncBetaWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/beta/realtime/realtime.py b/src/openai/resources/beta/realtime/realtime.py index c79fd46217..abdb33d4e0 100644 --- a/src/openai/resources/beta/realtime/realtime.py +++ b/src/openai/resources/beta/realtime/realtime.py @@ -56,7 +56,7 @@ def sessions(self) -> Sessions: @cached_property def with_raw_response(self) -> RealtimeWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -108,7 +108,7 @@ def sessions(self) -> AsyncSessions: @cached_property def with_raw_response(self) -> AsyncRealtimeWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py index 1d1ee701e5..8d2df30753 100644 --- a/src/openai/resources/beta/realtime/sessions.py +++ b/src/openai/resources/beta/realtime/sessions.py @@ -27,7 +27,7 @@ class Sessions(SyncAPIResource): @cached_property def with_raw_response(self) -> SessionsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -166,7 +166,7 @@ class AsyncSessions(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncSessionsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/beta/threads/messages.py b/src/openai/resources/beta/threads/messages.py index 3c25449664..8be4883189 100644 --- a/src/openai/resources/beta/threads/messages.py +++ b/src/openai/resources/beta/threads/messages.py @@ -30,7 +30,7 @@ class Messages(SyncAPIResource): @cached_property def with_raw_response(self) -> MessagesWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -304,7 +304,7 @@ class AsyncMessages(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncMessagesWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index 5c97af0e2e..ca354297c6 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -53,7 +53,7 @@ def steps(self) -> Steps: @cached_property def with_raw_response(self) -> RunsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -901,7 +901,7 @@ def steps(self) -> AsyncSteps: @cached_property def with_raw_response(self) -> AsyncRunsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/beta/threads/runs/steps.py b/src/openai/resources/beta/threads/runs/steps.py index 9bd91e39e0..709c729d45 100644 --- a/src/openai/resources/beta/threads/runs/steps.py +++ b/src/openai/resources/beta/threads/runs/steps.py @@ -29,7 +29,7 @@ class Steps(SyncAPIResource): @cached_property def with_raw_response(self) -> StepsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -183,7 +183,7 @@ class AsyncSteps(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncStepsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index 6d76a70232..bd8205d933 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -63,7 +63,7 @@ def messages(self) -> Messages: @cached_property def with_raw_response(self) -> ThreadsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -717,7 +717,7 @@ def messages(self) -> AsyncMessages: @cached_property def with_raw_response(self) -> AsyncThreadsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/beta/vector_stores/file_batches.py b/src/openai/resources/beta/vector_stores/file_batches.py index 2d4cec3ce8..279e59c135 100644 --- a/src/openai/resources/beta/vector_stores/file_batches.py +++ b/src/openai/resources/beta/vector_stores/file_batches.py @@ -31,7 +31,7 @@ class FileBatches(SyncAPIResource): @cached_property def with_raw_response(self) -> FileBatchesWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -252,7 +252,7 @@ class AsyncFileBatches(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncFileBatchesWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/beta/vector_stores/files.py b/src/openai/resources/beta/vector_stores/files.py index d633985e0d..51545229c4 100644 --- a/src/openai/resources/beta/vector_stores/files.py +++ b/src/openai/resources/beta/vector_stores/files.py @@ -30,7 +30,7 @@ class Files(SyncAPIResource): @cached_property def with_raw_response(self) -> FilesWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -252,7 +252,7 @@ class AsyncFiles(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncFilesWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/beta/vector_stores/vector_stores.py b/src/openai/resources/beta/vector_stores/vector_stores.py index 61a2eadc7b..6b44c602f1 100644 --- a/src/openai/resources/beta/vector_stores/vector_stores.py +++ b/src/openai/resources/beta/vector_stores/vector_stores.py @@ -59,7 +59,7 @@ def file_batches(self) -> FileBatches: @cached_property def with_raw_response(self) -> VectorStoresWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -337,7 +337,7 @@ def file_batches(self) -> AsyncFileBatches: @cached_property def with_raw_response(self) -> AsyncVectorStoresWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/chat/chat.py b/src/openai/resources/chat/chat.py index dc23a15a8e..9c4aacc953 100644 --- a/src/openai/resources/chat/chat.py +++ b/src/openai/resources/chat/chat.py @@ -24,7 +24,7 @@ def completions(self) -> Completions: @cached_property def with_raw_response(self) -> ChatWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -49,7 +49,7 @@ def completions(self) -> AsyncCompletions: @cached_property def with_raw_response(self) -> AsyncChatWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index f13b8c0b45..af76caf401 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -43,7 +43,7 @@ class Completions(SyncAPIResource): @cached_property def with_raw_response(self) -> CompletionsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -903,7 +903,7 @@ class AsyncCompletions(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncCompletionsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/completions.py b/src/openai/resources/completions.py index 7e95f79607..46ed113ec9 100644 --- a/src/openai/resources/completions.py +++ b/src/openai/resources/completions.py @@ -30,7 +30,7 @@ class Completions(SyncAPIResource): @cached_property def with_raw_response(self) -> CompletionsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -572,7 +572,7 @@ class AsyncCompletions(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncCompletionsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/embeddings.py b/src/openai/resources/embeddings.py index 2197c4d280..58efdcefa8 100644 --- a/src/openai/resources/embeddings.py +++ b/src/openai/resources/embeddings.py @@ -28,7 +28,7 @@ class Embeddings(SyncAPIResource): @cached_property def with_raw_response(self) -> EmbeddingsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -118,7 +118,7 @@ class AsyncEmbeddings(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncEmbeddingsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py index f86917c61d..f76f70e0bc 100644 --- a/src/openai/resources/files.py +++ b/src/openai/resources/files.py @@ -40,7 +40,7 @@ class Files(SyncAPIResource): @cached_property def with_raw_response(self) -> FilesWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -333,7 +333,7 @@ class AsyncFiles(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncFilesWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/fine_tuning/fine_tuning.py b/src/openai/resources/fine_tuning/fine_tuning.py index d2bce87c48..eebde07d81 100644 --- a/src/openai/resources/fine_tuning/fine_tuning.py +++ b/src/openai/resources/fine_tuning/fine_tuning.py @@ -24,7 +24,7 @@ def jobs(self) -> Jobs: @cached_property def with_raw_response(self) -> FineTuningWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -49,7 +49,7 @@ def jobs(self) -> AsyncJobs: @cached_property def with_raw_response(self) -> AsyncFineTuningWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/fine_tuning/jobs/checkpoints.py b/src/openai/resources/fine_tuning/jobs/checkpoints.py index be08b6ea9e..799efe88fd 100644 --- a/src/openai/resources/fine_tuning/jobs/checkpoints.py +++ b/src/openai/resources/fine_tuning/jobs/checkpoints.py @@ -22,7 +22,7 @@ class Checkpoints(SyncAPIResource): @cached_property def with_raw_response(self) -> CheckpointsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -93,7 +93,7 @@ class AsyncCheckpoints(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncCheckpointsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py index 21773fbf96..bd08552835 100644 --- a/src/openai/resources/fine_tuning/jobs/jobs.py +++ b/src/openai/resources/fine_tuning/jobs/jobs.py @@ -41,7 +41,7 @@ def checkpoints(self) -> Checkpoints: @cached_property def with_raw_response(self) -> JobsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -339,7 +339,7 @@ def checkpoints(self) -> AsyncCheckpoints: @cached_property def with_raw_response(self) -> AsyncJobsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py index 2fbc077dd9..30473c14f7 100644 --- a/src/openai/resources/images.py +++ b/src/openai/resources/images.py @@ -30,7 +30,7 @@ class Images(SyncAPIResource): @cached_property def with_raw_response(self) -> ImagesWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -287,7 +287,7 @@ class AsyncImages(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncImagesWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/models.py b/src/openai/resources/models.py index b3d185b553..945f0acc1a 100644 --- a/src/openai/resources/models.py +++ b/src/openai/resources/models.py @@ -21,7 +21,7 @@ class Models(SyncAPIResource): @cached_property def with_raw_response(self) -> ModelsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -134,7 +134,7 @@ class AsyncModels(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncModelsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/moderations.py b/src/openai/resources/moderations.py index ce80bb7d55..a8f03142bc 100644 --- a/src/openai/resources/moderations.py +++ b/src/openai/resources/moderations.py @@ -28,7 +28,7 @@ class Moderations(SyncAPIResource): @cached_property def with_raw_response(self) -> ModerationsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -98,7 +98,7 @@ class AsyncModerations(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncModerationsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/uploads/parts.py b/src/openai/resources/uploads/parts.py index d46e5ea1bb..777469ac8e 100644 --- a/src/openai/resources/uploads/parts.py +++ b/src/openai/resources/uploads/parts.py @@ -28,7 +28,7 @@ class Parts(SyncAPIResource): @cached_property def with_raw_response(self) -> PartsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -103,7 +103,7 @@ class AsyncParts(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncPartsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/uploads/uploads.py b/src/openai/resources/uploads/uploads.py index 2384716bdd..297ea98c45 100644 --- a/src/openai/resources/uploads/uploads.py +++ b/src/openai/resources/uploads/uploads.py @@ -39,7 +39,7 @@ def parts(self) -> Parts: @cached_property def with_raw_response(self) -> UploadsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -233,7 +233,7 @@ def parts(self) -> AsyncParts: @cached_property def with_raw_response(self) -> AsyncUploadsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers From f7c814b70f63a4ad36c2a380e1df29ba2f0948bc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 10:22:16 +0000 Subject: [PATCH 151/192] fix(tests): make test_get_platform less flaky (#2040) --- tests/test_client.py | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/tests/test_client.py b/tests/test_client.py index e0d23403b1..41da2d5d04 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -6,6 +6,7 @@ import os import sys import json +import time import asyncio import inspect import subprocess @@ -1797,10 +1798,20 @@ async def test_main() -> None: [sys.executable, "-c", test_code], text=True, ) as process: - try: - process.wait(2) - if process.returncode: - raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code") - except subprocess.TimeoutExpired as e: - process.kill() - raise AssertionError("calling get_platform using asyncify resulted in a hung process") from e + timeout = 10 # seconds + + start_time = time.monotonic() + while True: + return_code = process.poll() + if return_code is not None: + if return_code != 0: + raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code") + + # success + break + + if time.monotonic() - start_time > timeout: + process.kill() + raise AssertionError("calling get_platform using asyncify resulted in a hung process") + + time.sleep(0.1) From 9b5ea1b8ecceb1fe4b27f83c98bfc0c12f42d243 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 12:11:31 +0000 Subject: [PATCH 152/192] chore(internal): avoid pytest-asyncio deprecation warning (#2041) --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index ad30166f35..a745ecec37 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -130,6 +130,7 @@ testpaths = ["tests"] addopts = "--tb=short" xfail_strict = true asyncio_mode = "auto" +asyncio_default_fixture_loop_scope = "session" filterwarnings = [ "error" ] From 7f81d21bec369c7bb19a95d5b370f607461f42d8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 16:08:31 +0000 Subject: [PATCH 153/192] chore(internal): minor style changes (#2043) --- src/openai/_legacy_response.py | 4 ++-- src/openai/_response.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/openai/_legacy_response.py b/src/openai/_legacy_response.py index 5b34227783..37151fc9a9 100644 --- a/src/openai/_legacy_response.py +++ b/src/openai/_legacy_response.py @@ -202,6 +202,8 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: if cast_to and is_annotated_type(cast_to): cast_to = extract_type_arg(cast_to, 0) + origin = get_origin(cast_to) or cast_to + if self._stream: if to: if not is_stream_class_type(to): @@ -258,8 +260,6 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: if cast_to == bool: return cast(R, response.text.lower() == "true") - origin = get_origin(cast_to) or cast_to - if inspect.isclass(origin) and issubclass(origin, HttpxBinaryResponseContent): return cast(R, cast_to(response)) # type: ignore diff --git a/src/openai/_response.py b/src/openai/_response.py index fe61e4e0ca..c43fe39e56 100644 --- a/src/openai/_response.py +++ 
b/src/openai/_response.py @@ -136,6 +136,8 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: if cast_to and is_annotated_type(cast_to): cast_to = extract_type_arg(cast_to, 0) + origin = get_origin(cast_to) or cast_to + if self._is_sse_stream: if to: if not is_stream_class_type(to): @@ -195,8 +197,6 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: if cast_to == bool: return cast(R, response.text.lower() == "true") - origin = get_origin(cast_to) or cast_to - # handle the legacy binary response case if inspect.isclass(cast_to) and cast_to.__name__ == "HttpxBinaryResponseContent": return cast(R, cast_to(response)) # type: ignore From b9824d29310957b2a56ecf7a4f68107b59b8263c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 22 Jan 2025 13:20:56 +0000 Subject: [PATCH 154/192] feat(api): update enum values, comments, and examples (#2045) --- .stats.yml | 2 +- src/openai/resources/audio/speech.py | 16 +++---- .../resources/beta/realtime/sessions.py | 48 +++++++++++-------- src/openai/resources/chat/completions.py | 18 ------- src/openai/resources/embeddings.py | 6 ++- .../types/audio/speech_create_params.py | 6 +-- .../conversation_item_create_event.py | 11 +++-- .../conversation_item_create_event_param.py | 11 +++-- src/openai/types/beta/realtime/session.py | 13 ++++- .../beta/realtime/session_create_params.py | 35 ++++++++------ .../beta/realtime/session_update_event.py | 33 ++++++++----- .../realtime/session_update_event_param.py | 33 ++++++++----- src/openai/types/chat/chat_completion.py | 6 +-- ...chat_completion_assistant_message_param.py | 4 +- .../types/chat/chat_completion_chunk.py | 6 +-- .../types/chat/completion_create_params.py | 3 -- src/openai/types/embedding_create_params.py | 3 +- .../beta/realtime/test_sessions.py | 28 ++++------- tests/api_resources/chat/test_completions.py | 8 ++-- tests/api_resources/test_completions.py | 8 ++-- 20 files changed, 152 insertions(+), 146 deletions(-) diff --git a/.stats.yml b/.stats.yml index 9600edae3b..d518bac586 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b5b0e2c794b012919701c3fd43286af10fa25d33ceb8a881bec2636028f446e0.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-3904ef6b29a89c98f93a9b7da19879695f3c440564be6384db7af1b734611ede.yml diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py index 805a8c19c9..ad01118161 100644 --- a/src/openai/resources/audio/speech.py +++ b/src/openai/resources/audio/speech.py @@ -53,7 +53,7 @@ def create( *, input: str, model: Union[str, SpeechModel], - voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], + voice: Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"], response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -73,9 +73,9 @@ def create( One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1` or `tts-1-hd` - voice: The voice to use when generating the audio. Supported voices are `alloy`, - `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are - available in the + voice: The voice to use when generating the audio. 
Supported voices are `alloy`, `ash`, + `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the + voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, @@ -137,7 +137,7 @@ async def create( *, input: str, model: Union[str, SpeechModel], - voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], + voice: Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"], response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -157,9 +157,9 @@ async def create( One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1` or `tts-1-hd` - voice: The voice to use when generating the audio. Supported voices are `alloy`, - `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are - available in the + voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`, + `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the + voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py index 8d2df30753..b920c89207 100644 --- a/src/openai/resources/beta/realtime/sessions.py +++ b/src/openai/resources/beta/realtime/sessions.py @@ -46,18 +46,19 @@ def with_streaming_response(self) -> SessionsWithStreamingResponse: def create( self, *, + input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, + input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN, + instructions: str | NotGiven = NOT_GIVEN, + max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN, + modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, model: Literal[ "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01", "gpt-4o-realtime-preview-2024-12-17", "gpt-4o-mini-realtime-preview", "gpt-4o-mini-realtime-preview-2024-12-17", - ], - input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, - input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN, - instructions: str | NotGiven = NOT_GIVEN, - max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN, - modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, + ] + | NotGiven = NOT_GIVEN, output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, tool_choice: str | NotGiven = NOT_GIVEN, @@ -81,9 +82,9 @@ def create( the Realtime API. Args: - model: The Realtime model used for this session. - - input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + (mono), and little-endian byte order. 
input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the @@ -110,7 +111,10 @@ def create( modalities: The set of modalities the model can respond with. To disable audio, set this to ["text"]. + model: The Realtime model used for this session. + output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + For `pcm16`, output audio is sampled at a rate of 24kHz. temperature: Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. @@ -140,12 +144,12 @@ def create( "/realtime/sessions", body=maybe_transform( { - "model": model, "input_audio_format": input_audio_format, "input_audio_transcription": input_audio_transcription, "instructions": instructions, "max_response_output_tokens": max_response_output_tokens, "modalities": modalities, + "model": model, "output_audio_format": output_audio_format, "temperature": temperature, "tool_choice": tool_choice, @@ -185,18 +189,19 @@ def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse: async def create( self, *, + input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, + input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN, + instructions: str | NotGiven = NOT_GIVEN, + max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN, + modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, model: Literal[ "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01", "gpt-4o-realtime-preview-2024-12-17", "gpt-4o-mini-realtime-preview", "gpt-4o-mini-realtime-preview-2024-12-17", - ], - input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, - input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN, - instructions: str | NotGiven = NOT_GIVEN, - max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN, - modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, + ] + | NotGiven = NOT_GIVEN, output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, tool_choice: str | NotGiven = NOT_GIVEN, @@ -220,9 +225,9 @@ async def create( the Realtime API. Args: - model: The Realtime model used for this session. - - input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + (mono), and little-endian byte order. input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the @@ -249,7 +254,10 @@ async def create( modalities: The set of modalities the model can respond with. To disable audio, set this to ["text"]. + model: The Realtime model used for this session. + output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + For `pcm16`, output audio is sampled at a rate of 24kHz. temperature: Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. 
@@ -279,12 +287,12 @@ async def create( "/realtime/sessions", body=await async_maybe_transform( { - "model": model, "input_audio_format": input_audio_format, "input_audio_transcription": input_audio_transcription, "instructions": instructions, "max_response_output_tokens": max_response_output_tokens, "modalities": modalities, + "model": model, "output_audio_format": output_audio_format, "temperature": temperature, "tool_choice": tool_choice, diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index af76caf401..c44b9d0c30 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -249,9 +249,6 @@ def create( tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. - stop: Up to 4 sequences where the API will stop generating further tokens. store: Whether or not to store the output of this chat completion request for use in @@ -507,9 +504,6 @@ def create( tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. - stop: Up to 4 sequences where the API will stop generating further tokens. store: Whether or not to store the output of this chat completion request for use in @@ -758,9 +752,6 @@ def create( tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. - stop: Up to 4 sequences where the API will stop generating further tokens. store: Whether or not to store the output of this chat completion request for use in @@ -1109,9 +1100,6 @@ async def create( tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. - stop: Up to 4 sequences where the API will stop generating further tokens. store: Whether or not to store the output of this chat completion request for use in @@ -1367,9 +1355,6 @@ async def create( tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. - stop: Up to 4 sequences where the API will stop generating further tokens. store: Whether or not to store the output of this chat completion request for use in @@ -1618,9 +1603,6 @@ async def create( tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. - stop: Up to 4 sequences where the API will stop generating further tokens. store: Whether or not to store the output of this chat completion request for use in diff --git a/src/openai/resources/embeddings.py b/src/openai/resources/embeddings.py index 58efdcefa8..e6c09f1374 100644 --- a/src/openai/resources/embeddings.py +++ b/src/openai/resources/embeddings.py @@ -69,7 +69,8 @@ def create( `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + for counting tokens. 
Some models may also impose a limit on total number of + tokens summed across inputs. model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to @@ -159,7 +160,8 @@ async def create( `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + for counting tokens. Some models may also impose a limit on total number of + tokens summed across inputs. model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to diff --git a/src/openai/types/audio/speech_create_params.py b/src/openai/types/audio/speech_create_params.py index a60d000708..ed1a1ce748 100644 --- a/src/openai/types/audio/speech_create_params.py +++ b/src/openai/types/audio/speech_create_params.py @@ -20,11 +20,11 @@ class SpeechCreateParams(TypedDict, total=False): `tts-1` or `tts-1-hd` """ - voice: Required[Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"]] + voice: Required[Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"]] """The voice to use when generating the audio. - Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. - Previews of the voices are available in the + Supported voices are `alloy`, `ash`, `coral`, `echo`, `fable`, `onyx`, `nova`, + `sage` and `shimmer`. Previews of the voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). """ diff --git a/src/openai/types/beta/realtime/conversation_item_create_event.py b/src/openai/types/beta/realtime/conversation_item_create_event.py index 50d309675b..c4f72b9aff 100644 --- a/src/openai/types/beta/realtime/conversation_item_create_event.py +++ b/src/openai/types/beta/realtime/conversation_item_create_event.py @@ -20,9 +20,10 @@ class ConversationItemCreateEvent(BaseModel): """Optional client-generated ID used to identify this event.""" previous_item_id: Optional[str] = None - """The ID of the preceding item after which the new item will be inserted. - - If not set, the new item will be appended to the end of the conversation. If - set, it allows an item to be inserted mid-conversation. If the ID cannot be - found, an error will be returned and the item will not be added. + """ + The ID of the preceding item after which the new item will be inserted. If not + set, the new item will be appended to the end of the conversation. If set to + `root`, the new item will be added to the beginning of the conversation. If set + to an existing ID, it allows an item to be inserted mid-conversation. If the ID + cannot be found, an error will be returned and the item will not be added. """ diff --git a/src/openai/types/beta/realtime/conversation_item_create_event_param.py b/src/openai/types/beta/realtime/conversation_item_create_event_param.py index b8c8bbc251..6da5a63a9d 100644 --- a/src/openai/types/beta/realtime/conversation_item_create_event_param.py +++ b/src/openai/types/beta/realtime/conversation_item_create_event_param.py @@ -20,9 +20,10 @@ class ConversationItemCreateEventParam(TypedDict, total=False): """Optional client-generated ID used to identify this event.""" previous_item_id: str - """The ID of the preceding item after which the new item will be inserted. - - If not set, the new item will be appended to the end of the conversation. 
If - set, it allows an item to be inserted mid-conversation. If the ID cannot be - found, an error will be returned and the item will not be added. + """ + The ID of the preceding item after which the new item will be inserted. If not + set, the new item will be appended to the end of the conversation. If set to + `root`, the new item will be added to the beginning of the conversation. If set + to an existing ID, it allows an item to be inserted mid-conversation. If the ID + cannot be found, an error will be returned and the item will not be added. """ diff --git a/src/openai/types/beta/realtime/session.py b/src/openai/types/beta/realtime/session.py index 09cdbb02bc..2d028f817c 100644 --- a/src/openai/types/beta/realtime/session.py +++ b/src/openai/types/beta/realtime/session.py @@ -63,7 +63,12 @@ class Session(BaseModel): """Unique identifier for the session object.""" input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None - """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of input audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must + be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian + byte order. + """ input_audio_transcription: Optional[InputAudioTranscription] = None """ @@ -117,7 +122,11 @@ class Session(BaseModel): """The Realtime model used for this session.""" output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None - """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of output audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is + sampled at a rate of 24kHz. + """ temperature: Optional[float] = None """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py index f56f2c5c22..3708efeecd 100644 --- a/src/openai/types/beta/realtime/session_create_params.py +++ b/src/openai/types/beta/realtime/session_create_params.py @@ -3,25 +3,19 @@ from __future__ import annotations from typing import List, Union, Iterable -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Literal, TypedDict __all__ = ["SessionCreateParams", "InputAudioTranscription", "Tool", "TurnDetection"] class SessionCreateParams(TypedDict, total=False): - model: Required[ - Literal[ - "gpt-4o-realtime-preview", - "gpt-4o-realtime-preview-2024-10-01", - "gpt-4o-realtime-preview-2024-12-17", - "gpt-4o-mini-realtime-preview", - "gpt-4o-mini-realtime-preview-2024-12-17", - ] - ] - """The Realtime model used for this session.""" - input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] - """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of input audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must + be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian + byte order. + """ input_audio_transcription: InputAudioTranscription """ @@ -61,8 +55,21 @@ class SessionCreateParams(TypedDict, total=False): To disable audio, set this to ["text"]. 
""" + model: Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + """The Realtime model used for this session.""" + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] - """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of output audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is + sampled at a rate of 24kHz. + """ temperature: float """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py index c04220aa25..322e588a4e 100644 --- a/src/openai/types/beta/realtime/session_update_event.py +++ b/src/openai/types/beta/realtime/session_update_event.py @@ -65,17 +65,13 @@ class SessionTurnDetection(BaseModel): class Session(BaseModel): - model: Literal[ - "gpt-4o-realtime-preview", - "gpt-4o-realtime-preview-2024-10-01", - "gpt-4o-realtime-preview-2024-12-17", - "gpt-4o-mini-realtime-preview", - "gpt-4o-mini-realtime-preview-2024-12-17", - ] - """The Realtime model used for this session.""" - input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None - """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of input audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must + be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian + byte order. + """ input_audio_transcription: Optional[SessionInputAudioTranscription] = None """ @@ -115,8 +111,23 @@ class Session(BaseModel): To disable audio, set this to ["text"]. """ + model: Optional[ + Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + ] = None + """The Realtime model used for this session.""" + output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None - """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of output audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is + sampled at a rate of 24kHz. + """ temperature: Optional[float] = None """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py index aa06069b04..c01d9b6887 100644 --- a/src/openai/types/beta/realtime/session_update_event_param.py +++ b/src/openai/types/beta/realtime/session_update_event_param.py @@ -71,19 +71,13 @@ class SessionTurnDetection(TypedDict, total=False): class Session(TypedDict, total=False): - model: Required[ - Literal[ - "gpt-4o-realtime-preview", - "gpt-4o-realtime-preview-2024-10-01", - "gpt-4o-realtime-preview-2024-12-17", - "gpt-4o-mini-realtime-preview", - "gpt-4o-mini-realtime-preview-2024-12-17", - ] - ] - """The Realtime model used for this session.""" - input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] - """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of input audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. 
For `pcm16`, input audio must + be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian + byte order. + """ input_audio_transcription: SessionInputAudioTranscription """ @@ -123,8 +117,21 @@ class Session(TypedDict, total=False): To disable audio, set this to ["text"]. """ + model: Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + """The Realtime model used for this session.""" + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] - """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of output audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is + sampled at a rate of 24kHz. + """ temperature: float """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" diff --git a/src/openai/types/chat/chat_completion.py b/src/openai/types/chat/chat_completion.py index 4b53e70890..cb812a2702 100644 --- a/src/openai/types/chat/chat_completion.py +++ b/src/openai/types/chat/chat_completion.py @@ -60,11 +60,7 @@ class ChatCompletion(BaseModel): """The object type, which is always `chat.completion`.""" service_tier: Optional[Literal["scale", "default"]] = None - """The service tier used for processing the request. - - This field is only included if the `service_tier` parameter is specified in the - request. - """ + """The service tier used for processing the request.""" system_fingerprint: Optional[str] = None """This fingerprint represents the backend configuration that the model runs with. diff --git a/src/openai/types/chat/chat_completion_assistant_message_param.py b/src/openai/types/chat/chat_completion_assistant_message_param.py index 35e3a3d784..229fb822f4 100644 --- a/src/openai/types/chat/chat_completion_assistant_message_param.py +++ b/src/openai/types/chat/chat_completion_assistant_message_param.py @@ -38,8 +38,8 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): """The role of the messages author, in this case `assistant`.""" audio: Optional[Audio] - """Data about a previous audio response from the model. - + """ + Data about a previous audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio). """ diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py index 9ec6dc4bdb..7b0ae2e121 100644 --- a/src/openai/types/chat/chat_completion_chunk.py +++ b/src/openai/types/chat/chat_completion_chunk.py @@ -129,11 +129,7 @@ class ChatCompletionChunk(BaseModel): """The object type, which is always `chat.completion.chunk`.""" service_tier: Optional[Literal["scale", "default"]] = None - """The service tier used for processing the request. - - This field is only included if the `service_tier` parameter is specified in the - request. - """ + """The service tier used for processing the request.""" system_fingerprint: Optional[str] = None """ diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index f168ddea6e..30d930b120 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -220,9 +220,6 @@ class CompletionCreateParamsBase(TypedDict, total=False): - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. 
- When not set, the default behavior is 'auto'. - - When this parameter is set, the response body will include the `service_tier` - utilized. """ stop: Union[Optional[str], List[str]] diff --git a/src/openai/types/embedding_create_params.py b/src/openai/types/embedding_create_params.py index 1385762885..a90566449b 100644 --- a/src/openai/types/embedding_create_params.py +++ b/src/openai/types/embedding_create_params.py @@ -19,7 +19,8 @@ class EmbeddingCreateParams(TypedDict, total=False): (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + for counting tokens. Some models may also impose a limit on total number of + tokens summed across inputs. """ model: Required[Union[str, EmbeddingModel]] diff --git a/tests/api_resources/beta/realtime/test_sessions.py b/tests/api_resources/beta/realtime/test_sessions.py index 65bfa27572..908aa983be 100644 --- a/tests/api_resources/beta/realtime/test_sessions.py +++ b/tests/api_resources/beta/realtime/test_sessions.py @@ -19,20 +19,18 @@ class TestSessions: @parametrize def test_method_create(self, client: OpenAI) -> None: - session = client.beta.realtime.sessions.create( - model="gpt-4o-realtime-preview", - ) + session = client.beta.realtime.sessions.create() assert_matches_type(SessionCreateResponse, session, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: session = client.beta.realtime.sessions.create( - model="gpt-4o-realtime-preview", input_audio_format="pcm16", input_audio_transcription={"model": "model"}, instructions="instructions", max_response_output_tokens=0, modalities=["text"], + model="gpt-4o-realtime-preview", output_audio_format="pcm16", temperature=0, tool_choice="tool_choice", @@ -57,9 +55,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_create(self, client: OpenAI) -> None: - response = client.beta.realtime.sessions.with_raw_response.create( - model="gpt-4o-realtime-preview", - ) + response = client.beta.realtime.sessions.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -68,9 +64,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: - with client.beta.realtime.sessions.with_streaming_response.create( - model="gpt-4o-realtime-preview", - ) as response: + with client.beta.realtime.sessions.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -85,20 +79,18 @@ class TestAsyncSessions: @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: - session = await async_client.beta.realtime.sessions.create( - model="gpt-4o-realtime-preview", - ) + session = await async_client.beta.realtime.sessions.create() assert_matches_type(SessionCreateResponse, session, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: session = await async_client.beta.realtime.sessions.create( - model="gpt-4o-realtime-preview", input_audio_format="pcm16", input_audio_transcription={"model": "model"}, instructions="instructions", max_response_output_tokens=0, modalities=["text"], + model="gpt-4o-realtime-preview", 
output_audio_format="pcm16", temperature=0, tool_choice="tool_choice", @@ -123,9 +115,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: - response = await async_client.beta.realtime.sessions.with_raw_response.create( - model="gpt-4o-realtime-preview", - ) + response = await async_client.beta.realtime.sessions.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -134,9 +124,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: - async with async_client.beta.realtime.sessions.with_streaming_response.create( - model="gpt-4o-realtime-preview", - ) as response: + async with async_client.beta.realtime.sessions.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 523fcc6ed9..cb899502b4 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -71,7 +71,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: presence_penalty=-2, reasoning_effort="low", response_format={"type": "text"}, - seed=-9007199254740991, + seed=0, service_tier="auto", stop="string", store=True, @@ -186,7 +186,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: presence_penalty=-2, reasoning_effort="low", response_format={"type": "text"}, - seed=-9007199254740991, + seed=0, service_tier="auto", stop="string", store=True, @@ -303,7 +303,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn presence_penalty=-2, reasoning_effort="low", response_format={"type": "text"}, - seed=-9007199254740991, + seed=0, service_tier="auto", stop="string", store=True, @@ -418,7 +418,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn presence_penalty=-2, reasoning_effort="low", response_format={"type": "text"}, - seed=-9007199254740991, + seed=0, service_tier="auto", stop="string", store=True, diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py index ad2679cabe..9ec503c1e3 100644 --- a/tests/api_resources/test_completions.py +++ b/tests/api_resources/test_completions.py @@ -38,7 +38,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: max_tokens=16, n=1, presence_penalty=-2, - seed=-9007199254740991, + seed=0, stop="\n", stream=False, stream_options={"include_usage": True}, @@ -98,7 +98,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: max_tokens=16, n=1, presence_penalty=-2, - seed=-9007199254740991, + seed=0, stop="\n", stream_options={"include_usage": True}, suffix="test.", @@ -160,7 +160,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn max_tokens=16, n=1, presence_penalty=-2, - seed=-9007199254740991, + seed=0, stop="\n", stream=False, stream_options={"include_usage": True}, @@ -220,7 +220,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn max_tokens=16, n=1, presence_penalty=-2, - seed=-9007199254740991, + seed=0, stop="\n", 
stream_options={"include_usage": True}, suffix="test.", From 46ad497f38e3082f97616cb769f71b054ae22dcb Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 23 Jan 2025 12:39:04 +0000 Subject: [PATCH 155/192] chore(internal): minor formatting changes (#2050) --- .github/workflows/ci.yml | 2 +- scripts/bootstrap | 2 +- scripts/lint | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c390431e79..2dd3585f74 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -30,6 +30,7 @@ jobs: - name: Run lints run: ./scripts/lint + test: name: test runs-on: ubuntu-latest @@ -51,4 +52,3 @@ jobs: - name: Run tests run: ./scripts/test - diff --git a/scripts/bootstrap b/scripts/bootstrap index 8c5c60eba3..e84fe62c38 100755 --- a/scripts/bootstrap +++ b/scripts/bootstrap @@ -4,7 +4,7 @@ set -e cd "$(dirname "$0")/.." -if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then +if ! command -v rye >/dev/null 2>&1 && [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then brew bundle check >/dev/null 2>&1 || { echo "==> Installing Homebrew dependencies…" brew bundle diff --git a/scripts/lint b/scripts/lint index 64495ee345..55bc1dd711 100755 --- a/scripts/lint +++ b/scripts/lint @@ -9,4 +9,3 @@ rye run lint echo "==> Making sure it imports" rye run python -c 'import openai' - From 01c5ec6d3d3ea578ff2178f796be3e50630e544e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 30 Jan 2025 15:28:16 +0000 Subject: [PATCH 156/192] chore: update api.md (#2063) --- api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api.md b/api.md index d290906766..e593464bf3 100644 --- a/api.md +++ b/api.md @@ -99,7 +99,7 @@ Methods: - client.files.list(\*\*params) -> SyncCursorPage[FileObject] - client.files.delete(file_id) -> FileDeleted - client.files.content(file_id) -> HttpxBinaryResponseContent -- client.files.retrieve_content(file_id) -> str +- client.files.retrieve_content(file_id) -> str # Images From f394685e62e857695cc45e6f4bc54549a70213b2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 19:02:47 +0000 Subject: [PATCH 157/192] feat(api): add o3-mini (#2067) fix(types): correct metadata type + other fixes --- .stats.yml | 2 +- api.md | 1 + src/openai/resources/audio/transcriptions.py | 8 +- src/openai/resources/batches.py | 21 ++- src/openai/resources/beta/assistants.py | 41 +++--- .../resources/beta/realtime/sessions.py | 14 +- src/openai/resources/beta/threads/messages.py | 41 +++--- .../resources/beta/threads/runs/runs.py | 85 +++++++----- src/openai/resources/beta/threads/threads.py | 123 +++++++++++------- .../beta/vector_stores/vector_stores.py | 41 +++--- src/openai/resources/chat/completions.py | 89 ++++++++----- src/openai/types/__init__.py | 1 + .../audio/transcription_create_params.py | 4 +- src/openai/types/batch.py | 10 +- src/openai/types/batch_create_params.py | 15 ++- src/openai/types/beta/assistant.py | 9 +- .../types/beta/assistant_create_params.py | 21 +-- .../types/beta/assistant_update_params.py | 9 +- .../conversation_item_create_event.py | 12 +- .../conversation_item_create_event_param.py | 12 +- .../types/beta/realtime/realtime_response.py | 51 +++++++- .../beta/realtime/response_create_event.py | 9 +- .../realtime/response_create_event_param.py | 9 +- 
.../beta/realtime/session_create_params.py | 23 +++- .../beta/realtime/session_create_response.py | 6 +- .../beta/realtime/session_update_event.py | 23 +++- .../realtime/session_update_event_param.py | 23 +++- src/openai/types/beta/thread.py | 9 +- .../beta/thread_create_and_run_params.py | 43 +++--- src/openai/types/beta/thread_create_params.py | 29 +++-- src/openai/types/beta/thread_update_params.py | 10 +- src/openai/types/beta/threads/message.py | 9 +- .../beta/threads/message_create_params.py | 9 +- .../beta/threads/message_update_params.py | 10 +- src/openai/types/beta/threads/run.py | 9 +- .../types/beta/threads/run_create_params.py | 17 ++- .../types/beta/threads/run_update_params.py | 10 +- .../types/beta/threads/runs/run_step.py | 9 +- src/openai/types/beta/vector_store.py | 9 +- .../types/beta/vector_store_create_params.py | 9 +- .../types/beta/vector_store_update_params.py | 10 +- ...chat_completion_assistant_message_param.py | 4 +- .../types/chat/completion_create_params.py | 17 ++- src/openai/types/chat_model.py | 2 + src/openai/types/shared/__init__.py | 1 + src/openai/types/shared/metadata.py | 8 ++ src/openai/types/shared_params/__init__.py | 1 + src/openai/types/shared_params/metadata.py | 10 ++ src/openai/types/upload.py | 2 +- .../beta/realtime/test_sessions.py | 12 +- tests/api_resources/beta/test_assistants.py | 12 +- tests/api_resources/beta/test_threads.py | 48 +++---- .../api_resources/beta/test_vector_stores.py | 8 +- .../beta/threads/test_messages.py | 8 +- tests/api_resources/beta/threads/test_runs.py | 20 +-- 55 files changed, 695 insertions(+), 353 deletions(-) create mode 100644 src/openai/types/shared/metadata.py create mode 100644 src/openai/types/shared_params/metadata.py diff --git a/.stats.yml b/.stats.yml index d518bac586..e49b5c56e8 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-3904ef6b29a89c98f93a9b7da19879695f3c440564be6384db7af1b734611ede.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-6204952a29973265b9c0d66fc67ffaf53c6a90ae4d75cdacf9d147676f5274c9.yml diff --git a/api.md b/api.md index e593464bf3..630f003fe8 100644 --- a/api.md +++ b/api.md @@ -5,6 +5,7 @@ from openai.types import ( ErrorObject, FunctionDefinition, FunctionParameters, + Metadata, ResponseFormatJSONObject, ResponseFormatJSONSchema, ResponseFormatText, diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py index 6a09825e59..6cc3b9881c 100644 --- a/src/openai/resources/audio/transcriptions.py +++ b/src/openai/resources/audio/transcriptions.py @@ -77,8 +77,8 @@ def create( Whisper V2 model) is currently available. language: The language of the input audio. Supplying the input language in - [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will - improve accuracy and latency. + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. prompt: An optional text to guide the model's style or continue a previous audio segment. The @@ -189,8 +189,8 @@ async def create( Whisper V2 model) is currently available. language: The language of the input audio. Supplying the input language in - [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will - improve accuracy and latency. + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. 
`en`) + format will improve accuracy and latency. prompt: An optional text to guide the model's style or continue a previous audio segment. The diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py index a496645a42..7e7ec19ec2 100644 --- a/src/openai/resources/batches.py +++ b/src/openai/resources/batches.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Dict, Optional +from typing import Optional from typing_extensions import Literal import httpx @@ -20,6 +20,7 @@ from ..pagination import SyncCursorPage, AsyncCursorPage from ..types.batch import Batch from .._base_client import AsyncPaginator, make_request_options +from ..types.shared_params.metadata import Metadata __all__ = ["Batches", "AsyncBatches"] @@ -50,7 +51,7 @@ def create( completion_window: Literal["24h"], endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"], input_file_id: str, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -80,7 +81,12 @@ def create( and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size. - metadata: Optional custom metadata for the batch. + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. extra_headers: Send extra headers @@ -255,7 +261,7 @@ async def create( completion_window: Literal["24h"], endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"], input_file_id: str, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -285,7 +291,12 @@ async def create( and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size. - metadata: Optional custom metadata for the batch. + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
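
As a quick illustration of the batch `metadata` change above (not part of the patch itself), the typed `Metadata` alias still accepts a plain dict of short string pairs; the file ID and tag values below are hypothetical.

from openai import OpenAI

client = OpenAI()
batch = client.batches.create(
    input_file_id="file-abc123",  # hypothetical ID of an uploaded JSONL batch file
    endpoint="/v1/chat/completions",
    completion_window="24h",
    metadata={"team": "billing", "run": "nightly"},  # keys <= 64 chars, values <= 512 chars
)
print(batch.status)
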
extra_headers: Send extra headers diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index 2f2482b648..65b7c9cfc2 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -26,6 +26,7 @@ from ...types.chat_model import ChatModel from ...types.beta.assistant import Assistant from ...types.beta.assistant_deleted import AssistantDeleted +from ...types.shared_params.metadata import Metadata from ...types.beta.assistant_tool_param import AssistantToolParam from ...types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -58,7 +59,7 @@ def create( model: Union[str, ChatModel], description: Optional[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -88,9 +89,11 @@ def create( characters. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. name: The name of the assistant. The maximum length is 256 characters. @@ -206,7 +209,7 @@ def update( *, description: Optional[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: str | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -232,9 +235,11 @@ def update( characters. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to @@ -444,7 +449,7 @@ async def create( model: Union[str, ChatModel], description: Optional[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -474,9 +479,11 @@ async def create( characters. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. 
Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. name: The name of the assistant. The maximum length is 256 characters. @@ -592,7 +599,7 @@ async def update( *, description: Optional[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: str | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -618,9 +625,11 @@ async def update( characters. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py index b920c89207..4b337b7c19 100644 --- a/src/openai/resources/beta/realtime/sessions.py +++ b/src/openai/resources/beta/realtime/sessions.py @@ -89,8 +89,11 @@ def create( input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the model, since the model consumes audio directly. Transcription runs - asynchronously through Whisper and should be treated as rough guidance rather - than the representation understood by the model. + asynchronously through + [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + and should be treated as rough guidance rather than the representation + understood by the model. The client can optionally set the language and prompt + for transcription, these fields will be passed to the Whisper API. instructions: The default system instructions (i.e. system message) prepended to model calls. This field allows the client to guide the model on desired responses. The model @@ -232,8 +235,11 @@ async def create( input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the model, since the model consumes audio directly. Transcription runs - asynchronously through Whisper and should be treated as rough guidance rather - than the representation understood by the model. + asynchronously through + [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + and should be treated as rough guidance rather than the representation + understood by the model. The client can optionally set the language and prompt + for transcription, these fields will be passed to the Whisper API. 
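
A minimal sketch of the realtime session behaviour described above (not part of the patch): `model` is now optional when creating a session, and Whisper-based input transcription can be configured per session. The `language` field is an assumption based on the docstring; the test in this patch only passes `model`.

from openai import OpenAI

client = OpenAI()
session = client.beta.realtime.sessions.create(
    modalities=["text"],
    input_audio_transcription={"model": "whisper-1", "language": "en"},  # language assumed per docstring
)
print(session)
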
instructions: The default system instructions (i.e. system message) prepended to model calls. This field allows the client to guide the model on desired responses. The model diff --git a/src/openai/resources/beta/threads/messages.py b/src/openai/resources/beta/threads/messages.py index 8be4883189..403f95443f 100644 --- a/src/openai/resources/beta/threads/messages.py +++ b/src/openai/resources/beta/threads/messages.py @@ -20,6 +20,7 @@ from ...._base_client import AsyncPaginator, make_request_options from ....types.beta.threads import message_list_params, message_create_params, message_update_params from ....types.beta.threads.message import Message +from ....types.shared_params.metadata import Metadata from ....types.beta.threads.message_deleted import MessageDeleted from ....types.beta.threads.message_content_part_param import MessageContentPartParam @@ -53,7 +54,7 @@ def create( content: Union[str, Iterable[MessageContentPartParam]], role: Literal["user", "assistant"], attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -78,9 +79,11 @@ def create( attachments: A list of files attached to the message, and the tools they should be added to. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. extra_headers: Send extra headers @@ -152,7 +155,7 @@ def update( message_id: str, *, thread_id: str, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -165,9 +168,11 @@ def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
extra_headers: Send extra headers @@ -327,7 +332,7 @@ async def create( content: Union[str, Iterable[MessageContentPartParam]], role: Literal["user", "assistant"], attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -352,9 +357,11 @@ async def create( attachments: A list of files attached to the message, and the tools they should be added to. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. extra_headers: Send extra headers @@ -426,7 +433,7 @@ async def update( message_id: str, *, thread_id: str, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -439,9 +446,11 @@ async def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. extra_headers: Send extra headers diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index ca354297c6..a1c93d0382 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -36,6 +36,7 @@ run_submit_tool_outputs_params, ) from .....types.beta.threads.run import Run +from .....types.shared_params.metadata import Metadata from .....types.beta.assistant_tool_param import AssistantToolParam from .....types.beta.assistant_stream_event import AssistantStreamEvent from .....types.beta.threads.runs.run_step_include import RunStepInclude @@ -81,7 +82,7 @@ def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -137,9 +138,11 @@ def create( `incomplete_details` for more info. 
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -222,7 +225,7 @@ def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -281,9 +284,11 @@ def create( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -362,7 +367,7 @@ def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -421,9 +426,11 @@ def create( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. 
If a value is provided here, it will override the @@ -501,7 +508,7 @@ def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -598,7 +605,7 @@ def update( run_id: str, *, thread_id: str, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -611,9 +618,11 @@ def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. extra_headers: Send extra headers @@ -929,7 +938,7 @@ async def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -985,9 +994,11 @@ async def create( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -1070,7 +1081,7 @@ async def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -1129,9 +1140,11 @@ async def create( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. 
This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -1210,7 +1223,7 @@ async def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -1269,9 +1282,11 @@ async def create( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -1349,7 +1364,7 @@ async def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -1446,7 +1461,7 @@ async def update( run_id: str, *, thread_id: str, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1459,9 +1474,11 @@ async def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
extra_headers: Send extra headers diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index bd8205d933..a9c473e28e 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -44,6 +44,7 @@ from ....types.beta.thread import Thread from ....types.beta.threads.run import Run from ....types.beta.thread_deleted import ThreadDeleted +from ....types.shared_params.metadata import Metadata from ....types.beta.assistant_stream_event import AssistantStreamEvent from ....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam from ....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -83,7 +84,7 @@ def create( self, *, messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -100,9 +101,11 @@ def create( start the thread with. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. tool_resources: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the @@ -172,7 +175,7 @@ def update( self, thread_id: str, *, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -186,9 +189,11 @@ def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. tool_resources: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. 
For example, the @@ -263,7 +268,7 @@ def create_and_run( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -306,9 +311,11 @@ def create_and_run( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -348,7 +355,8 @@ def create_and_run( make the output more random, while lower values like 0.2 will make it more focused and deterministic. - thread: If no thread is provided, an empty thread will be created. + thread: Options to create a new thread. If no thread is provided when running a request, + an empty thread will be created. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value @@ -394,7 +402,7 @@ def create_and_run( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -440,9 +448,11 @@ def create_and_run( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -478,7 +488,8 @@ def create_and_run( make the output more random, while lower values like 0.2 will make it more focused and deterministic. - thread: If no thread is provided, an empty thread will be created. + thread: Options to create a new thread. If no thread is provided when running a request, + an empty thread will be created. tool_choice: Controls which (if any) tool is called by the model. 
`none` means the model will not call any tools and instead generates a message. `auto` is the default value @@ -524,7 +535,7 @@ def create_and_run( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -570,9 +581,11 @@ def create_and_run( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -608,7 +621,8 @@ def create_and_run( make the output more random, while lower values like 0.2 will make it more focused and deterministic. - thread: If no thread is provided, an empty thread will be created. + thread: Options to create a new thread. If no thread is provided when running a request, + an empty thread will be created. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value @@ -653,7 +667,7 @@ def create_and_run( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -737,7 +751,7 @@ async def create( self, *, messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -754,9 +768,11 @@ async def create( start the thread with. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
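
For illustration only (not part of the patch), thread-level `metadata` uses the same shared `Metadata` type, so a thread can be tagged at creation and later looked up by those tags; the tag values here are hypothetical.

from openai import OpenAI

client = OpenAI()
thread = client.beta.threads.create(
    messages=[{"role": "user", "content": "Summarize my last invoice."}],
    metadata={"customer_id": "cus_123", "channel": "email"},  # hypothetical tags
)
print(thread.id)
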
tool_resources: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the @@ -826,7 +842,7 @@ async def update( self, thread_id: str, *, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -840,9 +856,11 @@ async def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. tool_resources: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the @@ -917,7 +935,7 @@ async def create_and_run( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -960,9 +978,11 @@ async def create_and_run( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -1002,7 +1022,8 @@ async def create_and_run( make the output more random, while lower values like 0.2 will make it more focused and deterministic. - thread: If no thread is provided, an empty thread will be created. + thread: Options to create a new thread. If no thread is provided when running a request, + an empty thread will be created. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. 
`auto` is the default value @@ -1048,7 +1069,7 @@ async def create_and_run( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -1094,9 +1115,11 @@ async def create_and_run( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -1132,7 +1155,8 @@ async def create_and_run( make the output more random, while lower values like 0.2 will make it more focused and deterministic. - thread: If no thread is provided, an empty thread will be created. + thread: Options to create a new thread. If no thread is provided when running a request, + an empty thread will be created. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value @@ -1178,7 +1202,7 @@ async def create_and_run( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -1224,9 +1248,11 @@ async def create_and_run( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -1262,7 +1288,8 @@ async def create_and_run( make the output more random, while lower values like 0.2 will make it more focused and deterministic. - thread: If no thread is provided, an empty thread will be created. + thread: Options to create a new thread. If no thread is provided when running a request, + an empty thread will be created. tool_choice: Controls which (if any) tool is called by the model. 
`none` means the model will not call any tools and instead generates a message. `auto` is the default value @@ -1307,7 +1334,7 @@ async def create_and_run( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, diff --git a/src/openai/resources/beta/vector_stores/vector_stores.py b/src/openai/resources/beta/vector_stores/vector_stores.py index 6b44c602f1..1da52fb3c7 100644 --- a/src/openai/resources/beta/vector_stores/vector_stores.py +++ b/src/openai/resources/beta/vector_stores/vector_stores.py @@ -41,6 +41,7 @@ ) from ...._base_client import AsyncPaginator, make_request_options from ....types.beta.vector_store import VectorStore +from ....types.shared_params.metadata import Metadata from ....types.beta.vector_store_deleted import VectorStoreDeleted from ....types.beta.file_chunking_strategy_param import FileChunkingStrategyParam @@ -81,7 +82,7 @@ def create( chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -104,9 +105,11 @@ def create( files. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. name: The name of the vector store. @@ -176,7 +179,7 @@ def update( vector_store_id: str, *, expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -192,9 +195,11 @@ def update( expires_after: The expiration policy for a vector store. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. 
Values are strings with + a maximum length of 512 characters. name: The name of the vector store. @@ -359,7 +364,7 @@ async def create( chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -382,9 +387,11 @@ async def create( files. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. name: The name of the vector store. @@ -454,7 +461,7 @@ async def update( vector_store_id: str, *, expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -470,9 +477,11 @@ async def update( expires_after: The expiration policy for a vector store. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. name: The name of the vector store. 
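The vector-store hunks above retype `metadata` from `Optional[object]` to the shared `Metadata` alias (a `Dict[str, str]`), on both `create` and `update`. A minimal usage sketch under that assumption — the calls mirror the signatures shown in the hunks, but the store name and metadata keys below are illustrative placeholders, not values taken from this patch:

import openai

client = openai.OpenAI()  # reads OPENAI_API_KEY from the environment

# metadata is now typed as Metadata (Dict[str, str]); at most 16 pairs,
# keys up to 64 characters and values up to 512 characters.
vector_store = client.beta.vector_stores.create(
    name="Support FAQ",  # illustrative name
    metadata={"team": "support", "version": "2025-01"},  # illustrative keys
)

# The same typed field is accepted on update.
client.beta.vector_stores.update(
    vector_store_id=vector_store.id,
    metadata={"team": "support", "version": "2025-02"},
)
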
diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index c44b9d0c30..2b91659950 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -26,6 +26,7 @@ from ..._base_client import make_request_options from ...types.chat_model import ChatModel from ...types.chat.chat_completion import ChatCompletion +from ...types.shared_params.metadata import Metadata from ...types.chat.chat_completion_chunk import ChatCompletionChunk from ...types.chat.chat_completion_modality import ChatCompletionModality from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam @@ -73,7 +74,7 @@ def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -177,8 +178,12 @@ def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). - metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/chat-completions). + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. modalities: Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default: @@ -244,9 +249,9 @@ def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarentee. + latency guarantee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarentee. + tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. stop: Up to 4 sequences where the API will stop generating further tokens. @@ -322,7 +327,7 @@ def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -432,8 +437,12 @@ def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). - metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/chat-completions). + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. 
Values are strings with + a maximum length of 512 characters. modalities: Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default: @@ -499,9 +508,9 @@ def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarentee. + latency guarantee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarentee. + tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. stop: Up to 4 sequences where the API will stop generating further tokens. @@ -570,7 +579,7 @@ def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -680,8 +689,12 @@ def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). - metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/chat-completions). + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. modalities: Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default: @@ -747,9 +760,9 @@ def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarentee. + latency guarantee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarentee. + tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. stop: Up to 4 sequences where the API will stop generating further tokens. 
@@ -817,7 +830,7 @@ def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -924,7 +937,7 @@ async def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -1028,8 +1041,12 @@ async def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). - metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/chat-completions). + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. modalities: Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default: @@ -1095,9 +1112,9 @@ async def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarentee. + latency guarantee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarentee. + tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. stop: Up to 4 sequences where the API will stop generating further tokens. @@ -1173,7 +1190,7 @@ async def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -1283,8 +1300,12 @@ async def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). - metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/chat-completions). + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. modalities: Output types that you would like the model to generate for this request. 
Most models are capable of generating text, which is the default: @@ -1350,9 +1371,9 @@ async def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarentee. + latency guarantee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarentee. + tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. stop: Up to 4 sequences where the API will stop generating further tokens. @@ -1421,7 +1442,7 @@ async def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -1531,8 +1552,12 @@ async def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). - metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/chat-completions). + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. modalities: Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default: @@ -1598,9 +1623,9 @@ async def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarentee. + latency guarantee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarentee. + tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. stop: Up to 4 sequences where the API will stop generating further tokens. 
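Chat completions pick up the same shared `Metadata` type in place of the old `Optional[Dict[str, str]]` annotation, with the expanded docstring above. A short sketch of passing it at request time, assuming the standard client; the model name and tag values are placeholders for illustration only:

import openai

client = openai.OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",  # placeholder model name
    messages=[{"role": "user", "content": "Say hello"}],
    # Typed as Metadata (Dict[str, str]): up to 16 pairs,
    # keys <= 64 characters, values <= 512 characters.
    metadata={"feature": "greeting", "env": "dev"},
)
print(completion.choices[0].message.content)
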
@@ -1668,7 +1693,7 @@ async def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index 72950f2491..7abb22f239 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -6,6 +6,7 @@ from .image import Image as Image from .model import Model as Model from .shared import ( + Metadata as Metadata, ErrorObject as ErrorObject, FunctionDefinition as FunctionDefinition, FunctionParameters as FunctionParameters, diff --git a/src/openai/types/audio/transcription_create_params.py b/src/openai/types/audio/transcription_create_params.py index 88805affbd..f1779c35e6 100644 --- a/src/openai/types/audio/transcription_create_params.py +++ b/src/openai/types/audio/transcription_create_params.py @@ -30,8 +30,8 @@ class TranscriptionCreateParams(TypedDict, total=False): """The language of the input audio. Supplying the input language in - [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will - improve accuracy and latency. + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. """ prompt: str diff --git a/src/openai/types/batch.py b/src/openai/types/batch.py index ac3d7ea119..35de90ac85 100644 --- a/src/openai/types/batch.py +++ b/src/openai/types/batch.py @@ -1,11 +1,11 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import builtins from typing import List, Optional from typing_extensions import Literal from .._models import BaseModel from .batch_error import BatchError +from .shared.metadata import Metadata from .batch_request_counts import BatchRequestCounts __all__ = ["Batch", "Errors"] @@ -70,12 +70,14 @@ class Batch(BaseModel): in_progress_at: Optional[int] = None """The Unix timestamp (in seconds) for when the batch started processing.""" - metadata: Optional[builtins.object] = None + metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ output_file_id: Optional[str] = None diff --git a/src/openai/types/batch_create_params.py b/src/openai/types/batch_create_params.py index b30c4d4658..e5be1d2bac 100644 --- a/src/openai/types/batch_create_params.py +++ b/src/openai/types/batch_create_params.py @@ -2,9 +2,11 @@ from __future__ import annotations -from typing import Dict, Optional +from typing import Optional from typing_extensions import Literal, Required, TypedDict +from .shared_params.metadata import Metadata + __all__ = ["BatchCreateParams"] @@ -35,5 +37,12 @@ class BatchCreateParams(TypedDict, total=False): requests, and can be up to 200 MB in size. 
""" - metadata: Optional[Dict[str, str]] - """Optional custom metadata for the batch.""" + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ diff --git a/src/openai/types/beta/assistant.py b/src/openai/types/beta/assistant.py index 3c8b8e403b..58421e0f66 100644 --- a/src/openai/types/beta/assistant.py +++ b/src/openai/types/beta/assistant.py @@ -5,6 +5,7 @@ from ..._models import BaseModel from .assistant_tool import AssistantTool +from ..shared.metadata import Metadata from .assistant_response_format_option import AssistantResponseFormatOption __all__ = ["Assistant", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] @@ -51,12 +52,14 @@ class Assistant(BaseModel): The maximum length is 256,000 characters. """ - metadata: Optional[object] = None + metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ model: str diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index 568b223ce7..e205856395 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -7,6 +7,7 @@ from ..chat_model import ChatModel from .assistant_tool_param import AssistantToolParam +from ..shared_params.metadata import Metadata from .file_chunking_strategy_param import FileChunkingStrategyParam from .assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -39,12 +40,14 @@ class AssistantCreateParams(TypedDict, total=False): The maximum length is 256,000 characters. """ - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ name: Optional[str] @@ -130,12 +133,14 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False): store. """ - metadata: object - """Set of 16 key-value pairs that can be attached to a vector store. + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. - This can be useful for storing additional information about the vector store in - a structured format. Keys can be a maximum of 64 characters long and values can - be a maximum of 512 characters long. + Keys are strings with a maximum length of 64 characters. 
Values are strings with + a maximum length of 512 characters. """ diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py index 9a66e41ab3..35065ef61b 100644 --- a/src/openai/types/beta/assistant_update_params.py +++ b/src/openai/types/beta/assistant_update_params.py @@ -6,6 +6,7 @@ from typing_extensions import TypedDict from .assistant_tool_param import AssistantToolParam +from ..shared_params.metadata import Metadata from .assistant_response_format_option_param import AssistantResponseFormatOptionParam __all__ = ["AssistantUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] @@ -21,12 +22,14 @@ class AssistantUpdateParams(TypedDict, total=False): The maximum length is 256,000 characters. """ - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ model: str diff --git a/src/openai/types/beta/realtime/conversation_item_create_event.py b/src/openai/types/beta/realtime/conversation_item_create_event.py index c4f72b9aff..f19d552a92 100644 --- a/src/openai/types/beta/realtime/conversation_item_create_event.py +++ b/src/openai/types/beta/realtime/conversation_item_create_event.py @@ -20,10 +20,10 @@ class ConversationItemCreateEvent(BaseModel): """Optional client-generated ID used to identify this event.""" previous_item_id: Optional[str] = None - """ - The ID of the preceding item after which the new item will be inserted. If not - set, the new item will be appended to the end of the conversation. If set to - `root`, the new item will be added to the beginning of the conversation. If set - to an existing ID, it allows an item to be inserted mid-conversation. If the ID - cannot be found, an error will be returned and the item will not be added. + """The ID of the preceding item after which the new item will be inserted. + + If not set, the new item will be appended to the end of the conversation. If set + to `root`, the new item will be added to the beginning of the conversation. If + set to an existing ID, it allows an item to be inserted mid-conversation. If the + ID cannot be found, an error will be returned and the item will not be added. """ diff --git a/src/openai/types/beta/realtime/conversation_item_create_event_param.py b/src/openai/types/beta/realtime/conversation_item_create_event_param.py index 6da5a63a9d..693d0fd54d 100644 --- a/src/openai/types/beta/realtime/conversation_item_create_event_param.py +++ b/src/openai/types/beta/realtime/conversation_item_create_event_param.py @@ -20,10 +20,10 @@ class ConversationItemCreateEventParam(TypedDict, total=False): """Optional client-generated ID used to identify this event.""" previous_item_id: str - """ - The ID of the preceding item after which the new item will be inserted. If not - set, the new item will be appended to the end of the conversation. If set to - `root`, the new item will be added to the beginning of the conversation. If set - to an existing ID, it allows an item to be inserted mid-conversation. 
If the ID - cannot be found, an error will be returned and the item will not be added. + """The ID of the preceding item after which the new item will be inserted. + + If not set, the new item will be appended to the end of the conversation. If set + to `root`, the new item will be added to the beginning of the conversation. If + set to an existing ID, it allows an item to be inserted mid-conversation. If the + ID cannot be found, an error will be returned and the item will not be added. """ diff --git a/src/openai/types/beta/realtime/realtime_response.py b/src/openai/types/beta/realtime/realtime_response.py index 3e1b1406c0..4c3c83d666 100644 --- a/src/openai/types/beta/realtime/realtime_response.py +++ b/src/openai/types/beta/realtime/realtime_response.py @@ -1,9 +1,10 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Optional +from typing import List, Union, Optional from typing_extensions import Literal from ...._models import BaseModel +from ...shared.metadata import Metadata from .conversation_item import ConversationItem from .realtime_response_usage import RealtimeResponseUsage from .realtime_response_status import RealtimeResponseStatus @@ -15,8 +16,40 @@ class RealtimeResponse(BaseModel): id: Optional[str] = None """The unique ID of the response.""" - metadata: Optional[object] = None - """Developer-provided string key-value pairs associated with this response.""" + conversation_id: Optional[str] = None + """ + Which conversation the response is added to, determined by the `conversation` + field in the `response.create` event. If `auto`, the response will be added to + the default conversation and the value of `conversation_id` will be an id like + `conv_1234`. If `none`, the response will not be added to any conversation and + the value of `conversation_id` will be `null`. If responses are being triggered + by server VAD, the response will be added to the default conversation, thus the + `conversation_id` will be an id like `conv_1234`. + """ + + max_output_tokens: Union[int, Literal["inf"], None] = None + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls, that was used in this response. + """ + + metadata: Optional[Metadata] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + modalities: Optional[List[Literal["text", "audio"]]] = None + """The set of modalities the model used to respond. + + If there are multiple modalities, the model will pick one, for example if + `modalities` is `["text", "audio"]`, the model could be responding in either + text or audio. + """ object: Optional[Literal["realtime.response"]] = None """The object type, must be `realtime.response`.""" @@ -24,6 +57,9 @@ class RealtimeResponse(BaseModel): output: Optional[List[ConversationItem]] = None """The list of output items generated by the response.""" + output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None + """The format of output audio. 
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + status: Optional[Literal["completed", "cancelled", "failed", "incomplete"]] = None """ The final status of the response (`completed`, `cancelled`, `failed`, or @@ -33,6 +69,9 @@ class RealtimeResponse(BaseModel): status_details: Optional[RealtimeResponseStatus] = None """Additional details about the status.""" + temperature: Optional[float] = None + """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" + usage: Optional[RealtimeResponseUsage] = None """Usage statistics for the Response, this will correspond to billing. @@ -40,3 +79,9 @@ class RealtimeResponse(BaseModel): to the Conversation, thus output from previous turns (text and audio tokens) will become the input for later turns. """ + + voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None + """ + The voice the model used to respond. Current voice options are `alloy`, `ash`, + `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. + """ diff --git a/src/openai/types/beta/realtime/response_create_event.py b/src/openai/types/beta/realtime/response_create_event.py index e4e5e7c68f..0801654bd8 100644 --- a/src/openai/types/beta/realtime/response_create_event.py +++ b/src/openai/types/beta/realtime/response_create_event.py @@ -4,6 +4,7 @@ from typing_extensions import Literal from ...._models import BaseModel +from ...shared.metadata import Metadata from .conversation_item import ConversationItem __all__ = ["ResponseCreateEvent", "Response", "ResponseTool"] @@ -66,12 +67,14 @@ class Response(BaseModel): `inf` for the maximum available tokens for a given model. Defaults to `inf`. """ - metadata: Optional[object] = None + metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ modalities: Optional[List[Literal["text", "audio"]]] = None diff --git a/src/openai/types/beta/realtime/response_create_event_param.py b/src/openai/types/beta/realtime/response_create_event_param.py index 7a4b5f086a..a87ef955e8 100644 --- a/src/openai/types/beta/realtime/response_create_event_param.py +++ b/src/openai/types/beta/realtime/response_create_event_param.py @@ -6,6 +6,7 @@ from typing_extensions import Literal, Required, TypedDict from .conversation_item_param import ConversationItemParam +from ...shared_params.metadata import Metadata __all__ = ["ResponseCreateEventParam", "Response", "ResponseTool"] @@ -67,12 +68,14 @@ class Response(TypedDict, total=False): `inf` for the maximum available tokens for a given model. Defaults to `inf`. """ - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
""" modalities: List[Literal["text", "audio"]] diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py index 3708efeecd..1502d83d39 100644 --- a/src/openai/types/beta/realtime/session_create_params.py +++ b/src/openai/types/beta/realtime/session_create_params.py @@ -22,8 +22,11 @@ class SessionCreateParams(TypedDict, total=False): Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the model, since the model consumes audio directly. Transcription runs - asynchronously through Whisper and should be treated as rough guidance rather - than the representation understood by the model. + asynchronously through + [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + and should be treated as rough guidance rather than the representation + understood by the model. The client can optionally set the language and prompt + for transcription, these fields will be passed to the Whisper API. """ instructions: str @@ -101,12 +104,28 @@ class SessionCreateParams(TypedDict, total=False): class InputAudioTranscription(TypedDict, total=False): + language: str + """The language of the input audio. + + Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. + """ + model: str """ The model to use for transcription, `whisper-1` is the only currently supported model. """ + prompt: str + """An optional text to guide the model's style or continue a previous audio + segment. + + The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + should match the audio language. + """ + class Tool(TypedDict, total=False): description: str diff --git a/src/openai/types/beta/realtime/session_create_response.py b/src/openai/types/beta/realtime/session_create_response.py index 31f591b261..c26e62bef1 100644 --- a/src/openai/types/beta/realtime/session_create_response.py +++ b/src/openai/types/beta/realtime/session_create_response.py @@ -9,13 +9,13 @@ class ClientSecret(BaseModel): - expires_at: Optional[int] = None + expires_at: int """Timestamp for when the token expires. Currently, all tokens expire after one minute. """ - value: Optional[str] = None + value: str """ Ephemeral key usable in client environments to authenticate connections to the Realtime API. Use this in client-side environments rather than a standard API @@ -74,7 +74,7 @@ class TurnDetection(BaseModel): class SessionCreateResponse(BaseModel): - client_secret: Optional[ClientSecret] = None + client_secret: ClientSecret """Ephemeral key returned by the API.""" input_audio_format: Optional[str] = None diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py index 322e588a4e..62fb0a3998 100644 --- a/src/openai/types/beta/realtime/session_update_event.py +++ b/src/openai/types/beta/realtime/session_update_event.py @@ -9,12 +9,28 @@ class SessionInputAudioTranscription(BaseModel): + language: Optional[str] = None + """The language of the input audio. + + Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. + """ + model: Optional[str] = None """ The model to use for transcription, `whisper-1` is the only currently supported model. 
""" + prompt: Optional[str] = None + """An optional text to guide the model's style or continue a previous audio + segment. + + The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + should match the audio language. + """ + class SessionTool(BaseModel): description: Optional[str] = None @@ -78,8 +94,11 @@ class Session(BaseModel): Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the model, since the model consumes audio directly. Transcription runs - asynchronously through Whisper and should be treated as rough guidance rather - than the representation understood by the model. + asynchronously through + [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + and should be treated as rough guidance rather than the representation + understood by the model. The client can optionally set the language and prompt + for transcription, these fields will be passed to the Whisper API. """ instructions: Optional[str] = None diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py index c01d9b6887..133cdd91a1 100644 --- a/src/openai/types/beta/realtime/session_update_event_param.py +++ b/src/openai/types/beta/realtime/session_update_event_param.py @@ -15,12 +15,28 @@ class SessionInputAudioTranscription(TypedDict, total=False): + language: str + """The language of the input audio. + + Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. + """ + model: str """ The model to use for transcription, `whisper-1` is the only currently supported model. """ + prompt: str + """An optional text to guide the model's style or continue a previous audio + segment. + + The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + should match the audio language. + """ + class SessionTool(TypedDict, total=False): description: str @@ -84,8 +100,11 @@ class Session(TypedDict, total=False): Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the model, since the model consumes audio directly. Transcription runs - asynchronously through Whisper and should be treated as rough guidance rather - than the representation understood by the model. + asynchronously through + [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + and should be treated as rough guidance rather than the representation + understood by the model. The client can optionally set the language and prompt + for transcription, these fields will be passed to the Whisper API. 
""" instructions: str diff --git a/src/openai/types/beta/thread.py b/src/openai/types/beta/thread.py index 37d50ccb93..789f66e48b 100644 --- a/src/openai/types/beta/thread.py +++ b/src/openai/types/beta/thread.py @@ -4,6 +4,7 @@ from typing_extensions import Literal from ..._models import BaseModel +from ..shared.metadata import Metadata __all__ = ["Thread", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] @@ -40,12 +41,14 @@ class Thread(BaseModel): created_at: int """The Unix timestamp (in seconds) for when the thread was created.""" - metadata: Optional[object] = None + metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ object: Literal["thread"] diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index 8310ba12f4..08f044c1be 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -8,6 +8,7 @@ from ..chat_model import ChatModel from .function_tool_param import FunctionToolParam from .file_search_tool_param import FileSearchToolParam +from ..shared_params.metadata import Metadata from .code_interpreter_tool_param import CodeInterpreterToolParam from .file_chunking_strategy_param import FileChunkingStrategyParam from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam @@ -67,12 +68,14 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): `incomplete_details` for more info. """ - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ model: Union[str, ChatModel, None] @@ -122,7 +125,11 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): """ thread: Thread - """If no thread is provided, an empty thread will be created.""" + """Options to create a new thread. + + If no thread is provided when running a request, an empty thread will be + created. + """ tool_choice: Optional[AssistantToolChoiceOptionParam] """ @@ -197,12 +204,14 @@ class ThreadMessage(TypedDict, total=False): attachments: Optional[Iterable[ThreadMessageAttachment]] """A list of files attached to the message, and the tools they should be added to.""" - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. 
Values are strings with + a maximum length of 512 characters. """ @@ -230,12 +239,14 @@ class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False): store. """ - metadata: object - """Set of 16 key-value pairs that can be attached to a vector store. + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. - This can be useful for storing additional information about the vector store in - a structured format. Keys can be a maximum of 64 characters long and values can - be a maximum of 512 characters long. + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ @@ -270,12 +281,14 @@ class Thread(TypedDict, total=False): start the thread with. """ - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ tool_resources: Optional[ThreadToolResources] diff --git a/src/openai/types/beta/thread_create_params.py b/src/openai/types/beta/thread_create_params.py index 3ac6c7d69b..127202753c 100644 --- a/src/openai/types/beta/thread_create_params.py +++ b/src/openai/types/beta/thread_create_params.py @@ -5,6 +5,7 @@ from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..shared_params.metadata import Metadata from .code_interpreter_tool_param import CodeInterpreterToolParam from .file_chunking_strategy_param import FileChunkingStrategyParam from .threads.message_content_part_param import MessageContentPartParam @@ -29,12 +30,14 @@ class ThreadCreateParams(TypedDict, total=False): start the thread with. """ - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ tool_resources: Optional[ToolResources] @@ -78,12 +81,14 @@ class Message(TypedDict, total=False): attachments: Optional[Iterable[MessageAttachment]] """A list of files attached to the message, and the tools they should be added to.""" - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
""" @@ -111,12 +116,14 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False): store. """ - metadata: object - """Set of 16 key-value pairs that can be attached to a vector store. + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. - This can be useful for storing additional information about the vector store in - a structured format. Keys can be a maximum of 64 characters long and values can - be a maximum of 512 characters long. + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ diff --git a/src/openai/types/beta/thread_update_params.py b/src/openai/types/beta/thread_update_params.py index 78c5ec4f2e..b47ea8f3b0 100644 --- a/src/openai/types/beta/thread_update_params.py +++ b/src/openai/types/beta/thread_update_params.py @@ -5,16 +5,20 @@ from typing import List, Optional from typing_extensions import TypedDict +from ..shared_params.metadata import Metadata + __all__ = ["ThreadUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] class ThreadUpdateParams(TypedDict, total=False): - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ tool_resources: Optional[ToolResources] diff --git a/src/openai/types/beta/threads/message.py b/src/openai/types/beta/threads/message.py index 63c5c4800a..4a05a128eb 100644 --- a/src/openai/types/beta/threads/message.py +++ b/src/openai/types/beta/threads/message.py @@ -5,6 +5,7 @@ from ...._models import BaseModel from .message_content import MessageContent +from ...shared.metadata import Metadata from ..code_interpreter_tool import CodeInterpreterTool __all__ = [ @@ -66,12 +67,14 @@ class Message(BaseModel): incomplete_details: Optional[IncompleteDetails] = None """On an incomplete message, details about why the message is incomplete.""" - metadata: Optional[object] = None + metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
""" object: Literal["thread.message"] diff --git a/src/openai/types/beta/threads/message_create_params.py b/src/openai/types/beta/threads/message_create_params.py index 2c4edfdf71..b52386824a 100644 --- a/src/openai/types/beta/threads/message_create_params.py +++ b/src/openai/types/beta/threads/message_create_params.py @@ -5,6 +5,7 @@ from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ...shared_params.metadata import Metadata from .message_content_part_param import MessageContentPartParam from ..code_interpreter_tool_param import CodeInterpreterToolParam @@ -27,12 +28,14 @@ class MessageCreateParams(TypedDict, total=False): attachments: Optional[Iterable[Attachment]] """A list of files attached to the message, and the tools they should be added to.""" - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ diff --git a/src/openai/types/beta/threads/message_update_params.py b/src/openai/types/beta/threads/message_update_params.py index e8f8cc910c..bb078281e6 100644 --- a/src/openai/types/beta/threads/message_update_params.py +++ b/src/openai/types/beta/threads/message_update_params.py @@ -5,16 +5,20 @@ from typing import Optional from typing_extensions import Required, TypedDict +from ...shared_params.metadata import Metadata + __all__ = ["MessageUpdateParams"] class MessageUpdateParams(TypedDict, total=False): thread_id: Required[str] - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py index ad32135b7d..da9418d6f9 100644 --- a/src/openai/types/beta/threads/run.py +++ b/src/openai/types/beta/threads/run.py @@ -6,6 +6,7 @@ from ...._models import BaseModel from .run_status import RunStatus from ..assistant_tool import AssistantTool +from ...shared.metadata import Metadata from ..assistant_tool_choice_option import AssistantToolChoiceOption from ..assistant_response_format_option import AssistantResponseFormatOption from .required_action_function_tool_call import RequiredActionFunctionToolCall @@ -133,12 +134,14 @@ class Run(BaseModel): of the run. """ - metadata: Optional[object] = None + metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. 
Values are strings with + a maximum length of 512 characters. """ model: str diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index 88dc39645e..091dd3da66 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -8,6 +8,7 @@ from ...chat_model import ChatModel from ..assistant_tool_param import AssistantToolParam from .runs.run_step_include import RunStepInclude +from ...shared_params.metadata import Metadata from .message_content_part_param import MessageContentPartParam from ..code_interpreter_tool_param import CodeInterpreterToolParam from ..assistant_tool_choice_option_param import AssistantToolChoiceOptionParam @@ -80,12 +81,14 @@ class RunCreateParamsBase(TypedDict, total=False): `incomplete_details` for more info. """ - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ model: Union[str, ChatModel, None] @@ -199,12 +202,14 @@ class AdditionalMessage(TypedDict, total=False): attachments: Optional[Iterable[AdditionalMessageAttachment]] """A list of files attached to the message, and the tools they should be added to.""" - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ diff --git a/src/openai/types/beta/threads/run_update_params.py b/src/openai/types/beta/threads/run_update_params.py index cb4f053645..fbcbd3fb14 100644 --- a/src/openai/types/beta/threads/run_update_params.py +++ b/src/openai/types/beta/threads/run_update_params.py @@ -5,16 +5,20 @@ from typing import Optional from typing_extensions import Required, TypedDict +from ...shared_params.metadata import Metadata + __all__ = ["RunUpdateParams"] class RunUpdateParams(TypedDict, total=False): thread_id: Required[str] - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
""" diff --git a/src/openai/types/beta/threads/runs/run_step.py b/src/openai/types/beta/threads/runs/run_step.py index 0445ae360d..b5f380c7b1 100644 --- a/src/openai/types/beta/threads/runs/run_step.py +++ b/src/openai/types/beta/threads/runs/run_step.py @@ -5,6 +5,7 @@ from ....._utils import PropertyInfo from ....._models import BaseModel +from ....shared.metadata import Metadata from .tool_calls_step_details import ToolCallsStepDetails from .message_creation_step_details import MessageCreationStepDetails @@ -70,12 +71,14 @@ class RunStep(BaseModel): Will be `null` if there are no errors. """ - metadata: Optional[object] = None + metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ object: Literal["thread.run.step"] diff --git a/src/openai/types/beta/vector_store.py b/src/openai/types/beta/vector_store.py index 2d3ceea80c..b947dfb79d 100644 --- a/src/openai/types/beta/vector_store.py +++ b/src/openai/types/beta/vector_store.py @@ -4,6 +4,7 @@ from typing_extensions import Literal from ..._models import BaseModel +from ..shared.metadata import Metadata __all__ = ["VectorStore", "FileCounts", "ExpiresAfter"] @@ -48,12 +49,14 @@ class VectorStore(BaseModel): last_active_at: Optional[int] = None """The Unix timestamp (in seconds) for when the vector store was last active.""" - metadata: Optional[object] = None + metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ name: str diff --git a/src/openai/types/beta/vector_store_create_params.py b/src/openai/types/beta/vector_store_create_params.py index 4fc7c38927..faca6d9000 100644 --- a/src/openai/types/beta/vector_store_create_params.py +++ b/src/openai/types/beta/vector_store_create_params.py @@ -5,6 +5,7 @@ from typing import List, Optional from typing_extensions import Literal, Required, TypedDict +from ..shared_params.metadata import Metadata from .file_chunking_strategy_param import FileChunkingStrategyParam __all__ = ["VectorStoreCreateParams", "ExpiresAfter"] @@ -28,12 +29,14 @@ class VectorStoreCreateParams(TypedDict, total=False): files. """ - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
""" name: str diff --git a/src/openai/types/beta/vector_store_update_params.py b/src/openai/types/beta/vector_store_update_params.py index ff6c068efb..e91b3ba5ad 100644 --- a/src/openai/types/beta/vector_store_update_params.py +++ b/src/openai/types/beta/vector_store_update_params.py @@ -5,6 +5,8 @@ from typing import Optional from typing_extensions import Literal, Required, TypedDict +from ..shared_params.metadata import Metadata + __all__ = ["VectorStoreUpdateParams", "ExpiresAfter"] @@ -12,12 +14,14 @@ class VectorStoreUpdateParams(TypedDict, total=False): expires_after: Optional[ExpiresAfter] """The expiration policy for a vector store.""" - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ name: Optional[str] diff --git a/src/openai/types/chat/chat_completion_assistant_message_param.py b/src/openai/types/chat/chat_completion_assistant_message_param.py index 229fb822f4..35e3a3d784 100644 --- a/src/openai/types/chat/chat_completion_assistant_message_param.py +++ b/src/openai/types/chat/chat_completion_assistant_message_param.py @@ -38,8 +38,8 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): """The role of the messages author, in this case `assistant`.""" audio: Optional[Audio] - """ - Data about a previous audio response from the model. + """Data about a previous audio response from the model. + [Learn more](https://platform.openai.com/docs/guides/audio). """ diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 30d930b120..ec88ea1fb0 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -6,6 +6,7 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..chat_model import ChatModel +from ..shared_params.metadata import Metadata from .chat_completion_modality import ChatCompletionModality from .chat_completion_tool_param import ChatCompletionToolParam from .chat_completion_audio_param import ChatCompletionAudioParam @@ -122,10 +123,14 @@ class CompletionCreateParamsBase(TypedDict, total=False): [o1 series models](https://platform.openai.com/docs/guides/reasoning). """ - metadata: Optional[Dict[str, str]] - """ - Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/chat-completions). + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ modalities: Optional[List[ChatCompletionModality]] @@ -216,9 +221,9 @@ class CompletionCreateParamsBase(TypedDict, total=False): utilize scale tier credits until they are exhausted. 
- If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarentee. + latency guarantee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarentee. + tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. """ diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py index e1ac464320..c191cb9734 100644 --- a/src/openai/types/chat_model.py +++ b/src/openai/types/chat_model.py @@ -5,6 +5,8 @@ __all__ = ["ChatModel"] ChatModel: TypeAlias = Literal[ + "o3-mini", + "o3-mini-2025-01-31", "o1", "o1-2024-12-17", "o1-preview", diff --git a/src/openai/types/shared/__init__.py b/src/openai/types/shared/__init__.py index c8776bca0e..74bf304904 100644 --- a/src/openai/types/shared/__init__.py +++ b/src/openai/types/shared/__init__.py @@ -1,5 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from .metadata import Metadata as Metadata from .error_object import ErrorObject as ErrorObject from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters diff --git a/src/openai/types/shared/metadata.py b/src/openai/types/shared/metadata.py new file mode 100644 index 0000000000..0da88c679c --- /dev/null +++ b/src/openai/types/shared/metadata.py @@ -0,0 +1,8 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict +from typing_extensions import TypeAlias + +__all__ = ["Metadata"] + +Metadata: TypeAlias = Dict[str, str] diff --git a/src/openai/types/shared_params/__init__.py b/src/openai/types/shared_params/__init__.py index ab4057d59f..68a8db75fe 100644 --- a/src/openai/types/shared_params/__init__.py +++ b/src/openai/types/shared_params/__init__.py @@ -1,5 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from .metadata import Metadata as Metadata from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters from .response_format_text import ResponseFormatText as ResponseFormatText diff --git a/src/openai/types/shared_params/metadata.py b/src/openai/types/shared_params/metadata.py new file mode 100644 index 0000000000..821650b48b --- /dev/null +++ b/src/openai/types/shared_params/metadata.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Dict +from typing_extensions import TypeAlias + +__all__ = ["Metadata"] + +Metadata: TypeAlias = Dict[str, str] diff --git a/src/openai/types/upload.py b/src/openai/types/upload.py index 1cf8ee97f8..d8108c62f9 100644 --- a/src/openai/types/upload.py +++ b/src/openai/types/upload.py @@ -39,4 +39,4 @@ class Upload(BaseModel): """The status of the Upload.""" file: Optional[FileObject] = None - """The ready File object after the Upload is completed.""" + """The `File` object represents a document that has been uploaded to OpenAI.""" diff --git a/tests/api_resources/beta/realtime/test_sessions.py b/tests/api_resources/beta/realtime/test_sessions.py index 908aa983be..5a17088ce6 100644 --- a/tests/api_resources/beta/realtime/test_sessions.py +++ b/tests/api_resources/beta/realtime/test_sessions.py @@ -26,7 +26,11 @@ def test_method_create(self, client: OpenAI) -> None: def test_method_create_with_all_params(self, client: OpenAI) -> None: session = client.beta.realtime.sessions.create( input_audio_format="pcm16", - input_audio_transcription={"model": "model"}, + input_audio_transcription={ + "language": "language", + "model": "model", + "prompt": "prompt", + }, instructions="instructions", max_response_output_tokens=0, modalities=["text"], @@ -86,7 +90,11 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: session = await async_client.beta.realtime.sessions.create( input_audio_format="pcm16", - input_audio_transcription={"model": "model"}, + input_audio_transcription={ + "language": "language", + "model": "model", + "prompt": "prompt", + }, instructions="instructions", max_response_output_tokens=0, modalities=["text"], diff --git a/tests/api_resources/beta/test_assistants.py b/tests/api_resources/beta/test_assistants.py index d9944448b7..458e3f5e90 100644 --- a/tests/api_resources/beta/test_assistants.py +++ b/tests/api_resources/beta/test_assistants.py @@ -34,7 +34,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: model="gpt-4o", description="description", instructions="instructions", - metadata={}, + metadata={"foo": "string"}, name="name", response_format="auto", temperature=1, @@ -46,7 +46,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: { "chunking_strategy": {"type": "auto"}, "file_ids": ["string"], - "metadata": {}, + "metadata": {"foo": "string"}, } ], }, @@ -131,7 +131,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: assistant_id="assistant_id", description="description", instructions="instructions", - metadata={}, + metadata={"foo": "string"}, model="model", name="name", response_format="auto", @@ -266,7 +266,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> model="gpt-4o", description="description", instructions="instructions", - metadata={}, + metadata={"foo": "string"}, name="name", response_format="auto", temperature=1, @@ -278,7 +278,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> { "chunking_strategy": {"type": "auto"}, "file_ids": ["string"], - "metadata": {}, + "metadata": {"foo": "string"}, } ], }, @@ -363,7 +363,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> assistant_id="assistant_id", description="description", instructions="instructions", - metadata={}, + metadata={"foo": "string"}, model="model", name="name", 
response_format="auto", diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py index 72c5cc0f19..ea89213e95 100644 --- a/tests/api_resources/beta/test_threads.py +++ b/tests/api_resources/beta/test_threads.py @@ -39,10 +39,10 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], - metadata={}, + metadata={"foo": "string"}, tool_resources={ "code_interpreter": {"file_ids": ["string"]}, "file_search": { @@ -51,7 +51,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: { "chunking_strategy": {"type": "auto"}, "file_ids": ["string"], - "metadata": {}, + "metadata": {"foo": "string"}, } ], }, @@ -128,7 +128,7 @@ def test_method_update(self, client: OpenAI) -> None: def test_method_update_with_all_params(self, client: OpenAI) -> None: thread = client.beta.threads.update( thread_id="thread_id", - metadata={}, + metadata={"foo": "string"}, tool_resources={ "code_interpreter": {"file_ids": ["string"]}, "file_search": {"vector_store_ids": ["string"]}, @@ -219,7 +219,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, - metadata={}, + metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, response_format="auto", @@ -236,10 +236,10 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], - "metadata": {}, + "metadata": {"foo": "string"}, "tool_resources": { "code_interpreter": {"file_ids": ["string"]}, "file_search": { @@ -248,7 +248,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) { "chunking_strategy": {"type": "auto"}, "file_ids": ["string"], - "metadata": {}, + "metadata": {"foo": "string"}, } ], }, @@ -308,7 +308,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, - metadata={}, + metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, response_format="auto", @@ -324,10 +324,10 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], - "metadata": {}, + "metadata": {"foo": "string"}, "tool_resources": { "code_interpreter": {"file_ids": ["string"]}, "file_search": { @@ -336,7 +336,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) { "chunking_strategy": {"type": "auto"}, "file_ids": ["string"], - "metadata": {}, + "metadata": {"foo": "string"}, } ], }, @@ -403,10 +403,10 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], - metadata={}, + metadata={"foo": "string"}, tool_resources={ "code_interpreter": {"file_ids": ["string"]}, "file_search": { @@ -415,7 +415,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> { "chunking_strategy": {"type": "auto"}, "file_ids": ["string"], - "metadata": {}, + "metadata": {"foo": "string"}, } ], }, @@ -492,7 +492,7 @@ async def test_method_update(self, async_client: AsyncOpenAI) -> None: async def test_method_update_with_all_params(self, 
async_client: AsyncOpenAI) -> None: thread = await async_client.beta.threads.update( thread_id="thread_id", - metadata={}, + metadata={"foo": "string"}, tool_resources={ "code_interpreter": {"file_ids": ["string"]}, "file_search": {"vector_store_ids": ["string"]}, @@ -583,7 +583,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, - metadata={}, + metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, response_format="auto", @@ -600,10 +600,10 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], - "metadata": {}, + "metadata": {"foo": "string"}, "tool_resources": { "code_interpreter": {"file_ids": ["string"]}, "file_search": { @@ -612,7 +612,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie { "chunking_strategy": {"type": "auto"}, "file_ids": ["string"], - "metadata": {}, + "metadata": {"foo": "string"}, } ], }, @@ -672,7 +672,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, - metadata={}, + metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, response_format="auto", @@ -688,10 +688,10 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], - "metadata": {}, + "metadata": {"foo": "string"}, "tool_resources": { "code_interpreter": {"file_ids": ["string"]}, "file_search": { @@ -700,7 +700,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie { "chunking_strategy": {"type": "auto"}, "file_ids": ["string"], - "metadata": {}, + "metadata": {"foo": "string"}, } ], }, diff --git a/tests/api_resources/beta/test_vector_stores.py b/tests/api_resources/beta/test_vector_stores.py index 162241a13d..3216df907b 100644 --- a/tests/api_resources/beta/test_vector_stores.py +++ b/tests/api_resources/beta/test_vector_stores.py @@ -35,7 +35,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "days": 1, }, file_ids=["string"], - metadata={}, + metadata={"foo": "string"}, name="name", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @@ -113,7 +113,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: "anchor": "last_active_at", "days": 1, }, - metadata={}, + metadata={"foo": "string"}, name="name", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @@ -240,7 +240,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "days": 1, }, file_ids=["string"], - metadata={}, + metadata={"foo": "string"}, name="name", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @@ -318,7 +318,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> "anchor": "last_active_at", "days": 1, }, - metadata={}, + metadata={"foo": "string"}, name="name", ) assert_matches_type(VectorStore, vector_store, path=["response"]) diff --git a/tests/api_resources/beta/threads/test_messages.py b/tests/api_resources/beta/threads/test_messages.py index 1d50c73e92..c965f0ab90 100644 --- a/tests/api_resources/beta/threads/test_messages.py +++ b/tests/api_resources/beta/threads/test_messages.py @@ -42,7 +42,7 @@ def 
test_method_create_with_all_params(self, client: OpenAI) -> None: "tools": [{"type": "code_interpreter"}], } ], - metadata={}, + metadata={"foo": "string"}, ) assert_matches_type(Message, message, path=["response"]) @@ -144,7 +144,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: message = client.beta.threads.messages.update( message_id="message_id", thread_id="thread_id", - metadata={}, + metadata={"foo": "string"}, ) assert_matches_type(Message, message, path=["response"]) @@ -311,7 +311,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "tools": [{"type": "code_interpreter"}], } ], - metadata={}, + metadata={"foo": "string"}, ) assert_matches_type(Message, message, path=["response"]) @@ -413,7 +413,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> message = await async_client.beta.threads.messages.update( message_id="message_id", thread_id="thread_id", - metadata={}, + metadata={"foo": "string"}, ) assert_matches_type(Message, message, path=["response"]) diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py index ecce003a85..1509d5e9ab 100644 --- a/tests/api_resources/beta/threads/test_runs.py +++ b/tests/api_resources/beta/threads/test_runs.py @@ -45,13 +45,13 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, - metadata={}, + metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, response_format="auto", @@ -128,13 +128,13 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, - metadata={}, + metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, response_format="auto", @@ -246,7 +246,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: run = client.beta.threads.runs.update( run_id="run_id", thread_id="thread_id", - metadata={}, + metadata={"foo": "string"}, ) assert_matches_type(Run, run, path=["response"]) @@ -541,13 +541,13 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, - metadata={}, + metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, response_format="auto", @@ -624,13 +624,13 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, - metadata={}, + metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, response_format="auto", @@ -742,7 +742,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> run = await async_client.beta.threads.runs.update( run_id="run_id", thread_id="thread_id", - metadata={}, + metadata={"foo": "string"}, ) assert_matches_type(Run, run, path=["response"]) From f703831cff8493bbcbd73a729799da33e547b625 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 3 Feb 2025 14:43:52 +0000 Subject: [PATCH 158/192] chore(internal): change default timeout to an int (#2079) --- src/openai/_constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/openai/_constants.py b/src/openai/_constants.py index 3f82bed037..7029dc72b0 100644 --- a/src/openai/_constants.py +++ b/src/openai/_constants.py @@ -6,7 +6,7 @@ OVERRIDE_CAST_TO_HEADER = "____stainless_override_cast_to" # default timeout is 10 minutes -DEFAULT_TIMEOUT = httpx.Timeout(timeout=600.0, connect=5.0) +DEFAULT_TIMEOUT = httpx.Timeout(timeout=600, connect=5.0) DEFAULT_MAX_RETRIES = 2 DEFAULT_CONNECTION_LIMITS = httpx.Limits(max_connections=1000, max_keepalive_connections=100) From 9c0763ffec6ec68a80487da38166c1428c72d4c7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 3 Feb 2025 15:25:21 +0000 Subject: [PATCH 159/192] chore(internal): bummp ruff dependency (#2080) --- pyproject.toml | 2 +- requirements-dev.lock | 2 +- scripts/utils/ruffen-docs.py | 4 ++-- src/openai/_models.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index a745ecec37..5f2176755b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -178,7 +178,7 @@ select = [ "T201", "T203", # misuse of typing.TYPE_CHECKING - "TCH004", + "TC004", # import rules "TID251", ] diff --git a/requirements-dev.lock b/requirements-dev.lock index 593091cb04..3e56c5090e 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -78,7 +78,7 @@ pytz==2023.3.post1 # via dirty-equals respx==0.22.0 rich==13.7.1 -ruff==0.6.9 +ruff==0.9.4 setuptools==68.2.2 # via nodeenv six==1.16.0 diff --git a/scripts/utils/ruffen-docs.py b/scripts/utils/ruffen-docs.py index 37b3d94f0f..0cf2bd2fd9 100644 --- a/scripts/utils/ruffen-docs.py +++ b/scripts/utils/ruffen-docs.py @@ -47,7 +47,7 @@ def _md_match(match: Match[str]) -> str: with _collect_error(match): code = format_code_block(code) code = textwrap.indent(code, match["indent"]) - return f'{match["before"]}{code}{match["after"]}' + return f"{match['before']}{code}{match['after']}" def _pycon_match(match: Match[str]) -> str: code = "" @@ -97,7 +97,7 @@ def finish_fragment() -> None: def _md_pycon_match(match: Match[str]) -> str: code = _pycon_match(match) code = textwrap.indent(code, match["indent"]) - return f'{match["before"]}{code}{match["after"]}' + return f"{match['before']}{code}{match['after']}" src = MD_RE.sub(_md_match, src) src = MD_PYCON_RE.sub(_md_pycon_match, src) diff --git a/src/openai/_models.py b/src/openai/_models.py index 9a918aabf3..12c34b7d17 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -172,7 +172,7 @@ def to_json( @override def __str__(self) -> str: # mypy complains about an invalid self arg - return f'{self.__repr_name__()}({self.__repr_str__(", ")})' # type: ignore[misc] + return f"{self.__repr_name__()}({self.__repr_str__(', ')})" # type: ignore[misc] # Override the 'construct' method in a way that supports recursive parsing without validation. # Based on https://github.com/samuelcolvin/pydantic/issues/1168#issuecomment-817742836. 
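The DEFAULT_TIMEOUT change in the patch above only swaps a float literal for an int; httpx.Timeout accepts either, so the effective 10-minute default is unchanged. As a minimal sketch of how a caller interacts with that default, assuming the client constructor's `timeout` argument and the `with_options` helper (neither is shown in this patch):

    import httpx
    import openai

    # Mirrors the library default: 600-second overall timeout, 5-second connect timeout.
    client = openai.OpenAI(timeout=httpx.Timeout(timeout=600, connect=5.0))

    # A per-call override; with_options returns a copy of the client with the new setting.
    models = client.with_options(timeout=30.0).models.list()
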
From 76081933c7bf9d5e97c31efa1955e575abd2e1a1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 5 Feb 2025 11:26:48 +0000 Subject: [PATCH 160/192] fix(api/types): correct audio duration & role types (#2091) --- .stats.yml | 2 +- api.md | 1 + .../types/audio/transcription_verbose.py | 2 +- src/openai/types/audio/translation_verbose.py | 2 +- src/openai/types/beta/realtime/__init__.py | 4 ++ .../conversation_item_with_reference.py | 67 ++++++++++++++++++ .../conversation_item_with_reference_param.py | 68 +++++++++++++++++++ .../beta/realtime/response_create_event.py | 10 +-- .../realtime/response_create_event_param.py | 10 +-- .../types/chat/chat_completion_chunk.py | 2 +- src/openai/types/chat/chat_completion_role.py | 2 +- 11 files changed, 157 insertions(+), 13 deletions(-) create mode 100644 src/openai/types/beta/realtime/conversation_item_with_reference.py create mode 100644 src/openai/types/beta/realtime/conversation_item_with_reference_param.py diff --git a/.stats.yml b/.stats.yml index e49b5c56e8..df7877dfd0 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-6204952a29973265b9c0d66fc67ffaf53c6a90ae4d75cdacf9d147676f5274c9.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-fc5dbc19505b0035f9e7f88868619f4fb519b048bde011f6154f3132d4be71fb.yml diff --git a/api.md b/api.md index 630f003fe8..c357818064 100644 --- a/api.md +++ b/api.md @@ -254,6 +254,7 @@ from openai.types.beta.realtime import ( ConversationItemInputAudioTranscriptionFailedEvent, ConversationItemTruncateEvent, ConversationItemTruncatedEvent, + ConversationItemWithReference, ErrorEvent, InputAudioBufferAppendEvent, InputAudioBufferClearEvent, diff --git a/src/openai/types/audio/transcription_verbose.py b/src/openai/types/audio/transcription_verbose.py index 3b18fa4871..2a670189e0 100644 --- a/src/openai/types/audio/transcription_verbose.py +++ b/src/openai/types/audio/transcription_verbose.py @@ -10,7 +10,7 @@ class TranscriptionVerbose(BaseModel): - duration: str + duration: float """The duration of the input audio.""" language: str diff --git a/src/openai/types/audio/translation_verbose.py b/src/openai/types/audio/translation_verbose.py index 5901ae7535..27cb02d64f 100644 --- a/src/openai/types/audio/translation_verbose.py +++ b/src/openai/types/audio/translation_verbose.py @@ -9,7 +9,7 @@ class TranslationVerbose(BaseModel): - duration: str + duration: float """The duration of the input audio.""" language: str diff --git a/src/openai/types/beta/realtime/__init__.py b/src/openai/types/beta/realtime/__init__.py index 372d4ec19d..cd0616dcfa 100644 --- a/src/openai/types/beta/realtime/__init__.py +++ b/src/openai/types/beta/realtime/__init__.py @@ -42,6 +42,7 @@ from .input_audio_buffer_commit_event import InputAudioBufferCommitEvent as InputAudioBufferCommitEvent from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent from .conversation_item_truncate_event import ConversationItemTruncateEvent as ConversationItemTruncateEvent +from .conversation_item_with_reference import ConversationItemWithReference as ConversationItemWithReference from .input_audio_buffer_cleared_event import InputAudioBufferClearedEvent as InputAudioBufferClearedEvent from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent from 
.response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent @@ -60,6 +61,9 @@ from .conversation_item_truncate_event_param import ( ConversationItemTruncateEventParam as ConversationItemTruncateEventParam, ) +from .conversation_item_with_reference_param import ( + ConversationItemWithReferenceParam as ConversationItemWithReferenceParam, +) from .input_audio_buffer_speech_started_event import ( InputAudioBufferSpeechStartedEvent as InputAudioBufferSpeechStartedEvent, ) diff --git a/src/openai/types/beta/realtime/conversation_item_with_reference.py b/src/openai/types/beta/realtime/conversation_item_with_reference.py new file mode 100644 index 0000000000..31806afc33 --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item_with_reference.py @@ -0,0 +1,67 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ...._models import BaseModel +from .conversation_item_content import ConversationItemContent + +__all__ = ["ConversationItemWithReference"] + + +class ConversationItemWithReference(BaseModel): + id: Optional[str] = None + """ + For an item of type (`message` | `function_call` | `function_call_output`) this + field allows the client to assign the unique ID of the item. It is not required + because the server will generate one if not provided. + + For an item of type `item_reference`, this field is required and is a reference + to any item that has previously existed in the conversation. + """ + + arguments: Optional[str] = None + """The arguments of the function call (for `function_call` items).""" + + call_id: Optional[str] = None + """ + The ID of the function call (for `function_call` and `function_call_output` + items). If passed on a `function_call_output` item, the server will check that a + `function_call` item with the same ID exists in the conversation history. + """ + + content: Optional[List[ConversationItemContent]] = None + """The content of the message, applicable for `message` items. + + - Message items of role `system` support only `input_text` content + - Message items of role `user` support `input_text` and `input_audio` content + - Message items of role `assistant` support `text` content. + """ + + name: Optional[str] = None + """The name of the function being called (for `function_call` items).""" + + object: Optional[Literal["realtime.item"]] = None + """Identifier for the API object being returned - always `realtime.item`.""" + + output: Optional[str] = None + """The output of the function call (for `function_call_output` items).""" + + role: Optional[Literal["user", "assistant", "system"]] = None + """ + The role of the message sender (`user`, `assistant`, `system`), only applicable + for `message` items. + """ + + status: Optional[Literal["completed", "incomplete"]] = None + """The status of the item (`completed`, `incomplete`). + + These have no effect on the conversation, but are accepted for consistency with + the `conversation.item.created` event. + """ + + type: Optional[Literal["message", "function_call", "function_call_output", "item_reference"]] = None + """ + The type of the item (`message`, `function_call`, `function_call_output`, + `item_reference`). 
+ """ diff --git a/src/openai/types/beta/realtime/conversation_item_with_reference_param.py b/src/openai/types/beta/realtime/conversation_item_with_reference_param.py new file mode 100644 index 0000000000..e266cdce32 --- /dev/null +++ b/src/openai/types/beta/realtime/conversation_item_with_reference_param.py @@ -0,0 +1,68 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, TypedDict + +from .conversation_item_content_param import ConversationItemContentParam + +__all__ = ["ConversationItemWithReferenceParam"] + + +class ConversationItemWithReferenceParam(TypedDict, total=False): + id: str + """ + For an item of type (`message` | `function_call` | `function_call_output`) this + field allows the client to assign the unique ID of the item. It is not required + because the server will generate one if not provided. + + For an item of type `item_reference`, this field is required and is a reference + to any item that has previously existed in the conversation. + """ + + arguments: str + """The arguments of the function call (for `function_call` items).""" + + call_id: str + """ + The ID of the function call (for `function_call` and `function_call_output` + items). If passed on a `function_call_output` item, the server will check that a + `function_call` item with the same ID exists in the conversation history. + """ + + content: Iterable[ConversationItemContentParam] + """The content of the message, applicable for `message` items. + + - Message items of role `system` support only `input_text` content + - Message items of role `user` support `input_text` and `input_audio` content + - Message items of role `assistant` support `text` content. + """ + + name: str + """The name of the function being called (for `function_call` items).""" + + object: Literal["realtime.item"] + """Identifier for the API object being returned - always `realtime.item`.""" + + output: str + """The output of the function call (for `function_call_output` items).""" + + role: Literal["user", "assistant", "system"] + """ + The role of the message sender (`user`, `assistant`, `system`), only applicable + for `message` items. + """ + + status: Literal["completed", "incomplete"] + """The status of the item (`completed`, `incomplete`). + + These have no effect on the conversation, but are accepted for consistency with + the `conversation.item.created` event. + """ + + type: Literal["message", "function_call", "function_call_output", "item_reference"] + """ + The type of the item (`message`, `function_call`, `function_call_output`, + `item_reference`). + """ diff --git a/src/openai/types/beta/realtime/response_create_event.py b/src/openai/types/beta/realtime/response_create_event.py index 0801654bd8..d6c5fda926 100644 --- a/src/openai/types/beta/realtime/response_create_event.py +++ b/src/openai/types/beta/realtime/response_create_event.py @@ -5,7 +5,7 @@ from ...._models import BaseModel from ...shared.metadata import Metadata -from .conversation_item import ConversationItem +from .conversation_item_with_reference import ConversationItemWithReference __all__ = ["ResponseCreateEvent", "Response", "ResponseTool"] @@ -37,11 +37,13 @@ class Response(BaseModel): will not add items to default conversation. """ - input: Optional[List[ConversationItem]] = None + input: Optional[List[ConversationItemWithReference]] = None """Input items to include in the prompt for the model. 
- Creates a new context for this response, without including the default - conversation. Can include references to items from the default conversation. + Using this field creates a new context for this Response instead of using the + default conversation. An empty array `[]` will clear the context for this + Response. Note that this can include references to items from the default + conversation. """ instructions: Optional[str] = None diff --git a/src/openai/types/beta/realtime/response_create_event_param.py b/src/openai/types/beta/realtime/response_create_event_param.py index a87ef955e8..c02fe1b34e 100644 --- a/src/openai/types/beta/realtime/response_create_event_param.py +++ b/src/openai/types/beta/realtime/response_create_event_param.py @@ -5,8 +5,8 @@ from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict -from .conversation_item_param import ConversationItemParam from ...shared_params.metadata import Metadata +from .conversation_item_with_reference_param import ConversationItemWithReferenceParam __all__ = ["ResponseCreateEventParam", "Response", "ResponseTool"] @@ -38,11 +38,13 @@ class Response(TypedDict, total=False): will not add items to default conversation. """ - input: Iterable[ConversationItemParam] + input: Iterable[ConversationItemWithReferenceParam] """Input items to include in the prompt for the model. - Creates a new context for this response, without including the default - conversation. Can include references to items from the default conversation. + Using this field creates a new context for this Response instead of using the + default conversation. An empty array `[]` will clear the context for this + Response. Note that this can include references to items from the default + conversation. 
""" instructions: str diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py index 7b0ae2e121..dede513f1e 100644 --- a/src/openai/types/chat/chat_completion_chunk.py +++ b/src/openai/types/chat/chat_completion_chunk.py @@ -70,7 +70,7 @@ class ChoiceDelta(BaseModel): refusal: Optional[str] = None """The refusal message generated by the model.""" - role: Optional[Literal["system", "user", "assistant", "tool"]] = None + role: Optional[Literal["developer", "system", "user", "assistant", "tool"]] = None """The role of the author of this message.""" tool_calls: Optional[List[ChoiceDeltaToolCall]] = None diff --git a/src/openai/types/chat/chat_completion_role.py b/src/openai/types/chat/chat_completion_role.py index c2ebef74c8..3ec5e9ad87 100644 --- a/src/openai/types/chat/chat_completion_role.py +++ b/src/openai/types/chat/chat_completion_role.py @@ -4,4 +4,4 @@ __all__ = ["ChatCompletionRole"] -ChatCompletionRole: TypeAlias = Literal["system", "user", "assistant", "tool", "function"] +ChatCompletionRole: TypeAlias = Literal["developer", "system", "user", "assistant", "tool", "function"] From 9b4ca5c7844888e0f5d8e25636fbe3a08b1ded17 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 5 Feb 2025 17:15:22 +0000 Subject: [PATCH 161/192] feat(client): send `X-Stainless-Read-Timeout` header (#2094) --- src/openai/_base_client.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index e9516fc6cd..803b9a8dbc 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -419,10 +419,17 @@ def _build_headers(self, options: FinalRequestOptions, *, retries_taken: int = 0 if idempotency_header and options.method.lower() != "get" and idempotency_header not in headers: headers[idempotency_header] = options.idempotency_key or self._idempotency_key() - # Don't set the retry count header if it was already set or removed by the caller. We check + # Don't set these headers if they were already set or removed by the caller. We check # `custom_headers`, which can contain `Omit()`, instead of `headers` to account for the removal case. 
- if "x-stainless-retry-count" not in (header.lower() for header in custom_headers): + lower_custom_headers = [header.lower() for header in custom_headers] + if "x-stainless-retry-count" not in lower_custom_headers: headers["x-stainless-retry-count"] = str(retries_taken) + if "x-stainless-read-timeout" not in lower_custom_headers: + timeout = self.timeout if isinstance(options.timeout, NotGiven) else options.timeout + if isinstance(timeout, Timeout): + timeout = timeout.read + if timeout is not None: + headers["x-stainless-read-timeout"] = str(timeout) return headers From 7e7744f29f35f195992238edbb94a7499377d3ce Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 5 Feb 2025 21:25:56 +0000 Subject: [PATCH 162/192] fix(api): add missing reasoning effort + model enums (#2096) --- .stats.yml | 2 +- src/openai/resources/beta/assistants.py | 106 +++++++++++++++++- .../resources/beta/threads/runs/runs.py | 52 +++++++++ src/openai/resources/chat/completions.py | 28 ++--- .../types/beta/assistant_create_params.py | 11 +- .../types/beta/assistant_update_params.py | 47 +++++++- .../types/beta/threads/run_create_params.py | 9 ++ .../chat/chat_completion_reasoning_effort.py | 3 +- .../types/chat/completion_create_params.py | 4 +- tests/api_resources/beta/test_assistants.py | 8 +- tests/api_resources/beta/threads/test_runs.py | 4 + 11 files changed, 248 insertions(+), 26 deletions(-) diff --git a/.stats.yml b/.stats.yml index df7877dfd0..8a5d2c06b2 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-fc5dbc19505b0035f9e7f88868619f4fb519b048bde011f6154f3132d4be71fb.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-7c699d4503077d06a4a44f52c0c1f902d19a87c766b8be75b97c8dfd484ad4aa.yml diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index 65b7c9cfc2..462086f74b 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -61,6 +61,7 @@ def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN, @@ -97,6 +98,13 @@ def create( name: The name of the assistant. The maximum length is 256 characters. + reasoning_effort: **o1 and o3-mini models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -155,6 +163,7 @@ def create( "instructions": instructions, "metadata": metadata, "name": name, + "reasoning_effort": reasoning_effort, "response_format": response_format, "temperature": temperature, "tool_resources": tool_resources, @@ -210,8 +219,42 @@ def update( description: Optional[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: str | NotGiven = NOT_GIVEN, + model: Union[ + str, + Literal[ + "o3-mini", + "o3-mini-2025-01-31", + "o1", + "o1-2024-12-17", + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ], + ] + | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN, @@ -249,6 +292,13 @@ def update( name: The name of the assistant. The maximum length is 256 characters. + reasoning_effort: **o1 and o3-mini models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -309,6 +359,7 @@ def update( "metadata": metadata, "model": model, "name": name, + "reasoning_effort": reasoning_effort, "response_format": response_format, "temperature": temperature, "tool_resources": tool_resources, @@ -451,6 +502,7 @@ async def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN, @@ -487,6 +539,13 @@ async def create( name: The name of the assistant. The maximum length is 256 characters. + reasoning_effort: **o1 and o3-mini models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -545,6 +604,7 @@ async def create( "instructions": instructions, "metadata": metadata, "name": name, + "reasoning_effort": reasoning_effort, "response_format": response_format, "temperature": temperature, "tool_resources": tool_resources, @@ -600,8 +660,42 @@ async def update( description: Optional[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: str | NotGiven = NOT_GIVEN, + model: Union[ + str, + Literal[ + "o3-mini", + "o3-mini-2025-01-31", + "o1", + "o1-2024-12-17", + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ], + ] + | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN, @@ -639,6 +733,13 @@ async def update( name: The name of the assistant. The maximum length is 256 characters. + reasoning_effort: **o1 and o3-mini models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -699,6 +800,7 @@ async def update( "metadata": metadata, "model": model, "name": name, + "reasoning_effort": reasoning_effort, "response_format": response_format, "temperature": temperature, "tool_resources": tool_resources, diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index a1c93d0382..3a9add5b3c 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -85,6 +85,7 @@ def create( metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -153,6 +154,13 @@ def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. 
+ reasoning_effort: **o1 and o3-mini models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -228,6 +236,7 @@ def create( metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, @@ -299,6 +308,13 @@ def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + reasoning_effort: **o1 and o3-mini models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -370,6 +386,7 @@ def create( metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, @@ -441,6 +458,13 @@ def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + reasoning_effort: **o1 and o3-mini models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -511,6 +535,7 @@ def create( metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -541,6 +566,7 @@ def create( "metadata": metadata, "model": model, "parallel_tool_calls": parallel_tool_calls, + "reasoning_effort": reasoning_effort, "response_format": response_format, "stream": stream, "temperature": temperature, @@ -941,6 +967,7 @@ async def create( metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1009,6 +1036,13 @@ async def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + reasoning_effort: **o1 and o3-mini models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -1084,6 +1118,7 @@ async def create( metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, @@ -1155,6 +1190,13 @@ async def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + reasoning_effort: **o1 and o3-mini models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -1226,6 +1268,7 @@ async def create( metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, @@ -1297,6 +1340,13 @@ async def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + reasoning_effort: **o1 and o3-mini models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -1367,6 +1417,7 @@ async def create( metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1397,6 +1448,7 @@ async def create( "metadata": metadata, "model": model, "parallel_tool_calls": parallel_tool_calls, + "reasoning_effort": reasoning_effort, "response_format": response_format, "stream": stream, "temperature": temperature, diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index 2b91659950..99f26238d9 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -80,7 +80,7 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, @@ -211,7 +211,7 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o1 models only** + reasoning_effort: **o1 and o3-mini models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). 
Currently @@ -333,7 +333,7 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, @@ -470,7 +470,7 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o1 models only** + reasoning_effort: **o1 and o3-mini models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -585,7 +585,7 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, @@ -722,7 +722,7 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o1 models only** + reasoning_effort: **o1 and o3-mini models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -836,7 +836,7 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, @@ -943,7 +943,7 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, @@ -1074,7 +1074,7 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o1 models only** + reasoning_effort: **o1 and o3-mini models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). 
Currently @@ -1196,7 +1196,7 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, @@ -1333,7 +1333,7 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o1 models only** + reasoning_effort: **o1 and o3-mini models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -1448,7 +1448,7 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, @@ -1585,7 +1585,7 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o1 models only** + reasoning_effort: **o1 and o3-mini models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -1699,7 +1699,7 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index e205856395..66bef02ced 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -3,7 +3,7 @@ from __future__ import annotations from typing import List, Union, Iterable, Optional -from typing_extensions import Required, TypedDict +from typing_extensions import Literal, Required, TypedDict from ..chat_model import ChatModel from .assistant_tool_param import AssistantToolParam @@ -53,6 +53,15 @@ class AssistantCreateParams(TypedDict, total=False): name: Optional[str] """The name of the assistant. The maximum length is 256 characters.""" + reasoning_effort: Optional[Literal["low", "medium", "high"]] + """**o1 and o3-mini models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. 
Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + """ + response_format: Optional[AssistantResponseFormatOptionParam] """Specifies the format that the model must output. diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py index 35065ef61b..80fec110cd 100644 --- a/src/openai/types/beta/assistant_update_params.py +++ b/src/openai/types/beta/assistant_update_params.py @@ -2,8 +2,8 @@ from __future__ import annotations -from typing import List, Iterable, Optional -from typing_extensions import TypedDict +from typing import List, Union, Iterable, Optional +from typing_extensions import Literal, TypedDict from .assistant_tool_param import AssistantToolParam from ..shared_params.metadata import Metadata @@ -32,7 +32,39 @@ class AssistantUpdateParams(TypedDict, total=False): a maximum length of 512 characters. """ - model: str + model: Union[ + str, + Literal[ + "o3-mini", + "o3-mini-2025-01-31", + "o1", + "o1-2024-12-17", + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ], + ] """ID of the model to use. You can use the @@ -45,6 +77,15 @@ class AssistantUpdateParams(TypedDict, total=False): name: Optional[str] """The name of the assistant. The maximum length is 256 characters.""" + reasoning_effort: Optional[Literal["low", "medium", "high"]] + """**o1 and o3-mini models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + """ + response_format: Optional[AssistantResponseFormatOptionParam] """Specifies the format that the model must output. diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index 091dd3da66..093b4ce321 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -106,6 +106,15 @@ class RunCreateParamsBase(TypedDict, total=False): during tool use. """ + reasoning_effort: Optional[Literal["low", "medium", "high"]] + """**o1 and o3-mini models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + """ + response_format: Optional[AssistantResponseFormatOptionParam] """Specifies the format that the model must output. diff --git a/src/openai/types/chat/chat_completion_reasoning_effort.py b/src/openai/types/chat/chat_completion_reasoning_effort.py index 9e7946974a..85249c53b1 100644 --- a/src/openai/types/chat/chat_completion_reasoning_effort.py +++ b/src/openai/types/chat/chat_completion_reasoning_effort.py @@ -1,7 +1,8 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
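For reference, a minimal sketch of how the `reasoning_effort` parameter added above can be passed once these changes land; the model name, prompt, and instructions are illustrative only and assume an API key in the `OPENAI_API_KEY` environment variable:

```python
from openai import OpenAI

client = OpenAI()

# Chat Completions: reasoning_effort is now Optional and applies to o1 / o3-mini models
completion = client.chat.completions.create(
    model="o3-mini",
    reasoning_effort="low",
    messages=[{"role": "user", "content": "Summarize the Pythagorean theorem in one sentence."}],
)
print(completion.choices[0].message.content)

# Assistants and Runs accept the same values: "low", "medium", or "high"
assistant = client.beta.assistants.create(
    model="o3-mini",
    instructions="You are a terse math tutor.",
    reasoning_effort="medium",
)
```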
+from typing import Optional from typing_extensions import Literal, TypeAlias __all__ = ["ChatCompletionReasoningEffort"] -ChatCompletionReasoningEffort: TypeAlias = Literal["low", "medium", "high"] +ChatCompletionReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]] diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index ec88ea1fb0..c761cbe07b 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -174,8 +174,8 @@ class CompletionCreateParamsBase(TypedDict, total=False): far, increasing the model's likelihood to talk about new topics. """ - reasoning_effort: ChatCompletionReasoningEffort - """**o1 models only** + reasoning_effort: Optional[ChatCompletionReasoningEffort] + """**o1 and o3-mini models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently diff --git a/tests/api_resources/beta/test_assistants.py b/tests/api_resources/beta/test_assistants.py index 458e3f5e90..82aaf87b1c 100644 --- a/tests/api_resources/beta/test_assistants.py +++ b/tests/api_resources/beta/test_assistants.py @@ -36,6 +36,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: instructions="instructions", metadata={"foo": "string"}, name="name", + reasoning_effort="low", response_format="auto", temperature=1, tool_resources={ @@ -132,8 +133,9 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: description="description", instructions="instructions", metadata={"foo": "string"}, - model="model", + model="string", name="name", + reasoning_effort="low", response_format="auto", temperature=1, tool_resources={ @@ -268,6 +270,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> instructions="instructions", metadata={"foo": "string"}, name="name", + reasoning_effort="low", response_format="auto", temperature=1, tool_resources={ @@ -364,8 +367,9 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> description="description", instructions="instructions", metadata={"foo": "string"}, - model="model", + model="string", name="name", + reasoning_effort="low", response_format="auto", temperature=1, tool_resources={ diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py index 1509d5e9ab..01a1ce9ea4 100644 --- a/tests/api_resources/beta/threads/test_runs.py +++ b/tests/api_resources/beta/threads/test_runs.py @@ -54,6 +54,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, + reasoning_effort="low", response_format="auto", stream=False, temperature=1, @@ -137,6 +138,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, + reasoning_effort="low", response_format="auto", temperature=1, tool_choice="none", @@ -550,6 +552,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, + reasoning_effort="low", response_format="auto", stream=False, temperature=1, @@ -633,6 +636,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, + reasoning_effort="low", response_format="auto", 
temperature=1, tool_choice="none", From 48d0c38750597fc9c0361fff780b74158f103adf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 6 Feb 2025 13:00:22 +0000 Subject: [PATCH 163/192] chore(internal): fix type traversing dictionary params (#2097) --- src/openai/_utils/_transform.py | 12 +++++++++++- tests/test_transform.py | 11 ++++++++++- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/src/openai/_utils/_transform.py b/src/openai/_utils/_transform.py index a6b62cad0c..18afd9d8bd 100644 --- a/src/openai/_utils/_transform.py +++ b/src/openai/_utils/_transform.py @@ -25,7 +25,7 @@ is_annotated_type, strip_annotated_type, ) -from .._compat import model_dump, is_typeddict +from .._compat import get_origin, model_dump, is_typeddict _T = TypeVar("_T") @@ -164,9 +164,14 @@ def _transform_recursive( inner_type = annotation stripped_type = strip_annotated_type(inner_type) + origin = get_origin(stripped_type) or stripped_type if is_typeddict(stripped_type) and is_mapping(data): return _transform_typeddict(data, stripped_type) + if origin == dict and is_mapping(data): + items_type = get_args(stripped_type)[1] + return {key: _transform_recursive(value, annotation=items_type) for key, value in data.items()} + if ( # List[T] (is_list_type(stripped_type) and is_list(data)) @@ -307,9 +312,14 @@ async def _async_transform_recursive( inner_type = annotation stripped_type = strip_annotated_type(inner_type) + origin = get_origin(stripped_type) or stripped_type if is_typeddict(stripped_type) and is_mapping(data): return await _async_transform_typeddict(data, stripped_type) + if origin == dict and is_mapping(data): + items_type = get_args(stripped_type)[1] + return {key: _transform_recursive(value, annotation=items_type) for key, value in data.items()} + if ( # List[T] (is_list_type(stripped_type) and is_list(data)) diff --git a/tests/test_transform.py b/tests/test_transform.py index 8c6aba6448..385fbe2b2c 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -2,7 +2,7 @@ import io import pathlib -from typing import Any, List, Union, TypeVar, Iterable, Optional, cast +from typing import Any, Dict, List, Union, TypeVar, Iterable, Optional, cast from datetime import date, datetime from typing_extensions import Required, Annotated, TypedDict @@ -388,6 +388,15 @@ def my_iter() -> Iterable[Baz8]: } +@parametrize +@pytest.mark.asyncio +async def test_dictionary_items(use_async: bool) -> None: + class DictItems(TypedDict): + foo_baz: Annotated[str, PropertyInfo(alias="fooBaz")] + + assert await transform({"foo": {"foo_baz": "bar"}}, Dict[str, DictItems], use_async) == {"foo": {"fooBaz": "bar"}} + + class TypedDictIterableUnionStr(TypedDict): foo: Annotated[Union[str, Iterable[Baz8]], PropertyInfo(alias="FOO")] From 1d807f07a07b1800d226134c5f77a0242d43dfc8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 6 Feb 2025 15:16:44 +0000 Subject: [PATCH 164/192] feat(pagination): avoid fetching when has_more: false (#2098) --- .stats.yml | 2 +- src/openai/pagination.py | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 8a5d2c06b2..d59a86d22e 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-7c699d4503077d06a4a44f52c0c1f902d19a87c766b8be75b97c8dfd484ad4aa.yml +openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-dfb00c627f58e5180af7a9b29ed2f2aa0764a3b9daa6a32a1cc45bc8e48dfe15.yml diff --git a/src/openai/pagination.py b/src/openai/pagination.py index 8293638269..a59cced854 100644 --- a/src/openai/pagination.py +++ b/src/openai/pagination.py @@ -61,6 +61,7 @@ def next_page_info(self) -> None: class SyncCursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]): data: List[_T] + has_more: Optional[bool] = None @override def _get_page_items(self) -> List[_T]: @@ -69,6 +70,14 @@ def _get_page_items(self) -> List[_T]: return [] return data + @override + def has_next_page(self) -> bool: + has_more = self.has_more + if has_more is not None and has_more is False: + return False + + return super().has_next_page() + @override def next_page_info(self) -> Optional[PageInfo]: data = self.data @@ -85,6 +94,7 @@ def next_page_info(self) -> Optional[PageInfo]: class AsyncCursorPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]): data: List[_T] + has_more: Optional[bool] = None @override def _get_page_items(self) -> List[_T]: @@ -93,6 +103,14 @@ def _get_page_items(self) -> List[_T]: return [] return data + @override + def has_next_page(self) -> bool: + has_more = self.has_more + if has_more is not None and has_more is False: + return False + + return super().has_next_page() + @override def next_page_info(self) -> Optional[PageInfo]: data = self.data From 1063234ab7ad13668584548b6a4f68995eedb962 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 6 Feb 2025 16:22:46 +0000 Subject: [PATCH 165/192] chore(internal): minor type handling changes (#2099) --- src/openai/_models.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/openai/_models.py b/src/openai/_models.py index 12c34b7d17..c4401ff868 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -426,10 +426,16 @@ def construct_type(*, value: object, type_: object) -> object: If the given value does not match the expected type then it is returned as-is. 
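As a quick illustration of the `has_more` change above: when a list response reports `has_more: false`, `has_next_page()` now returns `False` and auto-pagination stops without issuing an extra request. The resource used here is just one example of a cursor-paginated list:

```python
from openai import OpenAI

client = OpenAI()

page = client.beta.assistants.list(limit=20)
print(page.has_more)         # None if the API omitted it, otherwise a bool
print(page.has_next_page())  # False as soon as has_more is False

# Auto-pagination behaves the same; it simply avoids the final empty request
for assistant in client.beta.assistants.list(limit=20):
    print(assistant.id)
```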
""" + + # store a reference to the original type we were given before we extract any inner + # types so that we can properly resolve forward references in `TypeAliasType` annotations + original_type = None + # we allow `object` as the input type because otherwise, passing things like # `Literal['value']` will be reported as a type error by type checkers type_ = cast("type[object]", type_) if is_type_alias_type(type_): + original_type = type_ # type: ignore[unreachable] type_ = type_.__value__ # type: ignore[unreachable] # unwrap `Annotated[T, ...]` -> `T` @@ -446,7 +452,7 @@ def construct_type(*, value: object, type_: object) -> object: if is_union(origin): try: - return validate_type(type_=cast("type[object]", type_), value=value) + return validate_type(type_=cast("type[object]", original_type or type_), value=value) except Exception: pass From 6e8aaf5de61ad175cf03e307164c4afbc456b2a2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 13 Feb 2025 19:42:01 +0000 Subject: [PATCH 166/192] feat(api): add support for storing chat completions (#2117) --- .stats.yml | 4 +- api.md | 14 +- src/openai/_utils/_sync.py | 19 +- src/openai/resources/chat/chat.py | 2 +- .../resources/chat/completions/__init__.py | 33 ++ .../chat/{ => completions}/completions.py | 486 +++++++++++++++++- .../resources/chat/completions/messages.py | 212 ++++++++ src/openai/types/chat/__init__.py | 4 + .../types/chat/chat_completion_deleted.py | 18 + .../chat/chat_completion_store_message.py | 11 + .../types/chat/completion_list_params.py | 33 ++ .../types/chat/completion_update_params.py | 22 + src/openai/types/chat/completions/__init__.py | 5 + .../chat/completions/message_list_params.py | 21 + src/openai/types/moderation.py | 6 +- .../chat/completions/__init__.py | 1 + .../chat/completions/test_messages.py | 119 +++++ tests/api_resources/chat/test_completions.py | 310 +++++++++++ tests/test_client.py | 78 +-- 19 files changed, 1336 insertions(+), 62 deletions(-) create mode 100644 src/openai/resources/chat/completions/__init__.py rename src/openai/resources/chat/{ => completions}/completions.py (83%) create mode 100644 src/openai/resources/chat/completions/messages.py create mode 100644 src/openai/types/chat/chat_completion_deleted.py create mode 100644 src/openai/types/chat/chat_completion_store_message.py create mode 100644 src/openai/types/chat/completion_list_params.py create mode 100644 src/openai/types/chat/completion_update_params.py create mode 100644 src/openai/types/chat/completions/__init__.py create mode 100644 src/openai/types/chat/completions/message_list_params.py create mode 100644 tests/api_resources/chat/completions/__init__.py create mode 100644 tests/api_resources/chat/completions/test_messages.py diff --git a/.stats.yml b/.stats.yml index d59a86d22e..658877d3b0 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-dfb00c627f58e5180af7a9b29ed2f2aa0764a3b9daa6a32a1cc45bc8e48dfe15.yml +configured_endpoints: 74 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-4aa6ee65ba9efc789e05e6a5ef0883b2cadf06def8efd863dbf75e9e233067e1.yml diff --git a/api.md b/api.md index c357818064..153521145c 100644 --- a/api.md +++ b/api.md @@ -48,6 +48,7 @@ from openai.types.chat import ( ChatCompletionContentPartInputAudio, ChatCompletionContentPartRefusal, ChatCompletionContentPartText, + ChatCompletionDeleted, 
ChatCompletionDeveloperMessageParam, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, @@ -59,6 +60,7 @@ from openai.types.chat import ( ChatCompletionPredictionContent, ChatCompletionReasoningEffort, ChatCompletionRole, + ChatCompletionStoreMessage, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, ChatCompletionTokenLogprob, @@ -71,7 +73,17 @@ from openai.types.chat import ( Methods: -- client.chat.completions.create(\*\*params) -> ChatCompletion +- client.chat.completions.create(\*\*params) -> ChatCompletion +- client.chat.completions.retrieve(completion_id) -> ChatCompletion +- client.chat.completions.update(completion_id, \*\*params) -> ChatCompletion +- client.chat.completions.list(\*\*params) -> SyncCursorPage[ChatCompletion] +- client.chat.completions.delete(completion_id) -> ChatCompletionDeleted + +### Messages + +Methods: + +- client.chat.completions.messages.list(completion_id, \*\*params) -> SyncCursorPage[ChatCompletionStoreMessage] # Embeddings diff --git a/src/openai/_utils/_sync.py b/src/openai/_utils/_sync.py index 8b3aaf2b5d..ad7ec71b76 100644 --- a/src/openai/_utils/_sync.py +++ b/src/openai/_utils/_sync.py @@ -7,16 +7,20 @@ from typing import Any, TypeVar, Callable, Awaitable from typing_extensions import ParamSpec +import anyio +import sniffio +import anyio.to_thread + T_Retval = TypeVar("T_Retval") T_ParamSpec = ParamSpec("T_ParamSpec") if sys.version_info >= (3, 9): - to_thread = asyncio.to_thread + _asyncio_to_thread = asyncio.to_thread else: # backport of https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread # for Python 3.8 support - async def to_thread( + async def _asyncio_to_thread( func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs ) -> Any: """Asynchronously run function *func* in a separate thread. @@ -34,6 +38,17 @@ async def to_thread( return await loop.run_in_executor(None, func_call) +async def to_thread( + func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs +) -> T_Retval: + if sniffio.current_async_library() == "asyncio": + return await _asyncio_to_thread(func, *args, **kwargs) + + return await anyio.to_thread.run_sync( + functools.partial(func, *args, **kwargs), + ) + + # inspired by `asyncer`, https://github.com/tiangolo/asyncer def asyncify(function: Callable[T_ParamSpec, T_Retval]) -> Callable[T_ParamSpec, Awaitable[T_Retval]]: """ diff --git a/src/openai/resources/chat/chat.py b/src/openai/resources/chat/chat.py index 9c4aacc953..14f9224b41 100644 --- a/src/openai/resources/chat/chat.py +++ b/src/openai/resources/chat/chat.py @@ -4,7 +4,7 @@ from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource -from .completions import ( +from .completions.completions import ( Completions, AsyncCompletions, CompletionsWithRawResponse, diff --git a/src/openai/resources/chat/completions/__init__.py b/src/openai/resources/chat/completions/__init__.py new file mode 100644 index 0000000000..12d3b3aa28 --- /dev/null +++ b/src/openai/resources/chat/completions/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
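A small sketch of what the `to_thread` change above enables: `asyncify` (an internal helper, imported here purely for illustration) now also works under non-asyncio event loops by falling back to `anyio.to_thread.run_sync`:

```python
import anyio

from openai._utils._sync import asyncify  # internal helper shown in the diff above


def blocking_work(x: int) -> int:
    return x * 2


async def main() -> None:
    # The wrapped call runs in a worker thread; the backend is picked via sniffio:
    # asyncio.to_thread on asyncio, anyio.to_thread.run_sync otherwise (e.g. trio).
    result = await asyncify(blocking_work)(21)
    print(result)


anyio.run(main)  # with trio installed, anyio.run(main, backend="trio") also works now
```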
+ +from .messages import ( + Messages, + AsyncMessages, + MessagesWithRawResponse, + AsyncMessagesWithRawResponse, + MessagesWithStreamingResponse, + AsyncMessagesWithStreamingResponse, +) +from .completions import ( + Completions, + AsyncCompletions, + CompletionsWithRawResponse, + AsyncCompletionsWithRawResponse, + CompletionsWithStreamingResponse, + AsyncCompletionsWithStreamingResponse, +) + +__all__ = [ + "Messages", + "AsyncMessages", + "MessagesWithRawResponse", + "AsyncMessagesWithRawResponse", + "MessagesWithStreamingResponse", + "AsyncMessagesWithStreamingResponse", + "Completions", + "AsyncCompletions", + "CompletionsWithRawResponse", + "AsyncCompletionsWithRawResponse", + "CompletionsWithStreamingResponse", + "AsyncCompletionsWithStreamingResponse", +] diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions/completions.py similarity index 83% rename from src/openai/resources/chat/completions.py rename to src/openai/resources/chat/completions/completions.py index 99f26238d9..f659cfdeeb 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions/completions.py @@ -7,40 +7,56 @@ import httpx -from ... import _legacy_response -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import ( +from .... import _legacy_response +from .messages import ( + Messages, + AsyncMessages, + MessagesWithRawResponse, + AsyncMessagesWithRawResponse, + MessagesWithStreamingResponse, + AsyncMessagesWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import ( required_args, maybe_transform, async_maybe_transform, ) -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper -from ..._streaming import Stream, AsyncStream -from ...types.chat import ( +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ...._streaming import Stream, AsyncStream +from ....pagination import SyncCursorPage, AsyncCursorPage +from ....types.chat import ( ChatCompletionAudioParam, ChatCompletionReasoningEffort, + completion_list_params, completion_create_params, + completion_update_params, ) -from ..._base_client import make_request_options -from ...types.chat_model import ChatModel -from ...types.chat.chat_completion import ChatCompletion -from ...types.shared_params.metadata import Metadata -from ...types.chat.chat_completion_chunk import ChatCompletionChunk -from ...types.chat.chat_completion_modality import ChatCompletionModality -from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam -from ...types.chat.chat_completion_audio_param import ChatCompletionAudioParam -from ...types.chat.chat_completion_message_param import ChatCompletionMessageParam -from ...types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort -from ...types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam -from ...types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam -from ...types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam +from ...._base_client import AsyncPaginator, make_request_options +from ....types.chat_model import ChatModel +from 
....types.chat.chat_completion import ChatCompletion +from ....types.shared_params.metadata import Metadata +from ....types.chat.chat_completion_chunk import ChatCompletionChunk +from ....types.chat.chat_completion_deleted import ChatCompletionDeleted +from ....types.chat.chat_completion_modality import ChatCompletionModality +from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam +from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam +from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam +from ....types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort +from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam +from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam +from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam __all__ = ["Completions", "AsyncCompletions"] class Completions(SyncAPIResource): + @cached_property + def messages(self) -> Messages: + return Messages(self._client) + @cached_property def with_raw_response(self) -> CompletionsWithRawResponse: """ @@ -902,8 +918,192 @@ def create( stream_cls=Stream[ChatCompletionChunk], ) + def retrieve( + self, + completion_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChatCompletion: + """Get a stored chat completion. + + Only chat completions that have been created with + the `store` parameter set to `true` will be returned. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not completion_id: + raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") + return self._get( + f"/chat/completions/{completion_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChatCompletion, + ) + + def update( + self, + completion_id: str, + *, + metadata: Optional[Metadata], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChatCompletion: + """Modify a stored chat completion. + + Only chat completions that have been created + with the `store` parameter set to `true` can be modified. Currently, the only + supported modification is to update the `metadata` field. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. 
Values are strings with + a maximum length of 512 characters. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not completion_id: + raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") + return self._post( + f"/chat/completions/{completion_id}", + body=maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChatCompletion, + ) + + def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + model: str | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SyncCursorPage[ChatCompletion]: + """List stored chat completions. + + Only chat completions that have been stored with + the `store` parameter set to `true` will be returned. + + Args: + after: Identifier for the last chat completion from the previous pagination request. + + limit: Number of chat completions to retrieve. + + metadata: + A list of metadata keys to filter the chat completions by. Example: + + `metadata[key1]=value1&metadata[key2]=value2` + + model: The model used to generate the chat completions. + + order: Sort order for chat completions by timestamp. Use `asc` for ascending order or + `desc` for descending order. Defaults to `asc`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get_api_list( + "/chat/completions", + page=SyncCursorPage[ChatCompletion], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + "metadata": metadata, + "model": model, + "order": order, + }, + completion_list_params.CompletionListParams, + ), + ), + model=ChatCompletion, + ) + + def delete( + self, + completion_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChatCompletionDeleted: + """Delete a stored chat completion. + + Only chat completions that have been created + with the `store` parameter set to `true` can be deleted. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not completion_id: + raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") + return self._delete( + f"/chat/completions/{completion_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChatCompletionDeleted, + ) + class AsyncCompletions(AsyncAPIResource): + @cached_property + def messages(self) -> AsyncMessages: + return AsyncMessages(self._client) + @cached_property def with_raw_response(self) -> AsyncCompletionsWithRawResponse: """ @@ -1765,6 +1965,186 @@ async def create( stream_cls=AsyncStream[ChatCompletionChunk], ) + async def retrieve( + self, + completion_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChatCompletion: + """Get a stored chat completion. + + Only chat completions that have been created with + the `store` parameter set to `true` will be returned. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not completion_id: + raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") + return await self._get( + f"/chat/completions/{completion_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChatCompletion, + ) + + async def update( + self, + completion_id: str, + *, + metadata: Optional[Metadata], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChatCompletion: + """Modify a stored chat completion. + + Only chat completions that have been created + with the `store` parameter set to `true` can be modified. Currently, the only + supported modification is to update the `metadata` field. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not completion_id: + raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") + return await self._post( + f"/chat/completions/{completion_id}", + body=await async_maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChatCompletion, + ) + + def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + model: str | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncPaginator[ChatCompletion, AsyncCursorPage[ChatCompletion]]: + """List stored chat completions. + + Only chat completions that have been stored with + the `store` parameter set to `true` will be returned. + + Args: + after: Identifier for the last chat completion from the previous pagination request. + + limit: Number of chat completions to retrieve. + + metadata: + A list of metadata keys to filter the chat completions by. Example: + + `metadata[key1]=value1&metadata[key2]=value2` + + model: The model used to generate the chat completions. + + order: Sort order for chat completions by timestamp. Use `asc` for ascending order or + `desc` for descending order. Defaults to `asc`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get_api_list( + "/chat/completions", + page=AsyncCursorPage[ChatCompletion], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + "metadata": metadata, + "model": model, + "order": order, + }, + completion_list_params.CompletionListParams, + ), + ), + model=ChatCompletion, + ) + + async def delete( + self, + completion_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChatCompletionDeleted: + """Delete a stored chat completion. + + Only chat completions that have been created + with the `store` parameter set to `true` can be deleted. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not completion_id: + raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") + return await self._delete( + f"/chat/completions/{completion_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChatCompletionDeleted, + ) + class CompletionsWithRawResponse: def __init__(self, completions: Completions) -> None: @@ -1773,6 +2153,22 @@ def __init__(self, completions: Completions) -> None: self.create = _legacy_response.to_raw_response_wrapper( completions.create, ) + self.retrieve = _legacy_response.to_raw_response_wrapper( + completions.retrieve, + ) + self.update = _legacy_response.to_raw_response_wrapper( + completions.update, + ) + self.list = _legacy_response.to_raw_response_wrapper( + completions.list, + ) + self.delete = _legacy_response.to_raw_response_wrapper( + completions.delete, + ) + + @cached_property + def messages(self) -> MessagesWithRawResponse: + return MessagesWithRawResponse(self._completions.messages) class AsyncCompletionsWithRawResponse: @@ -1782,6 +2178,22 @@ def __init__(self, completions: AsyncCompletions) -> None: self.create = _legacy_response.async_to_raw_response_wrapper( completions.create, ) + self.retrieve = _legacy_response.async_to_raw_response_wrapper( + completions.retrieve, + ) + self.update = _legacy_response.async_to_raw_response_wrapper( + completions.update, + ) + self.list = _legacy_response.async_to_raw_response_wrapper( + completions.list, + ) + self.delete = _legacy_response.async_to_raw_response_wrapper( + completions.delete, + ) + + @cached_property + def messages(self) -> AsyncMessagesWithRawResponse: + return AsyncMessagesWithRawResponse(self._completions.messages) class CompletionsWithStreamingResponse: @@ -1791,6 +2203,22 @@ def __init__(self, completions: Completions) -> None: self.create = to_streamed_response_wrapper( completions.create, ) + self.retrieve = to_streamed_response_wrapper( + completions.retrieve, + ) + self.update = to_streamed_response_wrapper( + completions.update, + ) + self.list = to_streamed_response_wrapper( + completions.list, + ) + self.delete = to_streamed_response_wrapper( + completions.delete, + ) + + @cached_property + def messages(self) -> MessagesWithStreamingResponse: + return MessagesWithStreamingResponse(self._completions.messages) class AsyncCompletionsWithStreamingResponse: @@ -1800,3 +2228,19 @@ def __init__(self, completions: AsyncCompletions) -> None: self.create = async_to_streamed_response_wrapper( completions.create, ) + self.retrieve = async_to_streamed_response_wrapper( + completions.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + completions.update, + ) + self.list = async_to_streamed_response_wrapper( + completions.list, + ) + self.delete = async_to_streamed_response_wrapper( + completions.delete, + ) + + @cached_property + def messages(self) -> AsyncMessagesWithStreamingResponse: + return AsyncMessagesWithStreamingResponse(self._completions.messages) diff --git a/src/openai/resources/chat/completions/messages.py b/src/openai/resources/chat/completions/messages.py new file mode 100644 index 0000000000..b71d670927 --- /dev/null +++ 
b/src/openai/resources/chat/completions/messages.py @@ -0,0 +1,212 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal + +import httpx + +from .... import _legacy_response +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ....pagination import SyncCursorPage, AsyncCursorPage +from ...._base_client import AsyncPaginator, make_request_options +from ....types.chat.completions import message_list_params +from ....types.chat.chat_completion_store_message import ChatCompletionStoreMessage + +__all__ = ["Messages", "AsyncMessages"] + + +class Messages(SyncAPIResource): + @cached_property + def with_raw_response(self) -> MessagesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return MessagesWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> MessagesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return MessagesWithStreamingResponse(self) + + def list( + self, + completion_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SyncCursorPage[ChatCompletionStoreMessage]: + """Get the messages in a stored chat completion. + + Only chat completions that have + been created with the `store` parameter set to `true` will be returned. + + Args: + after: Identifier for the last message from the previous pagination request. + + limit: Number of messages to retrieve. + + order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + for descending order. Defaults to `asc`. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not completion_id: + raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") + return self._get_api_list( + f"/chat/completions/{completion_id}/messages", + page=SyncCursorPage[ChatCompletionStoreMessage], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + "order": order, + }, + message_list_params.MessageListParams, + ), + ), + model=ChatCompletionStoreMessage, + ) + + +class AsyncMessages(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncMessagesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncMessagesWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncMessagesWithStreamingResponse(self) + + def list( + self, + completion_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncPaginator[ChatCompletionStoreMessage, AsyncCursorPage[ChatCompletionStoreMessage]]: + """Get the messages in a stored chat completion. + + Only chat completions that have + been created with the `store` parameter set to `true` will be returned. + + Args: + after: Identifier for the last message from the previous pagination request. + + limit: Number of messages to retrieve. + + order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + for descending order. Defaults to `asc`. 
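Taken together, the new endpoints added in this patch can be exercised roughly as follows; the model, prompt, and metadata values are illustrative, and the completion must be created with `store=True` for the retrieve/update/list/delete calls to apply:

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",
    store=True,
    messages=[{"role": "user", "content": "Say hello."}],
)

retrieved = client.chat.completions.retrieve(completion.id)
updated = client.chat.completions.update(completion.id, metadata={"topic": "greeting"})

# List stored completions (optionally filtered by metadata) and page through messages
for stored in client.chat.completions.list(metadata={"topic": "greeting"}, limit=10):
    for message in client.chat.completions.messages.list(stored.id, limit=10):
        print(message.id, message.role)

deleted = client.chat.completions.delete(completion.id)
print(deleted.deleted)
```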
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not completion_id: + raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") + return self._get_api_list( + f"/chat/completions/{completion_id}/messages", + page=AsyncCursorPage[ChatCompletionStoreMessage], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + "order": order, + }, + message_list_params.MessageListParams, + ), + ), + model=ChatCompletionStoreMessage, + ) + + +class MessagesWithRawResponse: + def __init__(self, messages: Messages) -> None: + self._messages = messages + + self.list = _legacy_response.to_raw_response_wrapper( + messages.list, + ) + + +class AsyncMessagesWithRawResponse: + def __init__(self, messages: AsyncMessages) -> None: + self._messages = messages + + self.list = _legacy_response.async_to_raw_response_wrapper( + messages.list, + ) + + +class MessagesWithStreamingResponse: + def __init__(self, messages: Messages) -> None: + self._messages = messages + + self.list = to_streamed_response_wrapper( + messages.list, + ) + + +class AsyncMessagesWithStreamingResponse: + def __init__(self, messages: AsyncMessages) -> None: + self._messages = messages + + self.list = async_to_streamed_response_wrapper( + messages.list, + ) diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py index 962dc51da0..e34e2a4177 100644 --- a/src/openai/types/chat/__init__.py +++ b/src/openai/types/chat/__init__.py @@ -6,12 +6,16 @@ from .chat_completion_role import ChatCompletionRole as ChatCompletionRole from .chat_completion_audio import ChatCompletionAudio as ChatCompletionAudio from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk +from .completion_list_params import CompletionListParams as CompletionListParams +from .chat_completion_deleted import ChatCompletionDeleted as ChatCompletionDeleted from .chat_completion_message import ChatCompletionMessage as ChatCompletionMessage from .chat_completion_modality import ChatCompletionModality as ChatCompletionModality from .completion_create_params import CompletionCreateParams as CompletionCreateParams +from .completion_update_params import CompletionUpdateParams as CompletionUpdateParams from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam from .chat_completion_audio_param import ChatCompletionAudioParam as ChatCompletionAudioParam from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam +from .chat_completion_store_message import ChatCompletionStoreMessage as ChatCompletionStoreMessage from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort as ChatCompletionReasoningEffort from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall diff --git a/src/openai/types/chat/chat_completion_deleted.py b/src/openai/types/chat/chat_completion_deleted.py new file mode 100644 index 0000000000..0a541cb23d --- /dev/null +++ b/src/openai/types/chat/chat_completion_deleted.py @@ -0,0 +1,18 @@ +# File generated from 
our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ChatCompletionDeleted"] + + +class ChatCompletionDeleted(BaseModel): + id: str + """The ID of the chat completion that was deleted.""" + + deleted: bool + """Whether the chat completion was deleted.""" + + object: Literal["chat.completion.deleted"] + """The type of object being deleted.""" diff --git a/src/openai/types/chat/chat_completion_store_message.py b/src/openai/types/chat/chat_completion_store_message.py new file mode 100644 index 0000000000..95adc08af8 --- /dev/null +++ b/src/openai/types/chat/chat_completion_store_message.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + + +from .chat_completion_message import ChatCompletionMessage + +__all__ = ["ChatCompletionStoreMessage"] + + +class ChatCompletionStoreMessage(ChatCompletionMessage): + id: str + """The identifier of the chat message.""" diff --git a/src/openai/types/chat/completion_list_params.py b/src/openai/types/chat/completion_list_params.py new file mode 100644 index 0000000000..a8fce900ce --- /dev/null +++ b/src/openai/types/chat/completion_list_params.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, TypedDict + +from ..shared_params.metadata import Metadata + +__all__ = ["CompletionListParams"] + + +class CompletionListParams(TypedDict, total=False): + after: str + """Identifier for the last chat completion from the previous pagination request.""" + + limit: int + """Number of chat completions to retrieve.""" + + metadata: Optional[Metadata] + """A list of metadata keys to filter the chat completions by. Example: + + `metadata[key1]=value1&metadata[key2]=value2` + """ + + model: str + """The model used to generate the chat completions.""" + + order: Literal["asc", "desc"] + """Sort order for chat completions by timestamp. + + Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`. + """ diff --git a/src/openai/types/chat/completion_update_params.py b/src/openai/types/chat/completion_update_params.py new file mode 100644 index 0000000000..fc71733f07 --- /dev/null +++ b/src/openai/types/chat/completion_update_params.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Required, TypedDict + +from ..shared_params.metadata import Metadata + +__all__ = ["CompletionUpdateParams"] + + +class CompletionUpdateParams(TypedDict, total=False): + metadata: Required[Optional[Metadata]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ diff --git a/src/openai/types/chat/completions/__init__.py b/src/openai/types/chat/completions/__init__.py new file mode 100644 index 0000000000..b8e62d6a64 --- /dev/null +++ b/src/openai/types/chat/completions/__init__.py @@ -0,0 +1,5 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .message_list_params import MessageListParams as MessageListParams diff --git a/src/openai/types/chat/completions/message_list_params.py b/src/openai/types/chat/completions/message_list_params.py new file mode 100644 index 0000000000..4e694e83ea --- /dev/null +++ b/src/openai/types/chat/completions/message_list_params.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["MessageListParams"] + + +class MessageListParams(TypedDict, total=False): + after: str + """Identifier for the last message from the previous pagination request.""" + + limit: int + """Number of messages to retrieve.""" + + order: Literal["asc", "desc"] + """Sort order for messages by timestamp. + + Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`. + """ diff --git a/src/openai/types/moderation.py b/src/openai/types/moderation.py index e4ec182ce2..608f562218 100644 --- a/src/openai/types/moderation.py +++ b/src/openai/types/moderation.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List +from typing import List, Optional from typing_extensions import Literal from pydantic import Field as FieldInfo @@ -38,14 +38,14 @@ class Categories(BaseModel): orientation, disability status, or caste. """ - illicit: bool + illicit: Optional[bool] = None """ Content that includes instructions or advice that facilitate the planning or execution of wrongdoing, or that gives advice or instruction on how to commit illicit acts. For example, "how to shoplift" would fit this category. """ - illicit_violent: bool = FieldInfo(alias="illicit/violent") + illicit_violent: Optional[bool] = FieldInfo(alias="illicit/violent", default=None) """ Content that includes instructions or advice that facilitate the planning or execution of wrongdoing that also includes violence, or that gives advice or diff --git a/tests/api_resources/chat/completions/__init__.py b/tests/api_resources/chat/completions/__init__.py new file mode 100644 index 0000000000..fd8019a9a1 --- /dev/null +++ b/tests/api_resources/chat/completions/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/chat/completions/test_messages.py b/tests/api_resources/chat/completions/test_messages.py new file mode 100644 index 0000000000..5caac9ec6c --- /dev/null +++ b/tests/api_resources/chat/completions/test_messages.py @@ -0,0 +1,119 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
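
The message listing parameters above drive the nested `client.chat.completions.messages.list` call; a short sketch, with a placeholder completion ID:

from openai import OpenAI

client = OpenAI()

# Page through the stored messages of a given chat completion.
for message in client.chat.completions.messages.list(
    completion_id="chatcmpl-abc123",  # placeholder ID
    limit=50,
    order="asc",
):
    # Each item is a ChatCompletionStoreMessage: a ChatCompletionMessage plus an `id`.
    print(message.id, message.role, message.content)
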
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from openai import OpenAI, AsyncOpenAI +from tests.utils import assert_matches_type +from openai.pagination import SyncCursorPage, AsyncCursorPage +from openai.types.chat import ChatCompletionStoreMessage + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010/") + + +class TestMessages: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + def test_method_list(self, client: OpenAI) -> None: + message = client.chat.completions.messages.list( + completion_id="completion_id", + ) + assert_matches_type(SyncCursorPage[ChatCompletionStoreMessage], message, path=["response"]) + + @parametrize + def test_method_list_with_all_params(self, client: OpenAI) -> None: + message = client.chat.completions.messages.list( + completion_id="completion_id", + after="after", + limit=0, + order="asc", + ) + assert_matches_type(SyncCursorPage[ChatCompletionStoreMessage], message, path=["response"]) + + @parametrize + def test_raw_response_list(self, client: OpenAI) -> None: + response = client.chat.completions.messages.with_raw_response.list( + completion_id="completion_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + message = response.parse() + assert_matches_type(SyncCursorPage[ChatCompletionStoreMessage], message, path=["response"]) + + @parametrize + def test_streaming_response_list(self, client: OpenAI) -> None: + with client.chat.completions.messages.with_streaming_response.list( + completion_id="completion_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + message = response.parse() + assert_matches_type(SyncCursorPage[ChatCompletionStoreMessage], message, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_list(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): + client.chat.completions.messages.with_raw_response.list( + completion_id="", + ) + + +class TestAsyncMessages: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + async def test_method_list(self, async_client: AsyncOpenAI) -> None: + message = await async_client.chat.completions.messages.list( + completion_id="completion_id", + ) + assert_matches_type(AsyncCursorPage[ChatCompletionStoreMessage], message, path=["response"]) + + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: + message = await async_client.chat.completions.messages.list( + completion_id="completion_id", + after="after", + limit=0, + order="asc", + ) + assert_matches_type(AsyncCursorPage[ChatCompletionStoreMessage], message, path=["response"]) + + @parametrize + async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: + response = await async_client.chat.completions.messages.with_raw_response.list( + completion_id="completion_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + message = response.parse() + assert_matches_type(AsyncCursorPage[ChatCompletionStoreMessage], message, path=["response"]) + + @parametrize + async def test_streaming_response_list(self, async_client: 
AsyncOpenAI) -> None: + async with async_client.chat.completions.messages.with_streaming_response.list( + completion_id="completion_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + message = await response.parse() + assert_matches_type(AsyncCursorPage[ChatCompletionStoreMessage], message, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_list(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): + await async_client.chat.completions.messages.with_raw_response.list( + completion_id="", + ) diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index cb899502b4..760fba0a37 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -9,8 +9,10 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type +from openai.pagination import SyncCursorPage, AsyncCursorPage from openai.types.chat import ( ChatCompletion, + ChatCompletionDeleted, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010/") @@ -247,6 +249,160 @@ def test_streaming_response_create_overload_2(self, client: OpenAI) -> None: assert cast(Any, response.is_closed) is True + @parametrize + def test_method_retrieve(self, client: OpenAI) -> None: + completion = client.chat.completions.retrieve( + "completion_id", + ) + assert_matches_type(ChatCompletion, completion, path=["response"]) + + @parametrize + def test_raw_response_retrieve(self, client: OpenAI) -> None: + response = client.chat.completions.with_raw_response.retrieve( + "completion_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(ChatCompletion, completion, path=["response"]) + + @parametrize + def test_streaming_response_retrieve(self, client: OpenAI) -> None: + with client.chat.completions.with_streaming_response.retrieve( + "completion_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = response.parse() + assert_matches_type(ChatCompletion, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_retrieve(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): + client.chat.completions.with_raw_response.retrieve( + "", + ) + + @parametrize + def test_method_update(self, client: OpenAI) -> None: + completion = client.chat.completions.update( + completion_id="completion_id", + metadata={"foo": "string"}, + ) + assert_matches_type(ChatCompletion, completion, path=["response"]) + + @parametrize + def test_raw_response_update(self, client: OpenAI) -> None: + response = client.chat.completions.with_raw_response.update( + completion_id="completion_id", + metadata={"foo": "string"}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(ChatCompletion, completion, path=["response"]) + + @parametrize + def test_streaming_response_update(self, client: OpenAI) -> None: + with 
client.chat.completions.with_streaming_response.update( + completion_id="completion_id", + metadata={"foo": "string"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = response.parse() + assert_matches_type(ChatCompletion, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_update(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): + client.chat.completions.with_raw_response.update( + completion_id="", + metadata={"foo": "string"}, + ) + + @parametrize + def test_method_list(self, client: OpenAI) -> None: + completion = client.chat.completions.list() + assert_matches_type(SyncCursorPage[ChatCompletion], completion, path=["response"]) + + @parametrize + def test_method_list_with_all_params(self, client: OpenAI) -> None: + completion = client.chat.completions.list( + after="after", + limit=0, + metadata={"foo": "string"}, + model="model", + order="asc", + ) + assert_matches_type(SyncCursorPage[ChatCompletion], completion, path=["response"]) + + @parametrize + def test_raw_response_list(self, client: OpenAI) -> None: + response = client.chat.completions.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(SyncCursorPage[ChatCompletion], completion, path=["response"]) + + @parametrize + def test_streaming_response_list(self, client: OpenAI) -> None: + with client.chat.completions.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = response.parse() + assert_matches_type(SyncCursorPage[ChatCompletion], completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_method_delete(self, client: OpenAI) -> None: + completion = client.chat.completions.delete( + "completion_id", + ) + assert_matches_type(ChatCompletionDeleted, completion, path=["response"]) + + @parametrize + def test_raw_response_delete(self, client: OpenAI) -> None: + response = client.chat.completions.with_raw_response.delete( + "completion_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(ChatCompletionDeleted, completion, path=["response"]) + + @parametrize + def test_streaming_response_delete(self, client: OpenAI) -> None: + with client.chat.completions.with_streaming_response.delete( + "completion_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = response.parse() + assert_matches_type(ChatCompletionDeleted, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_delete(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): + client.chat.completions.with_raw_response.delete( + "", + ) + class TestAsyncCompletions: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -478,3 +634,157 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncOpe await 
stream.close() assert cast(Any, response.is_closed) is True + + @parametrize + async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: + completion = await async_client.chat.completions.retrieve( + "completion_id", + ) + assert_matches_type(ChatCompletion, completion, path=["response"]) + + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: + response = await async_client.chat.completions.with_raw_response.retrieve( + "completion_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(ChatCompletion, completion, path=["response"]) + + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: + async with async_client.chat.completions.with_streaming_response.retrieve( + "completion_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = await response.parse() + assert_matches_type(ChatCompletion, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): + await async_client.chat.completions.with_raw_response.retrieve( + "", + ) + + @parametrize + async def test_method_update(self, async_client: AsyncOpenAI) -> None: + completion = await async_client.chat.completions.update( + completion_id="completion_id", + metadata={"foo": "string"}, + ) + assert_matches_type(ChatCompletion, completion, path=["response"]) + + @parametrize + async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: + response = await async_client.chat.completions.with_raw_response.update( + completion_id="completion_id", + metadata={"foo": "string"}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(ChatCompletion, completion, path=["response"]) + + @parametrize + async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None: + async with async_client.chat.completions.with_streaming_response.update( + completion_id="completion_id", + metadata={"foo": "string"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = await response.parse() + assert_matches_type(ChatCompletion, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_update(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): + await async_client.chat.completions.with_raw_response.update( + completion_id="", + metadata={"foo": "string"}, + ) + + @parametrize + async def test_method_list(self, async_client: AsyncOpenAI) -> None: + completion = await async_client.chat.completions.list() + assert_matches_type(AsyncCursorPage[ChatCompletion], completion, path=["response"]) + + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: + completion = await async_client.chat.completions.list( + after="after", + limit=0, + metadata={"foo": "string"}, + model="model", + order="asc", + ) + 
assert_matches_type(AsyncCursorPage[ChatCompletion], completion, path=["response"]) + + @parametrize + async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: + response = await async_client.chat.completions.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(AsyncCursorPage[ChatCompletion], completion, path=["response"]) + + @parametrize + async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: + async with async_client.chat.completions.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = await response.parse() + assert_matches_type(AsyncCursorPage[ChatCompletion], completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_method_delete(self, async_client: AsyncOpenAI) -> None: + completion = await async_client.chat.completions.delete( + "completion_id", + ) + assert_matches_type(ChatCompletionDeleted, completion, path=["response"]) + + @parametrize + async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: + response = await async_client.chat.completions.with_raw_response.delete( + "completion_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(ChatCompletionDeleted, completion, path=["response"]) + + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: + async with async_client.chat.completions.with_streaming_response.delete( + "completion_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = await response.parse() + assert_matches_type(ChatCompletionDeleted, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): + await async_client.chat.completions.with_raw_response.delete( + "", + ) diff --git a/tests/test_client.py b/tests/test_client.py index 41da2d5d04..62654afe1e 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -23,11 +23,13 @@ from openai import OpenAI, AsyncOpenAI, APIResponseValidationError from openai._types import Omit +from openai._utils import maybe_transform from openai._models import BaseModel, FinalRequestOptions from openai._constants import RAW_RESPONSE_HEADER from openai._streaming import Stream, AsyncStream from openai._exceptions import OpenAIError, APIStatusError, APITimeoutError, APIResponseValidationError from openai._base_client import DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, make_request_options +from openai.types.chat.completion_create_params import CompletionCreateParamsNonStreaming from .utils import update_env @@ -724,14 +726,17 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> No "/chat/completions", body=cast( object, - dict( - messages=[ - { - "role": "user", - "content": "Say this is a test", - } - ], - model="gpt-4o", + maybe_transform( + dict( + messages=[ + { + "role": "user", + "content": "Say this is a test", + } + ], + model="gpt-4o", + ), + 
CompletionCreateParamsNonStreaming, ), ), cast_to=httpx.Response, @@ -750,14 +755,17 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> Non "/chat/completions", body=cast( object, - dict( - messages=[ - { - "role": "user", - "content": "Say this is a test", - } - ], - model="gpt-4o", + maybe_transform( + dict( + messages=[ + { + "role": "user", + "content": "Say this is a test", + } + ], + model="gpt-4o", + ), + CompletionCreateParamsNonStreaming, ), ), cast_to=httpx.Response, @@ -1591,14 +1599,17 @@ async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) "/chat/completions", body=cast( object, - dict( - messages=[ - { - "role": "user", - "content": "Say this is a test", - } - ], - model="gpt-4o", + maybe_transform( + dict( + messages=[ + { + "role": "user", + "content": "Say this is a test", + } + ], + model="gpt-4o", + ), + CompletionCreateParamsNonStreaming, ), ), cast_to=httpx.Response, @@ -1617,14 +1628,17 @@ async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) "/chat/completions", body=cast( object, - dict( - messages=[ - { - "role": "user", - "content": "Say this is a test", - } - ], - model="gpt-4o", + maybe_transform( + dict( + messages=[ + { + "role": "user", + "content": "Say this is a test", + } + ], + model="gpt-4o", + ), + CompletionCreateParamsNonStreaming, ), ), cast_to=httpx.Response, From a2736dff4c2258851db4aa5bc33490bd4221161f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 14 Feb 2025 11:11:51 +0000 Subject: [PATCH 167/192] chore(internal): temporary commit (#2121) --- .github/ISSUE_TEMPLATE/bug_report.yml | 64 ---------------------- .github/ISSUE_TEMPLATE/config.yml | 7 --- .github/ISSUE_TEMPLATE/feature_request.yml | 28 ---------- .github/pull_request_template.md | 10 ---- 4 files changed, 109 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml delete mode 100644 .github/ISSUE_TEMPLATE/config.yml delete mode 100644 .github/ISSUE_TEMPLATE/feature_request.yml delete mode 100644 .github/pull_request_template.md diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml deleted file mode 100644 index fa09dbe5b0..0000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: Bug report -description: Report an issue or bug with this library -labels: ['bug'] -body: - - type: markdown - attributes: - value: | - Thanks for taking the time to fill out this bug report! - - type: checkboxes - id: non_api - attributes: - label: Confirm this is an issue with the Python library and not an underlying OpenAI API - description: Issues with the underlying OpenAI API should be reported on our [Developer Community](https://community.openai.com/c/api/7) - options: - - label: This is an issue with the Python library - required: true - - type: textarea - id: what-happened - attributes: - label: Describe the bug - description: A clear and concise description of what the bug is, and any additional context. - placeholder: Tell us what you see! - validations: - required: true - - type: textarea - id: repro-steps - attributes: - label: To Reproduce - description: Steps to reproduce the behavior. - placeholder: | - 1. Fetch a '...' - 2. Update the '....' - 3. See error - validations: - required: true - - type: textarea - id: code-snippets - attributes: - label: Code snippets - description: If applicable, add code snippets to help explain your problem. 
- render: Python - validations: - required: false - - type: input - id: os - attributes: - label: OS - placeholder: macOS - validations: - required: true - - type: input - id: language-version - attributes: - label: Python version - placeholder: Python v3.11.4 - validations: - required: true - - type: input - id: lib-version - attributes: - label: Library version - placeholder: openai v1.0.1 - validations: - required: true diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml deleted file mode 100644 index 0498cf7f6f..0000000000 --- a/.github/ISSUE_TEMPLATE/config.yml +++ /dev/null @@ -1,7 +0,0 @@ -blank_issues_enabled: false -contact_links: - - name: OpenAI support - url: https://help.openai.com/ - about: | - Please only file issues here that you believe represent actual bugs or feature requests for the OpenAI Python library. - If you're having general trouble with the OpenAI API, please visit our help center to get support. diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml deleted file mode 100644 index b529547d08..0000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: Feature request -description: Suggest an idea for this library -labels: ['feature-request'] -body: - - type: markdown - attributes: - value: | - Thanks for taking the time to fill out this feature request! - - type: checkboxes - id: non_api - attributes: - label: Confirm this is a feature request for the Python library and not the underlying OpenAI API. - description: Feature requests for the underlying OpenAI API should be reported on our [Developer Community](https://community.openai.com/c/api/7) - options: - - label: This is a feature request for the Python library - required: true - - type: textarea - id: feature - attributes: - label: Describe the feature or improvement you're requesting - description: A clear and concise description of what you want to happen. - validations: - required: true - - type: textarea - id: context - attributes: - label: Additional context - description: Add any other context about the feature request here. diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md deleted file mode 100644 index 4416b1e547..0000000000 --- a/.github/pull_request_template.md +++ /dev/null @@ -1,10 +0,0 @@ - - - - - -- [ ] I understand that this repository is auto-generated and my pull request may not be merged - -## Changes being requested - -## Additional context & links From 0672808413d380289c0bf43d355b93c46969cb98 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 20 Feb 2025 22:37:49 +0000 Subject: [PATCH 168/192] feat(client): allow passing `NotGiven` for body (#2135) fix(client): mark some request bodies as optional --- src/openai/_base_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 803b9a8dbc..9ca6e0a045 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -519,7 +519,7 @@ def _build_request( # so that passing a `TypedDict` doesn't cause an error. 
# https://github.com/microsoft/pyright/issues/3526#event-6715453066 params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None, - json=json_data, + json=json_data if is_given(json_data) else None, files=files, **kwargs, ) From 9aa36796025172f38a1f427a9b02e0dd5d8dcd81 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 21 Feb 2025 15:08:25 +0000 Subject: [PATCH 169/192] chore(internal): fix devcontainers setup (#2137) --- .devcontainer/Dockerfile | 2 +- .devcontainer/devcontainer.json | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index ac9a2e7521..55d20255c9 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -6,4 +6,4 @@ USER vscode RUN curl -sSf https://rye.astral.sh/get | RYE_VERSION="0.35.0" RYE_INSTALL_OPTION="--yes" bash ENV PATH=/home/vscode/.rye/shims:$PATH -RUN echo "[[ -d .venv ]] && source .venv/bin/activate" >> /home/vscode/.bashrc +RUN echo "[[ -d .venv ]] && source .venv/bin/activate || export PATH=\$PATH" >> /home/vscode/.bashrc diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index bbeb30b148..c17fdc169f 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -24,6 +24,9 @@ } } } + }, + "features": { + "ghcr.io/devcontainers/features/node:1": {} } // Features to add to the dev container. More info: https://containers.dev/features. From 98477addff5e73d58697ad500f57f1da76333307 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 25 Feb 2025 11:04:28 +0000 Subject: [PATCH 170/192] chore(internal): properly set __pydantic_private__ (#2144) --- src/openai/_base_client.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 9ca6e0a045..8a161567ca 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -63,7 +63,7 @@ ModelBuilderProtocol, ) from ._utils import is_dict, is_list, asyncify, is_given, lru_cache, is_mapping -from ._compat import model_copy, model_dump +from ._compat import PYDANTIC_V2, model_copy, model_dump from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type from ._response import ( APIResponse, @@ -208,6 +208,9 @@ def _set_private_attributes( model: Type[_T], options: FinalRequestOptions, ) -> None: + if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None: + self.__pydantic_private__ = {} + self._model = model self._client = client self._options = options @@ -293,6 +296,9 @@ def _set_private_attributes( client: AsyncAPIClient, options: FinalRequestOptions, ) -> None: + if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None: + self.__pydantic_private__ = {} + self._model = model self._client = client self._options = options From 808f52eec778100d011f1fee2bf467226c826e11 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Feb 2025 20:05:36 +0000 Subject: [PATCH 171/192] feat(api): add gpt-4.5-preview (#2149) --- .stats.yml | 2 +- src/openai/resources/beta/assistants.py | 4 +++ .../resources/beta/realtime/realtime.py | 36 +++++++++++-------- .../types/beta/assistant_update_params.py | 2 ++ src/openai/types/beta/realtime/session.py | 14 ++++++++ .../beta/realtime/session_create_params.py | 10 +++++- 
.../beta/realtime/session_update_event.py | 10 +++++- .../realtime/session_update_event_param.py | 10 +++++- src/openai/types/chat_model.py | 2 ++ src/openai/types/file_object.py | 3 ++ src/openai/types/upload.py | 2 +- .../beta/realtime/test_sessions.py | 2 ++ 12 files changed, 77 insertions(+), 20 deletions(-) diff --git a/.stats.yml b/.stats.yml index 658877d3b0..163146e38d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 74 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-4aa6ee65ba9efc789e05e6a5ef0883b2cadf06def8efd863dbf75e9e233067e1.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-5d30684c3118d049682ea30cdb4dbef39b97d51667da484689193dc40162af32.yml diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index 462086f74b..d2bb8d7b92 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -232,6 +232,8 @@ def update( "gpt-4o-2024-05-13", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", + "gpt-4.5-preview", + "gpt-4.5-preview-2025-02-27", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", @@ -673,6 +675,8 @@ async def update( "gpt-4o-2024-05-13", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", + "gpt-4.5-preview", + "gpt-4.5-preview-2025-02-27", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", diff --git a/src/openai/resources/beta/realtime/realtime.py b/src/openai/resources/beta/realtime/realtime.py index abdb33d4e0..a2dd143bfc 100644 --- a/src/openai/resources/beta/realtime/realtime.py +++ b/src/openai/resources/beta/realtime/realtime.py @@ -549,14 +549,17 @@ def __init__(self, connection: RealtimeConnection) -> None: class RealtimeSessionResource(BaseRealtimeConnectionResource): def update(self, *, session: session_update_event_param.Session, event_id: str | NotGiven = NOT_GIVEN) -> None: - """Send this event to update the session’s default configuration. + """ + Send this event to update the session’s default configuration. + The client may send this event at any time to update any field, + except for `voice`. However, note that once a session has been + initialized with a particular `model`, it can’t be changed to + another model using `session.update`. - The client may - send this event at any time to update the session configuration, and any - field may be updated at any time, except for "voice". The server will respond - with a `session.updated` event that shows the full effective configuration. - Only fields that are present are updated, thus the correct way to clear a - field like "instructions" is to pass an empty string. + When the server receives a `session.update`, it will respond + with a `session.updated` event showing the full, effective configuration. + Only the fields that are present are updated. To clear a field like + `instructions`, pass an empty string. """ self._connection.send( cast( @@ -756,14 +759,17 @@ class AsyncRealtimeSessionResource(BaseAsyncRealtimeConnectionResource): async def update( self, *, session: session_update_event_param.Session, event_id: str | NotGiven = NOT_GIVEN ) -> None: - """Send this event to update the session’s default configuration. - - The client may - send this event at any time to update the session configuration, and any - field may be updated at any time, except for "voice". The server will respond - with a `session.updated` event that shows the full effective configuration. 
- Only fields that are present are updated, thus the correct way to clear a - field like "instructions" is to pass an empty string. + """ + Send this event to update the session’s default configuration. + The client may send this event at any time to update any field, + except for `voice`. However, note that once a session has been + initialized with a particular `model`, it can’t be changed to + another model using `session.update`. + + When the server receives a `session.update`, it will respond + with a `session.updated` event showing the full, effective configuration. + Only the fields that are present are updated. To clear a field like + `instructions`, pass an empty string. """ await self._connection.send( cast( diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py index 80fec110cd..12a57a4063 100644 --- a/src/openai/types/beta/assistant_update_params.py +++ b/src/openai/types/beta/assistant_update_params.py @@ -45,6 +45,8 @@ class AssistantUpdateParams(TypedDict, total=False): "gpt-4o-2024-05-13", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", + "gpt-4.5-preview", + "gpt-4.5-preview-2025-02-27", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", diff --git a/src/openai/types/beta/realtime/session.py b/src/openai/types/beta/realtime/session.py index 2d028f817c..aee20fa906 100644 --- a/src/openai/types/beta/realtime/session.py +++ b/src/openai/types/beta/realtime/session.py @@ -34,6 +34,20 @@ class Tool(BaseModel): class TurnDetection(BaseModel): + create_response: Optional[bool] = None + """Whether or not to automatically generate a response when a VAD stop event + occurs. + + `true` by default. + """ + + interrupt_response: Optional[bool] = None + """ + Whether or not to automatically interrupt any ongoing response with output to + the default conversation (i.e. `conversation` of `auto`) when a VAD start event + occurs. `true` by default. + """ + prefix_padding_ms: Optional[int] = None """Amount of audio to include before the VAD detected speech (in milliseconds). diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py index 1502d83d39..bbc86d7c7d 100644 --- a/src/openai/types/beta/realtime/session_create_params.py +++ b/src/openai/types/beta/realtime/session_create_params.py @@ -146,11 +146,19 @@ class Tool(TypedDict, total=False): class TurnDetection(TypedDict, total=False): create_response: bool - """Whether or not to automatically generate a response when VAD is enabled. + """Whether or not to automatically generate a response when a VAD stop event + occurs. `true` by default. """ + interrupt_response: bool + """ + Whether or not to automatically interrupt any ongoing response with output to + the default conversation (i.e. `conversation` of `auto`) when a VAD start event + occurs. `true` by default. + """ + prefix_padding_ms: int """Amount of audio to include before the VAD detected speech (in milliseconds). diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py index 62fb0a3998..999cd8d660 100644 --- a/src/openai/types/beta/realtime/session_update_event.py +++ b/src/openai/types/beta/realtime/session_update_event.py @@ -51,11 +51,19 @@ class SessionTool(BaseModel): class SessionTurnDetection(BaseModel): create_response: Optional[bool] = None - """Whether or not to automatically generate a response when VAD is enabled. 
+ """Whether or not to automatically generate a response when a VAD stop event + occurs. `true` by default. """ + interrupt_response: Optional[bool] = None + """ + Whether or not to automatically interrupt any ongoing response with output to + the default conversation (i.e. `conversation` of `auto`) when a VAD start event + occurs. `true` by default. + """ + prefix_padding_ms: Optional[int] = None """Amount of audio to include before the VAD detected speech (in milliseconds). diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py index 133cdd91a1..07fdba9d85 100644 --- a/src/openai/types/beta/realtime/session_update_event_param.py +++ b/src/openai/types/beta/realtime/session_update_event_param.py @@ -57,11 +57,19 @@ class SessionTool(TypedDict, total=False): class SessionTurnDetection(TypedDict, total=False): create_response: bool - """Whether or not to automatically generate a response when VAD is enabled. + """Whether or not to automatically generate a response when a VAD stop event + occurs. `true` by default. """ + interrupt_response: bool + """ + Whether or not to automatically interrupt any ongoing response with output to + the default conversation (i.e. `conversation` of `auto`) when a VAD start event + occurs. `true` by default. + """ + prefix_padding_ms: int """Amount of audio to include before the VAD detected speech (in milliseconds). diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py index c191cb9734..6fe705a0b4 100644 --- a/src/openai/types/chat_model.py +++ b/src/openai/types/chat_model.py @@ -13,6 +13,8 @@ "o1-preview-2024-09-12", "o1-mini", "o1-mini-2024-09-12", + "gpt-4.5-preview", + "gpt-4.5-preview-2025-02-27", "gpt-4o", "gpt-4o-2024-11-20", "gpt-4o-2024-08-06", diff --git a/src/openai/types/file_object.py b/src/openai/types/file_object.py index 6e2bf310a4..1d65e6987d 100644 --- a/src/openai/types/file_object.py +++ b/src/openai/types/file_object.py @@ -40,6 +40,9 @@ class FileObject(BaseModel): `error`. """ + expires_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the file will expire.""" + status_details: Optional[str] = None """Deprecated. 
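
The `create_response` and `interrupt_response` flags documented above are sent through a `session.update` event; a rough sketch over a realtime connection, assuming the existing `connect()` helper, with an illustrative model name and tuning value:

from openai import OpenAI

client = OpenAI()

# Adjust server-side VAD behaviour for the current realtime session.
with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
    connection.session.update(
        session={
            "turn_detection": {
                "type": "server_vad",
                "create_response": True,     # auto-respond when a VAD stop event fires
                "interrupt_response": True,  # let new speech interrupt the active response
                "silence_duration_ms": 500,  # illustrative tuning value
            },
        }
    )
    # The server confirms with a session.updated event carrying the effective config.
    for event in connection:
        if event.type == "session.updated":
            print("session updated")
            break
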
diff --git a/src/openai/types/upload.py b/src/openai/types/upload.py index d8108c62f9..914b69a863 100644 --- a/src/openai/types/upload.py +++ b/src/openai/types/upload.py @@ -20,7 +20,7 @@ class Upload(BaseModel): """The Unix timestamp (in seconds) for when the Upload was created.""" expires_at: int - """The Unix timestamp (in seconds) for when the Upload was created.""" + """The Unix timestamp (in seconds) for when the Upload will expire.""" filename: str """The name of the file to be uploaded.""" diff --git a/tests/api_resources/beta/realtime/test_sessions.py b/tests/api_resources/beta/realtime/test_sessions.py index 5a17088ce6..5ea308ca0d 100644 --- a/tests/api_resources/beta/realtime/test_sessions.py +++ b/tests/api_resources/beta/realtime/test_sessions.py @@ -48,6 +48,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: ], turn_detection={ "create_response": True, + "interrupt_response": True, "prefix_padding_ms": 0, "silence_duration_ms": 0, "threshold": 0, @@ -112,6 +113,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> ], turn_detection={ "create_response": True, + "interrupt_response": True, "prefix_padding_ms": 0, "silence_duration_ms": 0, "threshold": 0, From 760e4f45ffc5fc21978e3c63f1713ebc1f0f560b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Feb 2025 22:01:07 +0000 Subject: [PATCH 172/192] docs: update URLs from stainlessapi.com to stainless.com (#2150) More details at https://www.stainless.com/changelog/stainless-com --- SECURITY.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index c54acaf331..3b3bd8a662 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,9 +2,9 @@ ## Reporting Security Issues -This SDK is generated by [Stainless Software Inc](http://stainlessapi.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. +This SDK is generated by [Stainless Software Inc](http://stainless.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. -To report a security issue, please contact the Stainless team at security@stainlessapi.com. +To report a security issue, please contact the Stainless team at security@stainless.com. ## Responsible Disclosure From d137ed7c5b99515cdabd11fd6fc7bb3edef4c18c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Feb 2025 22:51:14 +0000 Subject: [PATCH 173/192] chore(docs): update client docstring (#2152) --- src/openai/_client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/openai/_client.py b/src/openai/_client.py index c784694f20..2464c6504c 100644 --- a/src/openai/_client.py +++ b/src/openai/_client.py @@ -97,7 +97,7 @@ def __init__( # part of our public interface in the future. _strict_response_validation: bool = False, ) -> None: - """Construct a new synchronous openai client instance. + """Construct a new synchronous OpenAI client instance. This automatically infers the following arguments from their corresponding environment variables if they are not provided: - `api_key` from `OPENAI_API_KEY` @@ -324,7 +324,7 @@ def __init__( # part of our public interface in the future. _strict_response_validation: bool = False, ) -> None: - """Construct a new async openai client instance. 
+ """Construct a new async AsyncOpenAI client instance. This automatically infers the following arguments from their corresponding environment variables if they are not provided: - `api_key` from `OPENAI_API_KEY` From 62969070922e65fb6190f6790beaca52854156ef Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 3 Mar 2025 20:43:26 +0000 Subject: [PATCH 174/192] chore(internal): remove unused http client options forwarding (#2158) --- src/openai/_base_client.py | 97 +------------------------------------- 1 file changed, 1 insertion(+), 96 deletions(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 8a161567ca..2fe1b61a18 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -9,7 +9,6 @@ import inspect import logging import platform -import warnings import email.utils from types import TracebackType from random import random @@ -36,7 +35,7 @@ import httpx import distro import pydantic -from httpx import URL, Limits +from httpx import URL from pydantic import PrivateAttr from . import _exceptions @@ -51,13 +50,10 @@ Timeout, NotGiven, ResponseT, - Transport, AnyMapping, PostParser, - ProxiesTypes, RequestFiles, HttpxSendArgs, - AsyncTransport, RequestOptions, HttpxRequestFiles, ModelBuilderProtocol, @@ -338,9 +334,6 @@ class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]): _base_url: URL max_retries: int timeout: Union[float, Timeout, None] - _limits: httpx.Limits - _proxies: ProxiesTypes | None - _transport: Transport | AsyncTransport | None _strict_response_validation: bool _idempotency_header: str | None _default_stream_cls: type[_DefaultStreamT] | None = None @@ -353,9 +346,6 @@ def __init__( _strict_response_validation: bool, max_retries: int = DEFAULT_MAX_RETRIES, timeout: float | Timeout | None = DEFAULT_TIMEOUT, - limits: httpx.Limits, - transport: Transport | AsyncTransport | None, - proxies: ProxiesTypes | None, custom_headers: Mapping[str, str] | None = None, custom_query: Mapping[str, object] | None = None, ) -> None: @@ -363,9 +353,6 @@ def __init__( self._base_url = self._enforce_trailing_slash(URL(base_url)) self.max_retries = max_retries self.timeout = timeout - self._limits = limits - self._proxies = proxies - self._transport = transport self._custom_headers = custom_headers or {} self._custom_query = custom_query or {} self._strict_response_validation = _strict_response_validation @@ -801,46 +788,11 @@ def __init__( base_url: str | URL, max_retries: int = DEFAULT_MAX_RETRIES, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, - transport: Transport | None = None, - proxies: ProxiesTypes | None = None, - limits: Limits | None = None, http_client: httpx.Client | None = None, custom_headers: Mapping[str, str] | None = None, custom_query: Mapping[str, object] | None = None, _strict_response_validation: bool, ) -> None: - kwargs: dict[str, Any] = {} - if limits is not None: - warnings.warn( - "The `connection_pool_limits` argument is deprecated. The `http_client` argument should be passed instead", - category=DeprecationWarning, - stacklevel=3, - ) - if http_client is not None: - raise ValueError("The `http_client` argument is mutually exclusive with `connection_pool_limits`") - else: - limits = DEFAULT_CONNECTION_LIMITS - - if transport is not None: - kwargs["transport"] = transport - warnings.warn( - "The `transport` argument is deprecated. 
The `http_client` argument should be passed instead", - category=DeprecationWarning, - stacklevel=3, - ) - if http_client is not None: - raise ValueError("The `http_client` argument is mutually exclusive with `transport`") - - if proxies is not None: - kwargs["proxies"] = proxies - warnings.warn( - "The `proxies` argument is deprecated. The `http_client` argument should be passed instead", - category=DeprecationWarning, - stacklevel=3, - ) - if http_client is not None: - raise ValueError("The `http_client` argument is mutually exclusive with `proxies`") - if not is_given(timeout): # if the user passed in a custom http client with a non-default # timeout set then we use that timeout. @@ -861,12 +813,9 @@ def __init__( super().__init__( version=version, - limits=limits, # cast to a valid type because mypy doesn't understand our type narrowing timeout=cast(Timeout, timeout), - proxies=proxies, base_url=base_url, - transport=transport, max_retries=max_retries, custom_query=custom_query, custom_headers=custom_headers, @@ -876,9 +825,6 @@ def __init__( base_url=base_url, # cast to a valid type because mypy doesn't understand our type narrowing timeout=cast(Timeout, timeout), - limits=limits, - follow_redirects=True, - **kwargs, # type: ignore ) def is_closed(self) -> bool: @@ -1388,45 +1334,10 @@ def __init__( _strict_response_validation: bool, max_retries: int = DEFAULT_MAX_RETRIES, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, - transport: AsyncTransport | None = None, - proxies: ProxiesTypes | None = None, - limits: Limits | None = None, http_client: httpx.AsyncClient | None = None, custom_headers: Mapping[str, str] | None = None, custom_query: Mapping[str, object] | None = None, ) -> None: - kwargs: dict[str, Any] = {} - if limits is not None: - warnings.warn( - "The `connection_pool_limits` argument is deprecated. The `http_client` argument should be passed instead", - category=DeprecationWarning, - stacklevel=3, - ) - if http_client is not None: - raise ValueError("The `http_client` argument is mutually exclusive with `connection_pool_limits`") - else: - limits = DEFAULT_CONNECTION_LIMITS - - if transport is not None: - kwargs["transport"] = transport - warnings.warn( - "The `transport` argument is deprecated. The `http_client` argument should be passed instead", - category=DeprecationWarning, - stacklevel=3, - ) - if http_client is not None: - raise ValueError("The `http_client` argument is mutually exclusive with `transport`") - - if proxies is not None: - kwargs["proxies"] = proxies - warnings.warn( - "The `proxies` argument is deprecated. The `http_client` argument should be passed instead", - category=DeprecationWarning, - stacklevel=3, - ) - if http_client is not None: - raise ValueError("The `http_client` argument is mutually exclusive with `proxies`") - if not is_given(timeout): # if the user passed in a custom http client with a non-default # timeout set then we use that timeout. 
@@ -1448,11 +1359,8 @@ def __init__( super().__init__( version=version, base_url=base_url, - limits=limits, # cast to a valid type because mypy doesn't understand our type narrowing timeout=cast(Timeout, timeout), - proxies=proxies, - transport=transport, max_retries=max_retries, custom_query=custom_query, custom_headers=custom_headers, @@ -1462,9 +1370,6 @@ def __init__( base_url=base_url, # cast to a valid type because mypy doesn't understand our type narrowing timeout=cast(Timeout, timeout), - limits=limits, - follow_redirects=True, - **kwargs, # type: ignore ) def is_closed(self) -> bool: From 346561f0b130de38e91ed8bd1895450685c8282f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 4 Mar 2025 21:09:25 +0000 Subject: [PATCH 175/192] chore(internal): run example files in CI (#2160) --- .github/workflows/ci.yml | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2dd3585f74..25b0c0286d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -52,3 +52,30 @@ jobs: - name: Run tests run: ./scripts/test + + examples: + name: examples + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Install Rye + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.35.0' + RYE_INSTALL_OPTION: '--yes' + - name: Install dependencies + run: | + rye sync --all-features + + - env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + run: | + rye run python examples/demo.py + - env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + run: | + rye run python examples/async_demo.py From 1985b7d733263629d13ff28f53ed055947e5233e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 5 Mar 2025 21:22:02 +0000 Subject: [PATCH 176/192] fix(api): add missing file rank enum + more metadata (#2164) --- .stats.yml | 2 +- src/openai/resources/fine_tuning/jobs/jobs.py | 31 ++++++++++++++++++- .../threads/runs/file_search_tool_call.py | 7 +++-- .../types/fine_tuning/fine_tuning_job.py | 11 +++++++ .../types/fine_tuning/job_create_params.py | 12 +++++++ .../types/fine_tuning/job_list_params.py | 8 +++++ tests/api_resources/fine_tuning/test_jobs.py | 4 +++ 7 files changed, 71 insertions(+), 4 deletions(-) diff --git a/.stats.yml b/.stats.yml index 163146e38d..0d7e83be4f 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 74 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-5d30684c3118d049682ea30cdb4dbef39b97d51667da484689193dc40162af32.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b524aed1c2c5c928aa4e2c546f5dbb364e7b4d5027daf05e42e210b05a97c3c6.yml diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py index bd08552835..49629ca6a7 100644 --- a/src/openai/resources/fine_tuning/jobs/jobs.py +++ b/src/openai/resources/fine_tuning/jobs/jobs.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Union, Iterable, Optional +from typing import Dict, Union, Iterable, Optional from typing_extensions import Literal import httpx @@ -27,6 +27,7 @@ from ....pagination import SyncCursorPage, AsyncCursorPage from ...._base_client import AsyncPaginator, make_request_options from ....types.fine_tuning import job_list_params, job_create_params, 
job_list_events_params +from ....types.shared_params.metadata import Metadata from ....types.fine_tuning.fine_tuning_job import FineTuningJob from ....types.fine_tuning.fine_tuning_job_event import FineTuningJobEvent @@ -64,6 +65,7 @@ def create( training_file: str, hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, method: job_create_params.Method | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, @@ -111,6 +113,13 @@ def create( integrations: A list of integrations to enable for your fine-tuning job. + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + method: The method used for fine-tuning. seed: The seed controls the reproducibility of the job. Passing in the same seed and @@ -152,6 +161,7 @@ def create( "training_file": training_file, "hyperparameters": hyperparameters, "integrations": integrations, + "metadata": metadata, "method": method, "seed": seed, "suffix": suffix, @@ -205,6 +215,7 @@ def list( *, after: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -220,6 +231,9 @@ def list( limit: Number of fine-tuning jobs to retrieve. + metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`. + Alternatively, set `metadata=null` to indicate no metadata. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -240,6 +254,7 @@ def list( { "after": after, "limit": limit, + "metadata": metadata, }, job_list_params.JobListParams, ), @@ -362,6 +377,7 @@ async def create( training_file: str, hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, method: job_create_params.Method | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, @@ -409,6 +425,13 @@ async def create( integrations: A list of integrations to enable for your fine-tuning job. + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + method: The method used for fine-tuning. seed: The seed controls the reproducibility of the job. 
Passing in the same seed and @@ -450,6 +473,7 @@ async def create( "training_file": training_file, "hyperparameters": hyperparameters, "integrations": integrations, + "metadata": metadata, "method": method, "seed": seed, "suffix": suffix, @@ -503,6 +527,7 @@ def list( *, after: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -518,6 +543,9 @@ def list( limit: Number of fine-tuning jobs to retrieve. + metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`. + Alternatively, set `metadata=null` to indicate no metadata. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -538,6 +566,7 @@ def list( { "after": after, "limit": limit, + "metadata": metadata, }, job_list_params.JobListParams, ), diff --git a/src/openai/types/beta/threads/runs/file_search_tool_call.py b/src/openai/types/beta/threads/runs/file_search_tool_call.py index da4d58dc37..a2068daad1 100644 --- a/src/openai/types/beta/threads/runs/file_search_tool_call.py +++ b/src/openai/types/beta/threads/runs/file_search_tool_call.py @@ -15,8 +15,11 @@ class FileSearchRankingOptions(BaseModel): - ranker: Literal["default_2024_08_21"] - """The ranker used for the file search.""" + ranker: Literal["auto", "default_2024_08_21"] + """The ranker to use for the file search. + + If not specified will use the `auto` ranker. + """ score_threshold: float """The score threshold for the file search. diff --git a/src/openai/types/fine_tuning/fine_tuning_job.py b/src/openai/types/fine_tuning/fine_tuning_job.py index f5a11c2107..c7fff2b7b1 100644 --- a/src/openai/types/fine_tuning/fine_tuning_job.py +++ b/src/openai/types/fine_tuning/fine_tuning_job.py @@ -4,6 +4,7 @@ from typing_extensions import Literal from ..._models import BaseModel +from ..shared.metadata import Metadata from .fine_tuning_job_wandb_integration_object import FineTuningJobWandbIntegrationObject __all__ = [ @@ -208,5 +209,15 @@ class FineTuningJob(BaseModel): integrations: Optional[List[FineTuningJobWandbIntegrationObject]] = None """A list of integrations to enable for this fine-tuning job.""" + metadata: Optional[Metadata] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
+ """ + method: Optional[Method] = None """The method used for fine-tuning.""" diff --git a/src/openai/types/fine_tuning/job_create_params.py b/src/openai/types/fine_tuning/job_create_params.py index 09c3f8571c..f4cf980b08 100644 --- a/src/openai/types/fine_tuning/job_create_params.py +++ b/src/openai/types/fine_tuning/job_create_params.py @@ -5,6 +5,8 @@ from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict +from ..shared_params.metadata import Metadata + __all__ = [ "JobCreateParams", "Hyperparameters", @@ -55,6 +57,16 @@ class JobCreateParams(TypedDict, total=False): integrations: Optional[Iterable[Integration]] """A list of integrations to enable for your fine-tuning job.""" + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + method: Method """The method used for fine-tuning.""" diff --git a/src/openai/types/fine_tuning/job_list_params.py b/src/openai/types/fine_tuning/job_list_params.py index 5c075ca33f..b79f3ce86a 100644 --- a/src/openai/types/fine_tuning/job_list_params.py +++ b/src/openai/types/fine_tuning/job_list_params.py @@ -2,6 +2,7 @@ from __future__ import annotations +from typing import Dict, Optional from typing_extensions import TypedDict __all__ = ["JobListParams"] @@ -13,3 +14,10 @@ class JobListParams(TypedDict, total=False): limit: int """Number of fine-tuning jobs to retrieve.""" + + metadata: Optional[Dict[str, str]] + """Optional metadata filter. + + To filter, use the syntax `metadata[k]=v`. Alternatively, set `metadata=null` to + indicate no metadata. 
+ """ diff --git a/tests/api_resources/fine_tuning/test_jobs.py b/tests/api_resources/fine_tuning/test_jobs.py index 050edba367..342a70dfd8 100644 --- a/tests/api_resources/fine_tuning/test_jobs.py +++ b/tests/api_resources/fine_tuning/test_jobs.py @@ -50,6 +50,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: }, } ], + metadata={"foo": "string"}, method={ "dpo": { "hyperparameters": { @@ -148,6 +149,7 @@ def test_method_list_with_all_params(self, client: OpenAI) -> None: job = client.fine_tuning.jobs.list( after="after", limit=0, + metadata={"foo": "string"}, ) assert_matches_type(SyncCursorPage[FineTuningJob], job, path=["response"]) @@ -289,6 +291,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> }, } ], + metadata={"foo": "string"}, method={ "dpo": { "hyperparameters": { @@ -387,6 +390,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> N job = await async_client.fine_tuning.jobs.list( after="after", limit=0, + metadata={"foo": "string"}, ) assert_matches_type(AsyncCursorPage[FineTuningJob], job, path=["response"]) From e680715d1da2918bd7f3a96512a52d7c338af179 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 6 Mar 2025 14:49:54 +0000 Subject: [PATCH 177/192] chore: move ChatModel type to shared (#2167) --- api.md | 3 +- src/openai/resources/beta/assistants.py | 2 +- .../resources/beta/threads/runs/runs.py | 2 +- src/openai/resources/beta/threads/threads.py | 2 +- .../resources/chat/completions/completions.py | 2 +- src/openai/types/__init__.py | 2 +- .../types/beta/assistant_create_params.py | 2 +- .../beta/thread_create_and_run_params.py | 2 +- .../types/beta/threads/run_create_params.py | 2 +- .../types/chat/completion_create_params.py | 2 +- src/openai/types/chat_model.py | 47 ++--------------- src/openai/types/shared/__init__.py | 1 + src/openai/types/shared/chat_model.py | 49 ++++++++++++++++++ src/openai/types/shared_params/__init__.py | 1 + src/openai/types/shared_params/chat_model.py | 51 +++++++++++++++++++ 15 files changed, 116 insertions(+), 54 deletions(-) create mode 100644 src/openai/types/shared/chat_model.py create mode 100644 src/openai/types/shared_params/chat_model.py diff --git a/api.md b/api.md index 153521145c..c130d478f8 100644 --- a/api.md +++ b/api.md @@ -2,6 +2,7 @@ ```python from openai.types import ( + ChatModel, ErrorObject, FunctionDefinition, FunctionParameters, @@ -221,9 +222,9 @@ Types: from openai.types.fine_tuning import ( FineTuningJob, FineTuningJobEvent, - FineTuningJobIntegration, FineTuningJobWandbIntegration, FineTuningJobWandbIntegrationObject, + FineTuningJobIntegration, ) ``` diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index d2bb8d7b92..ffecd8f9e9 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -23,8 +23,8 @@ assistant_update_params, ) from ..._base_client import AsyncPaginator, make_request_options -from ...types.chat_model import ChatModel from ...types.beta.assistant import Assistant +from ...types.shared.chat_model import ChatModel from ...types.beta.assistant_deleted import AssistantDeleted from ...types.shared_params.metadata import Metadata from ...types.beta.assistant_tool_param import AssistantToolParam diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index 3a9add5b3c..e96e70fc5a 100644 --- 
a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -28,7 +28,6 @@ from ....._streaming import Stream, AsyncStream from .....pagination import SyncCursorPage, AsyncCursorPage from ....._base_client import AsyncPaginator, make_request_options -from .....types.chat_model import ChatModel from .....types.beta.threads import ( run_list_params, run_create_params, @@ -36,6 +35,7 @@ run_submit_tool_outputs_params, ) from .....types.beta.threads.run import Run +from .....types.shared.chat_model import ChatModel from .....types.shared_params.metadata import Metadata from .....types.beta.assistant_tool_param import AssistantToolParam from .....types.beta.assistant_stream_event import AssistantStreamEvent diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index a9c473e28e..299b23f375 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -40,9 +40,9 @@ thread_create_and_run_params, ) from ...._base_client import make_request_options -from ....types.chat_model import ChatModel from ....types.beta.thread import Thread from ....types.beta.threads.run import Run +from ....types.shared.chat_model import ChatModel from ....types.beta.thread_deleted import ThreadDeleted from ....types.shared_params.metadata import Metadata from ....types.beta.assistant_stream_event import AssistantStreamEvent diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py index f659cfdeeb..9c2a0821a3 100644 --- a/src/openai/resources/chat/completions/completions.py +++ b/src/openai/resources/chat/completions/completions.py @@ -35,7 +35,7 @@ completion_update_params, ) from ...._base_client import AsyncPaginator, make_request_options -from ....types.chat_model import ChatModel +from ....types.shared.chat_model import ChatModel from ....types.chat.chat_completion import ChatCompletion from ....types.shared_params.metadata import Metadata from ....types.chat.chat_completion_chunk import ChatCompletionChunk diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index 7abb22f239..5785877c8a 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -7,6 +7,7 @@ from .model import Model as Model from .shared import ( Metadata as Metadata, + ChatModel as ChatModel, ErrorObject as ErrorObject, FunctionDefinition as FunctionDefinition, FunctionParameters as FunctionParameters, @@ -16,7 +17,6 @@ ) from .upload import Upload as Upload from .embedding import Embedding as Embedding -from .chat_model import ChatModel as ChatModel from .completion import Completion as Completion from .moderation import Moderation as Moderation from .audio_model import AudioModel as AudioModel diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index 66bef02ced..e90aabfd3f 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -5,7 +5,7 @@ from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict -from ..chat_model import ChatModel +from ..shared.chat_model import ChatModel from .assistant_tool_param import AssistantToolParam from ..shared_params.metadata import Metadata from .file_chunking_strategy_param import FileChunkingStrategyParam diff --git a/src/openai/types/beta/thread_create_and_run_params.py 
b/src/openai/types/beta/thread_create_and_run_params.py index 08f044c1be..d888fb3eee 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -5,7 +5,7 @@ from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict -from ..chat_model import ChatModel +from ..shared.chat_model import ChatModel from .function_tool_param import FunctionToolParam from .file_search_tool_param import FileSearchToolParam from ..shared_params.metadata import Metadata diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index 093b4ce321..098e50a1d9 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -5,7 +5,7 @@ from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict -from ...chat_model import ChatModel +from ...shared.chat_model import ChatModel from ..assistant_tool_param import AssistantToolParam from .runs.run_step_include import RunStepInclude from ...shared_params.metadata import Metadata diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index c761cbe07b..4dd2812aba 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -5,7 +5,7 @@ from typing import Dict, List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict -from ..chat_model import ChatModel +from ..shared.chat_model import ChatModel from ..shared_params.metadata import Metadata from .chat_completion_modality import ChatCompletionModality from .chat_completion_tool_param import ChatCompletionToolParam diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py index 6fe705a0b4..9304d195d6 100644 --- a/src/openai/types/chat_model.py +++ b/src/openai/types/chat_model.py @@ -1,49 +1,8 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing_extensions import Literal, TypeAlias + +from .shared import chat_model __all__ = ["ChatModel"] -ChatModel: TypeAlias = Literal[ - "o3-mini", - "o3-mini-2025-01-31", - "o1", - "o1-2024-12-17", - "o1-preview", - "o1-preview-2024-09-12", - "o1-mini", - "o1-mini-2024-09-12", - "gpt-4.5-preview", - "gpt-4.5-preview-2025-02-27", - "gpt-4o", - "gpt-4o-2024-11-20", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-audio-preview", - "gpt-4o-audio-preview-2024-10-01", - "gpt-4o-audio-preview-2024-12-17", - "gpt-4o-mini-audio-preview", - "gpt-4o-mini-audio-preview-2024-12-17", - "chatgpt-4o-latest", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", -] +ChatModel = chat_model.ChatModel diff --git a/src/openai/types/shared/__init__.py b/src/openai/types/shared/__init__.py index 74bf304904..4cf367b1cc 100644 --- a/src/openai/types/shared/__init__.py +++ b/src/openai/types/shared/__init__.py @@ -1,6 +1,7 @@ # File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. from .metadata import Metadata as Metadata +from .chat_model import ChatModel as ChatModel from .error_object import ErrorObject as ErrorObject from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters diff --git a/src/openai/types/shared/chat_model.py b/src/openai/types/shared/chat_model.py new file mode 100644 index 0000000000..6fe705a0b4 --- /dev/null +++ b/src/openai/types/shared/chat_model.py @@ -0,0 +1,49 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["ChatModel"] + +ChatModel: TypeAlias = Literal[ + "o3-mini", + "o3-mini-2025-01-31", + "o1", + "o1-2024-12-17", + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", + "gpt-4.5-preview", + "gpt-4.5-preview-2025-02-27", + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-audio-preview", + "gpt-4o-audio-preview-2024-10-01", + "gpt-4o-audio-preview-2024-12-17", + "gpt-4o-mini-audio-preview", + "gpt-4o-mini-audio-preview-2024-12-17", + "chatgpt-4o-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", +] diff --git a/src/openai/types/shared_params/__init__.py b/src/openai/types/shared_params/__init__.py index 68a8db75fe..47a747b2d4 100644 --- a/src/openai/types/shared_params/__init__.py +++ b/src/openai/types/shared_params/__init__.py @@ -1,6 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from .metadata import Metadata as Metadata +from .chat_model import ChatModel as ChatModel from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters from .response_format_text import ResponseFormatText as ResponseFormatText diff --git a/src/openai/types/shared_params/chat_model.py b/src/openai/types/shared_params/chat_model.py new file mode 100644 index 0000000000..0ac3f31611 --- /dev/null +++ b/src/openai/types/shared_params/chat_model.py @@ -0,0 +1,51 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, TypeAlias + +__all__ = ["ChatModel"] + +ChatModel: TypeAlias = Literal[ + "o3-mini", + "o3-mini-2025-01-31", + "o1", + "o1-2024-12-17", + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", + "gpt-4.5-preview", + "gpt-4.5-preview-2025-02-27", + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-audio-preview", + "gpt-4o-audio-preview-2024-10-01", + "gpt-4o-audio-preview-2024-12-17", + "gpt-4o-mini-audio-preview", + "gpt-4o-mini-audio-preview-2024-12-17", + "chatgpt-4o-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", +] From 08f54f5f53b3156548bdea2a70d5371738b7442d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 01:09:36 +0000 Subject: [PATCH 178/192] test: add DEFER_PYDANTIC_BUILD=false flag to tests (#2174) --- scripts/test | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/test b/scripts/test index 4fa5698b8f..2b87845670 100755 --- a/scripts/test +++ b/scripts/test @@ -52,6 +52,8 @@ else echo fi +export DEFER_PYDANTIC_BUILD=false + echo "==> Running tests" rye run pytest "$@" From 44b61bdb2dfef36d481475faf87e07a40a33a98d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 13:17:10 +0000 Subject: [PATCH 179/192] chore: export more types (#2176) --- src/openai/types/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index 5785877c8a..eb71ac6ccc 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -17,6 +17,7 @@ ) from .upload import Upload as Upload from .embedding import Embedding as Embedding +from .chat_model import ChatModel as ChatModel from .completion import Completion as Completion from .moderation import Moderation as Moderation from .audio_model import AudioModel as AudioModel From 7b6f11865ab11f892c95be516575a61426724f03 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 16:38:03 +0000 Subject: [PATCH 180/192] feat(api): add /v1/responses and built-in tools (#2177) [platform.openai.com/docs/changelog](http://platform.openai.com/docs/changelog) --- .stats.yml | 4 +- api.md | 218 ++- src/openai/_client.py | 18 + src/openai/resources/__init__.py | 28 + src/openai/resources/beta/__init__.py | 14 - src/openai/resources/beta/assistants.py | 17 +- src/openai/resources/beta/beta.py | 32 - .../resources/beta/threads/runs/runs.py | 29 +- .../resources/chat/completions/completions.py | 496 +++--- .../resources/chat/completions/messages.py | 4 +- src/openai/resources/files.py | 24 +- src/openai/resources/responses/__init__.py | 33 + src/openai/resources/responses/input_items.py | 223 +++ src/openai/resources/responses/responses.py | 1433 +++++++++++++++++ src/openai/resources/uploads/uploads.py | 14 +- .../{beta => }/vector_stores/__init__.py | 0 .../{beta => }/vector_stores/file_batches.py | 44 +- 
.../{beta => }/vector_stores/files.py | 234 ++- .../{beta => }/vector_stores/vector_stores.py | 178 +- src/openai/types/__init__.py | 21 + .../auto_file_chunking_strategy_param.py | 0 src/openai/types/beta/__init__.py | 15 - .../types/beta/assistant_create_params.py | 49 +- .../types/beta/assistant_update_params.py | 5 +- .../beta/thread_create_and_run_params.py | 43 +- src/openai/types/beta/thread_create_params.py | 42 +- .../types/beta/threads/run_create_params.py | 5 +- .../types/chat/chat_completion_audio_param.py | 5 +- .../chat_completion_content_part_param.py | 31 +- .../types/chat/chat_completion_message.py | 30 +- .../chat/chat_completion_reasoning_effort.py | 6 +- .../types/chat/completion_create_params.py | 132 +- .../types/chat/completion_list_params.py | 8 +- .../{beta => }/file_chunking_strategy.py | 2 +- .../file_chunking_strategy_param.py | 0 src/openai/types/file_create_params.py | 10 +- src/openai/types/file_purpose.py | 2 +- .../other_file_chunking_strategy_object.py | 2 +- src/openai/types/responses/__init__.py | 130 ++ src/openai/types/responses/computer_tool.py | 21 + .../types/responses/computer_tool_param.py | 21 + .../responses/easy_input_message_param.py | 27 + .../types/responses/file_search_tool.py | 44 + .../types/responses/file_search_tool_param.py | 45 + src/openai/types/responses/function_tool.py | 28 + .../types/responses/function_tool_param.py | 28 + .../types/responses/input_item_list_params.py | 28 + src/openai/types/responses/response.py | 188 +++ .../responses/response_audio_delta_event.py | 15 + .../responses/response_audio_done_event.py | 12 + .../response_audio_transcript_delta_event.py | 15 + .../response_audio_transcript_done_event.py | 12 + ..._code_interpreter_call_code_delta_event.py | 18 + ...e_code_interpreter_call_code_done_event.py | 18 + ...e_code_interpreter_call_completed_event.py | 19 + ...code_interpreter_call_in_progress_event.py | 19 + ...ode_interpreter_call_interpreting_event.py | 19 + .../response_code_interpreter_tool_call.py | 52 + .../responses/response_completed_event.py | 16 + .../responses/response_computer_tool_call.py | 212 +++ .../response_computer_tool_call_param.py | 208 +++ .../response_content_part_added_event.py | 30 + .../response_content_part_done_event.py | 30 + .../types/responses/response_create_params.py | 204 +++ .../types/responses/response_created_event.py | 16 + src/openai/types/responses/response_error.py | 34 + .../types/responses/response_error_event.py | 22 + .../types/responses/response_failed_event.py | 16 + ...sponse_file_search_call_completed_event.py | 18 + ...onse_file_search_call_in_progress_event.py | 18 + ...sponse_file_search_call_searching_event.py | 18 + .../response_file_search_tool_call.py | 51 + .../response_file_search_tool_call_param.py | 51 + .../responses/response_format_text_config.py | 16 + .../response_format_text_config_param.py | 16 + ...response_format_text_json_schema_config.py | 43 + ...se_format_text_json_schema_config_param.py | 41 + ...nse_function_call_arguments_delta_event.py | 23 + ...onse_function_call_arguments_done_event.py | 20 + .../responses/response_function_tool_call.py | 32 + .../response_function_tool_call_param.py | 31 + .../responses/response_function_web_search.py | 18 + .../response_function_web_search_param.py | 18 + .../responses/response_in_progress_event.py | 16 + .../types/responses/response_includable.py | 9 + .../responses/response_incomplete_event.py | 16 + .../types/responses/response_input_content.py | 15 + 
.../responses/response_input_content_param.py | 14 + .../types/responses/response_input_file.py | 22 + .../responses/response_input_file_param.py | 21 + .../types/responses/response_input_image.py | 28 + .../responses/response_input_image_param.py | 28 + .../responses/response_input_item_param.py | 174 ++ .../response_input_message_content_list.py | 10 + ...sponse_input_message_content_list_param.py | 16 + .../types/responses/response_input_param.py | 177 ++ .../types/responses/response_input_text.py | 15 + .../responses/response_input_text_param.py | 15 + .../types/responses/response_item_list.py | 152 ++ .../types/responses/response_output_item.py | 55 + .../response_output_item_added_event.py | 19 + .../response_output_item_done_event.py | 19 + .../responses/response_output_message.py | 34 + .../response_output_message_param.py | 34 + .../responses/response_output_refusal.py | 15 + .../response_output_refusal_param.py | 15 + .../types/responses/response_output_text.py | 64 + .../responses/response_output_text_param.py | 67 + .../responses/response_refusal_delta_event.py | 24 + .../responses/response_refusal_done_event.py | 24 + .../responses/response_retrieve_params.py | 18 + src/openai/types/responses/response_status.py | 7 + .../types/responses/response_stream_event.py | 78 + .../response_text_annotation_delta_event.py | 79 + .../types/responses/response_text_config.py | 26 + .../responses/response_text_config_param.py | 27 + .../responses/response_text_delta_event.py | 24 + .../responses/response_text_done_event.py | 24 + src/openai/types/responses/response_usage.py | 25 + ...esponse_web_search_call_completed_event.py | 18 + ...ponse_web_search_call_in_progress_event.py | 18 + ...esponse_web_search_call_searching_event.py | 18 + src/openai/types/responses/tool.py | 16 + .../types/responses/tool_choice_function.py | 15 + .../responses/tool_choice_function_param.py | 15 + .../types/responses/tool_choice_options.py | 7 + .../types/responses/tool_choice_types.py | 22 + .../responses/tool_choice_types_param.py | 24 + src/openai/types/responses/tool_param.py | 15 + src/openai/types/responses/web_search_tool.py | 48 + .../types/responses/web_search_tool_param.py | 48 + src/openai/types/shared/__init__.py | 4 + src/openai/types/shared/chat_model.py | 3 + src/openai/types/shared/comparison_filter.py | 30 + src/openai/types/shared/compound_filter.py | 22 + src/openai/types/shared/reasoning.py | 28 + src/openai/types/shared/reasoning_effort.py | 8 + .../shared/response_format_json_object.py | 2 +- .../shared/response_format_json_schema.py | 18 +- .../types/shared/response_format_text.py | 2 +- src/openai/types/shared_params/__init__.py | 4 + src/openai/types/shared_params/chat_model.py | 3 + .../types/shared_params/comparison_filter.py | 30 + .../types/shared_params/compound_filter.py | 23 + src/openai/types/shared_params/reasoning.py | 29 + .../types/shared_params/reasoning_effort.py | 10 + .../response_format_json_object.py | 2 +- .../response_format_json_schema.py | 18 +- .../shared_params/response_format_text.py | 2 +- .../static_file_chunking_strategy.py | 2 +- .../static_file_chunking_strategy_object.py | 2 +- ...tic_file_chunking_strategy_object_param.py | 0 .../static_file_chunking_strategy_param.py | 0 src/openai/types/{beta => }/vector_store.py | 4 +- .../{beta => }/vector_store_create_params.py | 2 +- .../types/{beta => }/vector_store_deleted.py | 2 +- .../{beta => }/vector_store_list_params.py | 0 .../types/vector_store_search_params.py | 40 + 
.../types/vector_store_search_response.py | 39 + .../{beta => }/vector_store_update_params.py | 2 +- .../{beta => }/vector_stores/__init__.py | 2 + .../vector_stores/file_batch_create_params.py | 11 +- .../file_batch_list_files_params.py | 0 .../vector_stores/file_content_response.py | 15 + .../vector_stores/file_create_params.py | 10 + .../vector_stores/file_list_params.py | 0 .../types/vector_stores/file_update_params.py | 21 + .../vector_stores/vector_store_file.py | 13 +- .../vector_stores/vector_store_file_batch.py | 2 +- .../vector_store_file_deleted.py | 2 +- tests/api_resources/chat/test_completions.py | 64 +- .../vector_stores => responses}/__init__.py | 0 .../responses/test_input_items.py | 121 ++ tests/api_resources/test_responses.py | 498 ++++++ .../{beta => }/test_vector_stores.py | 211 ++- tests/api_resources/vector_stores/__init__.py | 1 + .../vector_stores/test_file_batches.py | 88 +- .../{beta => }/vector_stores/test_files.py | 289 +++- 178 files changed, 8087 insertions(+), 674 deletions(-) create mode 100644 src/openai/resources/responses/__init__.py create mode 100644 src/openai/resources/responses/input_items.py create mode 100644 src/openai/resources/responses/responses.py rename src/openai/resources/{beta => }/vector_stores/__init__.py (100%) rename src/openai/resources/{beta => }/vector_stores/file_batches.py (91%) rename src/openai/resources/{beta => }/vector_stores/files.py (67%) rename src/openai/resources/{beta => }/vector_stores/vector_stores.py (80%) rename src/openai/types/{beta => }/auto_file_chunking_strategy_param.py (100%) rename src/openai/types/{beta => }/file_chunking_strategy.py (93%) rename src/openai/types/{beta => }/file_chunking_strategy_param.py (100%) rename src/openai/types/{beta => }/other_file_chunking_strategy_object.py (89%) create mode 100644 src/openai/types/responses/__init__.py create mode 100644 src/openai/types/responses/computer_tool.py create mode 100644 src/openai/types/responses/computer_tool_param.py create mode 100644 src/openai/types/responses/easy_input_message_param.py create mode 100644 src/openai/types/responses/file_search_tool.py create mode 100644 src/openai/types/responses/file_search_tool_param.py create mode 100644 src/openai/types/responses/function_tool.py create mode 100644 src/openai/types/responses/function_tool_param.py create mode 100644 src/openai/types/responses/input_item_list_params.py create mode 100644 src/openai/types/responses/response.py create mode 100644 src/openai/types/responses/response_audio_delta_event.py create mode 100644 src/openai/types/responses/response_audio_done_event.py create mode 100644 src/openai/types/responses/response_audio_transcript_delta_event.py create mode 100644 src/openai/types/responses/response_audio_transcript_done_event.py create mode 100644 src/openai/types/responses/response_code_interpreter_call_code_delta_event.py create mode 100644 src/openai/types/responses/response_code_interpreter_call_code_done_event.py create mode 100644 src/openai/types/responses/response_code_interpreter_call_completed_event.py create mode 100644 src/openai/types/responses/response_code_interpreter_call_in_progress_event.py create mode 100644 src/openai/types/responses/response_code_interpreter_call_interpreting_event.py create mode 100644 src/openai/types/responses/response_code_interpreter_tool_call.py create mode 100644 src/openai/types/responses/response_completed_event.py create mode 100644 src/openai/types/responses/response_computer_tool_call.py create mode 100644 
src/openai/types/responses/response_computer_tool_call_param.py create mode 100644 src/openai/types/responses/response_content_part_added_event.py create mode 100644 src/openai/types/responses/response_content_part_done_event.py create mode 100644 src/openai/types/responses/response_create_params.py create mode 100644 src/openai/types/responses/response_created_event.py create mode 100644 src/openai/types/responses/response_error.py create mode 100644 src/openai/types/responses/response_error_event.py create mode 100644 src/openai/types/responses/response_failed_event.py create mode 100644 src/openai/types/responses/response_file_search_call_completed_event.py create mode 100644 src/openai/types/responses/response_file_search_call_in_progress_event.py create mode 100644 src/openai/types/responses/response_file_search_call_searching_event.py create mode 100644 src/openai/types/responses/response_file_search_tool_call.py create mode 100644 src/openai/types/responses/response_file_search_tool_call_param.py create mode 100644 src/openai/types/responses/response_format_text_config.py create mode 100644 src/openai/types/responses/response_format_text_config_param.py create mode 100644 src/openai/types/responses/response_format_text_json_schema_config.py create mode 100644 src/openai/types/responses/response_format_text_json_schema_config_param.py create mode 100644 src/openai/types/responses/response_function_call_arguments_delta_event.py create mode 100644 src/openai/types/responses/response_function_call_arguments_done_event.py create mode 100644 src/openai/types/responses/response_function_tool_call.py create mode 100644 src/openai/types/responses/response_function_tool_call_param.py create mode 100644 src/openai/types/responses/response_function_web_search.py create mode 100644 src/openai/types/responses/response_function_web_search_param.py create mode 100644 src/openai/types/responses/response_in_progress_event.py create mode 100644 src/openai/types/responses/response_includable.py create mode 100644 src/openai/types/responses/response_incomplete_event.py create mode 100644 src/openai/types/responses/response_input_content.py create mode 100644 src/openai/types/responses/response_input_content_param.py create mode 100644 src/openai/types/responses/response_input_file.py create mode 100644 src/openai/types/responses/response_input_file_param.py create mode 100644 src/openai/types/responses/response_input_image.py create mode 100644 src/openai/types/responses/response_input_image_param.py create mode 100644 src/openai/types/responses/response_input_item_param.py create mode 100644 src/openai/types/responses/response_input_message_content_list.py create mode 100644 src/openai/types/responses/response_input_message_content_list_param.py create mode 100644 src/openai/types/responses/response_input_param.py create mode 100644 src/openai/types/responses/response_input_text.py create mode 100644 src/openai/types/responses/response_input_text_param.py create mode 100644 src/openai/types/responses/response_item_list.py create mode 100644 src/openai/types/responses/response_output_item.py create mode 100644 src/openai/types/responses/response_output_item_added_event.py create mode 100644 src/openai/types/responses/response_output_item_done_event.py create mode 100644 src/openai/types/responses/response_output_message.py create mode 100644 src/openai/types/responses/response_output_message_param.py create mode 100644 src/openai/types/responses/response_output_refusal.py create mode 100644 
src/openai/types/responses/response_output_refusal_param.py create mode 100644 src/openai/types/responses/response_output_text.py create mode 100644 src/openai/types/responses/response_output_text_param.py create mode 100644 src/openai/types/responses/response_refusal_delta_event.py create mode 100644 src/openai/types/responses/response_refusal_done_event.py create mode 100644 src/openai/types/responses/response_retrieve_params.py create mode 100644 src/openai/types/responses/response_status.py create mode 100644 src/openai/types/responses/response_stream_event.py create mode 100644 src/openai/types/responses/response_text_annotation_delta_event.py create mode 100644 src/openai/types/responses/response_text_config.py create mode 100644 src/openai/types/responses/response_text_config_param.py create mode 100644 src/openai/types/responses/response_text_delta_event.py create mode 100644 src/openai/types/responses/response_text_done_event.py create mode 100644 src/openai/types/responses/response_usage.py create mode 100644 src/openai/types/responses/response_web_search_call_completed_event.py create mode 100644 src/openai/types/responses/response_web_search_call_in_progress_event.py create mode 100644 src/openai/types/responses/response_web_search_call_searching_event.py create mode 100644 src/openai/types/responses/tool.py create mode 100644 src/openai/types/responses/tool_choice_function.py create mode 100644 src/openai/types/responses/tool_choice_function_param.py create mode 100644 src/openai/types/responses/tool_choice_options.py create mode 100644 src/openai/types/responses/tool_choice_types.py create mode 100644 src/openai/types/responses/tool_choice_types_param.py create mode 100644 src/openai/types/responses/tool_param.py create mode 100644 src/openai/types/responses/web_search_tool.py create mode 100644 src/openai/types/responses/web_search_tool_param.py create mode 100644 src/openai/types/shared/comparison_filter.py create mode 100644 src/openai/types/shared/compound_filter.py create mode 100644 src/openai/types/shared/reasoning.py create mode 100644 src/openai/types/shared/reasoning_effort.py create mode 100644 src/openai/types/shared_params/comparison_filter.py create mode 100644 src/openai/types/shared_params/compound_filter.py create mode 100644 src/openai/types/shared_params/reasoning.py create mode 100644 src/openai/types/shared_params/reasoning_effort.py rename src/openai/types/{beta => }/static_file_chunking_strategy.py (94%) rename src/openai/types/{beta => }/static_file_chunking_strategy_object.py (92%) rename src/openai/types/{beta => }/static_file_chunking_strategy_object_param.py (100%) rename src/openai/types/{beta => }/static_file_chunking_strategy_param.py (100%) rename src/openai/types/{beta => }/vector_store.py (97%) rename src/openai/types/{beta => }/vector_store_create_params.py (97%) rename src/openai/types/{beta => }/vector_store_deleted.py (89%) rename src/openai/types/{beta => }/vector_store_list_params.py (100%) create mode 100644 src/openai/types/vector_store_search_params.py create mode 100644 src/openai/types/vector_store_search_response.py rename src/openai/types/{beta => }/vector_store_update_params.py (96%) rename src/openai/types/{beta => }/vector_stores/__init__.py (82%) rename src/openai/types/{beta => }/vector_stores/file_batch_create_params.py (61%) rename src/openai/types/{beta => }/vector_stores/file_batch_list_files_params.py (100%) create mode 100644 src/openai/types/vector_stores/file_content_response.py rename src/openai/types/{beta => 
}/vector_stores/file_create_params.py (60%) rename src/openai/types/{beta => }/vector_stores/file_list_params.py (100%) create mode 100644 src/openai/types/vector_stores/file_update_params.py rename src/openai/types/{beta => }/vector_stores/vector_store_file.py (76%) rename src/openai/types/{beta => }/vector_stores/vector_store_file_batch.py (97%) rename src/openai/types/{beta => }/vector_stores/vector_store_file_deleted.py (89%) rename tests/api_resources/{beta/vector_stores => responses}/__init__.py (100%) create mode 100644 tests/api_resources/responses/test_input_items.py create mode 100644 tests/api_resources/test_responses.py rename tests/api_resources/{beta => }/test_vector_stores.py (64%) create mode 100644 tests/api_resources/vector_stores/__init__.py rename tests/api_resources/{beta => }/vector_stores/test_file_batches.py (81%) rename tests/api_resources/{beta => }/vector_stores/test_files.py (55%) diff --git a/.stats.yml b/.stats.yml index 0d7e83be4f..455874212c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 74 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b524aed1c2c5c928aa4e2c546f5dbb364e7b4d5027daf05e42e210b05a97c3c6.yml +configured_endpoints: 81 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-be834d63e326a82494e819085137f5eb15866f3fc787db1f3afe7168d419e18a.yml diff --git a/api.md b/api.md index c130d478f8..8a01ba7c5a 100644 --- a/api.md +++ b/api.md @@ -3,10 +3,14 @@ ```python from openai.types import ( ChatModel, + ComparisonFilter, + CompoundFilter, ErrorObject, FunctionDefinition, FunctionParameters, Metadata, + Reasoning, + ReasoningEffort, ResponseFormatJSONObject, ResponseFormatJSONSchema, ResponseFormatText, @@ -59,7 +63,6 @@ from openai.types.chat import ( ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionPredictionContent, - ChatCompletionReasoningEffort, ChatCompletionRole, ChatCompletionStoreMessage, ChatCompletionStreamOptions, @@ -69,6 +72,7 @@ from openai.types.chat import ( ChatCompletionToolChoiceOption, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam, + ChatCompletionReasoningEffort, ) ``` @@ -248,6 +252,66 @@ Methods: - client.fine_tuning.jobs.checkpoints.list(fine_tuning_job_id, \*\*params) -> SyncCursorPage[FineTuningJobCheckpoint] +# VectorStores + +Types: + +```python +from openai.types import ( + AutoFileChunkingStrategyParam, + FileChunkingStrategy, + FileChunkingStrategyParam, + OtherFileChunkingStrategyObject, + StaticFileChunkingStrategy, + StaticFileChunkingStrategyObject, + StaticFileChunkingStrategyObjectParam, + VectorStore, + VectorStoreDeleted, + VectorStoreSearchResponse, +) +``` + +Methods: + +- client.vector_stores.create(\*\*params) -> VectorStore +- client.vector_stores.retrieve(vector_store_id) -> VectorStore +- client.vector_stores.update(vector_store_id, \*\*params) -> VectorStore +- client.vector_stores.list(\*\*params) -> SyncCursorPage[VectorStore] +- client.vector_stores.delete(vector_store_id) -> VectorStoreDeleted +- client.vector_stores.search(vector_store_id, \*\*params) -> SyncPage[VectorStoreSearchResponse] + +## Files + +Types: + +```python +from openai.types.vector_stores import VectorStoreFile, VectorStoreFileDeleted, FileContentResponse +``` + +Methods: + +- client.vector_stores.files.create(vector_store_id, \*\*params) -> VectorStoreFile +- client.vector_stores.files.retrieve(file_id, \*, vector_store_id) -> VectorStoreFile +- client.vector_stores.files.update(file_id, \*, 
vector_store_id, \*\*params) -> VectorStoreFile +- client.vector_stores.files.list(vector_store_id, \*\*params) -> SyncCursorPage[VectorStoreFile] +- client.vector_stores.files.delete(file_id, \*, vector_store_id) -> VectorStoreFileDeleted +- client.vector_stores.files.content(file_id, \*, vector_store_id) -> SyncPage[FileContentResponse] + +## FileBatches + +Types: + +```python +from openai.types.vector_stores import VectorStoreFileBatch +``` + +Methods: + +- client.vector_stores.file_batches.create(vector_store_id, \*\*params) -> VectorStoreFileBatch +- client.vector_stores.file_batches.retrieve(batch_id, \*, vector_store_id) -> VectorStoreFileBatch +- client.vector_stores.file_batches.cancel(batch_id, \*, vector_store_id) -> VectorStoreFileBatch +- client.vector_stores.file_batches.list_files(batch_id, \*, vector_store_id, \*\*params) -> SyncCursorPage[VectorStoreFile] + # Beta ## Realtime @@ -316,62 +380,6 @@ Methods: - client.beta.realtime.sessions.create(\*\*params) -> SessionCreateResponse -## VectorStores - -Types: - -```python -from openai.types.beta import ( - AutoFileChunkingStrategyParam, - FileChunkingStrategy, - FileChunkingStrategyParam, - OtherFileChunkingStrategyObject, - StaticFileChunkingStrategy, - StaticFileChunkingStrategyObject, - StaticFileChunkingStrategyObjectParam, - VectorStore, - VectorStoreDeleted, -) -``` - -Methods: - -- client.beta.vector_stores.create(\*\*params) -> VectorStore -- client.beta.vector_stores.retrieve(vector_store_id) -> VectorStore -- client.beta.vector_stores.update(vector_store_id, \*\*params) -> VectorStore -- client.beta.vector_stores.list(\*\*params) -> SyncCursorPage[VectorStore] -- client.beta.vector_stores.delete(vector_store_id) -> VectorStoreDeleted - -### Files - -Types: - -```python -from openai.types.beta.vector_stores import VectorStoreFile, VectorStoreFileDeleted -``` - -Methods: - -- client.beta.vector_stores.files.create(vector_store_id, \*\*params) -> VectorStoreFile -- client.beta.vector_stores.files.retrieve(file_id, \*, vector_store_id) -> VectorStoreFile -- client.beta.vector_stores.files.list(vector_store_id, \*\*params) -> SyncCursorPage[VectorStoreFile] -- client.beta.vector_stores.files.delete(file_id, \*, vector_store_id) -> VectorStoreFileDeleted - -### FileBatches - -Types: - -```python -from openai.types.beta.vector_stores import VectorStoreFileBatch -``` - -Methods: - -- client.beta.vector_stores.file_batches.create(vector_store_id, \*\*params) -> VectorStoreFileBatch -- client.beta.vector_stores.file_batches.retrieve(batch_id, \*, vector_store_id) -> VectorStoreFileBatch -- client.beta.vector_stores.file_batches.cancel(batch_id, \*, vector_store_id) -> VectorStoreFileBatch -- client.beta.vector_stores.file_batches.list_files(batch_id, \*, vector_store_id, \*\*params) -> SyncCursorPage[VectorStoreFile] - ## Assistants Types: @@ -557,3 +565,99 @@ from openai.types.uploads import UploadPart Methods: - client.uploads.parts.create(upload_id, \*\*params) -> UploadPart + +# Responses + +Types: + +```python +from openai.types.responses import ( + ComputerTool, + EasyInputMessage, + FileSearchTool, + FunctionTool, + Response, + ResponseAudioDeltaEvent, + ResponseAudioDoneEvent, + ResponseAudioTranscriptDeltaEvent, + ResponseAudioTranscriptDoneEvent, + ResponseCodeInterpreterCallCodeDeltaEvent, + ResponseCodeInterpreterCallCodeDoneEvent, + ResponseCodeInterpreterCallCompletedEvent, + ResponseCodeInterpreterCallInProgressEvent, + ResponseCodeInterpreterCallInterpretingEvent, + ResponseCodeInterpreterToolCall, + 
ResponseCompletedEvent, + ResponseComputerToolCall, + ResponseContent, + ResponseContentPartAddedEvent, + ResponseContentPartDoneEvent, + ResponseCreatedEvent, + ResponseError, + ResponseErrorEvent, + ResponseFailedEvent, + ResponseFileSearchCallCompletedEvent, + ResponseFileSearchCallInProgressEvent, + ResponseFileSearchCallSearchingEvent, + ResponseFileSearchToolCall, + ResponseFormatTextConfig, + ResponseFormatTextJSONSchemaConfig, + ResponseFunctionCallArgumentsDeltaEvent, + ResponseFunctionCallArgumentsDoneEvent, + ResponseFunctionToolCall, + ResponseFunctionWebSearch, + ResponseInProgressEvent, + ResponseIncludable, + ResponseIncompleteEvent, + ResponseInput, + ResponseInputAudio, + ResponseInputContent, + ResponseInputFile, + ResponseInputImage, + ResponseInputItem, + ResponseInputMessageContentList, + ResponseInputText, + ResponseOutputAudio, + ResponseOutputItem, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, + ResponseOutputMessage, + ResponseOutputRefusal, + ResponseOutputText, + ResponseRefusalDeltaEvent, + ResponseRefusalDoneEvent, + ResponseStatus, + ResponseStreamEvent, + ResponseTextAnnotationDeltaEvent, + ResponseTextConfig, + ResponseTextDeltaEvent, + ResponseTextDoneEvent, + ResponseUsage, + ResponseWebSearchCallCompletedEvent, + ResponseWebSearchCallInProgressEvent, + ResponseWebSearchCallSearchingEvent, + Tool, + ToolChoiceFunction, + ToolChoiceOptions, + ToolChoiceTypes, + WebSearchTool, +) +``` + +Methods: + +- client.responses.create(\*\*params) -> Response +- client.responses.retrieve(response_id, \*\*params) -> Response +- client.responses.delete(response_id) -> None + +## InputItems + +Types: + +```python +from openai.types.responses import ResponseItemList +``` + +Methods: + +- client.responses.input_items.list(response_id, \*\*params) -> SyncCursorPage[Data] diff --git a/src/openai/_client.py b/src/openai/_client.py index 2464c6504c..18d96da9a3 100644 --- a/src/openai/_client.py +++ b/src/openai/_client.py @@ -37,7 +37,9 @@ from .resources.chat import chat from .resources.audio import audio from .resources.uploads import uploads +from .resources.responses import responses from .resources.fine_tuning import fine_tuning +from .resources.vector_stores import vector_stores __all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "OpenAI", "AsyncOpenAI", "Client", "AsyncClient"] @@ -52,9 +54,11 @@ class OpenAI(SyncAPIClient): moderations: moderations.Moderations models: models.Models fine_tuning: fine_tuning.FineTuning + vector_stores: vector_stores.VectorStores beta: beta.Beta batches: batches.Batches uploads: uploads.Uploads + responses: responses.Responses with_raw_response: OpenAIWithRawResponse with_streaming_response: OpenAIWithStreamedResponse @@ -149,9 +153,11 @@ def __init__( self.moderations = moderations.Moderations(self) self.models = models.Models(self) self.fine_tuning = fine_tuning.FineTuning(self) + self.vector_stores = vector_stores.VectorStores(self) self.beta = beta.Beta(self) self.batches = batches.Batches(self) self.uploads = uploads.Uploads(self) + self.responses = responses.Responses(self) self.with_raw_response = OpenAIWithRawResponse(self) self.with_streaming_response = OpenAIWithStreamedResponse(self) @@ -279,9 +285,11 @@ class AsyncOpenAI(AsyncAPIClient): moderations: moderations.AsyncModerations models: models.AsyncModels fine_tuning: fine_tuning.AsyncFineTuning + vector_stores: vector_stores.AsyncVectorStores beta: beta.AsyncBeta batches: batches.AsyncBatches uploads: uploads.AsyncUploads + responses: 
responses.AsyncResponses with_raw_response: AsyncOpenAIWithRawResponse with_streaming_response: AsyncOpenAIWithStreamedResponse @@ -376,9 +384,11 @@ def __init__( self.moderations = moderations.AsyncModerations(self) self.models = models.AsyncModels(self) self.fine_tuning = fine_tuning.AsyncFineTuning(self) + self.vector_stores = vector_stores.AsyncVectorStores(self) self.beta = beta.AsyncBeta(self) self.batches = batches.AsyncBatches(self) self.uploads = uploads.AsyncUploads(self) + self.responses = responses.AsyncResponses(self) self.with_raw_response = AsyncOpenAIWithRawResponse(self) self.with_streaming_response = AsyncOpenAIWithStreamedResponse(self) @@ -507,9 +517,11 @@ def __init__(self, client: OpenAI) -> None: self.moderations = moderations.ModerationsWithRawResponse(client.moderations) self.models = models.ModelsWithRawResponse(client.models) self.fine_tuning = fine_tuning.FineTuningWithRawResponse(client.fine_tuning) + self.vector_stores = vector_stores.VectorStoresWithRawResponse(client.vector_stores) self.beta = beta.BetaWithRawResponse(client.beta) self.batches = batches.BatchesWithRawResponse(client.batches) self.uploads = uploads.UploadsWithRawResponse(client.uploads) + self.responses = responses.ResponsesWithRawResponse(client.responses) class AsyncOpenAIWithRawResponse: @@ -523,9 +535,11 @@ def __init__(self, client: AsyncOpenAI) -> None: self.moderations = moderations.AsyncModerationsWithRawResponse(client.moderations) self.models = models.AsyncModelsWithRawResponse(client.models) self.fine_tuning = fine_tuning.AsyncFineTuningWithRawResponse(client.fine_tuning) + self.vector_stores = vector_stores.AsyncVectorStoresWithRawResponse(client.vector_stores) self.beta = beta.AsyncBetaWithRawResponse(client.beta) self.batches = batches.AsyncBatchesWithRawResponse(client.batches) self.uploads = uploads.AsyncUploadsWithRawResponse(client.uploads) + self.responses = responses.AsyncResponsesWithRawResponse(client.responses) class OpenAIWithStreamedResponse: @@ -539,9 +553,11 @@ def __init__(self, client: OpenAI) -> None: self.moderations = moderations.ModerationsWithStreamingResponse(client.moderations) self.models = models.ModelsWithStreamingResponse(client.models) self.fine_tuning = fine_tuning.FineTuningWithStreamingResponse(client.fine_tuning) + self.vector_stores = vector_stores.VectorStoresWithStreamingResponse(client.vector_stores) self.beta = beta.BetaWithStreamingResponse(client.beta) self.batches = batches.BatchesWithStreamingResponse(client.batches) self.uploads = uploads.UploadsWithStreamingResponse(client.uploads) + self.responses = responses.ResponsesWithStreamingResponse(client.responses) class AsyncOpenAIWithStreamedResponse: @@ -555,9 +571,11 @@ def __init__(self, client: AsyncOpenAI) -> None: self.moderations = moderations.AsyncModerationsWithStreamingResponse(client.moderations) self.models = models.AsyncModelsWithStreamingResponse(client.models) self.fine_tuning = fine_tuning.AsyncFineTuningWithStreamingResponse(client.fine_tuning) + self.vector_stores = vector_stores.AsyncVectorStoresWithStreamingResponse(client.vector_stores) self.beta = beta.AsyncBetaWithStreamingResponse(client.beta) self.batches = batches.AsyncBatchesWithStreamingResponse(client.batches) self.uploads = uploads.AsyncUploadsWithStreamingResponse(client.uploads) + self.responses = responses.AsyncResponsesWithStreamingResponse(client.responses) Client = OpenAI diff --git a/src/openai/resources/__init__.py b/src/openai/resources/__init__.py index e2cc1c4b0c..d3457cf319 100644 --- 
a/src/openai/resources/__init__.py +++ b/src/openai/resources/__init__.py @@ -64,6 +64,14 @@ UploadsWithStreamingResponse, AsyncUploadsWithStreamingResponse, ) +from .responses import ( + Responses, + AsyncResponses, + ResponsesWithRawResponse, + AsyncResponsesWithRawResponse, + ResponsesWithStreamingResponse, + AsyncResponsesWithStreamingResponse, +) from .embeddings import ( Embeddings, AsyncEmbeddings, @@ -96,6 +104,14 @@ ModerationsWithStreamingResponse, AsyncModerationsWithStreamingResponse, ) +from .vector_stores import ( + VectorStores, + AsyncVectorStores, + VectorStoresWithRawResponse, + AsyncVectorStoresWithRawResponse, + VectorStoresWithStreamingResponse, + AsyncVectorStoresWithStreamingResponse, +) __all__ = [ "Completions", @@ -152,6 +168,12 @@ "AsyncFineTuningWithRawResponse", "FineTuningWithStreamingResponse", "AsyncFineTuningWithStreamingResponse", + "VectorStores", + "AsyncVectorStores", + "VectorStoresWithRawResponse", + "AsyncVectorStoresWithRawResponse", + "VectorStoresWithStreamingResponse", + "AsyncVectorStoresWithStreamingResponse", "Beta", "AsyncBeta", "BetaWithRawResponse", @@ -170,4 +192,10 @@ "AsyncUploadsWithRawResponse", "UploadsWithStreamingResponse", "AsyncUploadsWithStreamingResponse", + "Responses", + "AsyncResponses", + "ResponsesWithRawResponse", + "AsyncResponsesWithRawResponse", + "ResponsesWithStreamingResponse", + "AsyncResponsesWithStreamingResponse", ] diff --git a/src/openai/resources/beta/__init__.py b/src/openai/resources/beta/__init__.py index 01f5338757..87fea25267 100644 --- a/src/openai/resources/beta/__init__.py +++ b/src/openai/resources/beta/__init__.py @@ -24,22 +24,8 @@ AssistantsWithStreamingResponse, AsyncAssistantsWithStreamingResponse, ) -from .vector_stores import ( - VectorStores, - AsyncVectorStores, - VectorStoresWithRawResponse, - AsyncVectorStoresWithRawResponse, - VectorStoresWithStreamingResponse, - AsyncVectorStoresWithStreamingResponse, -) __all__ = [ - "VectorStores", - "AsyncVectorStores", - "VectorStoresWithRawResponse", - "AsyncVectorStoresWithRawResponse", - "VectorStoresWithStreamingResponse", - "AsyncVectorStoresWithStreamingResponse", "Assistants", "AsyncAssistants", "AssistantsWithRawResponse", diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index ffecd8f9e9..1c7cbf3737 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -27,6 +27,7 @@ from ...types.shared.chat_model import ChatModel from ...types.beta.assistant_deleted import AssistantDeleted from ...types.shared_params.metadata import Metadata +from ...types.shared.reasoning_effort import ReasoningEffort from ...types.beta.assistant_tool_param import AssistantToolParam from ...types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -61,7 +62,7 @@ def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN, @@ -98,7 +99,7 @@ def create( name: The name of the assistant. The maximum length is 256 characters. 
- reasoning_effort: **o1 and o3-mini models only** + reasoning_effort: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -256,7 +257,7 @@ def update( ] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN, @@ -294,7 +295,7 @@ def update( name: The name of the assistant. The maximum length is 256 characters. - reasoning_effort: **o1 and o3-mini models only** + reasoning_effort: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -504,7 +505,7 @@ async def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN, @@ -541,7 +542,7 @@ async def create( name: The name of the assistant. The maximum length is 256 characters. - reasoning_effort: **o1 and o3-mini models only** + reasoning_effort: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -699,7 +700,7 @@ async def update( ] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN, @@ -737,7 +738,7 @@ async def update( name: The name of the assistant. The maximum length is 256 characters. - reasoning_effort: **o1 and o3-mini models only** + reasoning_effort: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). 
Currently diff --git a/src/openai/resources/beta/beta.py b/src/openai/resources/beta/beta.py index 5946985519..46c100d3f9 100644 --- a/src/openai/resources/beta/beta.py +++ b/src/openai/resources/beta/beta.py @@ -28,14 +28,6 @@ RealtimeWithStreamingResponse, AsyncRealtimeWithStreamingResponse, ) -from .vector_stores.vector_stores import ( - VectorStores, - AsyncVectorStores, - VectorStoresWithRawResponse, - AsyncVectorStoresWithRawResponse, - VectorStoresWithStreamingResponse, - AsyncVectorStoresWithStreamingResponse, -) __all__ = ["Beta", "AsyncBeta"] @@ -45,10 +37,6 @@ class Beta(SyncAPIResource): def realtime(self) -> Realtime: return Realtime(self._client) - @cached_property - def vector_stores(self) -> VectorStores: - return VectorStores(self._client) - @cached_property def assistants(self) -> Assistants: return Assistants(self._client) @@ -82,10 +70,6 @@ class AsyncBeta(AsyncAPIResource): def realtime(self) -> AsyncRealtime: return AsyncRealtime(self._client) - @cached_property - def vector_stores(self) -> AsyncVectorStores: - return AsyncVectorStores(self._client) - @cached_property def assistants(self) -> AsyncAssistants: return AsyncAssistants(self._client) @@ -122,10 +106,6 @@ def __init__(self, beta: Beta) -> None: def realtime(self) -> RealtimeWithRawResponse: return RealtimeWithRawResponse(self._beta.realtime) - @cached_property - def vector_stores(self) -> VectorStoresWithRawResponse: - return VectorStoresWithRawResponse(self._beta.vector_stores) - @cached_property def assistants(self) -> AssistantsWithRawResponse: return AssistantsWithRawResponse(self._beta.assistants) @@ -143,10 +123,6 @@ def __init__(self, beta: AsyncBeta) -> None: def realtime(self) -> AsyncRealtimeWithRawResponse: return AsyncRealtimeWithRawResponse(self._beta.realtime) - @cached_property - def vector_stores(self) -> AsyncVectorStoresWithRawResponse: - return AsyncVectorStoresWithRawResponse(self._beta.vector_stores) - @cached_property def assistants(self) -> AsyncAssistantsWithRawResponse: return AsyncAssistantsWithRawResponse(self._beta.assistants) @@ -164,10 +140,6 @@ def __init__(self, beta: Beta) -> None: def realtime(self) -> RealtimeWithStreamingResponse: return RealtimeWithStreamingResponse(self._beta.realtime) - @cached_property - def vector_stores(self) -> VectorStoresWithStreamingResponse: - return VectorStoresWithStreamingResponse(self._beta.vector_stores) - @cached_property def assistants(self) -> AssistantsWithStreamingResponse: return AssistantsWithStreamingResponse(self._beta.assistants) @@ -185,10 +157,6 @@ def __init__(self, beta: AsyncBeta) -> None: def realtime(self) -> AsyncRealtimeWithStreamingResponse: return AsyncRealtimeWithStreamingResponse(self._beta.realtime) - @cached_property - def vector_stores(self) -> AsyncVectorStoresWithStreamingResponse: - return AsyncVectorStoresWithStreamingResponse(self._beta.vector_stores) - @cached_property def assistants(self) -> AsyncAssistantsWithStreamingResponse: return AsyncAssistantsWithStreamingResponse(self._beta.assistants) diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index e96e70fc5a..8f6eed0cad 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -37,6 +37,7 @@ from .....types.beta.threads.run import Run from .....types.shared.chat_model import ChatModel from .....types.shared_params.metadata import Metadata +from .....types.shared.reasoning_effort import ReasoningEffort from 
.....types.beta.assistant_tool_param import AssistantToolParam from .....types.beta.assistant_stream_event import AssistantStreamEvent from .....types.beta.threads.runs.run_step_include import RunStepInclude @@ -85,7 +86,7 @@ def create( metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -154,7 +155,7 @@ def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o1 and o3-mini models only** + reasoning_effort: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -236,7 +237,7 @@ def create( metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, @@ -308,7 +309,7 @@ def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o1 and o3-mini models only** + reasoning_effort: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -386,7 +387,7 @@ def create( metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, @@ -458,7 +459,7 @@ def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o1 and o3-mini models only** + reasoning_effort: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). 
Currently @@ -535,7 +536,7 @@ def create( metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -967,7 +968,7 @@ async def create( metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1036,7 +1037,7 @@ async def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o1 and o3-mini models only** + reasoning_effort: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -1118,7 +1119,7 @@ async def create( metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, @@ -1190,7 +1191,7 @@ async def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o1 and o3-mini models only** + reasoning_effort: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -1268,7 +1269,7 @@ async def create( metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, @@ -1340,7 +1341,7 @@ async def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o1 and o3-mini models only** + reasoning_effort: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). 
Currently @@ -1417,7 +1418,7 @@ async def create( metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py index 9c2a0821a3..b3e4666fc1 100644 --- a/src/openai/resources/chat/completions/completions.py +++ b/src/openai/resources/chat/completions/completions.py @@ -29,7 +29,6 @@ from ....pagination import SyncCursorPage, AsyncCursorPage from ....types.chat import ( ChatCompletionAudioParam, - ChatCompletionReasoningEffort, completion_list_params, completion_create_params, completion_update_params, @@ -38,13 +37,12 @@ from ....types.shared.chat_model import ChatModel from ....types.chat.chat_completion import ChatCompletion from ....types.shared_params.metadata import Metadata +from ....types.shared.reasoning_effort import ReasoningEffort from ....types.chat.chat_completion_chunk import ChatCompletionChunk from ....types.chat.chat_completion_deleted import ChatCompletionDeleted -from ....types.chat.chat_completion_modality import ChatCompletionModality from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam -from ....types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam @@ -91,16 +89,16 @@ def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, @@ -110,6 +108,7 @@ def create( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: 
Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -117,9 +116,15 @@ def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion: - """Creates a model response for the given chat conversation. + """ + **Starting a new project?** We recommend trying + [Responses](https://platform.openai.com/docs/api-reference/responses) to take + advantage of the latest OpenAI platform features. Compare + [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + + --- - Learn more in the + Creates a model response for the given chat conversation. Learn more in the [text generation](https://platform.openai.com/docs/guides/text-generation), [vision](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio) guides. @@ -138,9 +143,11 @@ def create( [images](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio). - model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) - table for details on which models work with the Chat API. + model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. audio: Parameters for audio output. Required when audio output is requested with `modalities: ["audio"]`. @@ -201,8 +208,8 @@ def create( Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. - modalities: Output types that you would like the model to generate for this request. Most - models are capable of generating text, which is the default: + modalities: Output types that you would like the model to generate. Most models are capable + of generating text, which is the default: `["text"]` @@ -227,7 +234,7 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o1 and o3-mini models only** + reasoning_effort: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -241,16 +248,9 @@ def create( in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. 
Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and @@ -265,23 +265,29 @@ def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarantee. + latency guarentee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. + tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - stop: Up to 4 sequences where the API will stop generating further tokens. + When this parameter is set, the response body will include the `service_tier` + utilized. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. store: Whether or not to store the output of this chat completion request for use in our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. - stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - sent as data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` - message. - [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + for more information, along with the + [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + guide for more information on how to handle the streaming events. stream_options: Options for streaming response. Only set this when you set `stream: true`. @@ -318,6 +324,10 @@ def create( and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + web_search_options: This tool searches the web for relevant results to use in a response. Learn more + about the + [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). 
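To illustrate the new `web_search_options` parameter introduced in this hunk, here is a minimal usage sketch of how a caller might pass it once the patch lands. The model name `gpt-4o-search-preview` and the empty options dict are assumptions for illustration only; the concrete shape of `completion_create_params.WebSearchOptions` is not shown in this patch.

```python
# Illustrative sketch only -- assumes this patch is applied and that the
# account has access to a web-search-capable chat model.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

completion = client.chat.completions.create(
    model="gpt-4o-search-preview",  # assumed search-capable model
    web_search_options={},          # empty dict: accept default search behaviour (assumption)
    messages=[
        {"role": "user", "content": "What was a positive news story from today?"}
    ],
)

print(completion.choices[0].message.content)
```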
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -344,16 +354,16 @@ def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -362,6 +372,7 @@ def create( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -369,9 +380,15 @@ def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Stream[ChatCompletionChunk]: - """Creates a model response for the given chat conversation. + """ + **Starting a new project?** We recommend trying + [Responses](https://platform.openai.com/docs/api-reference/responses) to take + advantage of the latest OpenAI platform features. Compare + [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + + --- - Learn more in the + Creates a model response for the given chat conversation. Learn more in the [text generation](https://platform.openai.com/docs/guides/text-generation), [vision](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio) guides. @@ -390,16 +407,20 @@ def create( [images](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio). - model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) - table for details on which models work with the Chat API. - - stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - sent as data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` - message. - [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). 
+ model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + for more information, along with the + [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + guide for more information on how to handle the streaming events. audio: Parameters for audio output. Required when audio output is requested with `modalities: ["audio"]`. @@ -460,8 +481,8 @@ def create( Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. - modalities: Output types that you would like the model to generate for this request. Most - models are capable of generating text, which is the default: + modalities: Output types that you would like the model to generate. Most models are capable + of generating text, which is the default: `["text"]` @@ -486,7 +507,7 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o1 and o3-mini models only** + reasoning_effort: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -500,16 +521,9 @@ def create( in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and @@ -524,12 +538,16 @@ def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarantee. + latency guarentee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. + tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - stop: Up to 4 sequences where the API will stop generating further tokens. 
+ When this parameter is set, the response body will include the `service_tier` + utilized. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. store: Whether or not to store the output of this chat completion request for use in our [model distillation](https://platform.openai.com/docs/guides/distillation) @@ -570,6 +588,10 @@ def create( and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + web_search_options: This tool searches the web for relevant results to use in a response. Learn more + about the + [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -596,16 +618,16 @@ def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -614,6 +636,7 @@ def create( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -621,9 +644,15 @@ def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion | Stream[ChatCompletionChunk]: - """Creates a model response for the given chat conversation. + """ + **Starting a new project?** We recommend trying + [Responses](https://platform.openai.com/docs/api-reference/responses) to take + advantage of the latest OpenAI platform features. Compare + [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + + --- - Learn more in the + Creates a model response for the given chat conversation. Learn more in the [text generation](https://platform.openai.com/docs/guides/text-generation), [vision](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio) guides. 
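The streaming overloads in this file return a `Stream[ChatCompletionChunk]` when `stream=True`, delivered as the server-sent events described in the updated docstring. A minimal sketch of consuming that stream, assuming a generally available model such as `gpt-4o`:

```python
# Minimal streaming sketch: iterate server-sent chunks and print the text deltas.
from openai import OpenAI

client = OpenAI()

stream = client.chat.completions.create(
    model="gpt-4o",  # assumed model for illustration
    messages=[{"role": "user", "content": "Say hello in three languages."}],
    stream=True,
)

for chunk in stream:
    # each ChatCompletionChunk carries an incremental delta for the first choice
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
print()
```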
@@ -642,16 +671,20 @@ def create( [images](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio). - model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) - table for details on which models work with the Chat API. - - stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - sent as data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` - message. - [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + for more information, along with the + [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + guide for more information on how to handle the streaming events. audio: Parameters for audio output. Required when audio output is requested with `modalities: ["audio"]`. @@ -712,8 +745,8 @@ def create( Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. - modalities: Output types that you would like the model to generate for this request. Most - models are capable of generating text, which is the default: + modalities: Output types that you would like the model to generate. Most models are capable + of generating text, which is the default: `["text"]` @@ -738,7 +771,7 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o1 and o3-mini models only** + reasoning_effort: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -752,16 +785,9 @@ def create( in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. seed: This feature is in Beta. 
If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and @@ -776,12 +802,16 @@ def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarantee. + latency guarentee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. + tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - stop: Up to 4 sequences where the API will stop generating further tokens. + When this parameter is set, the response body will include the `service_tier` + utilized. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. store: Whether or not to store the output of this chat completion request for use in our [model distillation](https://platform.openai.com/docs/guides/distillation) @@ -822,6 +852,10 @@ def create( and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + web_search_options: This tool searches the web for relevant results to use in a response. Learn more + about the + [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -847,16 +881,16 @@ def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, @@ -866,6 +900,7 @@ def create( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -907,6 +942,7 @@ def create( "top_logprobs": top_logprobs, "top_p": top_p, "user": user, + "web_search_options": web_search_options, }, completion_create_params.CompletionCreateParams, ), @@ -931,7 +967,7 @@ def retrieve( ) -> ChatCompletion: """Get a stored chat completion. - Only chat completions that have been created with + Only Chat Completions that have been created with the `store` parameter set to `true` will be returned. Args: @@ -967,7 +1003,7 @@ def update( ) -> ChatCompletion: """Modify a stored chat completion. - Only chat completions that have been created + Only Chat Completions that have been created with the `store` parameter set to `true` can be modified. Currently, the only supported modification is to update the `metadata` field. @@ -1013,24 +1049,24 @@ def list( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyncCursorPage[ChatCompletion]: - """List stored chat completions. + """List stored Chat Completions. - Only chat completions that have been stored with + Only Chat Completions that have been stored with the `store` parameter set to `true` will be returned. Args: after: Identifier for the last chat completion from the previous pagination request. - limit: Number of chat completions to retrieve. + limit: Number of Chat Completions to retrieve. metadata: - A list of metadata keys to filter the chat completions by. Example: + A list of metadata keys to filter the Chat Completions by. Example: `metadata[key1]=value1&metadata[key2]=value2` - model: The model used to generate the chat completions. + model: The model used to generate the Chat Completions. - order: Sort order for chat completions by timestamp. Use `asc` for ascending order or + order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`. extra_headers: Send extra headers @@ -1076,7 +1112,7 @@ def delete( ) -> ChatCompletionDeleted: """Delete a stored chat completion. - Only chat completions that have been created + Only Chat Completions that have been created with the `store` parameter set to `true` can be deleted. 
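The `retrieve`, `update`, `list`, and `delete` methods documented above only operate on completions created with `store=True`. A sketch of that round trip, again assuming `gpt-4o` purely for illustration:

```python
# Store a completion, read it back, tag it with metadata, then clean it up.
from openai import OpenAI

client = OpenAI()

created = client.chat.completions.create(
    model="gpt-4o",  # assumed model
    store=True,      # required for the stored-completion endpoints used below
    messages=[{"role": "user", "content": "Write a one-line haiku about code review."}],
)

fetched = client.chat.completions.retrieve(created.id)
print(fetched.choices[0].message.content)

# attach metadata so the completion can later be filtered via list(metadata=...)
client.chat.completions.update(created.id, metadata={"purpose": "sdk-example"})

# list() returns a cursor page that can be iterated directly
for completion in client.chat.completions.list(model="gpt-4o", limit=10, order="desc"):
    print(completion.id)

client.chat.completions.delete(created.id)
```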
Args: @@ -1138,16 +1174,16 @@ async def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, @@ -1157,6 +1193,7 @@ async def create( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1164,9 +1201,15 @@ async def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion: - """Creates a model response for the given chat conversation. + """ + **Starting a new project?** We recommend trying + [Responses](https://platform.openai.com/docs/api-reference/responses) to take + advantage of the latest OpenAI platform features. Compare + [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + + --- - Learn more in the + Creates a model response for the given chat conversation. Learn more in the [text generation](https://platform.openai.com/docs/guides/text-generation), [vision](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio) guides. @@ -1185,9 +1228,11 @@ async def create( [images](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio). - model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) - table for details on which models work with the Chat API. + model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. audio: Parameters for audio output. Required when audio output is requested with `modalities: ["audio"]`. @@ -1248,8 +1293,8 @@ async def create( Keys are strings with a maximum length of 64 characters. 
Values are strings with a maximum length of 512 characters. - modalities: Output types that you would like the model to generate for this request. Most - models are capable of generating text, which is the default: + modalities: Output types that you would like the model to generate. Most models are capable + of generating text, which is the default: `["text"]` @@ -1274,7 +1319,7 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o1 and o3-mini models only** + reasoning_effort: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -1288,16 +1333,9 @@ async def create( in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and @@ -1312,23 +1350,29 @@ async def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarantee. + latency guarentee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. + tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - stop: Up to 4 sequences where the API will stop generating further tokens. + When this parameter is set, the response body will include the `service_tier` + utilized. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. store: Whether or not to store the output of this chat completion request for use in our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. - stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - sent as data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` - message. - [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). 
+ stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + for more information, along with the + [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + guide for more information on how to handle the streaming events. stream_options: Options for streaming response. Only set this when you set `stream: true`. @@ -1365,6 +1409,10 @@ async def create( and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + web_search_options: This tool searches the web for relevant results to use in a response. Learn more + about the + [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -1391,16 +1439,16 @@ async def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1409,6 +1457,7 @@ async def create( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1416,9 +1465,15 @@ async def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncStream[ChatCompletionChunk]: - """Creates a model response for the given chat conversation. + """ + **Starting a new project?** We recommend trying + [Responses](https://platform.openai.com/docs/api-reference/responses) to take + advantage of the latest OpenAI platform features. Compare + [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + + --- - Learn more in the + Creates a model response for the given chat conversation. 
Learn more in the [text generation](https://platform.openai.com/docs/guides/text-generation), [vision](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio) guides. @@ -1437,16 +1492,20 @@ async def create( [images](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio). - model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) - table for details on which models work with the Chat API. - - stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - sent as data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` - message. - [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + for more information, along with the + [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + guide for more information on how to handle the streaming events. audio: Parameters for audio output. Required when audio output is requested with `modalities: ["audio"]`. @@ -1507,8 +1566,8 @@ async def create( Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. - modalities: Output types that you would like the model to generate for this request. Most - models are capable of generating text, which is the default: + modalities: Output types that you would like the model to generate. Most models are capable + of generating text, which is the default: `["text"]` @@ -1533,7 +1592,7 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o1 and o3-mini models only** + reasoning_effort: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -1547,16 +1606,9 @@ async def create( in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. 
+ Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and @@ -1571,12 +1623,16 @@ async def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarantee. + latency guarentee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. + tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - stop: Up to 4 sequences where the API will stop generating further tokens. + When this parameter is set, the response body will include the `service_tier` + utilized. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. store: Whether or not to store the output of this chat completion request for use in our [model distillation](https://platform.openai.com/docs/guides/distillation) @@ -1617,6 +1673,10 @@ async def create( and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + web_search_options: This tool searches the web for relevant results to use in a response. Learn more + about the + [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -1643,16 +1703,16 @@ async def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1661,6 +1721,7 @@ async def create( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1668,9 +1729,15 @@ async def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]: - """Creates a model response for the given chat conversation. + """ + **Starting a new project?** We recommend trying + [Responses](https://platform.openai.com/docs/api-reference/responses) to take + advantage of the latest OpenAI platform features. Compare + [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + + --- - Learn more in the + Creates a model response for the given chat conversation. Learn more in the [text generation](https://platform.openai.com/docs/guides/text-generation), [vision](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio) guides. @@ -1689,16 +1756,20 @@ async def create( [images](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio). - model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) - table for details on which models work with the Chat API. - - stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - sent as data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` - message. - [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + for more information, along with the + [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + guide for more information on how to handle the streaming events. audio: Parameters for audio output. Required when audio output is requested with `modalities: ["audio"]`. @@ -1759,8 +1830,8 @@ async def create( Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. - modalities: Output types that you would like the model to generate for this request. Most - models are capable of generating text, which is the default: + modalities: Output types that you would like the model to generate. Most models are capable + of generating text, which is the default: `["text"]` @@ -1785,7 +1856,7 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
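The async overloads mirror the sync ones: with `stream=True`, `AsyncCompletions.create` resolves to an `AsyncStream[ChatCompletionChunk]`. A minimal sketch, with `gpt-4o` again assumed for illustration:

```python
# Async streaming sketch: the awaited create() yields an AsyncStream of chunks.
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()

    stream = await client.chat.completions.create(
        model="gpt-4o",  # assumed model
        messages=[{"role": "user", "content": "Stream a short limerick."}],
        stream=True,
    )

    async for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            print(delta, end="", flush=True)
    print()


asyncio.run(main())
```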
- reasoning_effort: **o1 and o3-mini models only** + reasoning_effort: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -1799,16 +1870,9 @@ async def create( in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and @@ -1823,12 +1887,16 @@ async def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarantee. + latency guarentee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. + tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - stop: Up to 4 sequences where the API will stop generating further tokens. + When this parameter is set, the response body will include the `service_tier` + utilized. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. store: Whether or not to store the output of this chat completion request for use in our [model distillation](https://platform.openai.com/docs/guides/distillation) @@ -1869,6 +1937,10 @@ async def create( and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + web_search_options: This tool searches the web for relevant results to use in a response. Learn more + about the + [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). 
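This patch widens `reasoning_effort` from a hard-coded `Literal["low", "medium", "high"]` to the shared `ReasoningEffort` type and re-scopes its docstring to o-series models. A sketch of passing it on a chat completion, assuming `o3-mini` as the reasoning model and `"low"` as a valid effort value (both taken from the literals visible in the removed annotation):

```python
# Sketch: constrain reasoning effort on an o-series reasoning model.
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="o3-mini",         # assumed o-series model
    reasoning_effort="low",  # one of the effort levels from the former Literal type
    messages=[
        {"role": "user", "content": "How many prime numbers are there between 10 and 50?"}
    ],
)

print(completion.choices[0].message.content)
```

The same keyword applies to the Assistants and Runs `create`/`update` methods changed earlier in this patch, since they now share the `ReasoningEffort` type.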
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -1894,16 +1966,16 @@ async def create( max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, @@ -1913,6 +1985,7 @@ async def create( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1954,6 +2027,7 @@ async def create( "top_logprobs": top_logprobs, "top_p": top_p, "user": user, + "web_search_options": web_search_options, }, completion_create_params.CompletionCreateParams, ), @@ -1978,7 +2052,7 @@ async def retrieve( ) -> ChatCompletion: """Get a stored chat completion. - Only chat completions that have been created with + Only Chat Completions that have been created with the `store` parameter set to `true` will be returned. Args: @@ -2014,7 +2088,7 @@ async def update( ) -> ChatCompletion: """Modify a stored chat completion. - Only chat completions that have been created + Only Chat Completions that have been created with the `store` parameter set to `true` can be modified. Currently, the only supported modification is to update the `metadata` field. @@ -2060,24 +2134,24 @@ def list( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncPaginator[ChatCompletion, AsyncCursorPage[ChatCompletion]]: - """List stored chat completions. + """List stored Chat Completions. - Only chat completions that have been stored with + Only Chat Completions that have been stored with the `store` parameter set to `true` will be returned. Args: after: Identifier for the last chat completion from the previous pagination request. - limit: Number of chat completions to retrieve. + limit: Number of Chat Completions to retrieve. metadata: - A list of metadata keys to filter the chat completions by. Example: + A list of metadata keys to filter the Chat Completions by. Example: `metadata[key1]=value1&metadata[key2]=value2` - model: The model used to generate the chat completions. 
+ model: The model used to generate the Chat Completions. - order: Sort order for chat completions by timestamp. Use `asc` for ascending order or + order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`. extra_headers: Send extra headers @@ -2123,7 +2197,7 @@ async def delete( ) -> ChatCompletionDeleted: """Delete a stored chat completion. - Only chat completions that have been created + Only Chat Completions that have been created with the `store` parameter set to `true` can be deleted. Args: diff --git a/src/openai/resources/chat/completions/messages.py b/src/openai/resources/chat/completions/messages.py index b71d670927..fac15fba8b 100644 --- a/src/openai/resources/chat/completions/messages.py +++ b/src/openai/resources/chat/completions/messages.py @@ -56,7 +56,7 @@ def list( ) -> SyncCursorPage[ChatCompletionStoreMessage]: """Get the messages in a stored chat completion. - Only chat completions that have + Only Chat Completions that have been created with the `store` parameter set to `true` will be returned. Args: @@ -134,7 +134,7 @@ def list( ) -> AsyncPaginator[ChatCompletionStoreMessage, AsyncCursorPage[ChatCompletionStoreMessage]]: """Get the messages in a stored chat completion. - Only chat completions that have + Only Chat Completions that have been created with the `store` parameter set to `true` will be returned. Args: diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py index f76f70e0bc..4bc263511e 100644 --- a/src/openai/resources/files.py +++ b/src/openai/resources/files.py @@ -95,14 +95,10 @@ def create( Args: file: The File object (not file name) to be uploaded. - purpose: The intended purpose of the uploaded file. - - Use "assistants" for - [Assistants](https://platform.openai.com/docs/api-reference/assistants) and - [Message](https://platform.openai.com/docs/api-reference/messages) files, - "vision" for Assistants image file inputs, "batch" for - [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for - [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). + purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the + Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + Flexible file type for any purpose - `evals`: Used for eval data sets extra_headers: Send extra headers @@ -388,14 +384,10 @@ async def create( Args: file: The File object (not file name) to be uploaded. - purpose: The intended purpose of the uploaded file. - - Use "assistants" for - [Assistants](https://platform.openai.com/docs/api-reference/assistants) and - [Message](https://platform.openai.com/docs/api-reference/messages) files, - "vision" for Assistants image file inputs, "batch" for - [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for - [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). + purpose: The intended purpose of the uploaded file. 
One of: - `assistants`: Used in the + Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + Flexible file type for any purpose - `evals`: Used for eval data sets extra_headers: Send extra headers diff --git a/src/openai/resources/responses/__init__.py b/src/openai/resources/responses/__init__.py new file mode 100644 index 0000000000..ad19218b01 --- /dev/null +++ b/src/openai/resources/responses/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .responses import ( + Responses, + AsyncResponses, + ResponsesWithRawResponse, + AsyncResponsesWithRawResponse, + ResponsesWithStreamingResponse, + AsyncResponsesWithStreamingResponse, +) +from .input_items import ( + InputItems, + AsyncInputItems, + InputItemsWithRawResponse, + AsyncInputItemsWithRawResponse, + InputItemsWithStreamingResponse, + AsyncInputItemsWithStreamingResponse, +) + +__all__ = [ + "InputItems", + "AsyncInputItems", + "InputItemsWithRawResponse", + "AsyncInputItemsWithRawResponse", + "InputItemsWithStreamingResponse", + "AsyncInputItemsWithStreamingResponse", + "Responses", + "AsyncResponses", + "ResponsesWithRawResponse", + "AsyncResponsesWithRawResponse", + "ResponsesWithStreamingResponse", + "AsyncResponsesWithStreamingResponse", +] diff --git a/src/openai/resources/responses/input_items.py b/src/openai/resources/responses/input_items.py new file mode 100644 index 0000000000..10e7d545dc --- /dev/null +++ b/src/openai/resources/responses/input_items.py @@ -0,0 +1,223 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Any, cast +from typing_extensions import Literal + +import httpx + +from ... import _legacy_response +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ...pagination import SyncCursorPage, AsyncCursorPage +from ..._base_client import AsyncPaginator, make_request_options +from ...types.responses import input_item_list_params +from ...types.responses.response_item_list import Data + +__all__ = ["InputItems", "AsyncInputItems"] + + +class InputItems(SyncAPIResource): + @cached_property + def with_raw_response(self) -> InputItemsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return InputItemsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> InputItemsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return InputItemsWithStreamingResponse(self) + + def list( + self, + response_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SyncCursorPage[Data]: + """ + Returns a list of input items for a given response. + + Args: + after: An item ID to list items after, used in pagination. + + before: An item ID to list items before, used in pagination. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: The order to return the input items in. Default is `asc`. + + - `asc`: Return the input items in ascending order. + - `desc`: Return the input items in descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not response_id: + raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") + return self._get_api_list( + f"/responses/{response_id}/input_items", + page=SyncCursorPage[Data], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + input_item_list_params.InputItemListParams, + ), + ), + model=cast(Any, Data), # Union types cannot be passed in as arguments in the type system + ) + + +class AsyncInputItems(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncInputItemsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncInputItemsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncInputItemsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncInputItemsWithStreamingResponse(self) + + def list( + self, + response_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncPaginator[Data, AsyncCursorPage[Data]]: + """ + Returns a list of input items for a given response. + + Args: + after: An item ID to list items after, used in pagination. + + before: An item ID to list items before, used in pagination. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: The order to return the input items in. Default is `asc`. 
+ + - `asc`: Return the input items in ascending order. + - `desc`: Return the input items in descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not response_id: + raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") + return self._get_api_list( + f"/responses/{response_id}/input_items", + page=AsyncCursorPage[Data], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + input_item_list_params.InputItemListParams, + ), + ), + model=cast(Any, Data), # Union types cannot be passed in as arguments in the type system + ) + + +class InputItemsWithRawResponse: + def __init__(self, input_items: InputItems) -> None: + self._input_items = input_items + + self.list = _legacy_response.to_raw_response_wrapper( + input_items.list, + ) + + +class AsyncInputItemsWithRawResponse: + def __init__(self, input_items: AsyncInputItems) -> None: + self._input_items = input_items + + self.list = _legacy_response.async_to_raw_response_wrapper( + input_items.list, + ) + + +class InputItemsWithStreamingResponse: + def __init__(self, input_items: InputItems) -> None: + self._input_items = input_items + + self.list = to_streamed_response_wrapper( + input_items.list, + ) + + +class AsyncInputItemsWithStreamingResponse: + def __init__(self, input_items: AsyncInputItems) -> None: + self._input_items = input_items + + self.list = async_to_streamed_response_wrapper( + input_items.list, + ) diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py new file mode 100644 index 0000000000..843e4972a9 --- /dev/null +++ b/src/openai/resources/responses/responses.py @@ -0,0 +1,1433 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Iterable, Optional +from typing_extensions import Literal, overload + +import httpx + +from ... 
import _legacy_response +from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ..._utils import ( + required_args, + maybe_transform, + async_maybe_transform, +) +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from .input_items import ( + InputItems, + AsyncInputItems, + InputItemsWithRawResponse, + AsyncInputItemsWithRawResponse, + InputItemsWithStreamingResponse, + AsyncInputItemsWithStreamingResponse, +) +from ..._streaming import Stream, AsyncStream +from ..._base_client import make_request_options +from ...types.responses import response_create_params, response_retrieve_params +from ...types.shared.chat_model import ChatModel +from ...types.responses.response import Response +from ...types.responses.tool_param import ToolParam +from ...types.shared_params.metadata import Metadata +from ...types.shared_params.reasoning import Reasoning +from ...types.responses.response_includable import ResponseIncludable +from ...types.responses.response_input_param import ResponseInputParam +from ...types.responses.response_stream_event import ResponseStreamEvent +from ...types.responses.response_text_config_param import ResponseTextConfigParam + +__all__ = ["Responses", "AsyncResponses"] + + +class Responses(SyncAPIResource): + @cached_property + def input_items(self) -> InputItems: + return InputItems(self._client) + + @cached_property + def with_raw_response(self) -> ResponsesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return ResponsesWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ResponsesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return ResponsesWithStreamingResponse(self) + + @overload + def create( + self, + *, + input: Union[str, ResponseInputParam], + model: Union[str, ChatModel], + include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, + previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Response: + """Creates a model response. + + Provide + [text](https://platform.openai.com/docs/guides/text) or + [image](https://platform.openai.com/docs/guides/images) inputs to generate + [text](https://platform.openai.com/docs/guides/text) or + [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + the model call your own + [custom code](https://platform.openai.com/docs/guides/function-calling) or use + built-in [tools](https://platform.openai.com/docs/guides/tools) like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + your own data as input for the model's response. + + Args: + input: Text, image, or file inputs to the model, used to generate a response. + + Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Image inputs](https://platform.openai.com/docs/guides/images) + - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + - [Function calling](https://platform.openai.com/docs/guides/function-calling) + + model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + + include: Specify additional output data to include in the model response. Currently + supported values are: + + - `file_search_call.results`: Include the search results of the file search tool + call. + - `message.input_image.image_url`: Include image urls from the input message. + - `computer_call_output.output.image_url`: Include image urls from the computer + call output. + + instructions: Inserts a system (or developer) message as the first item in the model's + context. + + When using along with `previous_response_id`, the instructions from a previous + response will be not be carried over to the next response. This makes it simple + to swap out system (or developer) messages in new responses. + + max_output_tokens: An upper bound for the number of tokens that can be generated for a response, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + parallel_tool_calls: Whether to allow the model to run tool calls in parallel. + + previous_response_id: The unique ID of the previous response to the model. Use this to create + multi-turn conversations. Learn more about + [conversation state](https://platform.openai.com/docs/guides/conversation-state). + + reasoning: **o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + + store: Whether to store the generated model response for later retrieval via API. 
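A minimal sketch of the `previous_response_id` and `store` behavior described above, chaining two turns of a conversation; the model name, prompts, and use of the `output_text` convenience accessor are assumptions for illustration:

import openai

client = openai.OpenAI()

first = client.responses.create(
    model="gpt-4o",  # assumed model name
    input="Pick a city at random and share one fact about it.",
)
follow_up = client.responses.create(
    model="gpt-4o",
    previous_response_id=first.id,  # carry the earlier turn's context forward
    input="Which country is that city in?",
)
print(follow_up.output_text)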
+ + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + for more information. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + + tool_choice: How the model should select which tool (or tools) to use when generating a + response. See the `tools` parameter to see how to specify which tools the model + can call. + + tools: An array of tools the model may call while generating a response. You can + specify which tool to use by setting the `tool_choice` parameter. + + The two categories of tools you can provide the model are: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search). + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). + - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code. Learn more about + [function calling](https://platform.openai.com/docs/guides/function-calling). + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + truncation: The truncation strategy to use for the model response. + + - `auto`: If the context of this response and previous ones exceeds the model's + context window size, the model will truncate the response to fit the context + window by dropping input items in the middle of the conversation. + - `disabled` (default): If a model response will exceed the context window size + for a model, the request will fail with a 400 error. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
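As a usage sketch for the non-streaming overload above, the following calls `responses.create` with one of the built-in tools mentioned in the docstring; the model name and the `web_search_preview` tool type are assumptions for illustration:

import openai

client = openai.OpenAI()

response = client.responses.create(
    model="gpt-4o",  # assumed model name
    tools=[{"type": "web_search_preview"}],  # assumed built-in tool identifier
    input="Summarize one recent piece of news about renewable energy.",
)
print(response.output_text)  # convenience accessor for the generated text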
+ + @overload + def create( + self, + *, + input: Union[str, ResponseInputParam], + model: Union[str, ChatModel], + stream: Literal[True], + include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, + previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Stream[ResponseStreamEvent]: + """Creates a model response. + + Provide + [text](https://platform.openai.com/docs/guides/text) or + [image](https://platform.openai.com/docs/guides/images) inputs to generate + [text](https://platform.openai.com/docs/guides/text) or + [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + the model call your own + [custom code](https://platform.openai.com/docs/guides/function-calling) or use + built-in [tools](https://platform.openai.com/docs/guides/tools) like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + your own data as input for the model's response. + + Args: + input: Text, image, or file inputs to the model, used to generate a response. + + Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Image inputs](https://platform.openai.com/docs/guides/images) + - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + - [Function calling](https://platform.openai.com/docs/guides/function-calling) + + model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + for more information. + + include: Specify additional output data to include in the model response. Currently + supported values are: + + - `file_search_call.results`: Include the search results of the file search tool + call. 
+ - `message.input_image.image_url`: Include image urls from the input message. + - `computer_call_output.output.image_url`: Include image urls from the computer + call output. + + instructions: Inserts a system (or developer) message as the first item in the model's + context. + + When using along with `previous_response_id`, the instructions from a previous + response will be not be carried over to the next response. This makes it simple + to swap out system (or developer) messages in new responses. + + max_output_tokens: An upper bound for the number of tokens that can be generated for a response, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + parallel_tool_calls: Whether to allow the model to run tool calls in parallel. + + previous_response_id: The unique ID of the previous response to the model. Use this to create + multi-turn conversations. Learn more about + [conversation state](https://platform.openai.com/docs/guides/conversation-state). + + reasoning: **o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + + store: Whether to store the generated model response for later retrieval via API. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + + tool_choice: How the model should select which tool (or tools) to use when generating a + response. See the `tools` parameter to see how to specify which tools the model + can call. + + tools: An array of tools the model may call while generating a response. You can + specify which tool to use by setting the `tool_choice` parameter. + + The two categories of tools you can provide the model are: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search). + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). + - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code. Learn more about + [function calling](https://platform.openai.com/docs/guides/function-calling). + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + truncation: The truncation strategy to use for the model response. 
+ + - `auto`: If the context of this response and previous ones exceeds the model's + context window size, the model will truncate the response to fit the context + window by dropping input items in the middle of the conversation. + - `disabled` (default): If a model response will exceed the context window size + for a model, the request will fail with a 400 error. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def create( + self, + *, + input: Union[str, ResponseInputParam], + model: Union[str, ChatModel], + stream: bool, + include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, + previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Response | Stream[ResponseStreamEvent]: + """Creates a model response. + + Provide + [text](https://platform.openai.com/docs/guides/text) or + [image](https://platform.openai.com/docs/guides/images) inputs to generate + [text](https://platform.openai.com/docs/guides/text) or + [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + the model call your own + [custom code](https://platform.openai.com/docs/guides/function-calling) or use + built-in [tools](https://platform.openai.com/docs/guides/tools) like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + your own data as input for the model's response. + + Args: + input: Text, image, or file inputs to the model, used to generate a response. + + Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Image inputs](https://platform.openai.com/docs/guides/images) + - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + - [Function calling](https://platform.openai.com/docs/guides/function-calling) + + model: Model ID used to generate the response, like `gpt-4o` or `o1`. 
OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + for more information. + + include: Specify additional output data to include in the model response. Currently + supported values are: + + - `file_search_call.results`: Include the search results of the file search tool + call. + - `message.input_image.image_url`: Include image urls from the input message. + - `computer_call_output.output.image_url`: Include image urls from the computer + call output. + + instructions: Inserts a system (or developer) message as the first item in the model's + context. + + When using along with `previous_response_id`, the instructions from a previous + response will be not be carried over to the next response. This makes it simple + to swap out system (or developer) messages in new responses. + + max_output_tokens: An upper bound for the number of tokens that can be generated for a response, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + parallel_tool_calls: Whether to allow the model to run tool calls in parallel. + + previous_response_id: The unique ID of the previous response to the model. Use this to create + multi-turn conversations. Learn more about + [conversation state](https://platform.openai.com/docs/guides/conversation-state). + + reasoning: **o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + + store: Whether to store the generated model response for later retrieval via API. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + + tool_choice: How the model should select which tool (or tools) to use when generating a + response. See the `tools` parameter to see how to specify which tools the model + can call. + + tools: An array of tools the model may call while generating a response. You can + specify which tool to use by setting the `tool_choice` parameter. 
+ + The two categories of tools you can provide the model are: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search). + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). + - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code. Learn more about + [function calling](https://platform.openai.com/docs/guides/function-calling). + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + truncation: The truncation strategy to use for the model response. + + - `auto`: If the context of this response and previous ones exceeds the model's + context window size, the model will truncate the response to fit the context + window by dropping input items in the middle of the conversation. + - `disabled` (default): If a model response will exceed the context window size + for a model, the request will fail with a 400 error. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["input", "model"], ["input", "model", "stream"]) + def create( + self, + *, + input: Union[str, ResponseInputParam], + model: Union[str, ChatModel], + include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, + previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Response | Stream[ResponseStreamEvent]: + return self._post( + "/responses", + body=maybe_transform( + { + "input": input, + "model": model, + "include": include, + "instructions": instructions, + "max_output_tokens": max_output_tokens, + "metadata": metadata, + "parallel_tool_calls": parallel_tool_calls, + "previous_response_id": previous_response_id, + "reasoning": reasoning, + "store": store, + "stream": stream, + "temperature": temperature, + "text": text, + "tool_choice": tool_choice, + "tools": tools, + "top_p": top_p, + "truncation": truncation, + "user": user, + }, + response_create_params.ResponseCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Response, + stream=stream or False, + stream_cls=Stream[ResponseStreamEvent], + ) + + def retrieve( + self, + response_id: str, + *, + include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Response: + """ + Retrieves a model response with the given ID. + + Args: + include: Additional fields to include in the response. See the `include` parameter for + Response creation above for more information. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not response_id: + raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") + return self._get( + f"/responses/{response_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"include": include}, response_retrieve_params.ResponseRetrieveParams), + ), + cast_to=Response, + ) + + def delete( + self, + response_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + Deletes a model response with the given ID. 
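A short sketch of the `retrieve` and `delete` calls defined above; the response ID is a placeholder, and the `status` field access is an assumption for illustration:

import openai

client = openai.OpenAI()

resp = client.responses.retrieve("resp_123")  # placeholder ID
print(resp.status)

client.responses.delete("resp_123")  # returns None on success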
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not response_id: + raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/responses/{response_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncResponses(AsyncAPIResource): + @cached_property + def input_items(self) -> AsyncInputItems: + return AsyncInputItems(self._client) + + @cached_property + def with_raw_response(self) -> AsyncResponsesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncResponsesWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncResponsesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncResponsesWithStreamingResponse(self) + + @overload + async def create( + self, + *, + input: Union[str, ResponseInputParam], + model: Union[str, ChatModel], + include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, + previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Response: + """Creates a model response. + + Provide + [text](https://platform.openai.com/docs/guides/text) or + [image](https://platform.openai.com/docs/guides/images) inputs to generate + [text](https://platform.openai.com/docs/guides/text) or + [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. 
Have + the model call your own + [custom code](https://platform.openai.com/docs/guides/function-calling) or use + built-in [tools](https://platform.openai.com/docs/guides/tools) like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + your own data as input for the model's response. + + Args: + input: Text, image, or file inputs to the model, used to generate a response. + + Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Image inputs](https://platform.openai.com/docs/guides/images) + - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + - [Function calling](https://platform.openai.com/docs/guides/function-calling) + + model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + + include: Specify additional output data to include in the model response. Currently + supported values are: + + - `file_search_call.results`: Include the search results of the file search tool + call. + - `message.input_image.image_url`: Include image urls from the input message. + - `computer_call_output.output.image_url`: Include image urls from the computer + call output. + + instructions: Inserts a system (or developer) message as the first item in the model's + context. + + When using along with `previous_response_id`, the instructions from a previous + response will be not be carried over to the next response. This makes it simple + to swap out system (or developer) messages in new responses. + + max_output_tokens: An upper bound for the number of tokens that can be generated for a response, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + parallel_tool_calls: Whether to allow the model to run tool calls in parallel. + + previous_response_id: The unique ID of the previous response to the model. Use this to create + multi-turn conversations. Learn more about + [conversation state](https://platform.openai.com/docs/guides/conversation-state). + + reasoning: **o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + + store: Whether to store the generated model response for later retrieval via API. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + for more information. + + temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + + tool_choice: How the model should select which tool (or tools) to use when generating a + response. See the `tools` parameter to see how to specify which tools the model + can call. + + tools: An array of tools the model may call while generating a response. You can + specify which tool to use by setting the `tool_choice` parameter. + + The two categories of tools you can provide the model are: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search). + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). + - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code. Learn more about + [function calling](https://platform.openai.com/docs/guides/function-calling). + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + truncation: The truncation strategy to use for the model response. + + - `auto`: If the context of this response and previous ones exceeds the model's + context window size, the model will truncate the response to fit the context + window by dropping input items in the middle of the conversation. + - `disabled` (default): If a model response will exceed the context window size + for a model, the request will fail with a 400 error. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
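To complement the async overloads, a minimal sketch of streaming a response with `AsyncOpenAI`; the model name, prompt, and the `response.output_text.delta` event type are assumptions about the emitted `ResponseStreamEvent` variants:

import asyncio

import openai


async def main() -> None:
    client = openai.AsyncOpenAI()
    stream = await client.responses.create(
        model="gpt-4o",  # assumed model name
        input="Write a haiku about type checkers.",
        stream=True,  # yields ResponseStreamEvent objects as they are generated
    )
    async for event in stream:
        if event.type == "response.output_text.delta":  # assumed event type name
            print(event.delta, end="", flush=True)
    print()


asyncio.run(main())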
+ + @overload + async def create( + self, + *, + input: Union[str, ResponseInputParam], + model: Union[str, ChatModel], + stream: Literal[True], + include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, + previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncStream[ResponseStreamEvent]: + """Creates a model response. + + Provide + [text](https://platform.openai.com/docs/guides/text) or + [image](https://platform.openai.com/docs/guides/images) inputs to generate + [text](https://platform.openai.com/docs/guides/text) or + [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + the model call your own + [custom code](https://platform.openai.com/docs/guides/function-calling) or use + built-in [tools](https://platform.openai.com/docs/guides/tools) like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + your own data as input for the model's response. + + Args: + input: Text, image, or file inputs to the model, used to generate a response. + + Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Image inputs](https://platform.openai.com/docs/guides/images) + - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + - [Function calling](https://platform.openai.com/docs/guides/function-calling) + + model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + for more information. + + include: Specify additional output data to include in the model response. Currently + supported values are: + + - `file_search_call.results`: Include the search results of the file search tool + call. 
+ + - `message.input_image.image_url`: Include image urls from the input message. + - `computer_call_output.output.image_url`: Include image urls from the computer + call output. + + instructions: Inserts a system (or developer) message as the first item in the model's + context. + + When used along with `previous_response_id`, the instructions from a previous + response will not be carried over to the next response. This makes it simple + to swap out system (or developer) messages in new responses. + + max_output_tokens: An upper bound for the number of tokens that can be generated for a response, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + parallel_tool_calls: Whether to allow the model to run tool calls in parallel. + + previous_response_id: The unique ID of the previous response to the model. Use this to create + multi-turn conversations. Learn more about + [conversation state](https://platform.openai.com/docs/guides/conversation-state). + + reasoning: **o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + + store: Whether to store the generated model response for later retrieval via API. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + + tool_choice: How the model should select which tool (or tools) to use when generating a + response. See the `tools` parameter to see how to specify which tools the model + can call. + + tools: An array of tools the model may call while generating a response. You can + specify which tool to use by setting the `tool_choice` parameter. + + The two categories of tools you can provide the model are: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search). + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). + - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code. Learn more about + [function calling](https://platform.openai.com/docs/guides/function-calling). + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + truncation: The truncation strategy to use for the model response.
+ + - `auto`: If the context of this response and previous ones exceeds the model's + context window size, the model will truncate the response to fit the context + window by dropping input items in the middle of the conversation. + - `disabled` (default): If a model response will exceed the context window size + for a model, the request will fail with a 400 error. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + input: Union[str, ResponseInputParam], + model: Union[str, ChatModel], + stream: bool, + include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, + previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Response | AsyncStream[ResponseStreamEvent]: + """Creates a model response. + + Provide + [text](https://platform.openai.com/docs/guides/text) or + [image](https://platform.openai.com/docs/guides/images) inputs to generate + [text](https://platform.openai.com/docs/guides/text) or + [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + the model call your own + [custom code](https://platform.openai.com/docs/guides/function-calling) or use + built-in [tools](https://platform.openai.com/docs/guides/tools) like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + your own data as input for the model's response. + + Args: + input: Text, image, or file inputs to the model, used to generate a response. + + Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Image inputs](https://platform.openai.com/docs/guides/images) + - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + - [Function calling](https://platform.openai.com/docs/guides/function-calling) + + model: Model ID used to generate the response, like `gpt-4o` or `o1`. 
OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + for more information. + + include: Specify additional output data to include in the model response. Currently + supported values are: + + - `file_search_call.results`: Include the search results of the file search tool + call. + - `message.input_image.image_url`: Include image urls from the input message. + - `computer_call_output.output.image_url`: Include image urls from the computer + call output. + + instructions: Inserts a system (or developer) message as the first item in the model's + context. + + When used along with `previous_response_id`, the instructions from a previous + response will not be carried over to the next response. This makes it simple + to swap out system (or developer) messages in new responses. + + max_output_tokens: An upper bound for the number of tokens that can be generated for a response, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + parallel_tool_calls: Whether to allow the model to run tool calls in parallel. + + previous_response_id: The unique ID of the previous response to the model. Use this to create + multi-turn conversations. Learn more about + [conversation state](https://platform.openai.com/docs/guides/conversation-state). + + reasoning: **o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + + store: Whether to store the generated model response for later retrieval via API. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + + tool_choice: How the model should select which tool (or tools) to use when generating a + response. See the `tools` parameter to see how to specify which tools the model + can call. + + tools: An array of tools the model may call while generating a response. You can + specify which tool to use by setting the `tool_choice` parameter.
+ + The two categories of tools you can provide the model are: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search). + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). + - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code. Learn more about + [function calling](https://platform.openai.com/docs/guides/function-calling). + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + truncation: The truncation strategy to use for the model response. + + - `auto`: If the context of this response and previous ones exceeds the model's + context window size, the model will truncate the response to fit the context + window by dropping input items in the middle of the conversation. + - `disabled` (default): If a model response will exceed the context window size + for a model, the request will fail with a 400 error. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["input", "model"], ["input", "model", "stream"]) + async def create( + self, + *, + input: Union[str, ResponseInputParam], + model: Union[str, ChatModel], + include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, + previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Response | AsyncStream[ResponseStreamEvent]: + return await self._post( + "/responses", + body=await async_maybe_transform( + { + "input": input, + "model": model, + "include": include, + "instructions": instructions, + "max_output_tokens": max_output_tokens, + "metadata": metadata, + "parallel_tool_calls": parallel_tool_calls, + "previous_response_id": previous_response_id, + "reasoning": reasoning, + "store": store, + "stream": stream, + "temperature": temperature, + "text": text, + "tool_choice": tool_choice, + "tools": tools, + "top_p": top_p, + "truncation": truncation, + "user": user, + }, + response_create_params.ResponseCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Response, + stream=stream or False, + stream_cls=AsyncStream[ResponseStreamEvent], + ) + + async def retrieve( + self, + response_id: str, + *, + include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Response: + """ + Retrieves a model response with the given ID. + + Args: + include: Additional fields to include in the response. See the `include` parameter for + Response creation above for more information. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not response_id: + raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") + return await self._get( + f"/responses/{response_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + {"include": include}, response_retrieve_params.ResponseRetrieveParams + ), + ), + cast_to=Response, + ) + + async def delete( + self, + response_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + Deletes a model response with the given ID. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not response_id: + raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/responses/{response_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class ResponsesWithRawResponse: + def __init__(self, responses: Responses) -> None: + self._responses = responses + + self.create = _legacy_response.to_raw_response_wrapper( + responses.create, + ) + self.retrieve = _legacy_response.to_raw_response_wrapper( + responses.retrieve, + ) + self.delete = _legacy_response.to_raw_response_wrapper( + responses.delete, + ) + + @cached_property + def input_items(self) -> InputItemsWithRawResponse: + return InputItemsWithRawResponse(self._responses.input_items) + + +class AsyncResponsesWithRawResponse: + def __init__(self, responses: AsyncResponses) -> None: + self._responses = responses + + self.create = _legacy_response.async_to_raw_response_wrapper( + responses.create, + ) + self.retrieve = _legacy_response.async_to_raw_response_wrapper( + responses.retrieve, + ) + self.delete = _legacy_response.async_to_raw_response_wrapper( + responses.delete, + ) + + @cached_property + def input_items(self) -> AsyncInputItemsWithRawResponse: + return AsyncInputItemsWithRawResponse(self._responses.input_items) + + +class ResponsesWithStreamingResponse: + def __init__(self, responses: Responses) -> None: + self._responses = responses + + self.create = to_streamed_response_wrapper( + responses.create, + ) + self.retrieve = to_streamed_response_wrapper( + responses.retrieve, + ) + self.delete = to_streamed_response_wrapper( + responses.delete, + ) + + @cached_property + def input_items(self) -> InputItemsWithStreamingResponse: + return InputItemsWithStreamingResponse(self._responses.input_items) + + +class AsyncResponsesWithStreamingResponse: + def __init__(self, responses: AsyncResponses) -> None: + self._responses = responses + + self.create = async_to_streamed_response_wrapper( + responses.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + responses.retrieve, + ) + self.delete = async_to_streamed_response_wrapper( + responses.delete, + ) + + @cached_property + def input_items(self) -> AsyncInputItemsWithStreamingResponse: + return AsyncInputItemsWithStreamingResponse(self._responses.input_items) diff --git a/src/openai/resources/uploads/uploads.py b/src/openai/resources/uploads/uploads.py index 297ea98c45..c897c47f33 100644 --- a/src/openai/resources/uploads/uploads.py +++ b/src/openai/resources/uploads/uploads.py @@ -82,10 +82,9 @@ def create( contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object. - For certain `purpose`s, the correct `mime_type` must be specified. Please refer - to documentation for the supported MIME types for your use case: - - - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files) + For certain `purpose` values, the correct `mime_type` must be specified. 
Please + refer to documentation for the + [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files). For guidance on the proper filename extensions for each purpose, please follow the documentation on @@ -276,10 +275,9 @@ async def create( contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object. - For certain `purpose`s, the correct `mime_type` must be specified. Please refer - to documentation for the supported MIME types for your use case: - - - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files) + For certain `purpose` values, the correct `mime_type` must be specified. Please + refer to documentation for the + [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files). For guidance on the proper filename extensions for each purpose, please follow the documentation on diff --git a/src/openai/resources/beta/vector_stores/__init__.py b/src/openai/resources/vector_stores/__init__.py similarity index 100% rename from src/openai/resources/beta/vector_stores/__init__.py rename to src/openai/resources/vector_stores/__init__.py diff --git a/src/openai/resources/beta/vector_stores/file_batches.py b/src/openai/resources/vector_stores/file_batches.py similarity index 91% rename from src/openai/resources/beta/vector_stores/file_batches.py rename to src/openai/resources/vector_stores/file_batches.py index 279e59c135..a400d30a3e 100644 --- a/src/openai/resources/beta/vector_stores/file_batches.py +++ b/src/openai/resources/vector_stores/file_batches.py @@ -2,27 +2,27 @@ from __future__ import annotations -from typing import List +from typing import Dict, List, Union, Optional from typing_extensions import Literal import httpx -from .... import _legacy_response -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import ( +from ... 
import _legacy_response +from ...types import FileChunkingStrategyParam +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import ( maybe_transform, async_maybe_transform, ) -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper -from ....pagination import SyncCursorPage, AsyncCursorPage -from ....types.beta import FileChunkingStrategyParam -from ...._base_client import AsyncPaginator, make_request_options -from ....types.beta.vector_stores import file_batch_create_params, file_batch_list_files_params -from ....types.beta.file_chunking_strategy_param import FileChunkingStrategyParam -from ....types.beta.vector_stores.vector_store_file import VectorStoreFile -from ....types.beta.vector_stores.vector_store_file_batch import VectorStoreFileBatch +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ...pagination import SyncCursorPage, AsyncCursorPage +from ..._base_client import AsyncPaginator, make_request_options +from ...types.vector_stores import file_batch_create_params, file_batch_list_files_params +from ...types.file_chunking_strategy_param import FileChunkingStrategyParam +from ...types.vector_stores.vector_store_file import VectorStoreFile +from ...types.vector_stores.vector_store_file_batch import VectorStoreFileBatch __all__ = ["FileBatches", "AsyncFileBatches"] @@ -52,6 +52,7 @@ def create( vector_store_id: str, *, file_ids: List[str], + attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -68,6 +69,12 @@ def create( the vector store should use. Useful for tools like `file_search` that can access files. + attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. Keys are strings with a maximum + length of 64 characters. Values are strings with a maximum length of 512 + characters, booleans, or numbers. + chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. @@ -87,6 +94,7 @@ def create( body=maybe_transform( { "file_ids": file_ids, + "attributes": attributes, "chunking_strategy": chunking_strategy, }, file_batch_create_params.FileBatchCreateParams, @@ -273,6 +281,7 @@ async def create( vector_store_id: str, *, file_ids: List[str], + attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -289,6 +298,12 @@ async def create( the vector store should use. Useful for tools like `file_search` that can access files. + attributes: Set of 16 key-value pairs that can be attached to an object. 
This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. Keys are strings with a maximum + length of 64 characters. Values are strings with a maximum length of 512 + characters, booleans, or numbers. + chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. @@ -308,6 +323,7 @@ async def create( body=await async_maybe_transform( { "file_ids": file_ids, + "attributes": attributes, "chunking_strategy": chunking_strategy, }, file_batch_create_params.FileBatchCreateParams, diff --git a/src/openai/resources/beta/vector_stores/files.py b/src/openai/resources/vector_stores/files.py similarity index 67% rename from src/openai/resources/beta/vector_stores/files.py rename to src/openai/resources/vector_stores/files.py index 51545229c4..1435e72fd9 100644 --- a/src/openai/resources/beta/vector_stores/files.py +++ b/src/openai/resources/vector_stores/files.py @@ -2,26 +2,28 @@ from __future__ import annotations +from typing import Dict, Union, Optional from typing_extensions import Literal import httpx -from .... import _legacy_response -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import ( +from ... import _legacy_response +from ...types import FileChunkingStrategyParam +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import ( maybe_transform, async_maybe_transform, ) -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper -from ....pagination import SyncCursorPage, AsyncCursorPage -from ....types.beta import FileChunkingStrategyParam -from ...._base_client import AsyncPaginator, make_request_options -from ....types.beta.vector_stores import file_list_params, file_create_params -from ....types.beta.file_chunking_strategy_param import FileChunkingStrategyParam -from ....types.beta.vector_stores.vector_store_file import VectorStoreFile -from ....types.beta.vector_stores.vector_store_file_deleted import VectorStoreFileDeleted +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ...pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage +from ..._base_client import AsyncPaginator, make_request_options +from ...types.vector_stores import file_list_params, file_create_params, file_update_params +from ...types.file_chunking_strategy_param import FileChunkingStrategyParam +from ...types.vector_stores.vector_store_file import VectorStoreFile +from ...types.vector_stores.file_content_response import FileContentResponse +from ...types.vector_stores.vector_store_file_deleted import VectorStoreFileDeleted __all__ = ["Files", "AsyncFiles"] @@ -51,6 +53,7 @@ def create( vector_store_id: str, *, file_id: str, + attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -69,6 +72,12 @@ def create( vector store should use. 
Useful for tools like `file_search` that can access files. + attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. Keys are strings with a maximum + length of 64 characters. Values are strings with a maximum length of 512 + characters, booleans, or numbers. + chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. @@ -88,6 +97,7 @@ def create( body=maybe_transform( { "file_id": file_id, + "attributes": attributes, "chunking_strategy": chunking_strategy, }, file_create_params.FileCreateParams, @@ -135,6 +145,51 @@ def retrieve( cast_to=VectorStoreFile, ) + def update( + self, + file_id: str, + *, + vector_store_id: str, + attributes: Optional[Dict[str, Union[str, float, bool]]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFile: + """ + Update attributes on a vector store file. + + Args: + attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. Keys are strings with a maximum + length of 64 characters. Values are strings with a maximum length of 512 + characters, booleans, or numbers. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return self._post( + f"/vector_stores/{vector_store_id}/files/{file_id}", + body=maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFile, + ) + def list( self, vector_store_id: str, @@ -247,6 +302,44 @@ def delete( cast_to=VectorStoreFileDeleted, ) + def content( + self, + file_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SyncPage[FileContentResponse]: + """ + Retrieve the parsed contents of a vector store file. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return self._get_api_list( + f"/vector_stores/{vector_store_id}/files/{file_id}/content", + page=SyncPage[FileContentResponse], + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + model=FileContentResponse, + ) + class AsyncFiles(AsyncAPIResource): @cached_property @@ -273,6 +366,7 @@ async def create( vector_store_id: str, *, file_id: str, + attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -291,6 +385,12 @@ async def create( vector store should use. Useful for tools like `file_search` that can access files. + attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. Keys are strings with a maximum + length of 64 characters. Values are strings with a maximum length of 512 + characters, booleans, or numbers. + chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. @@ -310,6 +410,7 @@ async def create( body=await async_maybe_transform( { "file_id": file_id, + "attributes": attributes, "chunking_strategy": chunking_strategy, }, file_create_params.FileCreateParams, @@ -357,6 +458,51 @@ async def retrieve( cast_to=VectorStoreFile, ) + async def update( + self, + file_id: str, + *, + vector_store_id: str, + attributes: Optional[Dict[str, Union[str, float, bool]]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFile: + """ + Update attributes on a vector store file. + + Args: + attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. Keys are strings with a maximum + length of 64 characters. Values are strings with a maximum length of 512 + characters, booleans, or numbers. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return await self._post( + f"/vector_stores/{vector_store_id}/files/{file_id}", + body=await async_maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFile, + ) + def list( self, vector_store_id: str, @@ -469,6 +615,44 @@ async def delete( cast_to=VectorStoreFileDeleted, ) + def content( + self, + file_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncPaginator[FileContentResponse, AsyncPage[FileContentResponse]]: + """ + Retrieve the parsed contents of a vector store file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return self._get_api_list( + f"/vector_stores/{vector_store_id}/files/{file_id}/content", + page=AsyncPage[FileContentResponse], + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + model=FileContentResponse, + ) + class FilesWithRawResponse: def __init__(self, files: Files) -> None: @@ -480,12 +664,18 @@ def __init__(self, files: Files) -> None: self.retrieve = _legacy_response.to_raw_response_wrapper( files.retrieve, ) + self.update = _legacy_response.to_raw_response_wrapper( + files.update, + ) self.list = _legacy_response.to_raw_response_wrapper( files.list, ) self.delete = _legacy_response.to_raw_response_wrapper( files.delete, ) + self.content = _legacy_response.to_raw_response_wrapper( + files.content, + ) class AsyncFilesWithRawResponse: @@ -498,12 +688,18 @@ def __init__(self, files: AsyncFiles) -> None: self.retrieve = _legacy_response.async_to_raw_response_wrapper( files.retrieve, ) + self.update = _legacy_response.async_to_raw_response_wrapper( + files.update, + ) self.list = _legacy_response.async_to_raw_response_wrapper( files.list, ) self.delete = _legacy_response.async_to_raw_response_wrapper( files.delete, ) + self.content = _legacy_response.async_to_raw_response_wrapper( + files.content, + ) class FilesWithStreamingResponse: 
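A minimal sketch of the vector store file surface added in this diff — create with `attributes`, update them, and read the parsed contents. It assumes the post-rename accessor `client.vector_stores.files` and placeholder IDs (`vs_123`, `file-abc`); the `type`/`text` fields printed from the content parts are illustrative:

from openai import OpenAI

client = OpenAI()

# Attach a file to a vector store, tagging it with filterable attributes
# (strings, booleans, or numbers; at most 16 key-value pairs).
vs_file = client.vector_stores.files.create(
    "vs_123",
    file_id="file-abc",
    attributes={"author": "jane", "year": 2024, "internal": True},
)

# Attributes can be replaced later without re-ingesting the file.
vs_file = client.vector_stores.files.update(
    vs_file.id,
    vector_store_id="vs_123",
    attributes={"author": "jane", "year": 2024, "internal": False},
)

# The parsed-contents endpoint returns a page object that can be iterated directly.
for part in client.vector_stores.files.content(vs_file.id, vector_store_id="vs_123"):
    print(part.type, part.text)

Attributes set this way are what the `filters` parameter of the vector store `search` method, added later in this diff, matches against.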
@@ -516,12 +712,18 @@ def __init__(self, files: Files) -> None: self.retrieve = to_streamed_response_wrapper( files.retrieve, ) + self.update = to_streamed_response_wrapper( + files.update, + ) self.list = to_streamed_response_wrapper( files.list, ) self.delete = to_streamed_response_wrapper( files.delete, ) + self.content = to_streamed_response_wrapper( + files.content, + ) class AsyncFilesWithStreamingResponse: @@ -534,9 +736,15 @@ def __init__(self, files: AsyncFiles) -> None: self.retrieve = async_to_streamed_response_wrapper( files.retrieve, ) + self.update = async_to_streamed_response_wrapper( + files.update, + ) self.list = async_to_streamed_response_wrapper( files.list, ) self.delete = async_to_streamed_response_wrapper( files.delete, ) + self.content = async_to_streamed_response_wrapper( + files.content, + ) diff --git a/src/openai/resources/beta/vector_stores/vector_stores.py b/src/openai/resources/vector_stores/vector_stores.py similarity index 80% rename from src/openai/resources/beta/vector_stores/vector_stores.py rename to src/openai/resources/vector_stores/vector_stores.py index 1da52fb3c7..aaa6ed2757 100644 --- a/src/openai/resources/beta/vector_stores/vector_stores.py +++ b/src/openai/resources/vector_stores/vector_stores.py @@ -2,12 +2,12 @@ from __future__ import annotations -from typing import List, Optional +from typing import List, Union, Optional from typing_extensions import Literal import httpx -from .... import _legacy_response +from ... import _legacy_response from .files import ( Files, AsyncFiles, @@ -16,14 +16,22 @@ FilesWithStreamingResponse, AsyncFilesWithStreamingResponse, ) -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import ( +from ...types import ( + FileChunkingStrategyParam, + vector_store_list_params, + vector_store_create_params, + vector_store_search_params, + vector_store_update_params, +) +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import ( maybe_transform, async_maybe_transform, ) -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ...pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage from .file_batches import ( FileBatches, AsyncFileBatches, @@ -32,18 +40,12 @@ FileBatchesWithStreamingResponse, AsyncFileBatchesWithStreamingResponse, ) -from ....pagination import SyncCursorPage, AsyncCursorPage -from ....types.beta import ( - FileChunkingStrategyParam, - vector_store_list_params, - vector_store_create_params, - vector_store_update_params, -) -from ...._base_client import AsyncPaginator, make_request_options -from ....types.beta.vector_store import VectorStore -from ....types.shared_params.metadata import Metadata -from ....types.beta.vector_store_deleted import VectorStoreDeleted -from ....types.beta.file_chunking_strategy_param import FileChunkingStrategyParam +from ..._base_client import AsyncPaginator, make_request_options +from ...types.vector_store import VectorStore +from ...types.vector_store_deleted import VectorStoreDeleted +from ...types.shared_params.metadata import Metadata +from ...types.file_chunking_strategy_param import FileChunkingStrategyParam +from ...types.vector_store_search_response import 
VectorStoreSearchResponse __all__ = ["VectorStores", "AsyncVectorStores"] @@ -329,6 +331,69 @@ def delete( cast_to=VectorStoreDeleted, ) + def search( + self, + vector_store_id: str, + *, + query: Union[str, List[str]], + filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN, + max_num_results: int | NotGiven = NOT_GIVEN, + ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN, + rewrite_query: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SyncPage[VectorStoreSearchResponse]: + """ + Search a vector store for relevant chunks based on a query and file attributes + filter. + + Args: + query: A query string for a search + + filters: A filter to apply based on file attributes. + + max_num_results: The maximum number of results to return. This number should be between 1 and 50 + inclusive. + + ranking_options: Ranking options for search. + + rewrite_query: Whether to rewrite the natural language query for vector search. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return self._get_api_list( + f"/vector_stores/{vector_store_id}/search", + page=SyncPage[VectorStoreSearchResponse], + body=maybe_transform( + { + "query": query, + "filters": filters, + "max_num_results": max_num_results, + "ranking_options": ranking_options, + "rewrite_query": rewrite_query, + }, + vector_store_search_params.VectorStoreSearchParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + model=VectorStoreSearchResponse, + method="post", + ) + class AsyncVectorStores(AsyncAPIResource): @cached_property @@ -611,6 +676,69 @@ async def delete( cast_to=VectorStoreDeleted, ) + def search( + self, + vector_store_id: str, + *, + query: Union[str, List[str]], + filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN, + max_num_results: int | NotGiven = NOT_GIVEN, + ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN, + rewrite_query: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncPaginator[VectorStoreSearchResponse, AsyncPage[VectorStoreSearchResponse]]: + """ + Search a vector store for relevant chunks based on a query and file attributes + filter. + + Args: + query: A query string for a search + + filters: A filter to apply based on file attributes. 
+ + max_num_results: The maximum number of results to return. This number should be between 1 and 50 + inclusive. + + ranking_options: Ranking options for search. + + rewrite_query: Whether to rewrite the natural language query for vector search. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return self._get_api_list( + f"/vector_stores/{vector_store_id}/search", + page=AsyncPage[VectorStoreSearchResponse], + body=maybe_transform( + { + "query": query, + "filters": filters, + "max_num_results": max_num_results, + "ranking_options": ranking_options, + "rewrite_query": rewrite_query, + }, + vector_store_search_params.VectorStoreSearchParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + model=VectorStoreSearchResponse, + method="post", + ) + class VectorStoresWithRawResponse: def __init__(self, vector_stores: VectorStores) -> None: @@ -631,6 +759,9 @@ def __init__(self, vector_stores: VectorStores) -> None: self.delete = _legacy_response.to_raw_response_wrapper( vector_stores.delete, ) + self.search = _legacy_response.to_raw_response_wrapper( + vector_stores.search, + ) @cached_property def files(self) -> FilesWithRawResponse: @@ -660,6 +791,9 @@ def __init__(self, vector_stores: AsyncVectorStores) -> None: self.delete = _legacy_response.async_to_raw_response_wrapper( vector_stores.delete, ) + self.search = _legacy_response.async_to_raw_response_wrapper( + vector_stores.search, + ) @cached_property def files(self) -> AsyncFilesWithRawResponse: @@ -689,6 +823,9 @@ def __init__(self, vector_stores: VectorStores) -> None: self.delete = to_streamed_response_wrapper( vector_stores.delete, ) + self.search = to_streamed_response_wrapper( + vector_stores.search, + ) @cached_property def files(self) -> FilesWithStreamingResponse: @@ -718,6 +855,9 @@ def __init__(self, vector_stores: AsyncVectorStores) -> None: self.delete = async_to_streamed_response_wrapper( vector_stores.delete, ) + self.search = async_to_streamed_response_wrapper( + vector_stores.search, + ) @cached_property def files(self) -> AsyncFilesWithStreamingResponse: diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index eb71ac6ccc..4c337d41c7 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -8,7 +8,11 @@ from .shared import ( Metadata as Metadata, ChatModel as ChatModel, + Reasoning as Reasoning, ErrorObject as ErrorObject, + CompoundFilter as CompoundFilter, + ReasoningEffort as ReasoningEffort, + ComparisonFilter as ComparisonFilter, FunctionDefinition as FunctionDefinition, FunctionParameters as FunctionParameters, ResponseFormatText as ResponseFormatText, @@ -27,6 +31,7 @@ from .file_content import FileContent as FileContent from .file_deleted import FileDeleted as FileDeleted from .file_purpose import FilePurpose as FilePurpose +from .vector_store import VectorStore as VectorStore from .model_deleted import ModelDeleted as ModelDeleted from .embedding_model import EmbeddingModel as EmbeddingModel from .images_response import ImagesResponse as ImagesResponse @@ -40,16 +45,32 @@ 
from .batch_create_params import BatchCreateParams as BatchCreateParams from .batch_request_counts import BatchRequestCounts as BatchRequestCounts from .upload_create_params import UploadCreateParams as UploadCreateParams +from .vector_store_deleted import VectorStoreDeleted as VectorStoreDeleted from .audio_response_format import AudioResponseFormat as AudioResponseFormat from .image_generate_params import ImageGenerateParams as ImageGenerateParams +from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy from .upload_complete_params import UploadCompleteParams as UploadCompleteParams from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .moderation_create_params import ModerationCreateParams as ModerationCreateParams +from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse from .moderation_create_response import ModerationCreateResponse as ModerationCreateResponse +from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams +from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams +from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams from .moderation_text_input_param import ModerationTextInputParam as ModerationTextInputParam +from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam +from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse from .websocket_connection_options import WebsocketConnectionOptions as WebsocketConnectionOptions from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams +from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam +from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam +from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject +from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam +from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject +from .static_file_chunking_strategy_object_param import ( + StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, +) diff --git a/src/openai/types/beta/auto_file_chunking_strategy_param.py b/src/openai/types/auto_file_chunking_strategy_param.py similarity index 100% rename from src/openai/types/beta/auto_file_chunking_strategy_param.py rename to src/openai/types/auto_file_chunking_strategy_param.py diff --git a/src/openai/types/beta/__init__.py b/src/openai/types/beta/__init__.py index b9ea792bfa..5ba3eadf3c 100644 --- a/src/openai/types/beta/__init__.py +++ b/src/openai/types/beta/__init__.py @@ -4,7 +4,6 @@ from .thread import Thread as Thread from .assistant import Assistant as Assistant -from .vector_store import VectorStore as VectorStore from .function_tool import FunctionTool as FunctionTool from .assistant_tool 
import AssistantTool as AssistantTool from .thread_deleted import ThreadDeleted as ThreadDeleted @@ -14,35 +13,21 @@ from .assistant_tool_param import AssistantToolParam as AssistantToolParam from .thread_create_params import ThreadCreateParams as ThreadCreateParams from .thread_update_params import ThreadUpdateParams as ThreadUpdateParams -from .vector_store_deleted import VectorStoreDeleted as VectorStoreDeleted from .assistant_list_params import AssistantListParams as AssistantListParams from .assistant_tool_choice import AssistantToolChoice as AssistantToolChoice from .code_interpreter_tool import CodeInterpreterTool as CodeInterpreterTool from .assistant_stream_event import AssistantStreamEvent as AssistantStreamEvent -from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam from .assistant_create_params import AssistantCreateParams as AssistantCreateParams from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams -from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams -from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams -from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam from .code_interpreter_tool_param import CodeInterpreterToolParam as CodeInterpreterToolParam from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption -from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam from .thread_create_and_run_params import ThreadCreateAndRunParams as ThreadCreateAndRunParams -from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy from .assistant_tool_choice_function import AssistantToolChoiceFunction as AssistantToolChoiceFunction from .assistant_response_format_option import AssistantResponseFormatOption as AssistantResponseFormatOption -from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam as AssistantToolChoiceOptionParam -from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject -from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam as AssistantToolChoiceFunctionParam -from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject from .assistant_response_format_option_param import ( AssistantResponseFormatOptionParam as AssistantResponseFormatOptionParam, ) -from .static_file_chunking_strategy_object_param import ( - StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, -) diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index e90aabfd3f..8b3c331850 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -3,12 +3,12 @@ from __future__ import annotations from typing import List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import 
Literal, Required, TypeAlias, TypedDict from ..shared.chat_model import ChatModel from .assistant_tool_param import AssistantToolParam from ..shared_params.metadata import Metadata -from .file_chunking_strategy_param import FileChunkingStrategyParam +from ..shared.reasoning_effort import ReasoningEffort from .assistant_response_format_option_param import AssistantResponseFormatOptionParam __all__ = [ @@ -17,6 +17,10 @@ "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", "ToolResourcesFileSearchVectorStore", + "ToolResourcesFileSearchVectorStoreChunkingStrategy", + "ToolResourcesFileSearchVectorStoreChunkingStrategyAuto", + "ToolResourcesFileSearchVectorStoreChunkingStrategyStatic", + "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic", ] @@ -53,8 +57,8 @@ class AssistantCreateParams(TypedDict, total=False): name: Optional[str] """The name of the assistant. The maximum length is 256 characters.""" - reasoning_effort: Optional[Literal["low", "medium", "high"]] - """**o1 and o3-mini models only** + reasoning_effort: Optional[ReasoningEffort] + """**o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -127,12 +131,43 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False): """ +class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False): + chunk_overlap_tokens: Required[int] + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: Required[int] + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False): + static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic] + + type: Required[Literal["static"]] + """Always `static`.""" + + +ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ + ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic +] + + class ToolResourcesFileSearchVectorStore(TypedDict, total=False): - chunking_strategy: FileChunkingStrategyParam + chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy """The chunking strategy used to chunk the file(s). - If not set, will use the `auto` strategy. Only applicable if `file_ids` is - non-empty. + If not set, will use the `auto` strategy. """ file_ids: List[str] diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py index 12a57a4063..d3ec7614fd 100644 --- a/src/openai/types/beta/assistant_update_params.py +++ b/src/openai/types/beta/assistant_update_params.py @@ -7,6 +7,7 @@ from .assistant_tool_param import AssistantToolParam from ..shared_params.metadata import Metadata +from ..shared.reasoning_effort import ReasoningEffort from .assistant_response_format_option_param import AssistantResponseFormatOptionParam __all__ = ["AssistantUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] @@ -79,8 +80,8 @@ class AssistantUpdateParams(TypedDict, total=False): name: Optional[str] """The name of the assistant. 
The maximum length is 256 characters.""" - reasoning_effort: Optional[Literal["low", "medium", "high"]] - """**o1 and o3-mini models only** + reasoning_effort: Optional[ReasoningEffort] + """**o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index d888fb3eee..065c390f4e 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -10,7 +10,6 @@ from .file_search_tool_param import FileSearchToolParam from ..shared_params.metadata import Metadata from .code_interpreter_tool_param import CodeInterpreterToolParam -from .file_chunking_strategy_param import FileChunkingStrategyParam from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam from .threads.message_content_part_param import MessageContentPartParam from .assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -26,6 +25,10 @@ "ThreadToolResourcesCodeInterpreter", "ThreadToolResourcesFileSearch", "ThreadToolResourcesFileSearchVectorStore", + "ThreadToolResourcesFileSearchVectorStoreChunkingStrategy", + "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto", + "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic", + "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", @@ -224,12 +227,44 @@ class ThreadToolResourcesCodeInterpreter(TypedDict, total=False): """ +class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" + + +class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False): + chunk_overlap_tokens: Required[int] + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: Required[int] + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ + + +class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False): + static: Required[ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic] + + type: Required[Literal["static"]] + """Always `static`.""" + + +ThreadToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ + ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto, + ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic, +] + + class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False): - chunking_strategy: FileChunkingStrategyParam + chunking_strategy: ThreadToolResourcesFileSearchVectorStoreChunkingStrategy """The chunking strategy used to chunk the file(s). - If not set, will use the `auto` strategy. Only applicable if `file_ids` is - non-empty. + If not set, will use the `auto` strategy. 
""" file_ids: List[str] diff --git a/src/openai/types/beta/thread_create_params.py b/src/openai/types/beta/thread_create_params.py index 127202753c..ec1ccf19a6 100644 --- a/src/openai/types/beta/thread_create_params.py +++ b/src/openai/types/beta/thread_create_params.py @@ -7,7 +7,6 @@ from ..shared_params.metadata import Metadata from .code_interpreter_tool_param import CodeInterpreterToolParam -from .file_chunking_strategy_param import FileChunkingStrategyParam from .threads.message_content_part_param import MessageContentPartParam __all__ = [ @@ -20,6 +19,10 @@ "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", "ToolResourcesFileSearchVectorStore", + "ToolResourcesFileSearchVectorStoreChunkingStrategy", + "ToolResourcesFileSearchVectorStoreChunkingStrategyAuto", + "ToolResourcesFileSearchVectorStoreChunkingStrategyStatic", + "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic", ] @@ -101,12 +104,43 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False): """ +class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False): + chunk_overlap_tokens: Required[int] + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: Required[int] + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False): + static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic] + + type: Required[Literal["static"]] + """Always `static`.""" + + +ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ + ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic +] + + class ToolResourcesFileSearchVectorStore(TypedDict, total=False): - chunking_strategy: FileChunkingStrategyParam + chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy """The chunking strategy used to chunk the file(s). - If not set, will use the `auto` strategy. Only applicable if `file_ids` is - non-empty. + If not set, will use the `auto` strategy. """ file_ids: List[str] diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index 098e50a1d9..fc70227862 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -9,6 +9,7 @@ from ..assistant_tool_param import AssistantToolParam from .runs.run_step_include import RunStepInclude from ...shared_params.metadata import Metadata +from ...shared.reasoning_effort import ReasoningEffort from .message_content_part_param import MessageContentPartParam from ..code_interpreter_tool_param import CodeInterpreterToolParam from ..assistant_tool_choice_option_param import AssistantToolChoiceOptionParam @@ -106,8 +107,8 @@ class RunCreateParamsBase(TypedDict, total=False): during tool use. """ - reasoning_effort: Optional[Literal["low", "medium", "high"]] - """**o1 and o3-mini models only** + reasoning_effort: Optional[ReasoningEffort] + """**o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). 
Currently diff --git a/src/openai/types/chat/chat_completion_audio_param.py b/src/openai/types/chat/chat_completion_audio_param.py index 1e20a52b41..6321417826 100644 --- a/src/openai/types/chat/chat_completion_audio_param.py +++ b/src/openai/types/chat/chat_completion_audio_param.py @@ -17,7 +17,6 @@ class ChatCompletionAudioParam(TypedDict, total=False): voice: Required[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] """The voice the model uses to respond. - Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also - supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices - are less expressive). + Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, and + `shimmer`. """ diff --git a/src/openai/types/chat/chat_completion_content_part_param.py b/src/openai/types/chat/chat_completion_content_part_param.py index 682d11f4c7..1293c54312 100644 --- a/src/openai/types/chat/chat_completion_content_part_param.py +++ b/src/openai/types/chat/chat_completion_content_part_param.py @@ -3,14 +3,39 @@ from __future__ import annotations from typing import Union -from typing_extensions import TypeAlias +from typing_extensions import Literal, Required, TypeAlias, TypedDict from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam from .chat_completion_content_part_image_param import ChatCompletionContentPartImageParam from .chat_completion_content_part_input_audio_param import ChatCompletionContentPartInputAudioParam -__all__ = ["ChatCompletionContentPartParam"] +__all__ = ["ChatCompletionContentPartParam", "File", "FileFile"] + + +class FileFile(TypedDict, total=False): + file_data: str + """ + The base64 encoded file data, used when passing the file to the model as a + string. + """ + + file_id: str + """The ID of an uploaded file to use as input.""" + + file_name: str + """The name of the file, used when passing the file to the model as a string.""" + + +class File(TypedDict, total=False): + file: Required[FileFile] + + type: Required[Literal["file"]] + """The type of the content part. Always `file`.""" + ChatCompletionContentPartParam: TypeAlias = Union[ - ChatCompletionContentPartTextParam, ChatCompletionContentPartImageParam, ChatCompletionContentPartInputAudioParam + ChatCompletionContentPartTextParam, + ChatCompletionContentPartImageParam, + ChatCompletionContentPartInputAudioParam, + File, ] diff --git a/src/openai/types/chat/chat_completion_message.py b/src/openai/types/chat/chat_completion_message.py index 704fa5d5d1..c659ac3da0 100644 --- a/src/openai/types/chat/chat_completion_message.py +++ b/src/openai/types/chat/chat_completion_message.py @@ -7,7 +7,29 @@ from .chat_completion_audio import ChatCompletionAudio from .chat_completion_message_tool_call import ChatCompletionMessageToolCall -__all__ = ["ChatCompletionMessage", "FunctionCall"] +__all__ = ["ChatCompletionMessage", "Annotation", "AnnotationURLCitation", "FunctionCall"] + + +class AnnotationURLCitation(BaseModel): + end_index: int + """The index of the last character of the URL citation in the message.""" + + start_index: int + """The index of the first character of the URL citation in the message.""" + + title: str + """The title of the web resource.""" + + url: str + """The URL of the web resource.""" + + +class Annotation(BaseModel): + type: Literal["url_citation"] + """The type of the URL citation. 
Always `url_citation`.""" + + url_citation: AnnotationURLCitation + """A URL citation when using web search.""" class FunctionCall(BaseModel): @@ -33,6 +55,12 @@ class ChatCompletionMessage(BaseModel): role: Literal["assistant"] """The role of the author of this message.""" + annotations: Optional[List[Annotation]] = None + """ + Annotations for the message, when applicable, as when using the + [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + """ + audio: Optional[ChatCompletionAudio] = None """ If the audio output modality is requested, this object contains data about the diff --git a/src/openai/types/chat/chat_completion_reasoning_effort.py b/src/openai/types/chat/chat_completion_reasoning_effort.py index 85249c53b1..e4785c90bf 100644 --- a/src/openai/types/chat/chat_completion_reasoning_effort.py +++ b/src/openai/types/chat/chat_completion_reasoning_effort.py @@ -1,8 +1,8 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Optional -from typing_extensions import Literal, TypeAlias + +from ..shared.reasoning_effort import ReasoningEffort __all__ = ["ChatCompletionReasoningEffort"] -ChatCompletionReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]] +ChatCompletionReasoningEffort = ReasoningEffort diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 4dd2812aba..05103fba91 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -7,11 +7,10 @@ from ..shared.chat_model import ChatModel from ..shared_params.metadata import Metadata -from .chat_completion_modality import ChatCompletionModality +from ..shared.reasoning_effort import ReasoningEffort from .chat_completion_tool_param import ChatCompletionToolParam from .chat_completion_audio_param import ChatCompletionAudioParam from .chat_completion_message_param import ChatCompletionMessageParam -from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort from ..shared_params.function_parameters import FunctionParameters from ..shared_params.response_format_text import ResponseFormatText from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam @@ -26,6 +25,9 @@ "FunctionCall", "Function", "ResponseFormat", + "WebSearchOptions", + "WebSearchOptionsUserLocation", + "WebSearchOptionsUserLocationApproximate", "CompletionCreateParamsNonStreaming", "CompletionCreateParamsStreaming", ] @@ -43,11 +45,12 @@ class CompletionCreateParamsBase(TypedDict, total=False): """ model: Required[Union[str, ChatModel]] - """ID of the model to use. + """Model ID used to generate the response, like `gpt-4o` or `o1`. - See the - [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) - table for details on which models work with the Chat API. + OpenAI offers a wide range of models with different capabilities, performance + characteristics, and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. """ audio: Optional[ChatCompletionAudioParam] @@ -133,10 +136,10 @@ class CompletionCreateParamsBase(TypedDict, total=False): a maximum length of 512 characters. """ - modalities: Optional[List[ChatCompletionModality]] + modalities: Optional[List[Literal["text", "audio"]]] """ - Output types that you would like the model to generate for this request. 
Most - models are capable of generating text, which is the default: + Output types that you would like the model to generate. Most models are capable + of generating text, which is the default: `["text"]` @@ -174,8 +177,8 @@ class CompletionCreateParamsBase(TypedDict, total=False): far, increasing the model's likelihood to talk about new topics. """ - reasoning_effort: Optional[ChatCompletionReasoningEffort] - """**o1 and o3-mini models only** + reasoning_effort: Optional[ReasoningEffort] + """**o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently @@ -191,16 +194,9 @@ class CompletionCreateParamsBase(TypedDict, total=False): in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. """ seed: Optional[int] @@ -221,14 +217,20 @@ class CompletionCreateParamsBase(TypedDict, total=False): utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarantee. + latency guarentee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. + tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. + + When this parameter is set, the response body will include the `service_tier` + utilized. """ - stop: Union[Optional[str], List[str]] - """Up to 4 sequences where the API will stop generating further tokens.""" + stop: Union[Optional[str], List[str], None] + """Up to 4 sequences where the API will stop generating further tokens. + + The returned text will not contain the stop sequence. + """ store: Optional[bool] """ @@ -292,6 +294,13 @@ class CompletionCreateParamsBase(TypedDict, total=False): [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). """ + web_search_options: WebSearchOptions + """ + This tool searches the web for relevant results to use in a response. Learn more + about the + [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). 
+ """ + FunctionCall: TypeAlias = Union[Literal["none", "auto"], ChatCompletionFunctionCallOptionParam] @@ -322,30 +331,73 @@ class Function(TypedDict, total=False): """ -ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema] +ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject] + + +class WebSearchOptionsUserLocationApproximate(TypedDict, total=False): + city: str + """Free text input for the city of the user, e.g. `San Francisco`.""" + + country: str + """ + The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + the user, e.g. `US`. + """ + + region: str + """Free text input for the region of the user, e.g. `California`.""" + + timezone: str + """ + The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + user, e.g. `America/Los_Angeles`. + """ + + +class WebSearchOptionsUserLocation(TypedDict, total=False): + approximate: Required[WebSearchOptionsUserLocationApproximate] + """Approximate location parameters for the search.""" + + type: Required[Literal["approximate"]] + """The type of location approximation. Always `approximate`.""" + + +class WebSearchOptions(TypedDict, total=False): + search_context_size: Literal["low", "medium", "high"] + """ + High level guidance for the amount of context window space to use for the + search. One of `low`, `medium`, or `high`. `medium` is the default. + """ + + user_location: Optional[WebSearchOptionsUserLocation] + """Approximate location parameters for the search.""" class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False): stream: Optional[Literal[False]] - """If set, partial message deltas will be sent, like in ChatGPT. - - Tokens will be sent as data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` - message. - [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + """ + If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + for more information, along with the + [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + guide for more information on how to handle the streaming events. """ class CompletionCreateParamsStreaming(CompletionCreateParamsBase): stream: Required[Literal[True]] - """If set, partial message deltas will be sent, like in ChatGPT. - - Tokens will be sent as data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` - message. - [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + """ + If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). 
+ See the + [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + for more information, along with the + [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + guide for more information on how to handle the streaming events. """ diff --git a/src/openai/types/chat/completion_list_params.py b/src/openai/types/chat/completion_list_params.py index a8fce900ce..d93da834a3 100644 --- a/src/openai/types/chat/completion_list_params.py +++ b/src/openai/types/chat/completion_list_params.py @@ -15,19 +15,19 @@ class CompletionListParams(TypedDict, total=False): """Identifier for the last chat completion from the previous pagination request.""" limit: int - """Number of chat completions to retrieve.""" + """Number of Chat Completions to retrieve.""" metadata: Optional[Metadata] - """A list of metadata keys to filter the chat completions by. Example: + """A list of metadata keys to filter the Chat Completions by. Example: `metadata[key1]=value1&metadata[key2]=value2` """ model: str - """The model used to generate the chat completions.""" + """The model used to generate the Chat Completions.""" order: Literal["asc", "desc"] - """Sort order for chat completions by timestamp. + """Sort order for Chat Completions by timestamp. Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`. """ diff --git a/src/openai/types/beta/file_chunking_strategy.py b/src/openai/types/file_chunking_strategy.py similarity index 93% rename from src/openai/types/beta/file_chunking_strategy.py rename to src/openai/types/file_chunking_strategy.py index 406d69dd0e..ee96bd7884 100644 --- a/src/openai/types/beta/file_chunking_strategy.py +++ b/src/openai/types/file_chunking_strategy.py @@ -3,7 +3,7 @@ from typing import Union from typing_extensions import Annotated, TypeAlias -from ..._utils import PropertyInfo +from .._utils import PropertyInfo from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject diff --git a/src/openai/types/beta/file_chunking_strategy_param.py b/src/openai/types/file_chunking_strategy_param.py similarity index 100% rename from src/openai/types/beta/file_chunking_strategy_param.py rename to src/openai/types/file_chunking_strategy_param.py diff --git a/src/openai/types/file_create_params.py b/src/openai/types/file_create_params.py index ecf7503358..728dfd350f 100644 --- a/src/openai/types/file_create_params.py +++ b/src/openai/types/file_create_params.py @@ -17,10 +17,8 @@ class FileCreateParams(TypedDict, total=False): purpose: Required[FilePurpose] """The intended purpose of the uploaded file. - Use "assistants" for - [Assistants](https://platform.openai.com/docs/api-reference/assistants) and - [Message](https://platform.openai.com/docs/api-reference/messages) files, - "vision" for Assistants image file inputs, "batch" for - [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for - [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). 
+ One of: - `assistants`: Used in the Assistants API - `batch`: Used in the Batch + API - `fine-tune`: Used for fine-tuning - `vision`: Images used for vision + fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used + for eval data sets """ diff --git a/src/openai/types/file_purpose.py b/src/openai/types/file_purpose.py index 32dc352c62..b2c2d5f9fc 100644 --- a/src/openai/types/file_purpose.py +++ b/src/openai/types/file_purpose.py @@ -4,4 +4,4 @@ __all__ = ["FilePurpose"] -FilePurpose: TypeAlias = Literal["assistants", "batch", "fine-tune", "vision"] +FilePurpose: TypeAlias = Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"] diff --git a/src/openai/types/beta/other_file_chunking_strategy_object.py b/src/openai/types/other_file_chunking_strategy_object.py similarity index 89% rename from src/openai/types/beta/other_file_chunking_strategy_object.py rename to src/openai/types/other_file_chunking_strategy_object.py index 89da560be4..e4cd61a8fc 100644 --- a/src/openai/types/beta/other_file_chunking_strategy_object.py +++ b/src/openai/types/other_file_chunking_strategy_object.py @@ -2,7 +2,7 @@ from typing_extensions import Literal -from ..._models import BaseModel +from .._models import BaseModel __all__ = ["OtherFileChunkingStrategyObject"] diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py new file mode 100644 index 0000000000..d0df31ed86 --- /dev/null +++ b/src/openai/types/responses/__init__.py @@ -0,0 +1,130 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .tool import Tool as Tool +from .response import Response as Response +from .tool_param import ToolParam as ToolParam +from .computer_tool import ComputerTool as ComputerTool +from .function_tool import FunctionTool as FunctionTool +from .response_error import ResponseError as ResponseError +from .response_usage import ResponseUsage as ResponseUsage +from .response_status import ResponseStatus as ResponseStatus +from .web_search_tool import WebSearchTool as WebSearchTool +from .file_search_tool import FileSearchTool as FileSearchTool +from .tool_choice_types import ToolChoiceTypes as ToolChoiceTypes +from .response_item_list import ResponseItemList as ResponseItemList +from .computer_tool_param import ComputerToolParam as ComputerToolParam +from .function_tool_param import FunctionToolParam as FunctionToolParam +from .response_includable import ResponseIncludable as ResponseIncludable +from .response_input_file import ResponseInputFile as ResponseInputFile +from .response_input_text import ResponseInputText as ResponseInputText +from .tool_choice_options import ToolChoiceOptions as ToolChoiceOptions +from .response_error_event import ResponseErrorEvent as ResponseErrorEvent +from .response_input_image import ResponseInputImage as ResponseInputImage +from .response_input_param import ResponseInputParam as ResponseInputParam +from .response_output_item import ResponseOutputItem as ResponseOutputItem +from .response_output_text import ResponseOutputText as ResponseOutputText +from .response_text_config import ResponseTextConfig as ResponseTextConfig +from .tool_choice_function import ToolChoiceFunction as ToolChoiceFunction +from .response_failed_event import ResponseFailedEvent as ResponseFailedEvent +from .response_stream_event import ResponseStreamEvent as ResponseStreamEvent +from .web_search_tool_param import WebSearchToolParam as WebSearchToolParam +from 
.file_search_tool_param import FileSearchToolParam as FileSearchToolParam +from .input_item_list_params import InputItemListParams as InputItemListParams +from .response_create_params import ResponseCreateParams as ResponseCreateParams +from .response_created_event import ResponseCreatedEvent as ResponseCreatedEvent +from .response_input_content import ResponseInputContent as ResponseInputContent +from .response_output_message import ResponseOutputMessage as ResponseOutputMessage +from .response_output_refusal import ResponseOutputRefusal as ResponseOutputRefusal +from .tool_choice_types_param import ToolChoiceTypesParam as ToolChoiceTypesParam +from .easy_input_message_param import EasyInputMessageParam as EasyInputMessageParam +from .response_completed_event import ResponseCompletedEvent as ResponseCompletedEvent +from .response_retrieve_params import ResponseRetrieveParams as ResponseRetrieveParams +from .response_text_done_event import ResponseTextDoneEvent as ResponseTextDoneEvent +from .response_audio_done_event import ResponseAudioDoneEvent as ResponseAudioDoneEvent +from .response_incomplete_event import ResponseIncompleteEvent as ResponseIncompleteEvent +from .response_input_file_param import ResponseInputFileParam as ResponseInputFileParam +from .response_input_item_param import ResponseInputItemParam as ResponseInputItemParam +from .response_input_text_param import ResponseInputTextParam as ResponseInputTextParam +from .response_text_delta_event import ResponseTextDeltaEvent as ResponseTextDeltaEvent +from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudioDeltaEvent +from .response_in_progress_event import ResponseInProgressEvent as ResponseInProgressEvent +from .response_input_image_param import ResponseInputImageParam as ResponseInputImageParam +from .response_output_text_param import ResponseOutputTextParam as ResponseOutputTextParam +from .response_text_config_param import ResponseTextConfigParam as ResponseTextConfigParam +from .tool_choice_function_param import ToolChoiceFunctionParam as ToolChoiceFunctionParam +from .response_computer_tool_call import ResponseComputerToolCall as ResponseComputerToolCall +from .response_format_text_config import ResponseFormatTextConfig as ResponseFormatTextConfig +from .response_function_tool_call import ResponseFunctionToolCall as ResponseFunctionToolCall +from .response_refusal_done_event import ResponseRefusalDoneEvent as ResponseRefusalDoneEvent +from .response_function_web_search import ResponseFunctionWebSearch as ResponseFunctionWebSearch +from .response_input_content_param import ResponseInputContentParam as ResponseInputContentParam +from .response_refusal_delta_event import ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent +from .response_output_message_param import ResponseOutputMessageParam as ResponseOutputMessageParam +from .response_output_refusal_param import ResponseOutputRefusalParam as ResponseOutputRefusalParam +from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall +from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent +from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent +from .response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent +from .response_computer_tool_call_param import ResponseComputerToolCallParam as ResponseComputerToolCallParam +from .response_content_part_added_event import 
ResponseContentPartAddedEvent as ResponseContentPartAddedEvent +from .response_format_text_config_param import ResponseFormatTextConfigParam as ResponseFormatTextConfigParam +from .response_function_tool_call_param import ResponseFunctionToolCallParam as ResponseFunctionToolCallParam +from .response_function_web_search_param import ResponseFunctionWebSearchParam as ResponseFunctionWebSearchParam +from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall as ResponseCodeInterpreterToolCall +from .response_input_message_content_list import ResponseInputMessageContentList as ResponseInputMessageContentList +from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent +from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam as ResponseFileSearchToolCallParam +from .response_text_annotation_delta_event import ResponseTextAnnotationDeltaEvent as ResponseTextAnnotationDeltaEvent +from .response_audio_transcript_delta_event import ( + ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, +) +from .response_format_text_json_schema_config import ( + ResponseFormatTextJSONSchemaConfig as ResponseFormatTextJSONSchemaConfig, +) +from .response_web_search_call_completed_event import ( + ResponseWebSearchCallCompletedEvent as ResponseWebSearchCallCompletedEvent, +) +from .response_web_search_call_searching_event import ( + ResponseWebSearchCallSearchingEvent as ResponseWebSearchCallSearchingEvent, +) +from .response_file_search_call_completed_event import ( + ResponseFileSearchCallCompletedEvent as ResponseFileSearchCallCompletedEvent, +) +from .response_file_search_call_searching_event import ( + ResponseFileSearchCallSearchingEvent as ResponseFileSearchCallSearchingEvent, +) +from .response_input_message_content_list_param import ( + ResponseInputMessageContentListParam as ResponseInputMessageContentListParam, +) +from .response_web_search_call_in_progress_event import ( + ResponseWebSearchCallInProgressEvent as ResponseWebSearchCallInProgressEvent, +) +from .response_file_search_call_in_progress_event import ( + ResponseFileSearchCallInProgressEvent as ResponseFileSearchCallInProgressEvent, +) +from .response_function_call_arguments_done_event import ( + ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, +) +from .response_function_call_arguments_delta_event import ( + ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, +) +from .response_format_text_json_schema_config_param import ( + ResponseFormatTextJSONSchemaConfigParam as ResponseFormatTextJSONSchemaConfigParam, +) +from .response_code_interpreter_call_code_done_event import ( + ResponseCodeInterpreterCallCodeDoneEvent as ResponseCodeInterpreterCallCodeDoneEvent, +) +from .response_code_interpreter_call_completed_event import ( + ResponseCodeInterpreterCallCompletedEvent as ResponseCodeInterpreterCallCompletedEvent, +) +from .response_code_interpreter_call_code_delta_event import ( + ResponseCodeInterpreterCallCodeDeltaEvent as ResponseCodeInterpreterCallCodeDeltaEvent, +) +from .response_code_interpreter_call_in_progress_event import ( + ResponseCodeInterpreterCallInProgressEvent as ResponseCodeInterpreterCallInProgressEvent, +) +from .response_code_interpreter_call_interpreting_event import ( + ResponseCodeInterpreterCallInterpretingEvent as ResponseCodeInterpreterCallInterpretingEvent, +) diff --git a/src/openai/types/responses/computer_tool.py 
b/src/openai/types/responses/computer_tool.py new file mode 100644 index 0000000000..f0499cd950 --- /dev/null +++ b/src/openai/types/responses/computer_tool.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ComputerTool"] + + +class ComputerTool(BaseModel): + display_height: float + """The height of the computer display.""" + + display_width: float + """The width of the computer display.""" + + environment: Literal["mac", "windows", "ubuntu", "browser"] + """The type of computer environment to control.""" + + type: Literal["computer-preview"] + """The type of the computer use tool. Always `computer_use_preview`.""" diff --git a/src/openai/types/responses/computer_tool_param.py b/src/openai/types/responses/computer_tool_param.py new file mode 100644 index 0000000000..685b471378 --- /dev/null +++ b/src/openai/types/responses/computer_tool_param.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ComputerToolParam"] + + +class ComputerToolParam(TypedDict, total=False): + display_height: Required[float] + """The height of the computer display.""" + + display_width: Required[float] + """The width of the computer display.""" + + environment: Required[Literal["mac", "windows", "ubuntu", "browser"]] + """The type of computer environment to control.""" + + type: Required[Literal["computer-preview"]] + """The type of the computer use tool. Always `computer_use_preview`.""" diff --git a/src/openai/types/responses/easy_input_message_param.py b/src/openai/types/responses/easy_input_message_param.py new file mode 100644 index 0000000000..ef2f1c5f37 --- /dev/null +++ b/src/openai/types/responses/easy_input_message_param.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypedDict + +from .response_input_message_content_list_param import ResponseInputMessageContentListParam + +__all__ = ["EasyInputMessageParam"] + + +class EasyInputMessageParam(TypedDict, total=False): + content: Required[Union[str, ResponseInputMessageContentListParam]] + """ + Text, image, or audio input to the model, used to generate a response. Can also + contain previous assistant responses. + """ + + role: Required[Literal["user", "assistant", "system", "developer"]] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. + """ + + type: Literal["message"] + """The type of the message input. Always `message`.""" diff --git a/src/openai/types/responses/file_search_tool.py b/src/openai/types/responses/file_search_tool.py new file mode 100644 index 0000000000..683fc533fe --- /dev/null +++ b/src/openai/types/responses/file_search_tool.py @@ -0,0 +1,44 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Union, Optional +from typing_extensions import Literal, TypeAlias + +from ..._models import BaseModel +from ..shared.compound_filter import CompoundFilter +from ..shared.comparison_filter import ComparisonFilter + +__all__ = ["FileSearchTool", "Filters", "RankingOptions"] + +Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter] + + +class RankingOptions(BaseModel): + ranker: Optional[Literal["auto", "default-2024-11-15"]] = None + """The ranker to use for the file search.""" + + score_threshold: Optional[float] = None + """ + The score threshold for the file search, a number between 0 and 1. Numbers + closer to 1 will attempt to return only the most relevant results, but may + return fewer results. + """ + + +class FileSearchTool(BaseModel): + type: Literal["file_search"] + """The type of the file search tool. Always `file_search`.""" + + vector_store_ids: List[str] + """The IDs of the vector stores to search.""" + + filters: Optional[Filters] = None + """A filter to apply based on file attributes.""" + + max_num_results: Optional[int] = None + """The maximum number of results to return. + + This number should be between 1 and 50 inclusive. + """ + + ranking_options: Optional[RankingOptions] = None + """Ranking options for search.""" diff --git a/src/openai/types/responses/file_search_tool_param.py b/src/openai/types/responses/file_search_tool_param.py new file mode 100644 index 0000000000..2d6af8536b --- /dev/null +++ b/src/openai/types/responses/file_search_tool_param.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from ..shared_params.compound_filter import CompoundFilter +from ..shared_params.comparison_filter import ComparisonFilter + +__all__ = ["FileSearchToolParam", "Filters", "RankingOptions"] + +Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter] + + +class RankingOptions(TypedDict, total=False): + ranker: Literal["auto", "default-2024-11-15"] + """The ranker to use for the file search.""" + + score_threshold: float + """ + The score threshold for the file search, a number between 0 and 1. Numbers + closer to 1 will attempt to return only the most relevant results, but may + return fewer results. + """ + + +class FileSearchToolParam(TypedDict, total=False): + type: Required[Literal["file_search"]] + """The type of the file search tool. Always `file_search`.""" + + vector_store_ids: Required[List[str]] + """The IDs of the vector stores to search.""" + + filters: Filters + """A filter to apply based on file attributes.""" + + max_num_results: int + """The maximum number of results to return. + + This number should be between 1 and 50 inclusive. + """ + + ranking_options: RankingOptions + """Ranking options for search.""" diff --git a/src/openai/types/responses/function_tool.py b/src/openai/types/responses/function_tool.py new file mode 100644 index 0000000000..236a2c7c63 --- /dev/null +++ b/src/openai/types/responses/function_tool.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Dict, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["FunctionTool"] + + +class FunctionTool(BaseModel): + name: str + """The name of the function to call.""" + + parameters: Dict[str, object] + """A JSON schema object describing the parameters of the function.""" + + strict: bool + """Whether to enforce strict parameter validation. Default `true`.""" + + type: Literal["function"] + """The type of the function tool. Always `function`.""" + + description: Optional[str] = None + """A description of the function. + + Used by the model to determine whether or not to call the function. + """ diff --git a/src/openai/types/responses/function_tool_param.py b/src/openai/types/responses/function_tool_param.py new file mode 100644 index 0000000000..774a22e336 --- /dev/null +++ b/src/openai/types/responses/function_tool_param.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["FunctionToolParam"] + + +class FunctionToolParam(TypedDict, total=False): + name: Required[str] + """The name of the function to call.""" + + parameters: Required[Dict[str, object]] + """A JSON schema object describing the parameters of the function.""" + + strict: Required[bool] + """Whether to enforce strict parameter validation. Default `true`.""" + + type: Required[Literal["function"]] + """The type of the function tool. Always `function`.""" + + description: Optional[str] + """A description of the function. + + Used by the model to determine whether or not to call the function. + """ diff --git a/src/openai/types/responses/input_item_list_params.py b/src/openai/types/responses/input_item_list_params.py new file mode 100644 index 0000000000..e0b71f1ac5 --- /dev/null +++ b/src/openai/types/responses/input_item_list_params.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["InputItemListParams"] + + +class InputItemListParams(TypedDict, total=False): + after: str + """An item ID to list items after, used in pagination.""" + + before: str + """An item ID to list items before, used in pagination.""" + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ + + order: Literal["asc", "desc"] + """The order to return the input items in. Default is `asc`. + + - `asc`: Return the input items in ascending order. + - `desc`: Return the input items in descending order. + """ diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py new file mode 100644 index 0000000000..ec1b199f64 --- /dev/null +++ b/src/openai/types/responses/response.py @@ -0,0 +1,188 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Union, Optional +from typing_extensions import Literal, TypeAlias + +from .tool import Tool +from ..._models import BaseModel +from .response_error import ResponseError +from .response_usage import ResponseUsage +from .response_status import ResponseStatus +from ..shared.metadata import Metadata +from ..shared.reasoning import Reasoning +from .tool_choice_types import ToolChoiceTypes +from ..shared.chat_model import ChatModel +from .tool_choice_options import ToolChoiceOptions +from .response_output_item import ResponseOutputItem +from .response_text_config import ResponseTextConfig +from .tool_choice_function import ToolChoiceFunction + +__all__ = ["Response", "IncompleteDetails", "ToolChoice"] + + +class IncompleteDetails(BaseModel): + reason: Optional[Literal["max_output_tokens", "content_filter"]] = None + """The reason why the response is incomplete.""" + + +ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceTypes, ToolChoiceFunction] + + +class Response(BaseModel): + id: str + """Unique identifier for this Response.""" + + created_at: float + """Unix timestamp (in seconds) of when this Response was created.""" + + error: Optional[ResponseError] = None + """An error object returned when the model fails to generate a Response.""" + + incomplete_details: Optional[IncompleteDetails] = None + """Details about why the response is incomplete.""" + + instructions: Optional[str] = None + """ + Inserts a system (or developer) message as the first item in the model's + context. + + When using along with `previous_response_id`, the instructions from a previous + response will not be carried over to the next response. This makes it simple + to swap out system (or developer) messages in new responses. + """ + + metadata: Optional[Metadata] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + model: Union[str, ChatModel] + """Model ID used to generate the response, like `gpt-4o` or `o1`. + + OpenAI offers a wide range of models with different capabilities, performance + characteristics, and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + """ + + object: Literal["response"] + """The object type of this resource - always set to `response`.""" + + output: List[ResponseOutputItem] + """An array of content items generated by the model. + + - The length and order of items in the `output` array is dependent on the + model's response. + - Rather than accessing the first item in the `output` array and assuming it's + an `assistant` message with the content generated by the model, you might + consider using the `output_text` property where supported in SDKs. + """ + + parallel_tool_calls: bool + """Whether to allow the model to run tool calls in parallel.""" + + temperature: Optional[float] = None + """What sampling temperature to use, between 0 and 2. + + Higher values like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. We generally recommend altering + this or `top_p` but not both. + """ + + tool_choice: ToolChoice + """ + How the model should select which tool (or tools) to use when generating a + response.
See the `tools` parameter to see how to specify which tools the model + can call. + """ + + tools: List[Tool] + """An array of tools the model may call while generating a response. + + You can specify which tool to use by setting the `tool_choice` parameter. + + The two categories of tools you can provide the model are: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search). + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). + - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code. Learn more about + [function calling](https://platform.openai.com/docs/guides/function-calling). + """ + + top_p: Optional[float] = None + """ + An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + """ + + max_output_tokens: Optional[int] = None + """ + An upper bound for the number of tokens that can be generated for a response, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + """ + + previous_response_id: Optional[str] = None + """The unique ID of the previous response to the model. + + Use this to create multi-turn conversations. Learn more about + [conversation state](https://platform.openai.com/docs/guides/conversation-state). + """ + + reasoning: Optional[Reasoning] = None + """**o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + """ + + status: Optional[ResponseStatus] = None + """The status of the response generation. + + One of `completed`, `failed`, `in_progress`, or `incomplete`. + """ + + text: Optional[ResponseTextConfig] = None + """Configuration options for a text response from the model. + + Can be plain text or structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + + truncation: Optional[Literal["auto", "disabled"]] = None + """The truncation strategy to use for the model response. + + - `auto`: If the context of this response and previous ones exceeds the model's + context window size, the model will truncate the response to fit the context + window by dropping input items in the middle of the conversation. + - `disabled` (default): If a model response will exceed the context window size + for a model, the request will fail with a 400 error. + """ + + usage: Optional[ResponseUsage] = None + """ + Represents token usage details including input tokens, output tokens, a + breakdown of output tokens, and the total tokens used. + """ + + user: Optional[str] = None + """ + A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
+ """ diff --git a/src/openai/types/responses/response_audio_delta_event.py b/src/openai/types/responses/response_audio_delta_event.py new file mode 100644 index 0000000000..f3d77fac52 --- /dev/null +++ b/src/openai/types/responses/response_audio_delta_event.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseAudioDeltaEvent"] + + +class ResponseAudioDeltaEvent(BaseModel): + delta: str + """A chunk of Base64 encoded response audio bytes.""" + + type: Literal["response.audio.delta"] + """The type of the event. Always `response.audio.delta`.""" diff --git a/src/openai/types/responses/response_audio_done_event.py b/src/openai/types/responses/response_audio_done_event.py new file mode 100644 index 0000000000..5654f8e398 --- /dev/null +++ b/src/openai/types/responses/response_audio_done_event.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseAudioDoneEvent"] + + +class ResponseAudioDoneEvent(BaseModel): + type: Literal["response.audio.done"] + """The type of the event. Always `response.audio.done`.""" diff --git a/src/openai/types/responses/response_audio_transcript_delta_event.py b/src/openai/types/responses/response_audio_transcript_delta_event.py new file mode 100644 index 0000000000..69b6660f3f --- /dev/null +++ b/src/openai/types/responses/response_audio_transcript_delta_event.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseAudioTranscriptDeltaEvent"] + + +class ResponseAudioTranscriptDeltaEvent(BaseModel): + delta: str + """The partial transcript of the audio response.""" + + type: Literal["response.audio.transcript.delta"] + """The type of the event. Always `response.audio.transcript.delta`.""" diff --git a/src/openai/types/responses/response_audio_transcript_done_event.py b/src/openai/types/responses/response_audio_transcript_done_event.py new file mode 100644 index 0000000000..1a20319f83 --- /dev/null +++ b/src/openai/types/responses/response_audio_transcript_done_event.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseAudioTranscriptDoneEvent"] + + +class ResponseAudioTranscriptDoneEvent(BaseModel): + type: Literal["response.audio.transcript.done"] + """The type of the event. Always `response.audio.transcript.done`.""" diff --git a/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py b/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py new file mode 100644 index 0000000000..7527238d06 --- /dev/null +++ b/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseCodeInterpreterCallCodeDeltaEvent"] + + +class ResponseCodeInterpreterCallCodeDeltaEvent(BaseModel): + delta: str + """The partial code snippet added by the code interpreter.""" + + output_index: int + """The index of the output item that the code interpreter call is in progress.""" + + type: Literal["response.code_interpreter_call.code.delta"] + """The type of the event. Always `response.code_interpreter_call.code.delta`.""" diff --git a/src/openai/types/responses/response_code_interpreter_call_code_done_event.py b/src/openai/types/responses/response_code_interpreter_call_code_done_event.py new file mode 100644 index 0000000000..f84d4cf3e8 --- /dev/null +++ b/src/openai/types/responses/response_code_interpreter_call_code_done_event.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseCodeInterpreterCallCodeDoneEvent"] + + +class ResponseCodeInterpreterCallCodeDoneEvent(BaseModel): + code: str + """The final code snippet output by the code interpreter.""" + + output_index: int + """The index of the output item that the code interpreter call is in progress.""" + + type: Literal["response.code_interpreter_call.code.done"] + """The type of the event. Always `response.code_interpreter_call.code.done`.""" diff --git a/src/openai/types/responses/response_code_interpreter_call_completed_event.py b/src/openai/types/responses/response_code_interpreter_call_completed_event.py new file mode 100644 index 0000000000..b0cb73fb72 --- /dev/null +++ b/src/openai/types/responses/response_code_interpreter_call_completed_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel +from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall + +__all__ = ["ResponseCodeInterpreterCallCompletedEvent"] + + +class ResponseCodeInterpreterCallCompletedEvent(BaseModel): + code_interpreter_call: ResponseCodeInterpreterToolCall + """A tool call to run code.""" + + output_index: int + """The index of the output item that the code interpreter call is in progress.""" + + type: Literal["response.code_interpreter_call.completed"] + """The type of the event. Always `response.code_interpreter_call.completed`.""" diff --git a/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py b/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py new file mode 100644 index 0000000000..64b739f308 --- /dev/null +++ b/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel +from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall + +__all__ = ["ResponseCodeInterpreterCallInProgressEvent"] + + +class ResponseCodeInterpreterCallInProgressEvent(BaseModel): + code_interpreter_call: ResponseCodeInterpreterToolCall + """A tool call to run code.""" + + output_index: int + """The index of the output item that the code interpreter call is in progress.""" + + type: Literal["response.code_interpreter_call.in_progress"] + """The type of the event. 
Always `response.code_interpreter_call.in_progress`.""" diff --git a/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py b/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py new file mode 100644 index 0000000000..3100eac175 --- /dev/null +++ b/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel +from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall + +__all__ = ["ResponseCodeInterpreterCallInterpretingEvent"] + + +class ResponseCodeInterpreterCallInterpretingEvent(BaseModel): + code_interpreter_call: ResponseCodeInterpreterToolCall + """A tool call to run code.""" + + output_index: int + """The index of the output item that the code interpreter call is in progress.""" + + type: Literal["response.code_interpreter_call.interpreting"] + """The type of the event. Always `response.code_interpreter_call.interpreting`.""" diff --git a/src/openai/types/responses/response_code_interpreter_tool_call.py b/src/openai/types/responses/response_code_interpreter_tool_call.py new file mode 100644 index 0000000000..d5a5057074 --- /dev/null +++ b/src/openai/types/responses/response_code_interpreter_tool_call.py @@ -0,0 +1,52 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel + +__all__ = ["ResponseCodeInterpreterToolCall", "Result", "ResultLogs", "ResultFiles", "ResultFilesFile"] + + +class ResultLogs(BaseModel): + logs: str + """The logs of the code interpreter tool call.""" + + type: Literal["logs"] + """The type of the code interpreter text output. Always `logs`.""" + + +class ResultFilesFile(BaseModel): + file_id: str + """The ID of the file.""" + + mime_type: str + """The MIME type of the file.""" + + +class ResultFiles(BaseModel): + files: List[ResultFilesFile] + + type: Literal["files"] + """The type of the code interpreter file output. Always `files`.""" + + +Result: TypeAlias = Annotated[Union[ResultLogs, ResultFiles], PropertyInfo(discriminator="type")] + + +class ResponseCodeInterpreterToolCall(BaseModel): + id: str + """The unique ID of the code interpreter tool call.""" + + code: str + """The code to run.""" + + results: List[Result] + """The results of the code interpreter tool call.""" + + status: Literal["in_progress", "interpreting", "completed"] + """The status of the code interpreter tool call.""" + + type: Literal["code_interpreter_call"] + """The type of the code interpreter tool call. Always `code_interpreter_call`.""" diff --git a/src/openai/types/responses/response_completed_event.py b/src/openai/types/responses/response_completed_event.py new file mode 100644 index 0000000000..a944f248ef --- /dev/null +++ b/src/openai/types/responses/response_completed_event.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .response import Response +from ..._models import BaseModel + +__all__ = ["ResponseCompletedEvent"] + + +class ResponseCompletedEvent(BaseModel): + response: Response + """Properties of the completed response.""" + + type: Literal["response.completed"] + """The type of the event. 
Always `response.completed`.""" diff --git a/src/openai/types/responses/response_computer_tool_call.py b/src/openai/types/responses/response_computer_tool_call.py new file mode 100644 index 0000000000..994837567a --- /dev/null +++ b/src/openai/types/responses/response_computer_tool_call.py @@ -0,0 +1,212 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel + +__all__ = [ + "ResponseComputerToolCall", + "Action", + "ActionClick", + "ActionDoubleClick", + "ActionDrag", + "ActionDragPath", + "ActionKeypress", + "ActionMove", + "ActionScreenshot", + "ActionScroll", + "ActionType", + "ActionWait", + "PendingSafetyCheck", +] + + +class ActionClick(BaseModel): + button: Literal["left", "right", "wheel", "back", "forward"] + """Indicates which mouse button was pressed during the click. + + One of `left`, `right`, `wheel`, `back`, or `forward`. + """ + + type: Literal["click"] + """Specifies the event type. + + For a click action, this property is always set to `click`. + """ + + x: int + """The x-coordinate where the click occurred.""" + + y: int + """The y-coordinate where the click occurred.""" + + +class ActionDoubleClick(BaseModel): + type: Literal["double_click"] + """Specifies the event type. + + For a double click action, this property is always set to `double_click`. + """ + + x: int + """The x-coordinate where the double click occurred.""" + + y: int + """The y-coordinate where the double click occurred.""" + + +class ActionDragPath(BaseModel): + x: int + """The x-coordinate.""" + + y: int + """The y-coordinate.""" + + +class ActionDrag(BaseModel): + path: List[ActionDragPath] + """An array of coordinates representing the path of the drag action. + + Coordinates will appear as an array of objects, eg + + ``` + [ + { x: 100, y: 200 }, + { x: 200, y: 300 } + ] + ``` + """ + + type: Literal["drag"] + """Specifies the event type. + + For a drag action, this property is always set to `drag`. + """ + + +class ActionKeypress(BaseModel): + keys: List[str] + """The combination of keys the model is requesting to be pressed. + + This is an array of strings, each representing a key. + """ + + type: Literal["keypress"] + """Specifies the event type. + + For a keypress action, this property is always set to `keypress`. + """ + + +class ActionMove(BaseModel): + type: Literal["move"] + """Specifies the event type. + + For a move action, this property is always set to `move`. + """ + + x: int + """The x-coordinate to move to.""" + + y: int + """The y-coordinate to move to.""" + + +class ActionScreenshot(BaseModel): + type: Literal["screenshot"] + """Specifies the event type. + + For a screenshot action, this property is always set to `screenshot`. + """ + + +class ActionScroll(BaseModel): + scroll_x: int + """The horizontal scroll distance.""" + + scroll_y: int + """The vertical scroll distance.""" + + type: Literal["scroll"] + """Specifies the event type. + + For a scroll action, this property is always set to `scroll`. + """ + + x: int + """The x-coordinate where the scroll occurred.""" + + y: int + """The y-coordinate where the scroll occurred.""" + + +class ActionType(BaseModel): + text: str + """The text to type.""" + + type: Literal["type"] + """Specifies the event type. + + For a type action, this property is always set to `type`. 
+ """ + + +class ActionWait(BaseModel): + type: Literal["wait"] + """Specifies the event type. + + For a wait action, this property is always set to `wait`. + """ + + +Action: TypeAlias = Annotated[ + Union[ + ActionClick, + ActionDoubleClick, + ActionDrag, + ActionKeypress, + ActionMove, + ActionScreenshot, + ActionScroll, + ActionType, + ActionWait, + ], + PropertyInfo(discriminator="type"), +] + + +class PendingSafetyCheck(BaseModel): + id: str + """The ID of the pending safety check.""" + + code: str + """The type of the pending safety check.""" + + message: str + """Details about the pending safety check.""" + + +class ResponseComputerToolCall(BaseModel): + id: str + """The unique ID of the computer call.""" + + action: Action + """A click action.""" + + call_id: str + """An identifier used when responding to the tool call with output.""" + + pending_safety_checks: List[PendingSafetyCheck] + """The pending safety checks for the computer call.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + type: Literal["computer_call"] + """The type of the computer call. Always `computer_call`.""" diff --git a/src/openai/types/responses/response_computer_tool_call_param.py b/src/openai/types/responses/response_computer_tool_call_param.py new file mode 100644 index 0000000000..d4ef56ab5c --- /dev/null +++ b/src/openai/types/responses/response_computer_tool_call_param.py @@ -0,0 +1,208 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = [ + "ResponseComputerToolCallParam", + "Action", + "ActionClick", + "ActionDoubleClick", + "ActionDrag", + "ActionDragPath", + "ActionKeypress", + "ActionMove", + "ActionScreenshot", + "ActionScroll", + "ActionType", + "ActionWait", + "PendingSafetyCheck", +] + + +class ActionClick(TypedDict, total=False): + button: Required[Literal["left", "right", "wheel", "back", "forward"]] + """Indicates which mouse button was pressed during the click. + + One of `left`, `right`, `wheel`, `back`, or `forward`. + """ + + type: Required[Literal["click"]] + """Specifies the event type. + + For a click action, this property is always set to `click`. + """ + + x: Required[int] + """The x-coordinate where the click occurred.""" + + y: Required[int] + """The y-coordinate where the click occurred.""" + + +class ActionDoubleClick(TypedDict, total=False): + type: Required[Literal["double_click"]] + """Specifies the event type. + + For a double click action, this property is always set to `double_click`. + """ + + x: Required[int] + """The x-coordinate where the double click occurred.""" + + y: Required[int] + """The y-coordinate where the double click occurred.""" + + +class ActionDragPath(TypedDict, total=False): + x: Required[int] + """The x-coordinate.""" + + y: Required[int] + """The y-coordinate.""" + + +class ActionDrag(TypedDict, total=False): + path: Required[Iterable[ActionDragPath]] + """An array of coordinates representing the path of the drag action. + + Coordinates will appear as an array of objects, eg + + ``` + [ + { x: 100, y: 200 }, + { x: 200, y: 300 } + ] + ``` + """ + + type: Required[Literal["drag"]] + """Specifies the event type. + + For a drag action, this property is always set to `drag`. 
+ """ + + +class ActionKeypress(TypedDict, total=False): + keys: Required[List[str]] + """The combination of keys the model is requesting to be pressed. + + This is an array of strings, each representing a key. + """ + + type: Required[Literal["keypress"]] + """Specifies the event type. + + For a keypress action, this property is always set to `keypress`. + """ + + +class ActionMove(TypedDict, total=False): + type: Required[Literal["move"]] + """Specifies the event type. + + For a move action, this property is always set to `move`. + """ + + x: Required[int] + """The x-coordinate to move to.""" + + y: Required[int] + """The y-coordinate to move to.""" + + +class ActionScreenshot(TypedDict, total=False): + type: Required[Literal["screenshot"]] + """Specifies the event type. + + For a screenshot action, this property is always set to `screenshot`. + """ + + +class ActionScroll(TypedDict, total=False): + scroll_x: Required[int] + """The horizontal scroll distance.""" + + scroll_y: Required[int] + """The vertical scroll distance.""" + + type: Required[Literal["scroll"]] + """Specifies the event type. + + For a scroll action, this property is always set to `scroll`. + """ + + x: Required[int] + """The x-coordinate where the scroll occurred.""" + + y: Required[int] + """The y-coordinate where the scroll occurred.""" + + +class ActionType(TypedDict, total=False): + text: Required[str] + """The text to type.""" + + type: Required[Literal["type"]] + """Specifies the event type. + + For a type action, this property is always set to `type`. + """ + + +class ActionWait(TypedDict, total=False): + type: Required[Literal["wait"]] + """Specifies the event type. + + For a wait action, this property is always set to `wait`. + """ + + +Action: TypeAlias = Union[ + ActionClick, + ActionDoubleClick, + ActionDrag, + ActionKeypress, + ActionMove, + ActionScreenshot, + ActionScroll, + ActionType, + ActionWait, +] + + +class PendingSafetyCheck(TypedDict, total=False): + id: Required[str] + """The ID of the pending safety check.""" + + code: Required[str] + """The type of the pending safety check.""" + + message: Required[str] + """Details about the pending safety check.""" + + +class ResponseComputerToolCallParam(TypedDict, total=False): + id: Required[str] + """The unique ID of the computer call.""" + + action: Required[Action] + """A click action.""" + + call_id: Required[str] + """An identifier used when responding to the tool call with output.""" + + pending_safety_checks: Required[Iterable[PendingSafetyCheck]] + """The pending safety checks for the computer call.""" + + status: Required[Literal["in_progress", "completed", "incomplete"]] + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + type: Required[Literal["computer_call"]] + """The type of the computer call. Always `computer_call`.""" diff --git a/src/openai/types/responses/response_content_part_added_event.py b/src/openai/types/responses/response_content_part_added_event.py new file mode 100644 index 0000000000..93f5ec4b0c --- /dev/null +++ b/src/openai/types/responses/response_content_part_added_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
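Because `Action` is a discriminated union, a driver loop can branch on `action.type` to replay each requested step and then report back a screenshot. A minimal sketch, assuming a hypothetical `desktop` helper that performs the real clicks and captures; the reply dict follows the `computer_call_output` input item defined later in this patch.

```
def perform_computer_call(call, desktop) -> dict:
    """Replay one ResponseComputerToolCall and build the follow-up computer_call_output item."""
    action = call.action
    if action.type == "click":
        desktop.click(action.x, action.y, button=action.button)
    elif action.type == "double_click":
        desktop.double_click(action.x, action.y)
    elif action.type == "scroll":
        desktop.scroll(action.x, action.y, dx=action.scroll_x, dy=action.scroll_y)
    elif action.type == "type":
        desktop.type_text(action.text)
    elif action.type == "keypress":
        desktop.press(action.keys)
    elif action.type == "drag":
        desktop.drag([(point.x, point.y) for point in action.path])
    elif action.type == "wait":
        desktop.wait()
    # "move" and "screenshot" are handled the same way; every branch ends with a fresh capture.
    return {
        "type": "computer_call_output",
        "call_id": call.call_id,
        "output": {"type": "computer_screenshot", "image_url": desktop.screenshot_data_url()},
    }
```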
+ +from typing import Union +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel +from .response_output_text import ResponseOutputText +from .response_output_refusal import ResponseOutputRefusal + +__all__ = ["ResponseContentPartAddedEvent", "Part"] + +Part: TypeAlias = Annotated[Union[ResponseOutputText, ResponseOutputRefusal], PropertyInfo(discriminator="type")] + + +class ResponseContentPartAddedEvent(BaseModel): + content_index: int + """The index of the content part that was added.""" + + item_id: str + """The ID of the output item that the content part was added to.""" + + output_index: int + """The index of the output item that the content part was added to.""" + + part: Part + """The content part that was added.""" + + type: Literal["response.content_part.added"] + """The type of the event. Always `response.content_part.added`.""" diff --git a/src/openai/types/responses/response_content_part_done_event.py b/src/openai/types/responses/response_content_part_done_event.py new file mode 100644 index 0000000000..4ec0739877 --- /dev/null +++ b/src/openai/types/responses/response_content_part_done_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel +from .response_output_text import ResponseOutputText +from .response_output_refusal import ResponseOutputRefusal + +__all__ = ["ResponseContentPartDoneEvent", "Part"] + +Part: TypeAlias = Annotated[Union[ResponseOutputText, ResponseOutputRefusal], PropertyInfo(discriminator="type")] + + +class ResponseContentPartDoneEvent(BaseModel): + content_index: int + """The index of the content part that is done.""" + + item_id: str + """The ID of the output item that the content part was added to.""" + + output_index: int + """The index of the output item that the content part was added to.""" + + part: Part + """The content part that is done.""" + + type: Literal["response.content_part.done"] + """The type of the event. Always `response.content_part.done`.""" diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py new file mode 100644 index 0000000000..d5b2fdeb1a --- /dev/null +++ b/src/openai/types/responses/response_create_params.py @@ -0,0 +1,204 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
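The two content-part events bracket each piece of assistant output: `added` announces a part and `done` delivers its final state. A small sketch of collecting finished text, assuming the `output_text` part exposes its string as `.text` (per the `ResponseOutputText` model referenced above, which is defined elsewhere in this patch).

```
def collect_output_text(events) -> list[str]:
    """Return the text of every completed output_text part, in arrival order."""
    texts: list[str] = []
    for event in events:
        if event.type == "response.content_part.done" and event.part.type == "output_text":
            texts.append(event.part.text)  # refusal parts are ignored in this sketch
    return texts
```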
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .tool_param import ToolParam
+from ..shared.chat_model import ChatModel
+from .response_includable import ResponseIncludable
+from .tool_choice_options import ToolChoiceOptions
+from .response_input_param import ResponseInputParam
+from ..shared_params.metadata import Metadata
+from .tool_choice_types_param import ToolChoiceTypesParam
+from ..shared_params.reasoning import Reasoning
+from .response_text_config_param import ResponseTextConfigParam
+from .tool_choice_function_param import ToolChoiceFunctionParam
+
+__all__ = [
+    "ResponseCreateParamsBase",
+    "ToolChoice",
+    "ResponseCreateParamsNonStreaming",
+    "ResponseCreateParamsStreaming",
+]
+
+
+class ResponseCreateParamsBase(TypedDict, total=False):
+    input: Required[Union[str, ResponseInputParam]]
+    """Text, image, or file inputs to the model, used to generate a response.
+
+    Learn more:
+
+    - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+    - [Image inputs](https://platform.openai.com/docs/guides/images)
+    - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+    - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+    - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+    """
+
+    model: Required[Union[str, ChatModel]]
+    """Model ID used to generate the response, like `gpt-4o` or `o1`.
+
+    OpenAI offers a wide range of models with different capabilities, performance
+    characteristics, and price points. Refer to the
+    [model guide](https://platform.openai.com/docs/models) to browse and compare
+    available models.
+    """
+
+    include: Optional[List[ResponseIncludable]]
+    """Specify additional output data to include in the model response.
+
+    Currently supported values are:
+
+    - `file_search_call.results`: Include the search results of the file search tool
+      call.
+    - `message.input_image.image_url`: Include image urls from the input message.
+    - `computer_call_output.output.image_url`: Include image urls from the computer
+      call output.
+    """
+
+    instructions: Optional[str]
+    """
+    Inserts a system (or developer) message as the first item in the model's
+    context.
+
+    When used along with `previous_response_id`, the instructions from a previous
+    response will not be carried over to the next response. This makes it simple
+    to swap out system (or developer) messages in new responses.
+    """
+
+    max_output_tokens: Optional[int]
+    """
+    An upper bound for the number of tokens that can be generated for a response,
+    including visible output tokens and
+    [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    parallel_tool_calls: Optional[bool]
+    """Whether to allow the model to run tool calls in parallel."""
+
+    previous_response_id: Optional[str]
+    """The unique ID of the previous response to the model.
+
+    Use this to create multi-turn conversations. Learn more about
+    [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ """ + + reasoning: Optional[Reasoning] + """**o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + """ + + store: Optional[bool] + """Whether to store the generated model response for later retrieval via API.""" + + temperature: Optional[float] + """What sampling temperature to use, between 0 and 2. + + Higher values like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. We generally recommend altering + this or `top_p` but not both. + """ + + text: ResponseTextConfigParam + """Configuration options for a text response from the model. + + Can be plain text or structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + + tool_choice: ToolChoice + """ + How the model should select which tool (or tools) to use when generating a + response. See the `tools` parameter to see how to specify which tools the model + can call. + """ + + tools: Iterable[ToolParam] + """An array of tools the model may call while generating a response. + + You can specify which tool to use by setting the `tool_choice` parameter. + + The two categories of tools you can provide the model are: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search). + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). + - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code. Learn more about + [function calling](https://platform.openai.com/docs/guides/function-calling). + """ + + top_p: Optional[float] + """ + An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + """ + + truncation: Optional[Literal["auto", "disabled"]] + """The truncation strategy to use for the model response. + + - `auto`: If the context of this response and previous ones exceeds the model's + context window size, the model will truncate the response to fit the context + window by dropping input items in the middle of the conversation. + - `disabled` (default): If a model response will exceed the context window size + for a model, the request will fail with a 400 error. + """ + + user: str + """ + A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + """ + + +ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceTypesParam, ToolChoiceFunctionParam] + + +class ResponseCreateParamsNonStreaming(ResponseCreateParamsBase, total=False): + stream: Optional[Literal[False]] + """ + If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). 
+ See the + [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + for more information. + """ + + +class ResponseCreateParamsStreaming(ResponseCreateParamsBase): + stream: Required[Literal[True]] + """ + If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + for more information. + """ + + +ResponseCreateParams = Union[ResponseCreateParamsNonStreaming, ResponseCreateParamsStreaming] diff --git a/src/openai/types/responses/response_created_event.py b/src/openai/types/responses/response_created_event.py new file mode 100644 index 0000000000..7a524cec87 --- /dev/null +++ b/src/openai/types/responses/response_created_event.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .response import Response +from ..._models import BaseModel + +__all__ = ["ResponseCreatedEvent"] + + +class ResponseCreatedEvent(BaseModel): + response: Response + """The response that was created.""" + + type: Literal["response.created"] + """The type of the event. Always `response.created`.""" diff --git a/src/openai/types/responses/response_error.py b/src/openai/types/responses/response_error.py new file mode 100644 index 0000000000..90f1fcf5da --- /dev/null +++ b/src/openai/types/responses/response_error.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseError"] + + +class ResponseError(BaseModel): + code: Literal[ + "server_error", + "rate_limit_exceeded", + "invalid_prompt", + "vector_store_timeout", + "invalid_image", + "invalid_image_format", + "invalid_base64_image", + "invalid_image_url", + "image_too_large", + "image_too_small", + "image_parse_error", + "image_content_policy_violation", + "invalid_image_mode", + "image_file_too_large", + "unsupported_image_media_type", + "empty_image_file", + "failed_to_download_image", + "image_file_not_found", + ] + """The error code for the response.""" + + message: str + """A human-readable description of the error.""" diff --git a/src/openai/types/responses/response_error_event.py b/src/openai/types/responses/response_error_event.py new file mode 100644 index 0000000000..1b7e605d02 --- /dev/null +++ b/src/openai/types/responses/response_error_event.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseErrorEvent"] + + +class ResponseErrorEvent(BaseModel): + code: Optional[str] = None + """The error code.""" + + message: str + """The error message.""" + + param: Optional[str] = None + """The error parameter.""" + + type: Literal["error"] + """The type of the event. Always `error`.""" diff --git a/src/openai/types/responses/response_failed_event.py b/src/openai/types/responses/response_failed_event.py new file mode 100644 index 0000000000..3e8f75d8c4 --- /dev/null +++ b/src/openai/types/responses/response_failed_event.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .response import Response +from ..._models import BaseModel + +__all__ = ["ResponseFailedEvent"] + + +class ResponseFailedEvent(BaseModel): + response: Response + """The response that failed.""" + + type: Literal["response.failed"] + """The type of the event. Always `response.failed`.""" diff --git a/src/openai/types/responses/response_file_search_call_completed_event.py b/src/openai/types/responses/response_file_search_call_completed_event.py new file mode 100644 index 0000000000..4b86083369 --- /dev/null +++ b/src/openai/types/responses/response_file_search_call_completed_event.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFileSearchCallCompletedEvent"] + + +class ResponseFileSearchCallCompletedEvent(BaseModel): + item_id: str + """The ID of the output item that the file search call is initiated.""" + + output_index: int + """The index of the output item that the file search call is initiated.""" + + type: Literal["response.file_search_call.completed"] + """The type of the event. Always `response.file_search_call.completed`.""" diff --git a/src/openai/types/responses/response_file_search_call_in_progress_event.py b/src/openai/types/responses/response_file_search_call_in_progress_event.py new file mode 100644 index 0000000000..eb42e3dad6 --- /dev/null +++ b/src/openai/types/responses/response_file_search_call_in_progress_event.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFileSearchCallInProgressEvent"] + + +class ResponseFileSearchCallInProgressEvent(BaseModel): + item_id: str + """The ID of the output item that the file search call is initiated.""" + + output_index: int + """The index of the output item that the file search call is initiated.""" + + type: Literal["response.file_search_call.in_progress"] + """The type of the event. Always `response.file_search_call.in_progress`.""" diff --git a/src/openai/types/responses/response_file_search_call_searching_event.py b/src/openai/types/responses/response_file_search_call_searching_event.py new file mode 100644 index 0000000000..3cd8905de6 --- /dev/null +++ b/src/openai/types/responses/response_file_search_call_searching_event.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFileSearchCallSearchingEvent"] + + +class ResponseFileSearchCallSearchingEvent(BaseModel): + item_id: str + """The ID of the output item that the file search call is initiated.""" + + output_index: int + """The index of the output item that the file search call is searching.""" + + type: Literal["response.file_search_call.searching"] + """The type of the event. Always `response.file_search_call.searching`.""" diff --git a/src/openai/types/responses/response_file_search_tool_call.py b/src/openai/types/responses/response_file_search_tool_call.py new file mode 100644 index 0000000000..ef1c6a5608 --- /dev/null +++ b/src/openai/types/responses/response_file_search_tool_call.py @@ -0,0 +1,51 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
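The request parameters above and the lifecycle events (`response.created`, `response.completed`, `response.failed`, `error`, and the file-search call events) are meant to be used together. A minimal end-to-end sketch, assuming a `client.responses.create` method as implied by these params and that `ToolParam` accepts the file-search payload shown; the model name and vector store ID are illustrative.

```
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Keyword arguments mirror ResponseCreateParamsStreaming.
stream = client.responses.create(  # assumed method; added elsewhere in this patch
    model="gpt-4o",
    input="Summarise the attached report in three bullet points.",
    tools=[{"type": "file_search", "vector_store_ids": ["vs_123"]}],  # ToolParam shape assumed
    tool_choice="auto",
    truncation="auto",
    stream=True,
)

for event in stream:
    if event.type == "response.created":
        print("response started")
    elif event.type == "response.file_search_call.completed":
        print("file search finished for output item", event.output_index)
    elif event.type == "response.completed":
        print(event.response.usage)  # token accounting from the completed Response
    elif event.type in ("response.failed", "error"):
        print("stream ended with an error")
        break
```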
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFileSearchToolCall", "Result"]
+
+
+class Result(BaseModel):
+    attributes: Optional[Dict[str, Union[str, float, bool]]] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
+    file_id: Optional[str] = None
+    """The unique ID of the file."""
+
+    filename: Optional[str] = None
+    """The name of the file."""
+
+    score: Optional[float] = None
+    """The relevance score of the file - a value between 0 and 1."""
+
+    text: Optional[str] = None
+    """The text that was retrieved from the file."""
+
+
+class ResponseFileSearchToolCall(BaseModel):
+    id: str
+    """The unique ID of the file search tool call."""
+
+    queries: List[str]
+    """The queries used to search for files."""
+
+    status: Literal["in_progress", "searching", "completed", "incomplete", "failed"]
+    """The status of the file search tool call.
+
+    One of `in_progress`, `searching`, `completed`, `incomplete`, or `failed`.
+    """
+
+    type: Literal["file_search_call"]
+    """The type of the file search tool call. Always `file_search_call`."""
+
+    results: Optional[List[Result]] = None
+    """The results of the file search tool call."""
diff --git a/src/openai/types/responses/response_file_search_tool_call_param.py b/src/openai/types/responses/response_file_search_tool_call_param.py
new file mode 100644
index 0000000000..9a4177cf81
--- /dev/null
+++ b/src/openai/types/responses/response_file_search_tool_call_param.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFileSearchToolCallParam", "Result"]
+
+
+class Result(TypedDict, total=False):
+    attributes: Optional[Dict[str, Union[str, float, bool]]]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
+    file_id: str
+    """The unique ID of the file."""
+
+    filename: str
+    """The name of the file."""
+
+    score: float
+    """The relevance score of the file - a value between 0 and 1."""
+
+    text: str
+    """The text that was retrieved from the file."""
+
+
+class ResponseFileSearchToolCallParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the file search tool call."""
+
+    queries: Required[List[str]]
+    """The queries used to search for files."""
+
+    status: Required[Literal["in_progress", "searching", "completed", "incomplete", "failed"]]
+    """The status of the file search tool call.
+
+    One of `in_progress`, `searching`, `completed`, `incomplete`, or `failed`.
+    """
+
+    type: Required[Literal["file_search_call"]]
+    """The type of the file search tool call.
Always `file_search_call`.""" + + results: Optional[Iterable[Result]] + """The results of the file search tool call.""" diff --git a/src/openai/types/responses/response_format_text_config.py b/src/openai/types/responses/response_format_text_config.py new file mode 100644 index 0000000000..a4896bf9fe --- /dev/null +++ b/src/openai/types/responses/response_format_text_config.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..shared.response_format_text import ResponseFormatText +from ..shared.response_format_json_object import ResponseFormatJSONObject +from .response_format_text_json_schema_config import ResponseFormatTextJSONSchemaConfig + +__all__ = ["ResponseFormatTextConfig"] + +ResponseFormatTextConfig: TypeAlias = Annotated[ + Union[ResponseFormatText, ResponseFormatTextJSONSchemaConfig, ResponseFormatJSONObject], + PropertyInfo(discriminator="type"), +] diff --git a/src/openai/types/responses/response_format_text_config_param.py b/src/openai/types/responses/response_format_text_config_param.py new file mode 100644 index 0000000000..fcaf8f3fb6 --- /dev/null +++ b/src/openai/types/responses/response_format_text_config_param.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from ..shared_params.response_format_text import ResponseFormatText +from ..shared_params.response_format_json_object import ResponseFormatJSONObject +from .response_format_text_json_schema_config_param import ResponseFormatTextJSONSchemaConfigParam + +__all__ = ["ResponseFormatTextConfigParam"] + +ResponseFormatTextConfigParam: TypeAlias = Union[ + ResponseFormatText, ResponseFormatTextJSONSchemaConfigParam, ResponseFormatJSONObject +] diff --git a/src/openai/types/responses/response_format_text_json_schema_config.py b/src/openai/types/responses/response_format_text_json_schema_config.py new file mode 100644 index 0000000000..3cf066370f --- /dev/null +++ b/src/openai/types/responses/response_format_text_json_schema_config.py @@ -0,0 +1,43 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, Optional +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from ..._models import BaseModel + +__all__ = ["ResponseFormatTextJSONSchemaConfig"] + + +class ResponseFormatTextJSONSchemaConfig(BaseModel): + schema_: Dict[str, object] = FieldInfo(alias="schema") + """ + The schema for the response format, described as a JSON Schema object. Learn how + to build JSON schemas [here](https://json-schema.org/). + """ + + type: Literal["json_schema"] + """The type of response format being defined. Always `json_schema`.""" + + description: Optional[str] = None + """ + A description of what the response format is for, used by the model to determine + how to respond in the format. + """ + + name: Optional[str] = None + """The name of the response format. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + strict: Optional[bool] = None + """ + Whether to enable strict schema adherence when generating the output. If set to + true, the model will always follow the exact schema defined in the `schema` + field. 
Only a subset of JSON Schema is supported when `strict` is `true`. To + learn more, read the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + """ diff --git a/src/openai/types/responses/response_format_text_json_schema_config_param.py b/src/openai/types/responses/response_format_text_json_schema_config_param.py new file mode 100644 index 0000000000..211c5d1eff --- /dev/null +++ b/src/openai/types/responses/response_format_text_json_schema_config_param.py @@ -0,0 +1,41 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseFormatTextJSONSchemaConfigParam"] + + +class ResponseFormatTextJSONSchemaConfigParam(TypedDict, total=False): + schema: Required[Dict[str, object]] + """ + The schema for the response format, described as a JSON Schema object. Learn how + to build JSON schemas [here](https://json-schema.org/). + """ + + type: Required[Literal["json_schema"]] + """The type of response format being defined. Always `json_schema`.""" + + description: str + """ + A description of what the response format is for, used by the model to determine + how to respond in the format. + """ + + name: str + """The name of the response format. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + strict: Optional[bool] + """ + Whether to enable strict schema adherence when generating the output. If set to + true, the model will always follow the exact schema defined in the `schema` + field. Only a subset of JSON Schema is supported when `strict` is `true`. To + learn more, read the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + """ diff --git a/src/openai/types/responses/response_function_call_arguments_delta_event.py b/src/openai/types/responses/response_function_call_arguments_delta_event.py new file mode 100644 index 0000000000..0989b7caeb --- /dev/null +++ b/src/openai/types/responses/response_function_call_arguments_delta_event.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFunctionCallArgumentsDeltaEvent"] + + +class ResponseFunctionCallArgumentsDeltaEvent(BaseModel): + delta: str + """The function-call arguments delta that is added.""" + + item_id: str + """The ID of the output item that the function-call arguments delta is added to.""" + + output_index: int + """ + The index of the output item that the function-call arguments delta is added to. + """ + + type: Literal["response.function_call_arguments.delta"] + """The type of the event. Always `response.function_call_arguments.delta`.""" diff --git a/src/openai/types/responses/response_function_call_arguments_done_event.py b/src/openai/types/responses/response_function_call_arguments_done_event.py new file mode 100644 index 0000000000..1d805a57c6 --- /dev/null +++ b/src/openai/types/responses/response_function_call_arguments_done_event.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
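To request structured JSON output, the `json_schema` variant above is supplied through the `text` parameter of a create request. A sketch of the format dict alone, mirroring `ResponseFormatTextJSONSchemaConfigParam`; the schema itself is illustrative, and how the dict nests inside `ResponseTextConfigParam` is defined elsewhere in this patch.

```
# Mirrors ResponseFormatTextJSONSchemaConfigParam: a name, the strict flag, and a JSON Schema.
json_schema_format = {
    "type": "json_schema",
    "name": "math_answer",
    "strict": True,  # the model must follow the schema exactly
    "schema": {
        "type": "object",
        "properties": {
            "answer": {"type": "number"},
            "steps": {"type": "array", "items": {"type": "string"}},
        },
        "required": ["answer", "steps"],
        "additionalProperties": False,
    },
}
```

With `strict` set, only the supported subset of JSON Schema is accepted, as the docstrings above note.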
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFunctionCallArgumentsDoneEvent"] + + +class ResponseFunctionCallArgumentsDoneEvent(BaseModel): + arguments: str + """The function-call arguments.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item.""" + + type: Literal["response.function_call_arguments.done"] diff --git a/src/openai/types/responses/response_function_tool_call.py b/src/openai/types/responses/response_function_tool_call.py new file mode 100644 index 0000000000..5d82906cb7 --- /dev/null +++ b/src/openai/types/responses/response_function_tool_call.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFunctionToolCall"] + + +class ResponseFunctionToolCall(BaseModel): + id: str + """The unique ID of the function tool call.""" + + arguments: str + """A JSON string of the arguments to pass to the function.""" + + call_id: str + """The unique ID of the function tool call generated by the model.""" + + name: str + """The name of the function to run.""" + + type: Literal["function_call"] + """The type of the function tool call. Always `function_call`.""" + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ diff --git a/src/openai/types/responses/response_function_tool_call_param.py b/src/openai/types/responses/response_function_tool_call_param.py new file mode 100644 index 0000000000..51b947a764 --- /dev/null +++ b/src/openai/types/responses/response_function_tool_call_param.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseFunctionToolCallParam"] + + +class ResponseFunctionToolCallParam(TypedDict, total=False): + id: Required[str] + """The unique ID of the function tool call.""" + + arguments: Required[str] + """A JSON string of the arguments to pass to the function.""" + + call_id: Required[str] + """The unique ID of the function tool call generated by the model.""" + + name: Required[str] + """The name of the function to run.""" + + type: Required[Literal["function_call"]] + """The type of the function tool call. Always `function_call`.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ diff --git a/src/openai/types/responses/response_function_web_search.py b/src/openai/types/responses/response_function_web_search.py new file mode 100644 index 0000000000..44734b681f --- /dev/null +++ b/src/openai/types/responses/response_function_web_search.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
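Function-call arguments arrive as incremental JSON fragments keyed by `item_id`, with the complete string repeated on the `done` event. A minimal accumulator over the two event types above, assuming `events` is an iterable of parsed event objects.

```
import json
from collections import defaultdict

def collect_function_arguments(events) -> dict[str, dict]:
    """Map each function-call item_id to its parsed arguments once streaming finishes."""
    buffers: defaultdict[str, str] = defaultdict(str)
    parsed: dict[str, dict] = {}
    for event in events:
        if event.type == "response.function_call_arguments.delta":
            buffers[event.item_id] += event.delta                 # partial JSON fragment
        elif event.type == "response.function_call_arguments.done":
            parsed[event.item_id] = json.loads(event.arguments)   # complete JSON string
    return parsed
```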
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFunctionWebSearch"] + + +class ResponseFunctionWebSearch(BaseModel): + id: str + """The unique ID of the web search tool call.""" + + status: Literal["in_progress", "searching", "completed", "failed"] + """The status of the web search tool call.""" + + type: Literal["web_search_call"] + """The type of the web search tool call. Always `web_search_call`.""" diff --git a/src/openai/types/responses/response_function_web_search_param.py b/src/openai/types/responses/response_function_web_search_param.py new file mode 100644 index 0000000000..d413e60b12 --- /dev/null +++ b/src/openai/types/responses/response_function_web_search_param.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseFunctionWebSearchParam"] + + +class ResponseFunctionWebSearchParam(TypedDict, total=False): + id: Required[str] + """The unique ID of the web search tool call.""" + + status: Required[Literal["in_progress", "searching", "completed", "failed"]] + """The status of the web search tool call.""" + + type: Required[Literal["web_search_call"]] + """The type of the web search tool call. Always `web_search_call`.""" diff --git a/src/openai/types/responses/response_in_progress_event.py b/src/openai/types/responses/response_in_progress_event.py new file mode 100644 index 0000000000..7d96cbb8ad --- /dev/null +++ b/src/openai/types/responses/response_in_progress_event.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .response import Response +from ..._models import BaseModel + +__all__ = ["ResponseInProgressEvent"] + + +class ResponseInProgressEvent(BaseModel): + response: Response + """The response that is in progress.""" + + type: Literal["response.in_progress"] + """The type of the event. Always `response.in_progress`.""" diff --git a/src/openai/types/responses/response_includable.py b/src/openai/types/responses/response_includable.py new file mode 100644 index 0000000000..83489fa7f1 --- /dev/null +++ b/src/openai/types/responses/response_includable.py @@ -0,0 +1,9 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["ResponseIncludable"] + +ResponseIncludable: TypeAlias = Literal[ + "file_search_call.results", "message.input_image.image_url", "computer_call_output.output.image_url" +] diff --git a/src/openai/types/responses/response_incomplete_event.py b/src/openai/types/responses/response_incomplete_event.py new file mode 100644 index 0000000000..742b789c7e --- /dev/null +++ b/src/openai/types/responses/response_incomplete_event.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .response import Response +from ..._models import BaseModel + +__all__ = ["ResponseIncompleteEvent"] + + +class ResponseIncompleteEvent(BaseModel): + response: Response + """The response that was incomplete.""" + + type: Literal["response.incomplete"] + """The type of the event. 
Always `response.incomplete`.""" diff --git a/src/openai/types/responses/response_input_content.py b/src/openai/types/responses/response_input_content.py new file mode 100644 index 0000000000..1726909a17 --- /dev/null +++ b/src/openai/types/responses/response_input_content.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ..._utils import PropertyInfo +from .response_input_file import ResponseInputFile +from .response_input_text import ResponseInputText +from .response_input_image import ResponseInputImage + +__all__ = ["ResponseInputContent"] + +ResponseInputContent: TypeAlias = Annotated[ + Union[ResponseInputText, ResponseInputImage, ResponseInputFile], PropertyInfo(discriminator="type") +] diff --git a/src/openai/types/responses/response_input_content_param.py b/src/openai/types/responses/response_input_content_param.py new file mode 100644 index 0000000000..7791cdfd8e --- /dev/null +++ b/src/openai/types/responses/response_input_content_param.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from .response_input_file_param import ResponseInputFileParam +from .response_input_text_param import ResponseInputTextParam +from .response_input_image_param import ResponseInputImageParam + +__all__ = ["ResponseInputContentParam"] + +ResponseInputContentParam: TypeAlias = Union[ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam] diff --git a/src/openai/types/responses/response_input_file.py b/src/openai/types/responses/response_input_file.py new file mode 100644 index 0000000000..00b35dc844 --- /dev/null +++ b/src/openai/types/responses/response_input_file.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseInputFile"] + + +class ResponseInputFile(BaseModel): + type: Literal["input_file"] + """The type of the input item. Always `input_file`.""" + + file_data: Optional[str] = None + """The content of the file to be sent to the model.""" + + file_id: Optional[str] = None + """The ID of the file to be sent to the model.""" + + filename: Optional[str] = None + """The name of the file to be sent to the model.""" diff --git a/src/openai/types/responses/response_input_file_param.py b/src/openai/types/responses/response_input_file_param.py new file mode 100644 index 0000000000..dc06a4ea2d --- /dev/null +++ b/src/openai/types/responses/response_input_file_param.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseInputFileParam"] + + +class ResponseInputFileParam(TypedDict, total=False): + type: Required[Literal["input_file"]] + """The type of the input item. 
Always `input_file`.""" + + file_data: str + """The content of the file to be sent to the model.""" + + file_id: str + """The ID of the file to be sent to the model.""" + + filename: str + """The name of the file to be sent to the model.""" diff --git a/src/openai/types/responses/response_input_image.py b/src/openai/types/responses/response_input_image.py new file mode 100644 index 0000000000..d719f44e9b --- /dev/null +++ b/src/openai/types/responses/response_input_image.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseInputImage"] + + +class ResponseInputImage(BaseModel): + detail: Literal["high", "low", "auto"] + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + type: Literal["input_image"] + """The type of the input item. Always `input_image`.""" + + file_id: Optional[str] = None + """The ID of the file to be sent to the model.""" + + image_url: Optional[str] = None + """The URL of the image to be sent to the model. + + A fully qualified URL or base64 encoded image in a data URL. + """ diff --git a/src/openai/types/responses/response_input_image_param.py b/src/openai/types/responses/response_input_image_param.py new file mode 100644 index 0000000000..5dd4db2b5d --- /dev/null +++ b/src/openai/types/responses/response_input_image_param.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseInputImageParam"] + + +class ResponseInputImageParam(TypedDict, total=False): + detail: Required[Literal["high", "low", "auto"]] + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + type: Required[Literal["input_image"]] + """The type of the input item. Always `input_image`.""" + + file_id: Optional[str] + """The ID of the file to be sent to the model.""" + + image_url: Optional[str] + """The URL of the image to be sent to the model. + + A fully qualified URL or base64 encoded image in a data URL. + """ diff --git a/src/openai/types/responses/response_input_item_param.py b/src/openai/types/responses/response_input_item_param.py new file mode 100644 index 0000000000..c9daaa6a89 --- /dev/null +++ b/src/openai/types/responses/response_input_item_param.py @@ -0,0 +1,174 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
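The `input_text`, `input_image`, and `input_file` shapes combine into a single multimodal message. A small sketch of such a content list; the `input_text` dict is an assumption (its param type is only referenced here), the IDs and data URL are illustrative, and the list plugs into the `content` field of the `message` input item defined just below.

```
# Content for one user message, mixing text, an image, and an uploaded file.
content = [
    {"type": "input_text", "text": "What does this chart show, and how does it relate to the attached PDF?"},
    {"type": "input_image", "detail": "auto", "image_url": "data:image/png;base64,iVBORw0KG..."},  # truncated data URL
    {"type": "input_file", "file_id": "file-abc123"},  # illustrative file ID
]
```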
+ +from __future__ import annotations + +from typing import Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .easy_input_message_param import EasyInputMessageParam +from .response_output_message_param import ResponseOutputMessageParam +from .response_computer_tool_call_param import ResponseComputerToolCallParam +from .response_function_tool_call_param import ResponseFunctionToolCallParam +from .response_function_web_search_param import ResponseFunctionWebSearchParam +from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam +from .response_input_message_content_list_param import ResponseInputMessageContentListParam + +__all__ = [ + "ResponseInputItemParam", + "Message", + "ComputerCallOutput", + "ComputerCallOutputOutput", + "ComputerCallOutputAcknowledgedSafetyCheck", + "FunctionCallOutput", + "Reasoning", + "ReasoningContent", + "ItemReference", +] + + +class Message(TypedDict, total=False): + content: Required[ResponseInputMessageContentListParam] + """ + A list of one or many input items to the model, containing different content + types. + """ + + role: Required[Literal["user", "system", "developer"]] + """The role of the message input. One of `user`, `system`, or `developer`.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + type: Literal["message"] + """The type of the message input. Always set to `message`.""" + + +class ComputerCallOutputOutput(TypedDict, total=False): + type: Required[Literal["computer_screenshot"]] + """Specifies the event type. + + For a computer screenshot, this property is always set to `computer_screenshot`. + """ + + file_id: str + """The identifier of an uploaded file that contains the screenshot.""" + + image_url: str + """The URL of the screenshot image.""" + + +class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False): + id: Required[str] + """The ID of the pending safety check.""" + + code: Required[str] + """The type of the pending safety check.""" + + message: Required[str] + """Details about the pending safety check.""" + + +class ComputerCallOutput(TypedDict, total=False): + call_id: Required[str] + """The ID of the computer tool call that produced the output.""" + + output: Required[ComputerCallOutputOutput] + """A computer screenshot image used with the computer use tool.""" + + type: Required[Literal["computer_call_output"]] + """The type of the computer tool call output. Always `computer_call_output`.""" + + id: str + """The ID of the computer tool call output.""" + + acknowledged_safety_checks: Iterable[ComputerCallOutputAcknowledgedSafetyCheck] + """ + The safety checks reported by the API that have been acknowledged by the + developer. + """ + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the message input. + + One of `in_progress`, `completed`, or `incomplete`. Populated when input items + are returned via API. + """ + + +class FunctionCallOutput(TypedDict, total=False): + call_id: Required[str] + """The unique ID of the function tool call generated by the model.""" + + output: Required[str] + """A JSON string of the output of the function tool call.""" + + type: Required[Literal["function_call_output"]] + """The type of the function tool call output. Always `function_call_output`.""" + + id: str + """The unique ID of the function tool call output. 
+ + Populated when this item is returned via API. + """ + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + +class ReasoningContent(TypedDict, total=False): + text: Required[str] + """ + A short summary of the reasoning used by the model when generating the response. + """ + + type: Required[Literal["reasoning_summary"]] + """The type of the object. Always `text`.""" + + +class Reasoning(TypedDict, total=False): + id: Required[str] + """The unique identifier of the reasoning content.""" + + content: Required[Iterable[ReasoningContent]] + """Reasoning text contents.""" + + type: Required[Literal["reasoning"]] + """The type of the object. Always `reasoning`.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + +class ItemReference(TypedDict, total=False): + id: Required[str] + """The ID of the item to reference.""" + + type: Required[Literal["item_reference"]] + """The type of item to reference. Always `item_reference`.""" + + +ResponseInputItemParam: TypeAlias = Union[ + EasyInputMessageParam, + Message, + ResponseOutputMessageParam, + ResponseFileSearchToolCallParam, + ResponseComputerToolCallParam, + ComputerCallOutput, + ResponseFunctionWebSearchParam, + ResponseFunctionToolCallParam, + FunctionCallOutput, + Reasoning, + ItemReference, +] diff --git a/src/openai/types/responses/response_input_message_content_list.py b/src/openai/types/responses/response_input_message_content_list.py new file mode 100644 index 0000000000..99b7c10f12 --- /dev/null +++ b/src/openai/types/responses/response_input_message_content_list.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import TypeAlias + +from .response_input_content import ResponseInputContent + +__all__ = ["ResponseInputMessageContentList"] + +ResponseInputMessageContentList: TypeAlias = List[ResponseInputContent] diff --git a/src/openai/types/responses/response_input_message_content_list_param.py b/src/openai/types/responses/response_input_message_content_list_param.py new file mode 100644 index 0000000000..080613df0d --- /dev/null +++ b/src/openai/types/responses/response_input_message_content_list_param.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union +from typing_extensions import TypeAlias + +from .response_input_file_param import ResponseInputFileParam +from .response_input_text_param import ResponseInputTextParam +from .response_input_image_param import ResponseInputImageParam + +__all__ = ["ResponseInputMessageContentListParam", "ResponseInputContentParam"] + +ResponseInputContentParam: TypeAlias = Union[ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam] + +ResponseInputMessageContentListParam: TypeAlias = List[ResponseInputContentParam] diff --git a/src/openai/types/responses/response_input_param.py b/src/openai/types/responses/response_input_param.py new file mode 100644 index 0000000000..c81308500d --- /dev/null +++ b/src/openai/types/responses/response_input_param.py @@ -0,0 +1,177 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .easy_input_message_param import EasyInputMessageParam +from .response_output_message_param import ResponseOutputMessageParam +from .response_computer_tool_call_param import ResponseComputerToolCallParam +from .response_function_tool_call_param import ResponseFunctionToolCallParam +from .response_function_web_search_param import ResponseFunctionWebSearchParam +from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam +from .response_input_message_content_list_param import ResponseInputMessageContentListParam + +__all__ = [ + "ResponseInputParam", + "ResponseInputItemParam", + "Message", + "ComputerCallOutput", + "ComputerCallOutputOutput", + "ComputerCallOutputAcknowledgedSafetyCheck", + "FunctionCallOutput", + "Reasoning", + "ReasoningContent", + "ItemReference", +] + + +class Message(TypedDict, total=False): + content: Required[ResponseInputMessageContentListParam] + """ + A list of one or many input items to the model, containing different content + types. + """ + + role: Required[Literal["user", "system", "developer"]] + """The role of the message input. One of `user`, `system`, or `developer`.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + type: Literal["message"] + """The type of the message input. Always set to `message`.""" + + +class ComputerCallOutputOutput(TypedDict, total=False): + type: Required[Literal["computer_screenshot"]] + """Specifies the event type. + + For a computer screenshot, this property is always set to `computer_screenshot`. + """ + + file_id: str + """The identifier of an uploaded file that contains the screenshot.""" + + image_url: str + """The URL of the screenshot image.""" + + +class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False): + id: Required[str] + """The ID of the pending safety check.""" + + code: Required[str] + """The type of the pending safety check.""" + + message: Required[str] + """Details about the pending safety check.""" + + +class ComputerCallOutput(TypedDict, total=False): + call_id: Required[str] + """The ID of the computer tool call that produced the output.""" + + output: Required[ComputerCallOutputOutput] + """A computer screenshot image used with the computer use tool.""" + + type: Required[Literal["computer_call_output"]] + """The type of the computer tool call output. Always `computer_call_output`.""" + + id: str + """The ID of the computer tool call output.""" + + acknowledged_safety_checks: Iterable[ComputerCallOutputAcknowledgedSafetyCheck] + """ + The safety checks reported by the API that have been acknowledged by the + developer. + """ + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the message input. + + One of `in_progress`, `completed`, or `incomplete`. Populated when input items + are returned via API. + """ + + +class FunctionCallOutput(TypedDict, total=False): + call_id: Required[str] + """The unique ID of the function tool call generated by the model.""" + + output: Required[str] + """A JSON string of the output of the function tool call.""" + + type: Required[Literal["function_call_output"]] + """The type of the function tool call output. Always `function_call_output`.""" + + id: str + """The unique ID of the function tool call output. 
+ + Populated when this item is returned via API. + """ + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + +class ReasoningContent(TypedDict, total=False): + text: Required[str] + """ + A short summary of the reasoning used by the model when generating the response. + """ + + type: Required[Literal["reasoning_summary"]] + """The type of the object. Always `text`.""" + + +class Reasoning(TypedDict, total=False): + id: Required[str] + """The unique identifier of the reasoning content.""" + + content: Required[Iterable[ReasoningContent]] + """Reasoning text contents.""" + + type: Required[Literal["reasoning"]] + """The type of the object. Always `reasoning`.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + +class ItemReference(TypedDict, total=False): + id: Required[str] + """The ID of the item to reference.""" + + type: Required[Literal["item_reference"]] + """The type of item to reference. Always `item_reference`.""" + + +ResponseInputItemParam: TypeAlias = Union[ + EasyInputMessageParam, + Message, + ResponseOutputMessageParam, + ResponseFileSearchToolCallParam, + ResponseComputerToolCallParam, + ComputerCallOutput, + ResponseFunctionWebSearchParam, + ResponseFunctionToolCallParam, + FunctionCallOutput, + Reasoning, + ItemReference, +] + +ResponseInputParam: TypeAlias = List[ResponseInputItemParam] diff --git a/src/openai/types/responses/response_input_text.py b/src/openai/types/responses/response_input_text.py new file mode 100644 index 0000000000..ba8d1ea18b --- /dev/null +++ b/src/openai/types/responses/response_input_text.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseInputText"] + + +class ResponseInputText(BaseModel): + text: str + """The text input to the model.""" + + type: Literal["input_text"] + """The type of the input item. Always `input_text`.""" diff --git a/src/openai/types/responses/response_input_text_param.py b/src/openai/types/responses/response_input_text_param.py new file mode 100644 index 0000000000..f2ba834082 --- /dev/null +++ b/src/openai/types/responses/response_input_text_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseInputTextParam"] + + +class ResponseInputTextParam(TypedDict, total=False): + text: Required[str] + """The text input to the model.""" + + type: Required[Literal["input_text"]] + """The type of the input item. Always `input_text`.""" diff --git a/src/openai/types/responses/response_item_list.py b/src/openai/types/responses/response_item_list.py new file mode 100644 index 0000000000..7c3e4d7f82 --- /dev/null +++ b/src/openai/types/responses/response_item_list.py @@ -0,0 +1,152 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
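`ResponseInputItemParam` above is the building block for a heterogeneous input list rather than a single message. A small sketch of a follow-up turn that replays the user message and answers a prior `function_call` item with a `function_call_output`; the `call_id` is a placeholder taken from an earlier response:

# Dict shapes follow Message and FunctionCallOutput as defined above.
input_items = [
    {
        "type": "message",
        "role": "user",
        "content": [{"type": "input_text", "text": "What's the weather in Paris right now?"}],
    },
    {
        "type": "function_call_output",
        "call_id": "call_abc123",  # placeholder: ID of the model's earlier function call
        "output": '{"temperature_c": 21, "conditions": "sunny"}',
    },
]

A list like this is what gets passed as the request `input` on the next turn; the exact request parameters are defined by the create params elsewhere in this patch.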
+ +from typing import List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel +from .response_output_message import ResponseOutputMessage +from .response_computer_tool_call import ResponseComputerToolCall +from .response_function_tool_call import ResponseFunctionToolCall +from .response_function_web_search import ResponseFunctionWebSearch +from .response_file_search_tool_call import ResponseFileSearchToolCall +from .response_input_message_content_list import ResponseInputMessageContentList + +__all__ = [ + "ResponseItemList", + "Data", + "DataMessage", + "DataComputerCallOutput", + "DataComputerCallOutputOutput", + "DataComputerCallOutputAcknowledgedSafetyCheck", + "DataFunctionCallOutput", +] + + +class DataMessage(BaseModel): + id: str + """The unique ID of the message input.""" + + content: ResponseInputMessageContentList + """ + A list of one or many input items to the model, containing different content + types. + """ + + role: Literal["user", "system", "developer"] + """The role of the message input. One of `user`, `system`, or `developer`.""" + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + type: Optional[Literal["message"]] = None + """The type of the message input. Always set to `message`.""" + + +class DataComputerCallOutputOutput(BaseModel): + type: Literal["computer_screenshot"] + """Specifies the event type. + + For a computer screenshot, this property is always set to `computer_screenshot`. + """ + + file_id: Optional[str] = None + """The identifier of an uploaded file that contains the screenshot.""" + + image_url: Optional[str] = None + """The URL of the screenshot image.""" + + +class DataComputerCallOutputAcknowledgedSafetyCheck(BaseModel): + id: str + """The ID of the pending safety check.""" + + code: str + """The type of the pending safety check.""" + + message: str + """Details about the pending safety check.""" + + +class DataComputerCallOutput(BaseModel): + id: str + """The unique ID of the computer call tool output.""" + + call_id: str + """The ID of the computer tool call that produced the output.""" + + output: DataComputerCallOutputOutput + """A computer screenshot image used with the computer use tool.""" + + type: Literal["computer_call_output"] + """The type of the computer tool call output. Always `computer_call_output`.""" + + acknowledged_safety_checks: Optional[List[DataComputerCallOutputAcknowledgedSafetyCheck]] = None + """ + The safety checks reported by the API that have been acknowledged by the + developer. + """ + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the message input. + + One of `in_progress`, `completed`, or `incomplete`. Populated when input items + are returned via API. + """ + + +class DataFunctionCallOutput(BaseModel): + id: str + """The unique ID of the function call tool output.""" + + call_id: str + """The unique ID of the function tool call generated by the model.""" + + output: str + """A JSON string of the output of the function tool call.""" + + type: Literal["function_call_output"] + """The type of the function tool call output. Always `function_call_output`.""" + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the item. 
+ + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + +Data: TypeAlias = Annotated[ + Union[ + DataMessage, + ResponseOutputMessage, + ResponseFileSearchToolCall, + ResponseComputerToolCall, + DataComputerCallOutput, + ResponseFunctionWebSearch, + ResponseFunctionToolCall, + DataFunctionCallOutput, + ], + PropertyInfo(discriminator="type"), +] + + +class ResponseItemList(BaseModel): + data: List[Data] + """A list of items used to generate this response.""" + + first_id: str + """The ID of the first item in the list.""" + + has_more: bool + """Whether there are more items available.""" + + last_id: str + """The ID of the last item in the list.""" + + object: Literal["list"] + """The type of object returned, must be `list`.""" diff --git a/src/openai/types/responses/response_output_item.py b/src/openai/types/responses/response_output_item.py new file mode 100644 index 0000000000..45d5cc0094 --- /dev/null +++ b/src/openai/types/responses/response_output_item.py @@ -0,0 +1,55 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel +from .response_output_message import ResponseOutputMessage +from .response_computer_tool_call import ResponseComputerToolCall +from .response_function_tool_call import ResponseFunctionToolCall +from .response_function_web_search import ResponseFunctionWebSearch +from .response_file_search_tool_call import ResponseFileSearchToolCall + +__all__ = ["ResponseOutputItem", "Reasoning", "ReasoningContent"] + + +class ReasoningContent(BaseModel): + text: str + """ + A short summary of the reasoning used by the model when generating the response. + """ + + type: Literal["reasoning_summary"] + """The type of the object. Always `text`.""" + + +class Reasoning(BaseModel): + id: str + """The unique identifier of the reasoning content.""" + + content: List[ReasoningContent] + """Reasoning text contents.""" + + type: Literal["reasoning"] + """The type of the object. Always `reasoning`.""" + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + +ResponseOutputItem: TypeAlias = Annotated[ + Union[ + ResponseOutputMessage, + ResponseFileSearchToolCall, + ResponseFunctionToolCall, + ResponseFunctionWebSearch, + ResponseComputerToolCall, + Reasoning, + ], + PropertyInfo(discriminator="type"), +] diff --git a/src/openai/types/responses/response_output_item_added_event.py b/src/openai/types/responses/response_output_item_added_event.py new file mode 100644 index 0000000000..7344fb9a6c --- /dev/null +++ b/src/openai/types/responses/response_output_item_added_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel +from .response_output_item import ResponseOutputItem + +__all__ = ["ResponseOutputItemAddedEvent"] + + +class ResponseOutputItemAddedEvent(BaseModel): + item: ResponseOutputItem + """The output item that was added.""" + + output_index: int + """The index of the output item that was added.""" + + type: Literal["response.output_item.added"] + """The type of the event. 
Always `response.output_item.added`.""" diff --git a/src/openai/types/responses/response_output_item_done_event.py b/src/openai/types/responses/response_output_item_done_event.py new file mode 100644 index 0000000000..a0a871a019 --- /dev/null +++ b/src/openai/types/responses/response_output_item_done_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel +from .response_output_item import ResponseOutputItem + +__all__ = ["ResponseOutputItemDoneEvent"] + + +class ResponseOutputItemDoneEvent(BaseModel): + item: ResponseOutputItem + """The output item that was marked done.""" + + output_index: int + """The index of the output item that was marked done.""" + + type: Literal["response.output_item.done"] + """The type of the event. Always `response.output_item.done`.""" diff --git a/src/openai/types/responses/response_output_message.py b/src/openai/types/responses/response_output_message.py new file mode 100644 index 0000000000..3864aa2111 --- /dev/null +++ b/src/openai/types/responses/response_output_message.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel +from .response_output_text import ResponseOutputText +from .response_output_refusal import ResponseOutputRefusal + +__all__ = ["ResponseOutputMessage", "Content"] + +Content: TypeAlias = Annotated[Union[ResponseOutputText, ResponseOutputRefusal], PropertyInfo(discriminator="type")] + + +class ResponseOutputMessage(BaseModel): + id: str + """The unique ID of the output message.""" + + content: List[Content] + """The content of the output message.""" + + role: Literal["assistant"] + """The role of the output message. Always `assistant`.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the message input. + + One of `in_progress`, `completed`, or `incomplete`. Populated when input items + are returned via API. + """ + + type: Literal["message"] + """The type of the output message. Always `message`.""" diff --git a/src/openai/types/responses/response_output_message_param.py b/src/openai/types/responses/response_output_message_param.py new file mode 100644 index 0000000000..46cbbd20de --- /dev/null +++ b/src/openai/types/responses/response_output_message_param.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .response_output_text_param import ResponseOutputTextParam +from .response_output_refusal_param import ResponseOutputRefusalParam + +__all__ = ["ResponseOutputMessageParam", "Content"] + +Content: TypeAlias = Union[ResponseOutputTextParam, ResponseOutputRefusalParam] + + +class ResponseOutputMessageParam(TypedDict, total=False): + id: Required[str] + """The unique ID of the output message.""" + + content: Required[Iterable[Content]] + """The content of the output message.""" + + role: Required[Literal["assistant"]] + """The role of the output message. Always `assistant`.""" + + status: Required[Literal["in_progress", "completed", "incomplete"]] + """The status of the message input. + + One of `in_progress`, `completed`, or `incomplete`. 
Populated when input items + are returned via API. + """ + + type: Required[Literal["message"]] + """The type of the output message. Always `message`.""" diff --git a/src/openai/types/responses/response_output_refusal.py b/src/openai/types/responses/response_output_refusal.py new file mode 100644 index 0000000000..eba581070d --- /dev/null +++ b/src/openai/types/responses/response_output_refusal.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseOutputRefusal"] + + +class ResponseOutputRefusal(BaseModel): + refusal: str + """The refusal explanationfrom the model.""" + + type: Literal["refusal"] + """The type of the refusal. Always `refusal`.""" diff --git a/src/openai/types/responses/response_output_refusal_param.py b/src/openai/types/responses/response_output_refusal_param.py new file mode 100644 index 0000000000..53140a6080 --- /dev/null +++ b/src/openai/types/responses/response_output_refusal_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseOutputRefusalParam"] + + +class ResponseOutputRefusalParam(TypedDict, total=False): + refusal: Required[str] + """The refusal explanationfrom the model.""" + + type: Required[Literal["refusal"]] + """The type of the refusal. Always `refusal`.""" diff --git a/src/openai/types/responses/response_output_text.py b/src/openai/types/responses/response_output_text.py new file mode 100644 index 0000000000..fa653cd1af --- /dev/null +++ b/src/openai/types/responses/response_output_text.py @@ -0,0 +1,64 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel + +__all__ = ["ResponseOutputText", "Annotation", "AnnotationFileCitation", "AnnotationURLCitation", "AnnotationFilePath"] + + +class AnnotationFileCitation(BaseModel): + file_id: str + """The ID of the file.""" + + index: int + """The index of the file in the list of files.""" + + type: Literal["file_citation"] + """The type of the file citation. Always `file_citation`.""" + + +class AnnotationURLCitation(BaseModel): + end_index: int + """The index of the last character of the URL citation in the message.""" + + start_index: int + """The index of the first character of the URL citation in the message.""" + + title: str + """The title of the web resource.""" + + type: Literal["url_citation"] + """The type of the URL citation. Always `url_citation`.""" + + url: str + """The URL of the web resource.""" + + +class AnnotationFilePath(BaseModel): + file_id: str + """The ID of the file.""" + + index: int + """The index of the file in the list of files.""" + + type: Literal["file_path"] + """The type of the file path. Always `file_path`.""" + + +Annotation: TypeAlias = Annotated[ + Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationFilePath], PropertyInfo(discriminator="type") +] + + +class ResponseOutputText(BaseModel): + annotations: List[Annotation] + """The annotations of the text output.""" + + text: str + """The text output from the model.""" + + type: Literal["output_text"] + """The type of the output text. 
Always `output_text`.""" diff --git a/src/openai/types/responses/response_output_text_param.py b/src/openai/types/responses/response_output_text_param.py new file mode 100644 index 0000000000..1f0967285f --- /dev/null +++ b/src/openai/types/responses/response_output_text_param.py @@ -0,0 +1,67 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = [ + "ResponseOutputTextParam", + "Annotation", + "AnnotationFileCitation", + "AnnotationURLCitation", + "AnnotationFilePath", +] + + +class AnnotationFileCitation(TypedDict, total=False): + file_id: Required[str] + """The ID of the file.""" + + index: Required[int] + """The index of the file in the list of files.""" + + type: Required[Literal["file_citation"]] + """The type of the file citation. Always `file_citation`.""" + + +class AnnotationURLCitation(TypedDict, total=False): + end_index: Required[int] + """The index of the last character of the URL citation in the message.""" + + start_index: Required[int] + """The index of the first character of the URL citation in the message.""" + + title: Required[str] + """The title of the web resource.""" + + type: Required[Literal["url_citation"]] + """The type of the URL citation. Always `url_citation`.""" + + url: Required[str] + """The URL of the web resource.""" + + +class AnnotationFilePath(TypedDict, total=False): + file_id: Required[str] + """The ID of the file.""" + + index: Required[int] + """The index of the file in the list of files.""" + + type: Required[Literal["file_path"]] + """The type of the file path. Always `file_path`.""" + + +Annotation: TypeAlias = Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationFilePath] + + +class ResponseOutputTextParam(TypedDict, total=False): + annotations: Required[Iterable[Annotation]] + """The annotations of the text output.""" + + text: Required[str] + """The text output from the model.""" + + type: Required[Literal["output_text"]] + """The type of the output text. Always `output_text`.""" diff --git a/src/openai/types/responses/response_refusal_delta_event.py b/src/openai/types/responses/response_refusal_delta_event.py new file mode 100644 index 0000000000..04dcdf1c8c --- /dev/null +++ b/src/openai/types/responses/response_refusal_delta_event.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseRefusalDeltaEvent"] + + +class ResponseRefusalDeltaEvent(BaseModel): + content_index: int + """The index of the content part that the refusal text is added to.""" + + delta: str + """The refusal text that is added.""" + + item_id: str + """The ID of the output item that the refusal text is added to.""" + + output_index: int + """The index of the output item that the refusal text is added to.""" + + type: Literal["response.refusal.delta"] + """The type of the event. Always `response.refusal.delta`.""" diff --git a/src/openai/types/responses/response_refusal_done_event.py b/src/openai/types/responses/response_refusal_done_event.py new file mode 100644 index 0000000000..a9b6f4b055 --- /dev/null +++ b/src/openai/types/responses/response_refusal_done_event.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
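To show how the output-side models above fit together, here is a small sketch that walks a `ResponseOutputMessage`, separating text, refusals, and the three annotation variants via their discriminator `type` fields. It assumes these models are re-exported from `openai.types.responses`:

from openai.types.responses import ResponseOutputMessage

def print_message(message: ResponseOutputMessage) -> None:
    # Each content part is either ResponseOutputText or ResponseOutputRefusal.
    for part in message.content:
        if part.type == "output_text":
            print(part.text)
            for annotation in part.annotations:
                if annotation.type == "url_citation":
                    print(f"  source: {annotation.title} <{annotation.url}>")
                elif annotation.type == "file_citation":
                    print(f"  cited file: {annotation.file_id}")
                else:  # "file_path"
                    print(f"  generated file: {annotation.file_id}")
        else:  # "refusal"
            print(f"[refused] {part.refusal}")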
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseRefusalDoneEvent"] + + +class ResponseRefusalDoneEvent(BaseModel): + content_index: int + """The index of the content part that the refusal text is finalized.""" + + item_id: str + """The ID of the output item that the refusal text is finalized.""" + + output_index: int + """The index of the output item that the refusal text is finalized.""" + + refusal: str + """The refusal text that is finalized.""" + + type: Literal["response.refusal.done"] + """The type of the event. Always `response.refusal.done`.""" diff --git a/src/openai/types/responses/response_retrieve_params.py b/src/openai/types/responses/response_retrieve_params.py new file mode 100644 index 0000000000..137bf4dcee --- /dev/null +++ b/src/openai/types/responses/response_retrieve_params.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import TypedDict + +from .response_includable import ResponseIncludable + +__all__ = ["ResponseRetrieveParams"] + + +class ResponseRetrieveParams(TypedDict, total=False): + include: List[ResponseIncludable] + """Additional fields to include in the response. + + See the `include` parameter for Response creation above for more information. + """ diff --git a/src/openai/types/responses/response_status.py b/src/openai/types/responses/response_status.py new file mode 100644 index 0000000000..934d17cda3 --- /dev/null +++ b/src/openai/types/responses/response_status.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["ResponseStatus"] + +ResponseStatus: TypeAlias = Literal["completed", "failed", "in_progress", "incomplete"] diff --git a/src/openai/types/responses/response_stream_event.py b/src/openai/types/responses/response_stream_event.py new file mode 100644 index 0000000000..446863b175 --- /dev/null +++ b/src/openai/types/responses/response_stream_event.py @@ -0,0 +1,78 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
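The per-event models above all feed the `ResponseStreamEvent` union assembled in the file that follows. A sketch of consuming a stream, assuming `client.responses.create(..., stream=True)` yields these events as added elsewhere in this patch; only a few event types are handled:

import openai

client = openai.OpenAI()

stream = client.responses.create(
    model="gpt-4o",
    input="Write a haiku about static typing.",
    stream=True,
)

for event in stream:
    # Dispatch on the discriminator `type` carried by every stream event.
    if event.type == "response.output_text.delta":
        print(event.delta, end="", flush=True)
    elif event.type == "response.refusal.delta":
        print(event.delta, end="", flush=True)
    elif event.type == "response.completed":
        print()  # the response is finished; remaining events can be ignored here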
+ +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ..._utils import PropertyInfo +from .response_error_event import ResponseErrorEvent +from .response_failed_event import ResponseFailedEvent +from .response_created_event import ResponseCreatedEvent +from .response_completed_event import ResponseCompletedEvent +from .response_text_done_event import ResponseTextDoneEvent +from .response_audio_done_event import ResponseAudioDoneEvent +from .response_incomplete_event import ResponseIncompleteEvent +from .response_text_delta_event import ResponseTextDeltaEvent +from .response_audio_delta_event import ResponseAudioDeltaEvent +from .response_in_progress_event import ResponseInProgressEvent +from .response_refusal_done_event import ResponseRefusalDoneEvent +from .response_refusal_delta_event import ResponseRefusalDeltaEvent +from .response_output_item_done_event import ResponseOutputItemDoneEvent +from .response_content_part_done_event import ResponseContentPartDoneEvent +from .response_output_item_added_event import ResponseOutputItemAddedEvent +from .response_content_part_added_event import ResponseContentPartAddedEvent +from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent +from .response_text_annotation_delta_event import ResponseTextAnnotationDeltaEvent +from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent +from .response_web_search_call_completed_event import ResponseWebSearchCallCompletedEvent +from .response_web_search_call_searching_event import ResponseWebSearchCallSearchingEvent +from .response_file_search_call_completed_event import ResponseFileSearchCallCompletedEvent +from .response_file_search_call_searching_event import ResponseFileSearchCallSearchingEvent +from .response_web_search_call_in_progress_event import ResponseWebSearchCallInProgressEvent +from .response_file_search_call_in_progress_event import ResponseFileSearchCallInProgressEvent +from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent +from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent +from .response_code_interpreter_call_code_done_event import ResponseCodeInterpreterCallCodeDoneEvent +from .response_code_interpreter_call_completed_event import ResponseCodeInterpreterCallCompletedEvent +from .response_code_interpreter_call_code_delta_event import ResponseCodeInterpreterCallCodeDeltaEvent +from .response_code_interpreter_call_in_progress_event import ResponseCodeInterpreterCallInProgressEvent +from .response_code_interpreter_call_interpreting_event import ResponseCodeInterpreterCallInterpretingEvent + +__all__ = ["ResponseStreamEvent"] + +ResponseStreamEvent: TypeAlias = Annotated[ + Union[ + ResponseAudioDeltaEvent, + ResponseAudioDoneEvent, + ResponseAudioTranscriptDeltaEvent, + ResponseAudioTranscriptDoneEvent, + ResponseCodeInterpreterCallCodeDeltaEvent, + ResponseCodeInterpreterCallCodeDoneEvent, + ResponseCodeInterpreterCallCompletedEvent, + ResponseCodeInterpreterCallInProgressEvent, + ResponseCodeInterpreterCallInterpretingEvent, + ResponseCompletedEvent, + ResponseContentPartAddedEvent, + ResponseContentPartDoneEvent, + ResponseCreatedEvent, + ResponseErrorEvent, + ResponseFileSearchCallCompletedEvent, + ResponseFileSearchCallInProgressEvent, + ResponseFileSearchCallSearchingEvent, + ResponseFunctionCallArgumentsDeltaEvent, + ResponseFunctionCallArgumentsDoneEvent, + ResponseInProgressEvent, + ResponseFailedEvent, + 
ResponseIncompleteEvent, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, + ResponseRefusalDeltaEvent, + ResponseRefusalDoneEvent, + ResponseTextAnnotationDeltaEvent, + ResponseTextDeltaEvent, + ResponseTextDoneEvent, + ResponseWebSearchCallCompletedEvent, + ResponseWebSearchCallInProgressEvent, + ResponseWebSearchCallSearchingEvent, + ], + PropertyInfo(discriminator="type"), +] diff --git a/src/openai/types/responses/response_text_annotation_delta_event.py b/src/openai/types/responses/response_text_annotation_delta_event.py new file mode 100644 index 0000000000..4f2582282a --- /dev/null +++ b/src/openai/types/responses/response_text_annotation_delta_event.py @@ -0,0 +1,79 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel + +__all__ = [ + "ResponseTextAnnotationDeltaEvent", + "Annotation", + "AnnotationFileCitation", + "AnnotationURLCitation", + "AnnotationFilePath", +] + + +class AnnotationFileCitation(BaseModel): + file_id: str + """The ID of the file.""" + + index: int + """The index of the file in the list of files.""" + + type: Literal["file_citation"] + """The type of the file citation. Always `file_citation`.""" + + +class AnnotationURLCitation(BaseModel): + end_index: int + """The index of the last character of the URL citation in the message.""" + + start_index: int + """The index of the first character of the URL citation in the message.""" + + title: str + """The title of the web resource.""" + + type: Literal["url_citation"] + """The type of the URL citation. Always `url_citation`.""" + + url: str + """The URL of the web resource.""" + + +class AnnotationFilePath(BaseModel): + file_id: str + """The ID of the file.""" + + index: int + """The index of the file in the list of files.""" + + type: Literal["file_path"] + """The type of the file path. Always `file_path`.""" + + +Annotation: TypeAlias = Annotated[ + Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationFilePath], PropertyInfo(discriminator="type") +] + + +class ResponseTextAnnotationDeltaEvent(BaseModel): + annotation: Annotation + """A citation to a file.""" + + annotation_index: int + """The index of the annotation that was added.""" + + content_index: int + """The index of the content part that the text annotation was added to.""" + + item_id: str + """The ID of the output item that the text annotation was added to.""" + + output_index: int + """The index of the output item that the text annotation was added to.""" + + type: Literal["response.output_text.annotation.added"] + """The type of the event. Always `response.output_text.annotation.added`.""" diff --git a/src/openai/types/responses/response_text_config.py b/src/openai/types/responses/response_text_config.py new file mode 100644 index 0000000000..a1894a9176 --- /dev/null +++ b/src/openai/types/responses/response_text_config.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .response_format_text_config import ResponseFormatTextConfig + +__all__ = ["ResponseTextConfig"] + + +class ResponseTextConfig(BaseModel): + format: Optional[ResponseFormatTextConfig] = None + """An object specifying the format that the model must output. 
+ + Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + ensures the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + The default format is `{ "type": "text" }` with no additional options. + + **Not recommended for gpt-4o and newer models:** + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + """ diff --git a/src/openai/types/responses/response_text_config_param.py b/src/openai/types/responses/response_text_config_param.py new file mode 100644 index 0000000000..aec064bf89 --- /dev/null +++ b/src/openai/types/responses/response_text_config_param.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +from .response_format_text_config_param import ResponseFormatTextConfigParam + +__all__ = ["ResponseTextConfigParam"] + + +class ResponseTextConfigParam(TypedDict, total=False): + format: ResponseFormatTextConfigParam + """An object specifying the format that the model must output. + + Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + ensures the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + The default format is `{ "type": "text" }` with no additional options. + + **Not recommended for gpt-4o and newer models:** + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + """ diff --git a/src/openai/types/responses/response_text_delta_event.py b/src/openai/types/responses/response_text_delta_event.py new file mode 100644 index 0000000000..751a5e2a19 --- /dev/null +++ b/src/openai/types/responses/response_text_delta_event.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseTextDeltaEvent"] + + +class ResponseTextDeltaEvent(BaseModel): + content_index: int + """The index of the content part that the text delta was added to.""" + + delta: str + """The text delta that was added.""" + + item_id: str + """The ID of the output item that the text delta was added to.""" + + output_index: int + """The index of the output item that the text delta was added to.""" + + type: Literal["response.output_text.delta"] + """The type of the event. Always `response.output_text.delta`.""" diff --git a/src/openai/types/responses/response_text_done_event.py b/src/openai/types/responses/response_text_done_event.py new file mode 100644 index 0000000000..9b5c5e020c --- /dev/null +++ b/src/openai/types/responses/response_text_done_event.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
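The `text` configuration above plays the role of `response_format` for this API surface. A hedged sketch of a `ResponseTextConfigParam`-shaped dict requesting Structured Outputs; the inner field names (`name`, `schema`, `strict`) are assumed from the `ResponseFormatTextConfig` types defined earlier in this patch and should be checked against them:

# Sketch: ResponseTextConfigParam-shaped dict selecting a strict JSON schema.
text_config = {
    "format": {
        "type": "json_schema",
        "name": "calendar_event",  # assumed field name from the text-format config types
        "schema": {
            "type": "object",
            "properties": {
                "title": {"type": "string"},
                "date": {"type": "string"},
            },
            "required": ["title", "date"],
            "additionalProperties": False,
        },
        "strict": True,
    }
}
# The default, equivalent to omitting `text`, is simply {"format": {"type": "text"}}.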
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseTextDoneEvent"] + + +class ResponseTextDoneEvent(BaseModel): + content_index: int + """The index of the content part that the text content is finalized.""" + + item_id: str + """The ID of the output item that the text content is finalized.""" + + output_index: int + """The index of the output item that the text content is finalized.""" + + text: str + """The text content that is finalized.""" + + type: Literal["response.output_text.done"] + """The type of the event. Always `response.output_text.done`.""" diff --git a/src/openai/types/responses/response_usage.py b/src/openai/types/responses/response_usage.py new file mode 100644 index 0000000000..ef631c5882 --- /dev/null +++ b/src/openai/types/responses/response_usage.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + + +from ..._models import BaseModel + +__all__ = ["ResponseUsage", "OutputTokensDetails"] + + +class OutputTokensDetails(BaseModel): + reasoning_tokens: int + """The number of reasoning tokens.""" + + +class ResponseUsage(BaseModel): + input_tokens: int + """The number of input tokens.""" + + output_tokens: int + """The number of output tokens.""" + + output_tokens_details: OutputTokensDetails + """A detailed breakdown of the output tokens.""" + + total_tokens: int + """The total number of tokens used.""" diff --git a/src/openai/types/responses/response_web_search_call_completed_event.py b/src/openai/types/responses/response_web_search_call_completed_event.py new file mode 100644 index 0000000000..76f26766a1 --- /dev/null +++ b/src/openai/types/responses/response_web_search_call_completed_event.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseWebSearchCallCompletedEvent"] + + +class ResponseWebSearchCallCompletedEvent(BaseModel): + item_id: str + """Unique ID for the output item associated with the web search call.""" + + output_index: int + """The index of the output item that the web search call is associated with.""" + + type: Literal["response.web_search_call.completed"] + """The type of the event. Always `response.web_search_call.completed`.""" diff --git a/src/openai/types/responses/response_web_search_call_in_progress_event.py b/src/openai/types/responses/response_web_search_call_in_progress_event.py new file mode 100644 index 0000000000..681ce6d94b --- /dev/null +++ b/src/openai/types/responses/response_web_search_call_in_progress_event.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseWebSearchCallInProgressEvent"] + + +class ResponseWebSearchCallInProgressEvent(BaseModel): + item_id: str + """Unique ID for the output item associated with the web search call.""" + + output_index: int + """The index of the output item that the web search call is associated with.""" + + type: Literal["response.web_search_call.in_progress"] + """The type of the event. 
Always `response.web_search_call.in_progress`.""" diff --git a/src/openai/types/responses/response_web_search_call_searching_event.py b/src/openai/types/responses/response_web_search_call_searching_event.py new file mode 100644 index 0000000000..c885d98918 --- /dev/null +++ b/src/openai/types/responses/response_web_search_call_searching_event.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseWebSearchCallSearchingEvent"] + + +class ResponseWebSearchCallSearchingEvent(BaseModel): + item_id: str + """Unique ID for the output item associated with the web search call.""" + + output_index: int + """The index of the output item that the web search call is associated with.""" + + type: Literal["response.web_search_call.searching"] + """The type of the event. Always `response.web_search_call.searching`.""" diff --git a/src/openai/types/responses/tool.py b/src/openai/types/responses/tool.py new file mode 100644 index 0000000000..de5d5524d4 --- /dev/null +++ b/src/openai/types/responses/tool.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ..._utils import PropertyInfo +from .computer_tool import ComputerTool +from .function_tool import FunctionTool +from .web_search_tool import WebSearchTool +from .file_search_tool import FileSearchTool + +__all__ = ["Tool"] + +Tool: TypeAlias = Annotated[ + Union[FileSearchTool, FunctionTool, ComputerTool, WebSearchTool], PropertyInfo(discriminator="type") +] diff --git a/src/openai/types/responses/tool_choice_function.py b/src/openai/types/responses/tool_choice_function.py new file mode 100644 index 0000000000..8d2a4f2822 --- /dev/null +++ b/src/openai/types/responses/tool_choice_function.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ToolChoiceFunction"] + + +class ToolChoiceFunction(BaseModel): + name: str + """The name of the function to call.""" + + type: Literal["function"] + """For function calling, the type is always `function`.""" diff --git a/src/openai/types/responses/tool_choice_function_param.py b/src/openai/types/responses/tool_choice_function_param.py new file mode 100644 index 0000000000..910537fd97 --- /dev/null +++ b/src/openai/types/responses/tool_choice_function_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ToolChoiceFunctionParam"] + + +class ToolChoiceFunctionParam(TypedDict, total=False): + name: Required[str] + """The name of the function to call.""" + + type: Required[Literal["function"]] + """For function calling, the type is always `function`.""" diff --git a/src/openai/types/responses/tool_choice_options.py b/src/openai/types/responses/tool_choice_options.py new file mode 100644 index 0000000000..c200db54e1 --- /dev/null +++ b/src/openai/types/responses/tool_choice_options.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
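Tying the tool and tool-choice types together: the sketch below offers the model a hosted web search tool (whose `user_location` shape is defined just below in `web_search_tool.py`) plus a custom function, then forces the hosted tool with a `ToolChoiceTypesParam`-shaped dict. The function-tool field layout (`name`, `parameters`, `strict`) is assumed from `FunctionToolParam` earlier in this patch, and the `create()` call is assumed from the Responses resource this change adds:

import openai

client = openai.OpenAI()

tools = [
    {
        "type": "web_search_preview",
        "search_context_size": "medium",
        "user_location": {
            "type": "approximate",
            "city": "San Francisco",
            "country": "US",
            "timezone": "America/Los_Angeles",
        },
    },
    {
        "type": "function",
        "name": "get_weather",  # hypothetical function for illustration
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
        "strict": True,
    },
]

response = client.responses.create(
    model="gpt-4o",
    input="What's in the news in San Francisco today?",
    tools=tools,
    tool_choice={"type": "web_search_preview"},  # or "auto" / "none" / "required" via ToolChoiceOptions
)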
+ +from typing_extensions import Literal, TypeAlias + +__all__ = ["ToolChoiceOptions"] + +ToolChoiceOptions: TypeAlias = Literal["none", "auto", "required"] diff --git a/src/openai/types/responses/tool_choice_types.py b/src/openai/types/responses/tool_choice_types.py new file mode 100644 index 0000000000..4942808f14 --- /dev/null +++ b/src/openai/types/responses/tool_choice_types.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ToolChoiceTypes"] + + +class ToolChoiceTypes(BaseModel): + type: Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"] + """The type of hosted tool the model should to use. + + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). + + Allowed values are: + + - `file_search` + - `web_search_preview` + - `computer_use_preview` + """ diff --git a/src/openai/types/responses/tool_choice_types_param.py b/src/openai/types/responses/tool_choice_types_param.py new file mode 100644 index 0000000000..b14f2a9eb0 --- /dev/null +++ b/src/openai/types/responses/tool_choice_types_param.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ToolChoiceTypesParam"] + + +class ToolChoiceTypesParam(TypedDict, total=False): + type: Required[ + Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"] + ] + """The type of hosted tool the model should to use. + + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). + + Allowed values are: + + - `file_search` + - `web_search_preview` + - `computer_use_preview` + """ diff --git a/src/openai/types/responses/tool_param.py b/src/openai/types/responses/tool_param.py new file mode 100644 index 0000000000..8bb089c5f1 --- /dev/null +++ b/src/openai/types/responses/tool_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from .computer_tool_param import ComputerToolParam +from .function_tool_param import FunctionToolParam +from .web_search_tool_param import WebSearchToolParam +from .file_search_tool_param import FileSearchToolParam + +__all__ = ["ToolParam"] + +ToolParam: TypeAlias = Union[FileSearchToolParam, FunctionToolParam, ComputerToolParam, WebSearchToolParam] diff --git a/src/openai/types/responses/web_search_tool.py b/src/openai/types/responses/web_search_tool.py new file mode 100644 index 0000000000..bee270bf85 --- /dev/null +++ b/src/openai/types/responses/web_search_tool.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["WebSearchTool", "UserLocation"] + + +class UserLocation(BaseModel): + type: Literal["approximate"] + """The type of location approximation. Always `approximate`.""" + + city: Optional[str] = None + """Free text input for the city of the user, e.g. `San Francisco`.""" + + country: Optional[str] = None + """ + The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + the user, e.g. `US`. 
+ """ + + region: Optional[str] = None + """Free text input for the region of the user, e.g. `California`.""" + + timezone: Optional[str] = None + """ + The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + user, e.g. `America/Los_Angeles`. + """ + + +class WebSearchTool(BaseModel): + type: Literal["web_search_preview", "web_search_preview_2025_03_11"] + """The type of the web search tool. One of: + + - `web_search_preview` + - `web_search_preview_2025_03_11` + """ + + search_context_size: Optional[Literal["low", "medium", "high"]] = None + """ + High level guidance for the amount of context window space to use for the + search. One of `low`, `medium`, or `high`. `medium` is the default. + """ + + user_location: Optional[UserLocation] = None diff --git a/src/openai/types/responses/web_search_tool_param.py b/src/openai/types/responses/web_search_tool_param.py new file mode 100644 index 0000000000..8ee36ffb47 --- /dev/null +++ b/src/openai/types/responses/web_search_tool_param.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["WebSearchToolParam", "UserLocation"] + + +class UserLocation(TypedDict, total=False): + type: Required[Literal["approximate"]] + """The type of location approximation. Always `approximate`.""" + + city: str + """Free text input for the city of the user, e.g. `San Francisco`.""" + + country: str + """ + The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + the user, e.g. `US`. + """ + + region: str + """Free text input for the region of the user, e.g. `California`.""" + + timezone: str + """ + The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + user, e.g. `America/Los_Angeles`. + """ + + +class WebSearchToolParam(TypedDict, total=False): + type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]] + """The type of the web search tool. One of: + + - `web_search_preview` + - `web_search_preview_2025_03_11` + """ + + search_context_size: Literal["low", "medium", "high"] + """ + High level guidance for the amount of context window space to use for the + search. One of `low`, `medium`, or `high`. `medium` is the default. + """ + + user_location: Optional[UserLocation] diff --git a/src/openai/types/shared/__init__.py b/src/openai/types/shared/__init__.py index 4cf367b1cc..6ccc2313cc 100644 --- a/src/openai/types/shared/__init__.py +++ b/src/openai/types/shared/__init__.py @@ -1,8 +1,12 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from .metadata import Metadata as Metadata +from .reasoning import Reasoning as Reasoning from .chat_model import ChatModel as ChatModel from .error_object import ErrorObject as ErrorObject +from .compound_filter import CompoundFilter as CompoundFilter +from .reasoning_effort import ReasoningEffort as ReasoningEffort +from .comparison_filter import ComparisonFilter as ComparisonFilter from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters from .response_format_text import ResponseFormatText as ResponseFormatText diff --git a/src/openai/types/shared/chat_model.py b/src/openai/types/shared/chat_model.py index 6fe705a0b4..31d7104e6e 100644 --- a/src/openai/types/shared/chat_model.py +++ b/src/openai/types/shared/chat_model.py @@ -13,6 +13,9 @@ "o1-preview-2024-09-12", "o1-mini", "o1-mini-2024-09-12", + "computer-use-preview", + "computer-use-preview-2025-02-04", + "computer-use-preview-2025-03-11", "gpt-4.5-preview", "gpt-4.5-preview-2025-02-27", "gpt-4o", diff --git a/src/openai/types/shared/comparison_filter.py b/src/openai/types/shared/comparison_filter.py new file mode 100644 index 0000000000..2ec2651ff2 --- /dev/null +++ b/src/openai/types/shared/comparison_filter.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ComparisonFilter"] + + +class ComparisonFilter(BaseModel): + key: str + """The key to compare against the value.""" + + type: Literal["eq", "ne", "gt", "gte", "lt", "lte"] + """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. + + - `eq`: equals + - `ne`: not equal + - `gt`: greater than + - `gte`: greater than or equal + - `lt`: less than + - `lte`: less than or equal + """ + + value: Union[str, float, bool] + """ + The value to compare against the attribute key; supports string, number, or + boolean types. + """ diff --git a/src/openai/types/shared/compound_filter.py b/src/openai/types/shared/compound_filter.py new file mode 100644 index 0000000000..3aefa43647 --- /dev/null +++ b/src/openai/types/shared/compound_filter.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union +from typing_extensions import Literal, TypeAlias + +from ..._models import BaseModel +from .comparison_filter import ComparisonFilter + +__all__ = ["CompoundFilter", "Filter"] + +Filter: TypeAlias = Union[ComparisonFilter, object] + + +class CompoundFilter(BaseModel): + filters: List[Filter] + """Array of filters to combine. + + Items can be `ComparisonFilter` or `CompoundFilter`. + """ + + type: Literal["and", "or"] + """Type of operation: `and` or `or`.""" diff --git a/src/openai/types/shared/reasoning.py b/src/openai/types/shared/reasoning.py new file mode 100644 index 0000000000..50821a1727 --- /dev/null +++ b/src/openai/types/shared/reasoning.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
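`ComparisonFilter` and `CompoundFilter` above nest arbitrarily. A small sketch of a filter over hypothetical file attributes, written as plain dicts matching those shapes (where such a filter is accepted, for example by the file search tool, is determined by the param types elsewhere in this patch):

# (category == "blog") AND (views >= 100 OR featured == True) -- attribute keys are made up.
filters = {
    "type": "and",
    "filters": [
        {"type": "eq", "key": "category", "value": "blog"},
        {
            "type": "or",
            "filters": [
                {"type": "gte", "key": "views", "value": 100},
                {"type": "eq", "key": "featured", "value": True},
            ],
        },
    ],
}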
+ +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from .reasoning_effort import ReasoningEffort + +__all__ = ["Reasoning"] + + +class Reasoning(BaseModel): + effort: Optional[ReasoningEffort] = None + """**o-series models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + """ + + generate_summary: Optional[Literal["concise", "detailed"]] = None + """**o-series models only** + + A summary of the reasoning performed by the model. This can be useful for + debugging and understanding the model's reasoning process. One of `concise` or + `detailed`. + """ diff --git a/src/openai/types/shared/reasoning_effort.py b/src/openai/types/shared/reasoning_effort.py new file mode 100644 index 0000000000..ace21b67e4 --- /dev/null +++ b/src/openai/types/shared/reasoning_effort.py @@ -0,0 +1,8 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal, TypeAlias + +__all__ = ["ReasoningEffort"] + +ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]] diff --git a/src/openai/types/shared/response_format_json_object.py b/src/openai/types/shared/response_format_json_object.py index 107728dd2e..2aaa5dbdfe 100644 --- a/src/openai/types/shared/response_format_json_object.py +++ b/src/openai/types/shared/response_format_json_object.py @@ -9,4 +9,4 @@ class ResponseFormatJSONObject(BaseModel): type: Literal["json_object"] - """The type of response format being defined: `json_object`""" + """The type of response format being defined. Always `json_object`.""" diff --git a/src/openai/types/shared/response_format_json_schema.py b/src/openai/types/shared/response_format_json_schema.py index 3194a4fe91..c7924446f4 100644 --- a/src/openai/types/shared/response_format_json_schema.py +++ b/src/openai/types/shared/response_format_json_schema.py @@ -25,20 +25,24 @@ class JSONSchema(BaseModel): """ schema_: Optional[Dict[str, object]] = FieldInfo(alias="schema", default=None) - """The schema for the response format, described as a JSON Schema object.""" + """ + The schema for the response format, described as a JSON Schema object. Learn how + to build JSON schemas [here](https://json-schema.org/). + """ strict: Optional[bool] = None - """Whether to enable strict schema adherence when generating the output. - - If set to true, the model will always follow the exact schema defined in the - `schema` field. Only a subset of JSON Schema is supported when `strict` is - `true`. To learn more, read the + """ + Whether to enable strict schema adherence when generating the output. If set to + true, the model will always follow the exact schema defined in the `schema` + field. Only a subset of JSON Schema is supported when `strict` is `true`. To + learn more, read the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). """ class ResponseFormatJSONSchema(BaseModel): json_schema: JSONSchema + """Structured Outputs configuration options, including a JSON Schema.""" type: Literal["json_schema"] - """The type of response format being defined: `json_schema`""" + """The type of response format being defined. 
Always `json_schema`.""" diff --git a/src/openai/types/shared/response_format_text.py b/src/openai/types/shared/response_format_text.py index 6721fe0973..f0c8cfb700 100644 --- a/src/openai/types/shared/response_format_text.py +++ b/src/openai/types/shared/response_format_text.py @@ -9,4 +9,4 @@ class ResponseFormatText(BaseModel): type: Literal["text"] - """The type of response format being defined: `text`""" + """The type of response format being defined. Always `text`.""" diff --git a/src/openai/types/shared_params/__init__.py b/src/openai/types/shared_params/__init__.py index 47a747b2d4..4a4a8cdf1e 100644 --- a/src/openai/types/shared_params/__init__.py +++ b/src/openai/types/shared_params/__init__.py @@ -1,7 +1,11 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from .metadata import Metadata as Metadata +from .reasoning import Reasoning as Reasoning from .chat_model import ChatModel as ChatModel +from .compound_filter import CompoundFilter as CompoundFilter +from .reasoning_effort import ReasoningEffort as ReasoningEffort +from .comparison_filter import ComparisonFilter as ComparisonFilter from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters from .response_format_text import ResponseFormatText as ResponseFormatText diff --git a/src/openai/types/shared_params/chat_model.py b/src/openai/types/shared_params/chat_model.py index 0ac3f31611..55649876eb 100644 --- a/src/openai/types/shared_params/chat_model.py +++ b/src/openai/types/shared_params/chat_model.py @@ -15,6 +15,9 @@ "o1-preview-2024-09-12", "o1-mini", "o1-mini-2024-09-12", + "computer-use-preview", + "computer-use-preview-2025-02-04", + "computer-use-preview-2025-03-11", "gpt-4.5-preview", "gpt-4.5-preview-2025-02-27", "gpt-4o", diff --git a/src/openai/types/shared_params/comparison_filter.py b/src/openai/types/shared_params/comparison_filter.py new file mode 100644 index 0000000000..38edd315ed --- /dev/null +++ b/src/openai/types/shared_params/comparison_filter.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ComparisonFilter"] + + +class ComparisonFilter(TypedDict, total=False): + key: Required[str] + """The key to compare against the value.""" + + type: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte"]] + """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. + + - `eq`: equals + - `ne`: not equal + - `gt`: greater than + - `gte`: greater than or equal + - `lt`: less than + - `lte`: less than or equal + """ + + value: Required[Union[str, float, bool]] + """ + The value to compare against the attribute key; supports string, number, or + boolean types. + """ diff --git a/src/openai/types/shared_params/compound_filter.py b/src/openai/types/shared_params/compound_filter.py new file mode 100644 index 0000000000..d12e9b1bda --- /dev/null +++ b/src/openai/types/shared_params/compound_filter.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
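The Reasoning model and the ReasoningEffort alias introduced above back two request parameters that the tests later in this patch exercise: `reasoning_effort` on Chat Completions and the richer `reasoning` object on the new Responses API. A hedged usage sketch, assuming a configured client and an o-series model name (both illustrative here):

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # Chat Completions takes the bare effort literal (ReasoningEffort).
    completion = client.chat.completions.create(
        model="o1",  # illustrative; the docstrings scope these fields to o-series models
        messages=[{"role": "user", "content": "Explain the chain rule briefly."}],
        reasoning_effort="low",
    )

    # The Responses API takes the full Reasoning object.
    response = client.responses.create(
        model="o1",
        input="Explain the chain rule briefly.",
        reasoning={"effort": "low", "generate_summary": "concise"},
    )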
+ +from __future__ import annotations + +from typing import Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .comparison_filter import ComparisonFilter + +__all__ = ["CompoundFilter", "Filter"] + +Filter: TypeAlias = Union[ComparisonFilter, object] + + +class CompoundFilter(TypedDict, total=False): + filters: Required[Iterable[Filter]] + """Array of filters to combine. + + Items can be `ComparisonFilter` or `CompoundFilter`. + """ + + type: Required[Literal["and", "or"]] + """Type of operation: `and` or `or`.""" diff --git a/src/openai/types/shared_params/reasoning.py b/src/openai/types/shared_params/reasoning.py new file mode 100644 index 0000000000..f2b5c5963a --- /dev/null +++ b/src/openai/types/shared_params/reasoning.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, Required, TypedDict + +from ..shared.reasoning_effort import ReasoningEffort + +__all__ = ["Reasoning"] + + +class Reasoning(TypedDict, total=False): + effort: Required[Optional[ReasoningEffort]] + """**o-series models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + """ + + generate_summary: Optional[Literal["concise", "detailed"]] + """**o-series models only** + + A summary of the reasoning performed by the model. This can be useful for + debugging and understanding the model's reasoning process. One of `concise` or + `detailed`. + """ diff --git a/src/openai/types/shared_params/reasoning_effort.py b/src/openai/types/shared_params/reasoning_effort.py new file mode 100644 index 0000000000..6052c5ae15 --- /dev/null +++ b/src/openai/types/shared_params/reasoning_effort.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, TypeAlias + +__all__ = ["ReasoningEffort"] + +ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]] diff --git a/src/openai/types/shared_params/response_format_json_object.py b/src/openai/types/shared_params/response_format_json_object.py index 8419c6cb56..d4d1deaae5 100644 --- a/src/openai/types/shared_params/response_format_json_object.py +++ b/src/openai/types/shared_params/response_format_json_object.py @@ -9,4 +9,4 @@ class ResponseFormatJSONObject(TypedDict, total=False): type: Required[Literal["json_object"]] - """The type of response format being defined: `json_object`""" + """The type of response format being defined. Always `json_object`.""" diff --git a/src/openai/types/shared_params/response_format_json_schema.py b/src/openai/types/shared_params/response_format_json_schema.py index 4b60fae8ee..5b0a13ee06 100644 --- a/src/openai/types/shared_params/response_format_json_schema.py +++ b/src/openai/types/shared_params/response_format_json_schema.py @@ -23,20 +23,24 @@ class JSONSchema(TypedDict, total=False): """ schema: Dict[str, object] - """The schema for the response format, described as a JSON Schema object.""" + """ + The schema for the response format, described as a JSON Schema object. Learn how + to build JSON schemas [here](https://json-schema.org/). 
+ """ strict: Optional[bool] - """Whether to enable strict schema adherence when generating the output. - - If set to true, the model will always follow the exact schema defined in the - `schema` field. Only a subset of JSON Schema is supported when `strict` is - `true`. To learn more, read the + """ + Whether to enable strict schema adherence when generating the output. If set to + true, the model will always follow the exact schema defined in the `schema` + field. Only a subset of JSON Schema is supported when `strict` is `true`. To + learn more, read the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). """ class ResponseFormatJSONSchema(TypedDict, total=False): json_schema: Required[JSONSchema] + """Structured Outputs configuration options, including a JSON Schema.""" type: Required[Literal["json_schema"]] - """The type of response format being defined: `json_schema`""" + """The type of response format being defined. Always `json_schema`.""" diff --git a/src/openai/types/shared_params/response_format_text.py b/src/openai/types/shared_params/response_format_text.py index 5bec7fc503..c3ef2b0816 100644 --- a/src/openai/types/shared_params/response_format_text.py +++ b/src/openai/types/shared_params/response_format_text.py @@ -9,4 +9,4 @@ class ResponseFormatText(TypedDict, total=False): type: Required[Literal["text"]] - """The type of response format being defined: `text`""" + """The type of response format being defined. Always `text`.""" diff --git a/src/openai/types/beta/static_file_chunking_strategy.py b/src/openai/types/static_file_chunking_strategy.py similarity index 94% rename from src/openai/types/beta/static_file_chunking_strategy.py rename to src/openai/types/static_file_chunking_strategy.py index 6080093517..2813bc6630 100644 --- a/src/openai/types/beta/static_file_chunking_strategy.py +++ b/src/openai/types/static_file_chunking_strategy.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from ..._models import BaseModel +from .._models import BaseModel __all__ = ["StaticFileChunkingStrategy"] diff --git a/src/openai/types/beta/static_file_chunking_strategy_object.py b/src/openai/types/static_file_chunking_strategy_object.py similarity index 92% rename from src/openai/types/beta/static_file_chunking_strategy_object.py rename to src/openai/types/static_file_chunking_strategy_object.py index 896c4b8320..2a95dce5b3 100644 --- a/src/openai/types/beta/static_file_chunking_strategy_object.py +++ b/src/openai/types/static_file_chunking_strategy_object.py @@ -2,7 +2,7 @@ from typing_extensions import Literal -from ..._models import BaseModel +from .._models import BaseModel from .static_file_chunking_strategy import StaticFileChunkingStrategy __all__ = ["StaticFileChunkingStrategyObject"] diff --git a/src/openai/types/beta/static_file_chunking_strategy_object_param.py b/src/openai/types/static_file_chunking_strategy_object_param.py similarity index 100% rename from src/openai/types/beta/static_file_chunking_strategy_object_param.py rename to src/openai/types/static_file_chunking_strategy_object_param.py diff --git a/src/openai/types/beta/static_file_chunking_strategy_param.py b/src/openai/types/static_file_chunking_strategy_param.py similarity index 100% rename from src/openai/types/beta/static_file_chunking_strategy_param.py rename to src/openai/types/static_file_chunking_strategy_param.py diff --git a/src/openai/types/beta/vector_store.py b/src/openai/types/vector_store.py similarity index 97% rename from src/openai/types/beta/vector_store.py rename to src/openai/types/vector_store.py index b947dfb79d..2473a442d2 100644 --- a/src/openai/types/beta/vector_store.py +++ b/src/openai/types/vector_store.py @@ -3,8 +3,8 @@ from typing import Optional from typing_extensions import Literal -from ..._models import BaseModel -from ..shared.metadata import Metadata +from .._models import BaseModel +from .shared.metadata import Metadata __all__ = ["VectorStore", "FileCounts", "ExpiresAfter"] diff --git a/src/openai/types/beta/vector_store_create_params.py b/src/openai/types/vector_store_create_params.py similarity index 97% rename from src/openai/types/beta/vector_store_create_params.py rename to src/openai/types/vector_store_create_params.py index faca6d9000..365d0936b1 100644 --- a/src/openai/types/beta/vector_store_create_params.py +++ b/src/openai/types/vector_store_create_params.py @@ -5,7 +5,7 @@ from typing import List, Optional from typing_extensions import Literal, Required, TypedDict -from ..shared_params.metadata import Metadata +from .shared_params.metadata import Metadata from .file_chunking_strategy_param import FileChunkingStrategyParam __all__ = ["VectorStoreCreateParams", "ExpiresAfter"] diff --git a/src/openai/types/beta/vector_store_deleted.py b/src/openai/types/vector_store_deleted.py similarity index 89% rename from src/openai/types/beta/vector_store_deleted.py rename to src/openai/types/vector_store_deleted.py index 21ccda1db5..dfac9ce8bd 100644 --- a/src/openai/types/beta/vector_store_deleted.py +++ b/src/openai/types/vector_store_deleted.py @@ -2,7 +2,7 @@ from typing_extensions import Literal -from ..._models import BaseModel +from .._models import BaseModel __all__ = ["VectorStoreDeleted"] diff --git a/src/openai/types/beta/vector_store_list_params.py b/src/openai/types/vector_store_list_params.py similarity index 100% rename from src/openai/types/beta/vector_store_list_params.py rename to src/openai/types/vector_store_list_params.py diff --git 
a/src/openai/types/vector_store_search_params.py b/src/openai/types/vector_store_search_params.py new file mode 100644 index 0000000000..17573d0f61 --- /dev/null +++ b/src/openai/types/vector_store_search_params.py @@ -0,0 +1,40 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .shared_params.compound_filter import CompoundFilter +from .shared_params.comparison_filter import ComparisonFilter + +__all__ = ["VectorStoreSearchParams", "Filters", "RankingOptions"] + + +class VectorStoreSearchParams(TypedDict, total=False): + query: Required[Union[str, List[str]]] + """A query string for a search""" + + filters: Filters + """A filter to apply based on file attributes.""" + + max_num_results: int + """The maximum number of results to return. + + This number should be between 1 and 50 inclusive. + """ + + ranking_options: RankingOptions + """Ranking options for search.""" + + rewrite_query: bool + """Whether to rewrite the natural language query for vector search.""" + + +Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter] + + +class RankingOptions(TypedDict, total=False): + ranker: Literal["auto", "default-2024-11-15"] + + score_threshold: float diff --git a/src/openai/types/vector_store_search_response.py b/src/openai/types/vector_store_search_response.py new file mode 100644 index 0000000000..d78b71bfba --- /dev/null +++ b/src/openai/types/vector_store_search_response.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["VectorStoreSearchResponse", "Content"] + + +class Content(BaseModel): + text: str + """The text content returned from search.""" + + type: Literal["text"] + """The type of content.""" + + +class VectorStoreSearchResponse(BaseModel): + attributes: Optional[Dict[str, Union[str, float, bool]]] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. Keys are + strings with a maximum length of 64 characters. Values are strings with a + maximum length of 512 characters, booleans, or numbers. 
+ """ + + content: List[Content] + """Content chunks from the file.""" + + file_id: str + """The ID of the vector store file.""" + + filename: str + """The name of the vector store file.""" + + score: float + """The similarity score for the result.""" diff --git a/src/openai/types/beta/vector_store_update_params.py b/src/openai/types/vector_store_update_params.py similarity index 96% rename from src/openai/types/beta/vector_store_update_params.py rename to src/openai/types/vector_store_update_params.py index e91b3ba5ad..4f6ac63963 100644 --- a/src/openai/types/beta/vector_store_update_params.py +++ b/src/openai/types/vector_store_update_params.py @@ -5,7 +5,7 @@ from typing import Optional from typing_extensions import Literal, Required, TypedDict -from ..shared_params.metadata import Metadata +from .shared_params.metadata import Metadata __all__ = ["VectorStoreUpdateParams", "ExpiresAfter"] diff --git a/src/openai/types/beta/vector_stores/__init__.py b/src/openai/types/vector_stores/__init__.py similarity index 82% rename from src/openai/types/beta/vector_stores/__init__.py rename to src/openai/types/vector_stores/__init__.py index ff05dd63d8..96ce301481 100644 --- a/src/openai/types/beta/vector_stores/__init__.py +++ b/src/openai/types/vector_stores/__init__.py @@ -5,6 +5,8 @@ from .file_list_params import FileListParams as FileListParams from .vector_store_file import VectorStoreFile as VectorStoreFile from .file_create_params import FileCreateParams as FileCreateParams +from .file_update_params import FileUpdateParams as FileUpdateParams +from .file_content_response import FileContentResponse as FileContentResponse from .vector_store_file_batch import VectorStoreFileBatch as VectorStoreFileBatch from .file_batch_create_params import FileBatchCreateParams as FileBatchCreateParams from .vector_store_file_deleted import VectorStoreFileDeleted as VectorStoreFileDeleted diff --git a/src/openai/types/beta/vector_stores/file_batch_create_params.py b/src/openai/types/vector_stores/file_batch_create_params.py similarity index 61% rename from src/openai/types/beta/vector_stores/file_batch_create_params.py rename to src/openai/types/vector_stores/file_batch_create_params.py index e42ea99cd1..1a470f757a 100644 --- a/src/openai/types/beta/vector_stores/file_batch_create_params.py +++ b/src/openai/types/vector_stores/file_batch_create_params.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List +from typing import Dict, List, Union, Optional from typing_extensions import Required, TypedDict from ..file_chunking_strategy_param import FileChunkingStrategyParam @@ -18,6 +18,15 @@ class FileBatchCreateParams(TypedDict, total=False): files. """ + attributes: Optional[Dict[str, Union[str, float, bool]]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. Keys are + strings with a maximum length of 64 characters. Values are strings with a + maximum length of 512 characters, booleans, or numbers. + """ + chunking_strategy: FileChunkingStrategyParam """The chunking strategy used to chunk the file(s). 
diff --git a/src/openai/types/beta/vector_stores/file_batch_list_files_params.py b/src/openai/types/vector_stores/file_batch_list_files_params.py similarity index 100% rename from src/openai/types/beta/vector_stores/file_batch_list_files_params.py rename to src/openai/types/vector_stores/file_batch_list_files_params.py diff --git a/src/openai/types/vector_stores/file_content_response.py b/src/openai/types/vector_stores/file_content_response.py new file mode 100644 index 0000000000..32db2f2ce9 --- /dev/null +++ b/src/openai/types/vector_stores/file_content_response.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["FileContentResponse"] + + +class FileContentResponse(BaseModel): + text: Optional[str] = None + """The text content""" + + type: Optional[str] = None + """The content type (currently only `"text"`)""" diff --git a/src/openai/types/beta/vector_stores/file_create_params.py b/src/openai/types/vector_stores/file_create_params.py similarity index 60% rename from src/openai/types/beta/vector_stores/file_create_params.py rename to src/openai/types/vector_stores/file_create_params.py index d074d766e6..5b8989251a 100644 --- a/src/openai/types/beta/vector_stores/file_create_params.py +++ b/src/openai/types/vector_stores/file_create_params.py @@ -2,6 +2,7 @@ from __future__ import annotations +from typing import Dict, Union, Optional from typing_extensions import Required, TypedDict from ..file_chunking_strategy_param import FileChunkingStrategyParam @@ -17,6 +18,15 @@ class FileCreateParams(TypedDict, total=False): files. """ + attributes: Optional[Dict[str, Union[str, float, bool]]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. Keys are + strings with a maximum length of 64 characters. Values are strings with a + maximum length of 512 characters, booleans, or numbers. + """ + chunking_strategy: FileChunkingStrategyParam """The chunking strategy used to chunk the file(s). diff --git a/src/openai/types/beta/vector_stores/file_list_params.py b/src/openai/types/vector_stores/file_list_params.py similarity index 100% rename from src/openai/types/beta/vector_stores/file_list_params.py rename to src/openai/types/vector_stores/file_list_params.py diff --git a/src/openai/types/vector_stores/file_update_params.py b/src/openai/types/vector_stores/file_update_params.py new file mode 100644 index 0000000000..ebf540d046 --- /dev/null +++ b/src/openai/types/vector_stores/file_update_params.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Union, Optional +from typing_extensions import Required, TypedDict + +__all__ = ["FileUpdateParams"] + + +class FileUpdateParams(TypedDict, total=False): + vector_store_id: Required[str] + + attributes: Required[Optional[Dict[str, Union[str, float, bool]]]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. Keys are + strings with a maximum length of 64 characters. Values are strings with a + maximum length of 512 characters, booleans, or numbers. 
+ """ diff --git a/src/openai/types/beta/vector_stores/vector_store_file.py b/src/openai/types/vector_stores/vector_store_file.py similarity index 76% rename from src/openai/types/beta/vector_stores/vector_store_file.py rename to src/openai/types/vector_stores/vector_store_file.py index e4608e159c..b59a61dfb0 100644 --- a/src/openai/types/beta/vector_stores/vector_store_file.py +++ b/src/openai/types/vector_stores/vector_store_file.py @@ -1,9 +1,9 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Optional +from typing import Dict, Union, Optional from typing_extensions import Literal -from ...._models import BaseModel +from ..._models import BaseModel from ..file_chunking_strategy import FileChunkingStrategy __all__ = ["VectorStoreFile", "LastError"] @@ -54,5 +54,14 @@ class VectorStoreFile(BaseModel): attached to. """ + attributes: Optional[Dict[str, Union[str, float, bool]]] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. Keys are + strings with a maximum length of 64 characters. Values are strings with a + maximum length of 512 characters, booleans, or numbers. + """ + chunking_strategy: Optional[FileChunkingStrategy] = None """The strategy used to chunk the file.""" diff --git a/src/openai/types/beta/vector_stores/vector_store_file_batch.py b/src/openai/types/vector_stores/vector_store_file_batch.py similarity index 97% rename from src/openai/types/beta/vector_stores/vector_store_file_batch.py rename to src/openai/types/vector_stores/vector_store_file_batch.py index df130a58de..57dbfbd809 100644 --- a/src/openai/types/beta/vector_stores/vector_store_file_batch.py +++ b/src/openai/types/vector_stores/vector_store_file_batch.py @@ -2,7 +2,7 @@ from typing_extensions import Literal -from ...._models import BaseModel +from ..._models import BaseModel __all__ = ["VectorStoreFileBatch", "FileCounts"] diff --git a/src/openai/types/beta/vector_stores/vector_store_file_deleted.py b/src/openai/types/vector_stores/vector_store_file_deleted.py similarity index 89% rename from src/openai/types/beta/vector_stores/vector_store_file_deleted.py rename to src/openai/types/vector_stores/vector_store_file_deleted.py index ae37f84364..5c856f26cd 100644 --- a/src/openai/types/beta/vector_stores/vector_store_file_deleted.py +++ b/src/openai/types/vector_stores/vector_store_file_deleted.py @@ -2,7 +2,7 @@ from typing_extensions import Literal -from ...._models import BaseModel +from ..._models import BaseModel __all__ = ["VectorStoreFileDeleted"] diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 760fba0a37..3c4a9e4a19 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -73,9 +73,9 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: presence_penalty=-2, reasoning_effort="low", response_format={"type": "text"}, - seed=0, + seed=-9007199254740991, service_tier="auto", - stop="string", + stop="\n", store=True, stream=False, stream_options={"include_usage": True}, @@ -95,6 +95,18 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: top_logprobs=0, top_p=1, user="user-1234", + web_search_options={ + "search_context_size": "low", + "user_location": { + "approximate": { + "city": "city", + "country": 
"country", + "region": "region", + "timezone": "timezone", + }, + "type": "approximate", + }, + }, ) assert_matches_type(ChatCompletion, completion, path=["response"]) @@ -188,9 +200,9 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: presence_penalty=-2, reasoning_effort="low", response_format={"type": "text"}, - seed=0, + seed=-9007199254740991, service_tier="auto", - stop="string", + stop="\n", store=True, stream_options={"include_usage": True}, temperature=1, @@ -209,6 +221,18 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: top_logprobs=0, top_p=1, user="user-1234", + web_search_options={ + "search_context_size": "low", + "user_location": { + "approximate": { + "city": "city", + "country": "country", + "region": "region", + "timezone": "timezone", + }, + "type": "approximate", + }, + }, ) completion_stream.response.close() @@ -459,9 +483,9 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn presence_penalty=-2, reasoning_effort="low", response_format={"type": "text"}, - seed=0, + seed=-9007199254740991, service_tier="auto", - stop="string", + stop="\n", store=True, stream=False, stream_options={"include_usage": True}, @@ -481,6 +505,18 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn top_logprobs=0, top_p=1, user="user-1234", + web_search_options={ + "search_context_size": "low", + "user_location": { + "approximate": { + "city": "city", + "country": "country", + "region": "region", + "timezone": "timezone", + }, + "type": "approximate", + }, + }, ) assert_matches_type(ChatCompletion, completion, path=["response"]) @@ -574,9 +610,9 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn presence_penalty=-2, reasoning_effort="low", response_format={"type": "text"}, - seed=0, + seed=-9007199254740991, service_tier="auto", - stop="string", + stop="\n", store=True, stream_options={"include_usage": True}, temperature=1, @@ -595,6 +631,18 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn top_logprobs=0, top_p=1, user="user-1234", + web_search_options={ + "search_context_size": "low", + "user_location": { + "approximate": { + "city": "city", + "country": "country", + "region": "region", + "timezone": "timezone", + }, + "type": "approximate", + }, + }, ) await completion_stream.response.aclose() diff --git a/tests/api_resources/beta/vector_stores/__init__.py b/tests/api_resources/responses/__init__.py similarity index 100% rename from tests/api_resources/beta/vector_stores/__init__.py rename to tests/api_resources/responses/__init__.py diff --git a/tests/api_resources/responses/test_input_items.py b/tests/api_resources/responses/test_input_items.py new file mode 100644 index 0000000000..28c5e8ca1f --- /dev/null +++ b/tests/api_resources/responses/test_input_items.py @@ -0,0 +1,121 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from openai import OpenAI, AsyncOpenAI +from tests.utils import assert_matches_type +from openai.pagination import SyncCursorPage, AsyncCursorPage +from openai.types.responses.response_item_list import Data + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010/") + + +class TestInputItems: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + def test_method_list(self, client: OpenAI) -> None: + input_item = client.responses.input_items.list( + response_id="response_id", + ) + assert_matches_type(SyncCursorPage[Data], input_item, path=["response"]) + + @parametrize + def test_method_list_with_all_params(self, client: OpenAI) -> None: + input_item = client.responses.input_items.list( + response_id="response_id", + after="after", + before="before", + limit=0, + order="asc", + ) + assert_matches_type(SyncCursorPage[Data], input_item, path=["response"]) + + @parametrize + def test_raw_response_list(self, client: OpenAI) -> None: + response = client.responses.input_items.with_raw_response.list( + response_id="response_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + input_item = response.parse() + assert_matches_type(SyncCursorPage[Data], input_item, path=["response"]) + + @parametrize + def test_streaming_response_list(self, client: OpenAI) -> None: + with client.responses.input_items.with_streaming_response.list( + response_id="response_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + input_item = response.parse() + assert_matches_type(SyncCursorPage[Data], input_item, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_list(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"): + client.responses.input_items.with_raw_response.list( + response_id="", + ) + + +class TestAsyncInputItems: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + async def test_method_list(self, async_client: AsyncOpenAI) -> None: + input_item = await async_client.responses.input_items.list( + response_id="response_id", + ) + assert_matches_type(AsyncCursorPage[Data], input_item, path=["response"]) + + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: + input_item = await async_client.responses.input_items.list( + response_id="response_id", + after="after", + before="before", + limit=0, + order="asc", + ) + assert_matches_type(AsyncCursorPage[Data], input_item, path=["response"]) + + @parametrize + async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: + response = await async_client.responses.input_items.with_raw_response.list( + response_id="response_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + input_item = response.parse() + assert_matches_type(AsyncCursorPage[Data], input_item, path=["response"]) + + @parametrize + async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: + async with async_client.responses.input_items.with_streaming_response.list( + response_id="response_id", + ) as 
response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + input_item = await response.parse() + assert_matches_type(AsyncCursorPage[Data], input_item, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_list(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"): + await async_client.responses.input_items.with_raw_response.list( + response_id="", + ) diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py new file mode 100644 index 0000000000..e45a5becf3 --- /dev/null +++ b/tests/api_resources/test_responses.py @@ -0,0 +1,498 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from openai import OpenAI, AsyncOpenAI +from tests.utils import assert_matches_type +from openai.types.responses import Response + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010/") + + +class TestResponses: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + def test_method_create_overload_1(self, client: OpenAI) -> None: + response = client.responses.create( + input="string", + model="gpt-4o", + ) + assert_matches_type(Response, response, path=["response"]) + + @parametrize + def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: + response = client.responses.create( + input="string", + model="gpt-4o", + include=["file_search_call.results"], + instructions="instructions", + max_output_tokens=0, + metadata={"foo": "string"}, + parallel_tool_calls=True, + previous_response_id="previous_response_id", + reasoning={ + "effort": "low", + "generate_summary": "concise", + }, + store=True, + stream=False, + temperature=1, + text={"format": {"type": "text"}}, + tool_choice="none", + tools=[ + { + "type": "file_search", + "vector_store_ids": ["string"], + "filters": { + "key": "key", + "type": "eq", + "value": "string", + }, + "max_num_results": 0, + "ranking_options": { + "ranker": "auto", + "score_threshold": 0, + }, + } + ], + top_p=1, + truncation="auto", + user="user-1234", + ) + assert_matches_type(Response, response, path=["response"]) + + @parametrize + def test_raw_response_create_overload_1(self, client: OpenAI) -> None: + http_response = client.responses.with_raw_response.create( + input="string", + model="gpt-4o", + ) + + assert http_response.is_closed is True + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + response = http_response.parse() + assert_matches_type(Response, response, path=["response"]) + + @parametrize + def test_streaming_response_create_overload_1(self, client: OpenAI) -> None: + with client.responses.with_streaming_response.create( + input="string", + model="gpt-4o", + ) as http_response: + assert not http_response.is_closed + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + + response = http_response.parse() + assert_matches_type(Response, response, path=["response"]) + + assert cast(Any, http_response.is_closed) is True + + @parametrize + def test_method_create_overload_2(self, client: OpenAI) -> None: + response_stream = client.responses.create( + input="string", + model="gpt-4o", + stream=True, + ) + 
response_stream.response.close() + + @parametrize + def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: + response_stream = client.responses.create( + input="string", + model="gpt-4o", + stream=True, + include=["file_search_call.results"], + instructions="instructions", + max_output_tokens=0, + metadata={"foo": "string"}, + parallel_tool_calls=True, + previous_response_id="previous_response_id", + reasoning={ + "effort": "low", + "generate_summary": "concise", + }, + store=True, + temperature=1, + text={"format": {"type": "text"}}, + tool_choice="none", + tools=[ + { + "type": "file_search", + "vector_store_ids": ["string"], + "filters": { + "key": "key", + "type": "eq", + "value": "string", + }, + "max_num_results": 0, + "ranking_options": { + "ranker": "auto", + "score_threshold": 0, + }, + } + ], + top_p=1, + truncation="auto", + user="user-1234", + ) + response_stream.response.close() + + @parametrize + def test_raw_response_create_overload_2(self, client: OpenAI) -> None: + response = client.responses.with_raw_response.create( + input="string", + model="gpt-4o", + stream=True, + ) + + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + stream = response.parse() + stream.close() + + @parametrize + def test_streaming_response_create_overload_2(self, client: OpenAI) -> None: + with client.responses.with_streaming_response.create( + input="string", + model="gpt-4o", + stream=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + stream = response.parse() + stream.close() + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_method_retrieve(self, client: OpenAI) -> None: + response = client.responses.retrieve( + response_id="resp_677efb5139a88190b512bc3fef8e535d", + ) + assert_matches_type(Response, response, path=["response"]) + + @parametrize + def test_method_retrieve_with_all_params(self, client: OpenAI) -> None: + response = client.responses.retrieve( + response_id="resp_677efb5139a88190b512bc3fef8e535d", + include=["file_search_call.results"], + ) + assert_matches_type(Response, response, path=["response"]) + + @parametrize + def test_raw_response_retrieve(self, client: OpenAI) -> None: + http_response = client.responses.with_raw_response.retrieve( + response_id="resp_677efb5139a88190b512bc3fef8e535d", + ) + + assert http_response.is_closed is True + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + response = http_response.parse() + assert_matches_type(Response, response, path=["response"]) + + @parametrize + def test_streaming_response_retrieve(self, client: OpenAI) -> None: + with client.responses.with_streaming_response.retrieve( + response_id="resp_677efb5139a88190b512bc3fef8e535d", + ) as http_response: + assert not http_response.is_closed + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + + response = http_response.parse() + assert_matches_type(Response, response, path=["response"]) + + assert cast(Any, http_response.is_closed) is True + + @parametrize + def test_path_params_retrieve(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"): + client.responses.with_raw_response.retrieve( + response_id="", + ) + + @parametrize + def test_method_delete(self, client: OpenAI) -> None: + response = client.responses.delete( + "resp_677efb5139a88190b512bc3fef8e535d", + ) + assert response is None 
+ + @parametrize + def test_raw_response_delete(self, client: OpenAI) -> None: + http_response = client.responses.with_raw_response.delete( + "resp_677efb5139a88190b512bc3fef8e535d", + ) + + assert http_response.is_closed is True + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + response = http_response.parse() + assert response is None + + @parametrize + def test_streaming_response_delete(self, client: OpenAI) -> None: + with client.responses.with_streaming_response.delete( + "resp_677efb5139a88190b512bc3fef8e535d", + ) as http_response: + assert not http_response.is_closed + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + + response = http_response.parse() + assert response is None + + assert cast(Any, http_response.is_closed) is True + + @parametrize + def test_path_params_delete(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"): + client.responses.with_raw_response.delete( + "", + ) + + +class TestAsyncResponses: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None: + response = await async_client.responses.create( + input="string", + model="gpt-4o", + ) + assert_matches_type(Response, response, path=["response"]) + + @parametrize + async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None: + response = await async_client.responses.create( + input="string", + model="gpt-4o", + include=["file_search_call.results"], + instructions="instructions", + max_output_tokens=0, + metadata={"foo": "string"}, + parallel_tool_calls=True, + previous_response_id="previous_response_id", + reasoning={ + "effort": "low", + "generate_summary": "concise", + }, + store=True, + stream=False, + temperature=1, + text={"format": {"type": "text"}}, + tool_choice="none", + tools=[ + { + "type": "file_search", + "vector_store_ids": ["string"], + "filters": { + "key": "key", + "type": "eq", + "value": "string", + }, + "max_num_results": 0, + "ranking_options": { + "ranker": "auto", + "score_threshold": 0, + }, + } + ], + top_p=1, + truncation="auto", + user="user-1234", + ) + assert_matches_type(Response, response, path=["response"]) + + @parametrize + async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None: + http_response = await async_client.responses.with_raw_response.create( + input="string", + model="gpt-4o", + ) + + assert http_response.is_closed is True + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + response = http_response.parse() + assert_matches_type(Response, response, path=["response"]) + + @parametrize + async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None: + async with async_client.responses.with_streaming_response.create( + input="string", + model="gpt-4o", + ) as http_response: + assert not http_response.is_closed + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + + response = await http_response.parse() + assert_matches_type(Response, response, path=["response"]) + + assert cast(Any, http_response.is_closed) is True + + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None: + response_stream = await async_client.responses.create( + input="string", + model="gpt-4o", + stream=True, + 
) + await response_stream.response.aclose() + + @parametrize + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None: + response_stream = await async_client.responses.create( + input="string", + model="gpt-4o", + stream=True, + include=["file_search_call.results"], + instructions="instructions", + max_output_tokens=0, + metadata={"foo": "string"}, + parallel_tool_calls=True, + previous_response_id="previous_response_id", + reasoning={ + "effort": "low", + "generate_summary": "concise", + }, + store=True, + temperature=1, + text={"format": {"type": "text"}}, + tool_choice="none", + tools=[ + { + "type": "file_search", + "vector_store_ids": ["string"], + "filters": { + "key": "key", + "type": "eq", + "value": "string", + }, + "max_num_results": 0, + "ranking_options": { + "ranker": "auto", + "score_threshold": 0, + }, + } + ], + top_p=1, + truncation="auto", + user="user-1234", + ) + await response_stream.response.aclose() + + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: + response = await async_client.responses.with_raw_response.create( + input="string", + model="gpt-4o", + stream=True, + ) + + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + stream = response.parse() + await stream.close() + + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: + async with async_client.responses.with_streaming_response.create( + input="string", + model="gpt-4o", + stream=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + stream = await response.parse() + await stream.close() + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: + response = await async_client.responses.retrieve( + response_id="resp_677efb5139a88190b512bc3fef8e535d", + ) + assert_matches_type(Response, response, path=["response"]) + + @parametrize + async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None: + response = await async_client.responses.retrieve( + response_id="resp_677efb5139a88190b512bc3fef8e535d", + include=["file_search_call.results"], + ) + assert_matches_type(Response, response, path=["response"]) + + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: + http_response = await async_client.responses.with_raw_response.retrieve( + response_id="resp_677efb5139a88190b512bc3fef8e535d", + ) + + assert http_response.is_closed is True + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + response = http_response.parse() + assert_matches_type(Response, response, path=["response"]) + + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: + async with async_client.responses.with_streaming_response.retrieve( + response_id="resp_677efb5139a88190b512bc3fef8e535d", + ) as http_response: + assert not http_response.is_closed + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + + response = await http_response.parse() + assert_matches_type(Response, response, path=["response"]) + + assert cast(Any, http_response.is_closed) is True + + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but 
received ''"): + await async_client.responses.with_raw_response.retrieve( + response_id="", + ) + + @parametrize + async def test_method_delete(self, async_client: AsyncOpenAI) -> None: + response = await async_client.responses.delete( + "resp_677efb5139a88190b512bc3fef8e535d", + ) + assert response is None + + @parametrize + async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: + http_response = await async_client.responses.with_raw_response.delete( + "resp_677efb5139a88190b512bc3fef8e535d", + ) + + assert http_response.is_closed is True + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + response = http_response.parse() + assert response is None + + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: + async with async_client.responses.with_streaming_response.delete( + "resp_677efb5139a88190b512bc3fef8e535d", + ) as http_response: + assert not http_response.is_closed + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + + response = await http_response.parse() + assert response is None + + assert cast(Any, http_response.is_closed) is True + + @parametrize + async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"): + await async_client.responses.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/beta/test_vector_stores.py b/tests/api_resources/test_vector_stores.py similarity index 64% rename from tests/api_resources/beta/test_vector_stores.py rename to tests/api_resources/test_vector_stores.py index 3216df907b..54bb75bc1d 100644 --- a/tests/api_resources/beta/test_vector_stores.py +++ b/tests/api_resources/test_vector_stores.py @@ -9,11 +9,12 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type -from openai.pagination import SyncCursorPage, AsyncCursorPage -from openai.types.beta import ( +from openai.types import ( VectorStore, VectorStoreDeleted, + VectorStoreSearchResponse, ) +from openai.pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010/") @@ -23,12 +24,12 @@ class TestVectorStores: @parametrize def test_method_create(self, client: OpenAI) -> None: - vector_store = client.beta.vector_stores.create() + vector_store = client.vector_stores.create() assert_matches_type(VectorStore, vector_store, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: - vector_store = client.beta.vector_stores.create( + vector_store = client.vector_stores.create( chunking_strategy={"type": "auto"}, expires_after={ "anchor": "last_active_at", @@ -42,7 +43,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_create(self, client: OpenAI) -> None: - response = client.beta.vector_stores.with_raw_response.create() + response = client.vector_stores.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -51,7 +52,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: - with client.beta.vector_stores.with_streaming_response.create() as response: + with client.vector_stores.with_streaming_response.create() as response: assert not response.is_closed 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -62,14 +63,14 @@ def test_streaming_response_create(self, client: OpenAI) -> None: @parametrize def test_method_retrieve(self, client: OpenAI) -> None: - vector_store = client.beta.vector_stores.retrieve( + vector_store = client.vector_stores.retrieve( "vector_store_id", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: - response = client.beta.vector_stores.with_raw_response.retrieve( + response = client.vector_stores.with_raw_response.retrieve( "vector_store_id", ) @@ -80,7 +81,7 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None: @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: - with client.beta.vector_stores.with_streaming_response.retrieve( + with client.vector_stores.with_streaming_response.retrieve( "vector_store_id", ) as response: assert not response.is_closed @@ -94,20 +95,20 @@ def test_streaming_response_retrieve(self, client: OpenAI) -> None: @parametrize def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.beta.vector_stores.with_raw_response.retrieve( + client.vector_stores.with_raw_response.retrieve( "", ) @parametrize def test_method_update(self, client: OpenAI) -> None: - vector_store = client.beta.vector_stores.update( + vector_store = client.vector_stores.update( vector_store_id="vector_store_id", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @parametrize def test_method_update_with_all_params(self, client: OpenAI) -> None: - vector_store = client.beta.vector_stores.update( + vector_store = client.vector_stores.update( vector_store_id="vector_store_id", expires_after={ "anchor": "last_active_at", @@ -120,7 +121,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_update(self, client: OpenAI) -> None: - response = client.beta.vector_stores.with_raw_response.update( + response = client.vector_stores.with_raw_response.update( vector_store_id="vector_store_id", ) @@ -131,7 +132,7 @@ def test_raw_response_update(self, client: OpenAI) -> None: @parametrize def test_streaming_response_update(self, client: OpenAI) -> None: - with client.beta.vector_stores.with_streaming_response.update( + with client.vector_stores.with_streaming_response.update( vector_store_id="vector_store_id", ) as response: assert not response.is_closed @@ -145,18 +146,18 @@ def test_streaming_response_update(self, client: OpenAI) -> None: @parametrize def test_path_params_update(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.beta.vector_stores.with_raw_response.update( + client.vector_stores.with_raw_response.update( vector_store_id="", ) @parametrize def test_method_list(self, client: OpenAI) -> None: - vector_store = client.beta.vector_stores.list() + vector_store = client.vector_stores.list() assert_matches_type(SyncCursorPage[VectorStore], vector_store, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: - vector_store = client.beta.vector_stores.list( + vector_store = client.vector_stores.list( after="after", before="before", limit=0, @@ -166,7 +167,7 @@ def test_method_list_with_all_params(self, client: OpenAI) -> None: @parametrize def 
test_raw_response_list(self, client: OpenAI) -> None: - response = client.beta.vector_stores.with_raw_response.list() + response = client.vector_stores.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -175,7 +176,7 @@ def test_raw_response_list(self, client: OpenAI) -> None: @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: - with client.beta.vector_stores.with_streaming_response.list() as response: + with client.vector_stores.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -186,14 +187,14 @@ def test_streaming_response_list(self, client: OpenAI) -> None: @parametrize def test_method_delete(self, client: OpenAI) -> None: - vector_store = client.beta.vector_stores.delete( + vector_store = client.vector_stores.delete( "vector_store_id", ) assert_matches_type(VectorStoreDeleted, vector_store, path=["response"]) @parametrize def test_raw_response_delete(self, client: OpenAI) -> None: - response = client.beta.vector_stores.with_raw_response.delete( + response = client.vector_stores.with_raw_response.delete( "vector_store_id", ) @@ -204,7 +205,7 @@ def test_raw_response_delete(self, client: OpenAI) -> None: @parametrize def test_streaming_response_delete(self, client: OpenAI) -> None: - with client.beta.vector_stores.with_streaming_response.delete( + with client.vector_stores.with_streaming_response.delete( "vector_store_id", ) as response: assert not response.is_closed @@ -218,22 +219,83 @@ def test_streaming_response_delete(self, client: OpenAI) -> None: @parametrize def test_path_params_delete(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.beta.vector_stores.with_raw_response.delete( + client.vector_stores.with_raw_response.delete( "", ) + @parametrize + def test_method_search(self, client: OpenAI) -> None: + vector_store = client.vector_stores.search( + vector_store_id="vs_abc123", + query="string", + ) + assert_matches_type(SyncPage[VectorStoreSearchResponse], vector_store, path=["response"]) + + @parametrize + def test_method_search_with_all_params(self, client: OpenAI) -> None: + vector_store = client.vector_stores.search( + vector_store_id="vs_abc123", + query="string", + filters={ + "key": "key", + "type": "eq", + "value": "string", + }, + max_num_results=1, + ranking_options={ + "ranker": "auto", + "score_threshold": 0, + }, + rewrite_query=True, + ) + assert_matches_type(SyncPage[VectorStoreSearchResponse], vector_store, path=["response"]) + + @parametrize + def test_raw_response_search(self, client: OpenAI) -> None: + response = client.vector_stores.with_raw_response.search( + vector_store_id="vs_abc123", + query="string", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + vector_store = response.parse() + assert_matches_type(SyncPage[VectorStoreSearchResponse], vector_store, path=["response"]) + + @parametrize + def test_streaming_response_search(self, client: OpenAI) -> None: + with client.vector_stores.with_streaming_response.search( + vector_store_id="vs_abc123", + query="string", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + vector_store = response.parse() + assert_matches_type(SyncPage[VectorStoreSearchResponse], 
vector_store, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_search(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.with_raw_response.search( + vector_store_id="", + query="string", + ) + class TestAsyncVectorStores: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: - vector_store = await async_client.beta.vector_stores.create() + vector_store = await async_client.vector_stores.create() assert_matches_type(VectorStore, vector_store, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: - vector_store = await async_client.beta.vector_stores.create( + vector_store = await async_client.vector_stores.create( chunking_strategy={"type": "auto"}, expires_after={ "anchor": "last_active_at", @@ -247,7 +309,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: - response = await async_client.beta.vector_stores.with_raw_response.create() + response = await async_client.vector_stores.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -256,7 +318,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: - async with async_client.beta.vector_stores.with_streaming_response.create() as response: + async with async_client.vector_stores.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -267,14 +329,14 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> Non @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: - vector_store = await async_client.beta.vector_stores.retrieve( + vector_store = await async_client.vector_stores.retrieve( "vector_store_id", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: - response = await async_client.beta.vector_stores.with_raw_response.retrieve( + response = await async_client.vector_stores.with_raw_response.retrieve( "vector_store_id", ) @@ -285,7 +347,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: - async with async_client.beta.vector_stores.with_streaming_response.retrieve( + async with async_client.vector_stores.with_streaming_response.retrieve( "vector_store_id", ) as response: assert not response.is_closed @@ -299,20 +361,20 @@ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> N @parametrize async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.beta.vector_stores.with_raw_response.retrieve( + await async_client.vector_stores.with_raw_response.retrieve( "", ) @parametrize async def 
test_method_update(self, async_client: AsyncOpenAI) -> None: - vector_store = await async_client.beta.vector_stores.update( + vector_store = await async_client.vector_stores.update( vector_store_id="vector_store_id", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @parametrize async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None: - vector_store = await async_client.beta.vector_stores.update( + vector_store = await async_client.vector_stores.update( vector_store_id="vector_store_id", expires_after={ "anchor": "last_active_at", @@ -325,7 +387,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> @parametrize async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: - response = await async_client.beta.vector_stores.with_raw_response.update( + response = await async_client.vector_stores.with_raw_response.update( vector_store_id="vector_store_id", ) @@ -336,7 +398,7 @@ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None: - async with async_client.beta.vector_stores.with_streaming_response.update( + async with async_client.vector_stores.with_streaming_response.update( vector_store_id="vector_store_id", ) as response: assert not response.is_closed @@ -350,18 +412,18 @@ async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> Non @parametrize async def test_path_params_update(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.beta.vector_stores.with_raw_response.update( + await async_client.vector_stores.with_raw_response.update( vector_store_id="", ) @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: - vector_store = await async_client.beta.vector_stores.list() + vector_store = await async_client.vector_stores.list() assert_matches_type(AsyncCursorPage[VectorStore], vector_store, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: - vector_store = await async_client.beta.vector_stores.list( + vector_store = await async_client.vector_stores.list( after="after", before="before", limit=0, @@ -371,7 +433,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> N @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: - response = await async_client.beta.vector_stores.with_raw_response.list() + response = await async_client.vector_stores.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -380,7 +442,7 @@ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: - async with async_client.beta.vector_stores.with_streaming_response.list() as response: + async with async_client.vector_stores.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -391,14 +453,14 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_delete(self, async_client: AsyncOpenAI) -> None: - vector_store = await async_client.beta.vector_stores.delete( + vector_store = await 
async_client.vector_stores.delete( "vector_store_id", ) assert_matches_type(VectorStoreDeleted, vector_store, path=["response"]) @parametrize async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: - response = await async_client.beta.vector_stores.with_raw_response.delete( + response = await async_client.vector_stores.with_raw_response.delete( "vector_store_id", ) @@ -409,7 +471,7 @@ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: - async with async_client.beta.vector_stores.with_streaming_response.delete( + async with async_client.vector_stores.with_streaming_response.delete( "vector_store_id", ) as response: assert not response.is_closed @@ -423,6 +485,67 @@ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> Non @parametrize async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.beta.vector_stores.with_raw_response.delete( + await async_client.vector_stores.with_raw_response.delete( "", ) + + @parametrize + async def test_method_search(self, async_client: AsyncOpenAI) -> None: + vector_store = await async_client.vector_stores.search( + vector_store_id="vs_abc123", + query="string", + ) + assert_matches_type(AsyncPage[VectorStoreSearchResponse], vector_store, path=["response"]) + + @parametrize + async def test_method_search_with_all_params(self, async_client: AsyncOpenAI) -> None: + vector_store = await async_client.vector_stores.search( + vector_store_id="vs_abc123", + query="string", + filters={ + "key": "key", + "type": "eq", + "value": "string", + }, + max_num_results=1, + ranking_options={ + "ranker": "auto", + "score_threshold": 0, + }, + rewrite_query=True, + ) + assert_matches_type(AsyncPage[VectorStoreSearchResponse], vector_store, path=["response"]) + + @parametrize + async def test_raw_response_search(self, async_client: AsyncOpenAI) -> None: + response = await async_client.vector_stores.with_raw_response.search( + vector_store_id="vs_abc123", + query="string", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + vector_store = response.parse() + assert_matches_type(AsyncPage[VectorStoreSearchResponse], vector_store, path=["response"]) + + @parametrize + async def test_streaming_response_search(self, async_client: AsyncOpenAI) -> None: + async with async_client.vector_stores.with_streaming_response.search( + vector_store_id="vs_abc123", + query="string", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + vector_store = await response.parse() + assert_matches_type(AsyncPage[VectorStoreSearchResponse], vector_store, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_search(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.with_raw_response.search( + vector_store_id="", + query="string", + ) diff --git a/tests/api_resources/vector_stores/__init__.py b/tests/api_resources/vector_stores/__init__.py new file mode 100644 index 0000000000..fd8019a9a1 --- /dev/null +++ b/tests/api_resources/vector_stores/__init__.py @@ -0,0 +1 @@ +# 
File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/beta/vector_stores/test_file_batches.py b/tests/api_resources/vector_stores/test_file_batches.py similarity index 81% rename from tests/api_resources/beta/vector_stores/test_file_batches.py rename to tests/api_resources/vector_stores/test_file_batches.py index 3281622695..0587cfc56a 100644 --- a/tests/api_resources/beta/vector_stores/test_file_batches.py +++ b/tests/api_resources/vector_stores/test_file_batches.py @@ -10,7 +10,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage -from openai.types.beta.vector_stores import ( +from openai.types.vector_stores import ( VectorStoreFile, VectorStoreFileBatch, ) @@ -23,7 +23,7 @@ class TestFileBatches: @parametrize def test_method_create(self, client: OpenAI) -> None: - file_batch = client.beta.vector_stores.file_batches.create( + file_batch = client.vector_stores.file_batches.create( vector_store_id="vs_abc123", file_ids=["string"], ) @@ -31,16 +31,17 @@ def test_method_create(self, client: OpenAI) -> None: @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: - file_batch = client.beta.vector_stores.file_batches.create( + file_batch = client.vector_stores.file_batches.create( vector_store_id="vs_abc123", file_ids=["string"], + attributes={"foo": "string"}, chunking_strategy={"type": "auto"}, ) assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"]) @parametrize def test_raw_response_create(self, client: OpenAI) -> None: - response = client.beta.vector_stores.file_batches.with_raw_response.create( + response = client.vector_stores.file_batches.with_raw_response.create( vector_store_id="vs_abc123", file_ids=["string"], ) @@ -52,7 +53,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: - with client.beta.vector_stores.file_batches.with_streaming_response.create( + with client.vector_stores.file_batches.with_streaming_response.create( vector_store_id="vs_abc123", file_ids=["string"], ) as response: @@ -67,14 +68,14 @@ def test_streaming_response_create(self, client: OpenAI) -> None: @parametrize def test_path_params_create(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.beta.vector_stores.file_batches.with_raw_response.create( + client.vector_stores.file_batches.with_raw_response.create( vector_store_id="", file_ids=["string"], ) @parametrize def test_method_retrieve(self, client: OpenAI) -> None: - file_batch = client.beta.vector_stores.file_batches.retrieve( + file_batch = client.vector_stores.file_batches.retrieve( batch_id="vsfb_abc123", vector_store_id="vs_abc123", ) @@ -82,7 +83,7 @@ def test_method_retrieve(self, client: OpenAI) -> None: @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: - response = client.beta.vector_stores.file_batches.with_raw_response.retrieve( + response = client.vector_stores.file_batches.with_raw_response.retrieve( batch_id="vsfb_abc123", vector_store_id="vs_abc123", ) @@ -94,7 +95,7 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None: @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: - with client.beta.vector_stores.file_batches.with_streaming_response.retrieve( + with 
client.vector_stores.file_batches.with_streaming_response.retrieve( batch_id="vsfb_abc123", vector_store_id="vs_abc123", ) as response: @@ -109,20 +110,20 @@ def test_streaming_response_retrieve(self, client: OpenAI) -> None: @parametrize def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.beta.vector_stores.file_batches.with_raw_response.retrieve( + client.vector_stores.file_batches.with_raw_response.retrieve( batch_id="vsfb_abc123", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): - client.beta.vector_stores.file_batches.with_raw_response.retrieve( + client.vector_stores.file_batches.with_raw_response.retrieve( batch_id="", vector_store_id="vs_abc123", ) @parametrize def test_method_cancel(self, client: OpenAI) -> None: - file_batch = client.beta.vector_stores.file_batches.cancel( + file_batch = client.vector_stores.file_batches.cancel( batch_id="batch_id", vector_store_id="vector_store_id", ) @@ -130,7 +131,7 @@ def test_method_cancel(self, client: OpenAI) -> None: @parametrize def test_raw_response_cancel(self, client: OpenAI) -> None: - response = client.beta.vector_stores.file_batches.with_raw_response.cancel( + response = client.vector_stores.file_batches.with_raw_response.cancel( batch_id="batch_id", vector_store_id="vector_store_id", ) @@ -142,7 +143,7 @@ def test_raw_response_cancel(self, client: OpenAI) -> None: @parametrize def test_streaming_response_cancel(self, client: OpenAI) -> None: - with client.beta.vector_stores.file_batches.with_streaming_response.cancel( + with client.vector_stores.file_batches.with_streaming_response.cancel( batch_id="batch_id", vector_store_id="vector_store_id", ) as response: @@ -157,20 +158,20 @@ def test_streaming_response_cancel(self, client: OpenAI) -> None: @parametrize def test_path_params_cancel(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.beta.vector_stores.file_batches.with_raw_response.cancel( + client.vector_stores.file_batches.with_raw_response.cancel( batch_id="batch_id", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): - client.beta.vector_stores.file_batches.with_raw_response.cancel( + client.vector_stores.file_batches.with_raw_response.cancel( batch_id="", vector_store_id="vector_store_id", ) @parametrize def test_method_list_files(self, client: OpenAI) -> None: - file_batch = client.beta.vector_stores.file_batches.list_files( + file_batch = client.vector_stores.file_batches.list_files( batch_id="batch_id", vector_store_id="vector_store_id", ) @@ -178,7 +179,7 @@ def test_method_list_files(self, client: OpenAI) -> None: @parametrize def test_method_list_files_with_all_params(self, client: OpenAI) -> None: - file_batch = client.beta.vector_stores.file_batches.list_files( + file_batch = client.vector_stores.file_batches.list_files( batch_id="batch_id", vector_store_id="vector_store_id", after="after", @@ -191,7 +192,7 @@ def test_method_list_files_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_list_files(self, client: OpenAI) -> None: - response = client.beta.vector_stores.file_batches.with_raw_response.list_files( + response = client.vector_stores.file_batches.with_raw_response.list_files( batch_id="batch_id", 
vector_store_id="vector_store_id", ) @@ -203,7 +204,7 @@ def test_raw_response_list_files(self, client: OpenAI) -> None: @parametrize def test_streaming_response_list_files(self, client: OpenAI) -> None: - with client.beta.vector_stores.file_batches.with_streaming_response.list_files( + with client.vector_stores.file_batches.with_streaming_response.list_files( batch_id="batch_id", vector_store_id="vector_store_id", ) as response: @@ -218,13 +219,13 @@ def test_streaming_response_list_files(self, client: OpenAI) -> None: @parametrize def test_path_params_list_files(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.beta.vector_stores.file_batches.with_raw_response.list_files( + client.vector_stores.file_batches.with_raw_response.list_files( batch_id="batch_id", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): - client.beta.vector_stores.file_batches.with_raw_response.list_files( + client.vector_stores.file_batches.with_raw_response.list_files( batch_id="", vector_store_id="vector_store_id", ) @@ -235,7 +236,7 @@ class TestAsyncFileBatches: @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: - file_batch = await async_client.beta.vector_stores.file_batches.create( + file_batch = await async_client.vector_stores.file_batches.create( vector_store_id="vs_abc123", file_ids=["string"], ) @@ -243,16 +244,17 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: - file_batch = await async_client.beta.vector_stores.file_batches.create( + file_batch = await async_client.vector_stores.file_batches.create( vector_store_id="vs_abc123", file_ids=["string"], + attributes={"foo": "string"}, chunking_strategy={"type": "auto"}, ) assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"]) @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: - response = await async_client.beta.vector_stores.file_batches.with_raw_response.create( + response = await async_client.vector_stores.file_batches.with_raw_response.create( vector_store_id="vs_abc123", file_ids=["string"], ) @@ -264,7 +266,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: - async with async_client.beta.vector_stores.file_batches.with_streaming_response.create( + async with async_client.vector_stores.file_batches.with_streaming_response.create( vector_store_id="vs_abc123", file_ids=["string"], ) as response: @@ -279,14 +281,14 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> Non @parametrize async def test_path_params_create(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.beta.vector_stores.file_batches.with_raw_response.create( + await async_client.vector_stores.file_batches.with_raw_response.create( vector_store_id="", file_ids=["string"], ) @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: - file_batch = await async_client.beta.vector_stores.file_batches.retrieve( + file_batch = await async_client.vector_stores.file_batches.retrieve( batch_id="vsfb_abc123", 
vector_store_id="vs_abc123", ) @@ -294,7 +296,7 @@ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: - response = await async_client.beta.vector_stores.file_batches.with_raw_response.retrieve( + response = await async_client.vector_stores.file_batches.with_raw_response.retrieve( batch_id="vsfb_abc123", vector_store_id="vs_abc123", ) @@ -306,7 +308,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: - async with async_client.beta.vector_stores.file_batches.with_streaming_response.retrieve( + async with async_client.vector_stores.file_batches.with_streaming_response.retrieve( batch_id="vsfb_abc123", vector_store_id="vs_abc123", ) as response: @@ -321,20 +323,20 @@ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> N @parametrize async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.beta.vector_stores.file_batches.with_raw_response.retrieve( + await async_client.vector_stores.file_batches.with_raw_response.retrieve( batch_id="vsfb_abc123", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): - await async_client.beta.vector_stores.file_batches.with_raw_response.retrieve( + await async_client.vector_stores.file_batches.with_raw_response.retrieve( batch_id="", vector_store_id="vs_abc123", ) @parametrize async def test_method_cancel(self, async_client: AsyncOpenAI) -> None: - file_batch = await async_client.beta.vector_stores.file_batches.cancel( + file_batch = await async_client.vector_stores.file_batches.cancel( batch_id="batch_id", vector_store_id="vector_store_id", ) @@ -342,7 +344,7 @@ async def test_method_cancel(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None: - response = await async_client.beta.vector_stores.file_batches.with_raw_response.cancel( + response = await async_client.vector_stores.file_batches.with_raw_response.cancel( batch_id="batch_id", vector_store_id="vector_store_id", ) @@ -354,7 +356,7 @@ async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None: - async with async_client.beta.vector_stores.file_batches.with_streaming_response.cancel( + async with async_client.vector_stores.file_batches.with_streaming_response.cancel( batch_id="batch_id", vector_store_id="vector_store_id", ) as response: @@ -369,20 +371,20 @@ async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> Non @parametrize async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.beta.vector_stores.file_batches.with_raw_response.cancel( + await async_client.vector_stores.file_batches.with_raw_response.cancel( batch_id="batch_id", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): - await async_client.beta.vector_stores.file_batches.with_raw_response.cancel( + await 
async_client.vector_stores.file_batches.with_raw_response.cancel( batch_id="", vector_store_id="vector_store_id", ) @parametrize async def test_method_list_files(self, async_client: AsyncOpenAI) -> None: - file_batch = await async_client.beta.vector_stores.file_batches.list_files( + file_batch = await async_client.vector_stores.file_batches.list_files( batch_id="batch_id", vector_store_id="vector_store_id", ) @@ -390,7 +392,7 @@ async def test_method_list_files(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_list_files_with_all_params(self, async_client: AsyncOpenAI) -> None: - file_batch = await async_client.beta.vector_stores.file_batches.list_files( + file_batch = await async_client.vector_stores.file_batches.list_files( batch_id="batch_id", vector_store_id="vector_store_id", after="after", @@ -403,7 +405,7 @@ async def test_method_list_files_with_all_params(self, async_client: AsyncOpenAI @parametrize async def test_raw_response_list_files(self, async_client: AsyncOpenAI) -> None: - response = await async_client.beta.vector_stores.file_batches.with_raw_response.list_files( + response = await async_client.vector_stores.file_batches.with_raw_response.list_files( batch_id="batch_id", vector_store_id="vector_store_id", ) @@ -415,7 +417,7 @@ async def test_raw_response_list_files(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_list_files(self, async_client: AsyncOpenAI) -> None: - async with async_client.beta.vector_stores.file_batches.with_streaming_response.list_files( + async with async_client.vector_stores.file_batches.with_streaming_response.list_files( batch_id="batch_id", vector_store_id="vector_store_id", ) as response: @@ -430,13 +432,13 @@ async def test_streaming_response_list_files(self, async_client: AsyncOpenAI) -> @parametrize async def test_path_params_list_files(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.beta.vector_stores.file_batches.with_raw_response.list_files( + await async_client.vector_stores.file_batches.with_raw_response.list_files( batch_id="batch_id", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): - await async_client.beta.vector_stores.file_batches.with_raw_response.list_files( + await async_client.vector_stores.file_batches.with_raw_response.list_files( batch_id="", vector_store_id="vector_store_id", ) diff --git a/tests/api_resources/beta/vector_stores/test_files.py b/tests/api_resources/vector_stores/test_files.py similarity index 55% rename from tests/api_resources/beta/vector_stores/test_files.py rename to tests/api_resources/vector_stores/test_files.py index 29fc28f39d..c13442261e 100644 --- a/tests/api_resources/beta/vector_stores/test_files.py +++ b/tests/api_resources/vector_stores/test_files.py @@ -9,9 +9,10 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type -from openai.pagination import SyncCursorPage, AsyncCursorPage -from openai.types.beta.vector_stores import ( +from openai.pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage +from openai.types.vector_stores import ( VectorStoreFile, + FileContentResponse, VectorStoreFileDeleted, ) @@ -23,7 +24,7 @@ class TestFiles: @parametrize def test_method_create(self, client: OpenAI) -> None: - file = client.beta.vector_stores.files.create( + file = client.vector_stores.files.create( 
vector_store_id="vs_abc123", file_id="file_id", ) @@ -31,16 +32,17 @@ def test_method_create(self, client: OpenAI) -> None: @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: - file = client.beta.vector_stores.files.create( + file = client.vector_stores.files.create( vector_store_id="vs_abc123", file_id="file_id", + attributes={"foo": "string"}, chunking_strategy={"type": "auto"}, ) assert_matches_type(VectorStoreFile, file, path=["response"]) @parametrize def test_raw_response_create(self, client: OpenAI) -> None: - response = client.beta.vector_stores.files.with_raw_response.create( + response = client.vector_stores.files.with_raw_response.create( vector_store_id="vs_abc123", file_id="file_id", ) @@ -52,7 +54,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: - with client.beta.vector_stores.files.with_streaming_response.create( + with client.vector_stores.files.with_streaming_response.create( vector_store_id="vs_abc123", file_id="file_id", ) as response: @@ -67,14 +69,14 @@ def test_streaming_response_create(self, client: OpenAI) -> None: @parametrize def test_path_params_create(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.beta.vector_stores.files.with_raw_response.create( + client.vector_stores.files.with_raw_response.create( vector_store_id="", file_id="file_id", ) @parametrize def test_method_retrieve(self, client: OpenAI) -> None: - file = client.beta.vector_stores.files.retrieve( + file = client.vector_stores.files.retrieve( file_id="file-abc123", vector_store_id="vs_abc123", ) @@ -82,7 +84,7 @@ def test_method_retrieve(self, client: OpenAI) -> None: @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: - response = client.beta.vector_stores.files.with_raw_response.retrieve( + response = client.vector_stores.files.with_raw_response.retrieve( file_id="file-abc123", vector_store_id="vs_abc123", ) @@ -94,7 +96,7 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None: @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: - with client.beta.vector_stores.files.with_streaming_response.retrieve( + with client.vector_stores.files.with_streaming_response.retrieve( file_id="file-abc123", vector_store_id="vs_abc123", ) as response: @@ -109,27 +111,80 @@ def test_streaming_response_retrieve(self, client: OpenAI) -> None: @parametrize def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.beta.vector_stores.files.with_raw_response.retrieve( + client.vector_stores.files.with_raw_response.retrieve( file_id="file-abc123", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - client.beta.vector_stores.files.with_raw_response.retrieve( + client.vector_stores.files.with_raw_response.retrieve( file_id="", vector_store_id="vs_abc123", ) + @parametrize + def test_method_update(self, client: OpenAI) -> None: + file = client.vector_stores.files.update( + file_id="file-abc123", + vector_store_id="vs_abc123", + attributes={"foo": "string"}, + ) + assert_matches_type(VectorStoreFile, file, path=["response"]) + + @parametrize + def test_raw_response_update(self, client: OpenAI) -> None: + response = 
client.vector_stores.files.with_raw_response.update( + file_id="file-abc123", + vector_store_id="vs_abc123", + attributes={"foo": "string"}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(VectorStoreFile, file, path=["response"]) + + @parametrize + def test_streaming_response_update(self, client: OpenAI) -> None: + with client.vector_stores.files.with_streaming_response.update( + file_id="file-abc123", + vector_store_id="vs_abc123", + attributes={"foo": "string"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(VectorStoreFile, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_update(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.files.with_raw_response.update( + file_id="file-abc123", + vector_store_id="", + attributes={"foo": "string"}, + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.vector_stores.files.with_raw_response.update( + file_id="", + vector_store_id="vs_abc123", + attributes={"foo": "string"}, + ) + @parametrize def test_method_list(self, client: OpenAI) -> None: - file = client.beta.vector_stores.files.list( + file = client.vector_stores.files.list( vector_store_id="vector_store_id", ) assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: - file = client.beta.vector_stores.files.list( + file = client.vector_stores.files.list( vector_store_id="vector_store_id", after="after", before="before", @@ -141,7 +196,7 @@ def test_method_list_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_list(self, client: OpenAI) -> None: - response = client.beta.vector_stores.files.with_raw_response.list( + response = client.vector_stores.files.with_raw_response.list( vector_store_id="vector_store_id", ) @@ -152,7 +207,7 @@ def test_raw_response_list(self, client: OpenAI) -> None: @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: - with client.beta.vector_stores.files.with_streaming_response.list( + with client.vector_stores.files.with_streaming_response.list( vector_store_id="vector_store_id", ) as response: assert not response.is_closed @@ -166,13 +221,13 @@ def test_streaming_response_list(self, client: OpenAI) -> None: @parametrize def test_path_params_list(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.beta.vector_stores.files.with_raw_response.list( + client.vector_stores.files.with_raw_response.list( vector_store_id="", ) @parametrize def test_method_delete(self, client: OpenAI) -> None: - file = client.beta.vector_stores.files.delete( + file = client.vector_stores.files.delete( file_id="file_id", vector_store_id="vector_store_id", ) @@ -180,7 +235,7 @@ def test_method_delete(self, client: OpenAI) -> None: @parametrize def test_raw_response_delete(self, client: OpenAI) -> None: - response = client.beta.vector_stores.files.with_raw_response.delete( + response = client.vector_stores.files.with_raw_response.delete( file_id="file_id", 
vector_store_id="vector_store_id", ) @@ -192,7 +247,7 @@ def test_raw_response_delete(self, client: OpenAI) -> None: @parametrize def test_streaming_response_delete(self, client: OpenAI) -> None: - with client.beta.vector_stores.files.with_streaming_response.delete( + with client.vector_stores.files.with_streaming_response.delete( file_id="file_id", vector_store_id="vector_store_id", ) as response: @@ -207,24 +262,72 @@ def test_streaming_response_delete(self, client: OpenAI) -> None: @parametrize def test_path_params_delete(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.beta.vector_stores.files.with_raw_response.delete( + client.vector_stores.files.with_raw_response.delete( file_id="file_id", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - client.beta.vector_stores.files.with_raw_response.delete( + client.vector_stores.files.with_raw_response.delete( file_id="", vector_store_id="vector_store_id", ) + @parametrize + def test_method_content(self, client: OpenAI) -> None: + file = client.vector_stores.files.content( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) + assert_matches_type(SyncPage[FileContentResponse], file, path=["response"]) + + @parametrize + def test_raw_response_content(self, client: OpenAI) -> None: + response = client.vector_stores.files.with_raw_response.content( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(SyncPage[FileContentResponse], file, path=["response"]) + + @parametrize + def test_streaming_response_content(self, client: OpenAI) -> None: + with client.vector_stores.files.with_streaming_response.content( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(SyncPage[FileContentResponse], file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_content(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.files.with_raw_response.content( + file_id="file-abc123", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.vector_stores.files.with_raw_response.content( + file_id="", + vector_store_id="vs_abc123", + ) + class TestAsyncFiles: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: - file = await async_client.beta.vector_stores.files.create( + file = await async_client.vector_stores.files.create( vector_store_id="vs_abc123", file_id="file_id", ) @@ -232,16 +335,17 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: - file = await async_client.beta.vector_stores.files.create( + file = await async_client.vector_stores.files.create( vector_store_id="vs_abc123", file_id="file_id", + attributes={"foo": "string"}, 
chunking_strategy={"type": "auto"}, ) assert_matches_type(VectorStoreFile, file, path=["response"]) @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: - response = await async_client.beta.vector_stores.files.with_raw_response.create( + response = await async_client.vector_stores.files.with_raw_response.create( vector_store_id="vs_abc123", file_id="file_id", ) @@ -253,7 +357,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: - async with async_client.beta.vector_stores.files.with_streaming_response.create( + async with async_client.vector_stores.files.with_streaming_response.create( vector_store_id="vs_abc123", file_id="file_id", ) as response: @@ -268,14 +372,14 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> Non @parametrize async def test_path_params_create(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.beta.vector_stores.files.with_raw_response.create( + await async_client.vector_stores.files.with_raw_response.create( vector_store_id="", file_id="file_id", ) @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: - file = await async_client.beta.vector_stores.files.retrieve( + file = await async_client.vector_stores.files.retrieve( file_id="file-abc123", vector_store_id="vs_abc123", ) @@ -283,7 +387,7 @@ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: - response = await async_client.beta.vector_stores.files.with_raw_response.retrieve( + response = await async_client.vector_stores.files.with_raw_response.retrieve( file_id="file-abc123", vector_store_id="vs_abc123", ) @@ -295,7 +399,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: - async with async_client.beta.vector_stores.files.with_streaming_response.retrieve( + async with async_client.vector_stores.files.with_streaming_response.retrieve( file_id="file-abc123", vector_store_id="vs_abc123", ) as response: @@ -310,27 +414,80 @@ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> N @parametrize async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.beta.vector_stores.files.with_raw_response.retrieve( + await async_client.vector_stores.files.with_raw_response.retrieve( file_id="file-abc123", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - await async_client.beta.vector_stores.files.with_raw_response.retrieve( + await async_client.vector_stores.files.with_raw_response.retrieve( file_id="", vector_store_id="vs_abc123", ) + @parametrize + async def test_method_update(self, async_client: AsyncOpenAI) -> None: + file = await async_client.vector_stores.files.update( + file_id="file-abc123", + vector_store_id="vs_abc123", + attributes={"foo": "string"}, + ) + assert_matches_type(VectorStoreFile, file, path=["response"]) + + @parametrize + async def test_raw_response_update(self, async_client: AsyncOpenAI) -> 
None: + response = await async_client.vector_stores.files.with_raw_response.update( + file_id="file-abc123", + vector_store_id="vs_abc123", + attributes={"foo": "string"}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(VectorStoreFile, file, path=["response"]) + + @parametrize + async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None: + async with async_client.vector_stores.files.with_streaming_response.update( + file_id="file-abc123", + vector_store_id="vs_abc123", + attributes={"foo": "string"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(VectorStoreFile, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_update(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.update( + file_id="file-abc123", + vector_store_id="", + attributes={"foo": "string"}, + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.update( + file_id="", + vector_store_id="vs_abc123", + attributes={"foo": "string"}, + ) + @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: - file = await async_client.beta.vector_stores.files.list( + file = await async_client.vector_stores.files.list( vector_store_id="vector_store_id", ) assert_matches_type(AsyncCursorPage[VectorStoreFile], file, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: - file = await async_client.beta.vector_stores.files.list( + file = await async_client.vector_stores.files.list( vector_store_id="vector_store_id", after="after", before="before", @@ -342,7 +499,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> N @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: - response = await async_client.beta.vector_stores.files.with_raw_response.list( + response = await async_client.vector_stores.files.with_raw_response.list( vector_store_id="vector_store_id", ) @@ -353,7 +510,7 @@ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: - async with async_client.beta.vector_stores.files.with_streaming_response.list( + async with async_client.vector_stores.files.with_streaming_response.list( vector_store_id="vector_store_id", ) as response: assert not response.is_closed @@ -367,13 +524,13 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_path_params_list(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.beta.vector_stores.files.with_raw_response.list( + await async_client.vector_stores.files.with_raw_response.list( vector_store_id="", ) @parametrize async def test_method_delete(self, async_client: AsyncOpenAI) -> None: - file = await async_client.beta.vector_stores.files.delete( + file = await 
async_client.vector_stores.files.delete( file_id="file_id", vector_store_id="vector_store_id", ) @@ -381,7 +538,7 @@ async def test_method_delete(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: - response = await async_client.beta.vector_stores.files.with_raw_response.delete( + response = await async_client.vector_stores.files.with_raw_response.delete( file_id="file_id", vector_store_id="vector_store_id", ) @@ -393,7 +550,7 @@ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: - async with async_client.beta.vector_stores.files.with_streaming_response.delete( + async with async_client.vector_stores.files.with_streaming_response.delete( file_id="file_id", vector_store_id="vector_store_id", ) as response: @@ -408,13 +565,61 @@ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> Non @parametrize async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.beta.vector_stores.files.with_raw_response.delete( + await async_client.vector_stores.files.with_raw_response.delete( file_id="file_id", vector_store_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - await async_client.beta.vector_stores.files.with_raw_response.delete( + await async_client.vector_stores.files.with_raw_response.delete( file_id="", vector_store_id="vector_store_id", ) + + @parametrize + async def test_method_content(self, async_client: AsyncOpenAI) -> None: + file = await async_client.vector_stores.files.content( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) + assert_matches_type(AsyncPage[FileContentResponse], file, path=["response"]) + + @parametrize + async def test_raw_response_content(self, async_client: AsyncOpenAI) -> None: + response = await async_client.vector_stores.files.with_raw_response.content( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(AsyncPage[FileContentResponse], file, path=["response"]) + + @parametrize + async def test_streaming_response_content(self, async_client: AsyncOpenAI) -> None: + async with async_client.vector_stores.files.with_streaming_response.content( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(AsyncPage[FileContentResponse], file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_content(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.content( + file_id="file-abc123", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.content( + file_id="", + vector_store_id="vs_abc123", + ) From 559af75aca2ae92c26769773dd17b7ffd76dab16 Mon Sep 17 
00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 19:52:46 +0000 Subject: [PATCH 181/192] fix(responses): correct computer use enum value (#2180) --- .stats.yml | 2 +- src/openai/types/responses/computer_tool.py | 2 +- src/openai/types/responses/computer_tool_param.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.stats.yml b/.stats.yml index 455874212c..9c4a2e5367 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-be834d63e326a82494e819085137f5eb15866f3fc787db1f3afe7168d419e18a.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-9ce5257763fb30c6e0e1ee2bef7e13baf661511e09572207e528d643da8e16b3.yml diff --git a/src/openai/types/responses/computer_tool.py b/src/openai/types/responses/computer_tool.py index f0499cd950..dffb7af7b7 100644 --- a/src/openai/types/responses/computer_tool.py +++ b/src/openai/types/responses/computer_tool.py @@ -17,5 +17,5 @@ class ComputerTool(BaseModel): environment: Literal["mac", "windows", "ubuntu", "browser"] """The type of computer environment to control.""" - type: Literal["computer-preview"] + type: Literal["computer_use_preview"] """The type of the computer use tool. Always `computer_use_preview`.""" diff --git a/src/openai/types/responses/computer_tool_param.py b/src/openai/types/responses/computer_tool_param.py index 685b471378..6b1072ffd2 100644 --- a/src/openai/types/responses/computer_tool_param.py +++ b/src/openai/types/responses/computer_tool_param.py @@ -17,5 +17,5 @@ class ComputerToolParam(TypedDict, total=False): environment: Required[Literal["mac", "windows", "ubuntu", "browser"]] """The type of computer environment to control.""" - type: Required[Literal["computer-preview"]] + type: Required[Literal["computer_use_preview"]] """The type of the computer use tool. 
Always `computer_use_preview`.""" From 32efcf34670a5aa79ed3cde4729ff636be267654 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 21:44:13 +0000 Subject: [PATCH 182/192] fix(responses): correct reasoning output type (#2181) --- .stats.yml | 2 +- api.md | 1 + src/openai/types/responses/__init__.py | 2 + .../responses/response_input_item_param.py | 33 +--------------- .../types/responses/response_input_param.py | 33 +--------------- .../types/responses/response_output_item.py | 39 +++---------------- .../responses/response_reasoning_item.py | 36 +++++++++++++++++ .../response_reasoning_item_param.py | 36 +++++++++++++++++ 8 files changed, 85 insertions(+), 97 deletions(-) create mode 100644 src/openai/types/responses/response_reasoning_item.py create mode 100644 src/openai/types/responses/response_reasoning_item_param.py diff --git a/.stats.yml b/.stats.yml index 9c4a2e5367..edc2aaf89f 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-9ce5257763fb30c6e0e1ee2bef7e13baf661511e09572207e528d643da8e16b3.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml diff --git a/api.md b/api.md index 8a01ba7c5a..b148b0a085 100644 --- a/api.md +++ b/api.md @@ -624,6 +624,7 @@ from openai.types.responses import ( ResponseOutputMessage, ResponseOutputRefusal, ResponseOutputText, + ResponseReasoningItem, ResponseRefusalDeltaEvent, ResponseRefusalDoneEvent, ResponseStatus, diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py index d0df31ed86..db7ecabfcf 100644 --- a/src/openai/types/responses/__init__.py +++ b/src/openai/types/responses/__init__.py @@ -37,6 +37,7 @@ from .response_input_content import ResponseInputContent as ResponseInputContent from .response_output_message import ResponseOutputMessage as ResponseOutputMessage from .response_output_refusal import ResponseOutputRefusal as ResponseOutputRefusal +from .response_reasoning_item import ResponseReasoningItem as ResponseReasoningItem from .tool_choice_types_param import ToolChoiceTypesParam as ToolChoiceTypesParam from .easy_input_message_param import EasyInputMessageParam as EasyInputMessageParam from .response_completed_event import ResponseCompletedEvent as ResponseCompletedEvent @@ -63,6 +64,7 @@ from .response_refusal_delta_event import ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent from .response_output_message_param import ResponseOutputMessageParam as ResponseOutputMessageParam from .response_output_refusal_param import ResponseOutputRefusalParam as ResponseOutputRefusalParam +from .response_reasoning_item_param import ResponseReasoningItemParam as ResponseReasoningItemParam from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent diff --git a/src/openai/types/responses/response_input_item_param.py b/src/openai/types/responses/response_input_item_param.py index c9daaa6a89..32ac13cabb 100644 --- a/src/openai/types/responses/response_input_item_param.py +++ b/src/openai/types/responses/response_input_item_param.py @@ -7,6 +7,7 @@ from 
.easy_input_message_param import EasyInputMessageParam from .response_output_message_param import ResponseOutputMessageParam +from .response_reasoning_item_param import ResponseReasoningItemParam from .response_computer_tool_call_param import ResponseComputerToolCallParam from .response_function_tool_call_param import ResponseFunctionToolCallParam from .response_function_web_search_param import ResponseFunctionWebSearchParam @@ -20,8 +21,6 @@ "ComputerCallOutputOutput", "ComputerCallOutputAcknowledgedSafetyCheck", "FunctionCallOutput", - "Reasoning", - "ReasoningContent", "ItemReference", ] @@ -123,34 +122,6 @@ class FunctionCallOutput(TypedDict, total=False): """ -class ReasoningContent(TypedDict, total=False): - text: Required[str] - """ - A short summary of the reasoning used by the model when generating the response. - """ - - type: Required[Literal["reasoning_summary"]] - """The type of the object. Always `text`.""" - - -class Reasoning(TypedDict, total=False): - id: Required[str] - """The unique identifier of the reasoning content.""" - - content: Required[Iterable[ReasoningContent]] - """Reasoning text contents.""" - - type: Required[Literal["reasoning"]] - """The type of the object. Always `reasoning`.""" - - status: Literal["in_progress", "completed", "incomplete"] - """The status of the item. - - One of `in_progress`, `completed`, or `incomplete`. Populated when items are - returned via API. - """ - - class ItemReference(TypedDict, total=False): id: Required[str] """The ID of the item to reference.""" @@ -169,6 +140,6 @@ class ItemReference(TypedDict, total=False): ResponseFunctionWebSearchParam, ResponseFunctionToolCallParam, FunctionCallOutput, - Reasoning, + ResponseReasoningItemParam, ItemReference, ] diff --git a/src/openai/types/responses/response_input_param.py b/src/openai/types/responses/response_input_param.py index c81308500d..b942f4868a 100644 --- a/src/openai/types/responses/response_input_param.py +++ b/src/openai/types/responses/response_input_param.py @@ -7,6 +7,7 @@ from .easy_input_message_param import EasyInputMessageParam from .response_output_message_param import ResponseOutputMessageParam +from .response_reasoning_item_param import ResponseReasoningItemParam from .response_computer_tool_call_param import ResponseComputerToolCallParam from .response_function_tool_call_param import ResponseFunctionToolCallParam from .response_function_web_search_param import ResponseFunctionWebSearchParam @@ -21,8 +22,6 @@ "ComputerCallOutputOutput", "ComputerCallOutputAcknowledgedSafetyCheck", "FunctionCallOutput", - "Reasoning", - "ReasoningContent", "ItemReference", ] @@ -124,34 +123,6 @@ class FunctionCallOutput(TypedDict, total=False): """ -class ReasoningContent(TypedDict, total=False): - text: Required[str] - """ - A short summary of the reasoning used by the model when generating the response. - """ - - type: Required[Literal["reasoning_summary"]] - """The type of the object. Always `text`.""" - - -class Reasoning(TypedDict, total=False): - id: Required[str] - """The unique identifier of the reasoning content.""" - - content: Required[Iterable[ReasoningContent]] - """Reasoning text contents.""" - - type: Required[Literal["reasoning"]] - """The type of the object. Always `reasoning`.""" - - status: Literal["in_progress", "completed", "incomplete"] - """The status of the item. - - One of `in_progress`, `completed`, or `incomplete`. Populated when items are - returned via API. 
- """ - - class ItemReference(TypedDict, total=False): id: Required[str] """The ID of the item to reference.""" @@ -170,7 +141,7 @@ class ItemReference(TypedDict, total=False): ResponseFunctionWebSearchParam, ResponseFunctionToolCallParam, FunctionCallOutput, - Reasoning, + ResponseReasoningItemParam, ItemReference, ] diff --git a/src/openai/types/responses/response_output_item.py b/src/openai/types/responses/response_output_item.py index 45d5cc0094..f1e9693195 100644 --- a/src/openai/types/responses/response_output_item.py +++ b/src/openai/types/responses/response_output_item.py @@ -1,46 +1,17 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Union, Optional -from typing_extensions import Literal, Annotated, TypeAlias +from typing import Union +from typing_extensions import Annotated, TypeAlias from ..._utils import PropertyInfo -from ..._models import BaseModel from .response_output_message import ResponseOutputMessage +from .response_reasoning_item import ResponseReasoningItem from .response_computer_tool_call import ResponseComputerToolCall from .response_function_tool_call import ResponseFunctionToolCall from .response_function_web_search import ResponseFunctionWebSearch from .response_file_search_tool_call import ResponseFileSearchToolCall -__all__ = ["ResponseOutputItem", "Reasoning", "ReasoningContent"] - - -class ReasoningContent(BaseModel): - text: str - """ - A short summary of the reasoning used by the model when generating the response. - """ - - type: Literal["reasoning_summary"] - """The type of the object. Always `text`.""" - - -class Reasoning(BaseModel): - id: str - """The unique identifier of the reasoning content.""" - - content: List[ReasoningContent] - """Reasoning text contents.""" - - type: Literal["reasoning"] - """The type of the object. Always `reasoning`.""" - - status: Optional[Literal["in_progress", "completed", "incomplete"]] = None - """The status of the item. - - One of `in_progress`, `completed`, or `incomplete`. Populated when items are - returned via API. - """ - +__all__ = ["ResponseOutputItem"] ResponseOutputItem: TypeAlias = Annotated[ Union[ @@ -49,7 +20,7 @@ class Reasoning(BaseModel): ResponseFunctionToolCall, ResponseFunctionWebSearch, ResponseComputerToolCall, - Reasoning, + ResponseReasoningItem, ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/responses/response_reasoning_item.py b/src/openai/types/responses/response_reasoning_item.py new file mode 100644 index 0000000000..57e5fbfe6d --- /dev/null +++ b/src/openai/types/responses/response_reasoning_item.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseReasoningItem", "Summary"] + + +class Summary(BaseModel): + text: str + """ + A short summary of the reasoning used by the model when generating the response. + """ + + type: Literal["summary_text"] + """The type of the object. Always `summary_text`.""" + + +class ResponseReasoningItem(BaseModel): + id: str + """The unique identifier of the reasoning content.""" + + summary: List[Summary] + """Reasoning text contents.""" + + type: Literal["reasoning"] + """The type of the object. Always `reasoning`.""" + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. 
Populated when items are + returned via API. + """ diff --git a/src/openai/types/responses/response_reasoning_item_param.py b/src/openai/types/responses/response_reasoning_item_param.py new file mode 100644 index 0000000000..adb49d6402 --- /dev/null +++ b/src/openai/types/responses/response_reasoning_item_param.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseReasoningItemParam", "Summary"] + + +class Summary(TypedDict, total=False): + text: Required[str] + """ + A short summary of the reasoning used by the model when generating the response. + """ + + type: Required[Literal["summary_text"]] + """The type of the object. Always `summary_text`.""" + + +class ResponseReasoningItemParam(TypedDict, total=False): + id: Required[str] + """The unique identifier of the reasoning content.""" + + summary: Required[Iterable[Summary]] + """Reasoning text contents.""" + + type: Required[Literal["reasoning"]] + """The type of the object. Always `reasoning`.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ From a6371bada8d52c77823b0144aff284ea87abafe7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 12 Mar 2025 19:07:24 +0000 Subject: [PATCH 183/192] fix: update module level client (#2185) --- src/openai/__init__.py | 182 +++++++++++++++++++++++++++++++++++ src/openai/_module_client.py | 113 ++++++++++++++++++++++ tests/test_module_client.py | 89 +++++++++++++++++ 3 files changed, 384 insertions(+) create mode 100644 src/openai/_module_client.py create mode 100644 tests/test_module_client.py diff --git a/src/openai/__init__.py b/src/openai/__init__.py index 5df415547a..7bf7c5660a 100644 --- a/src/openai/__init__.py +++ b/src/openai/__init__.py @@ -1,5 +1,9 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from __future__ import annotations + +from typing_extensions import override + from . import types from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes from ._utils import file_from_path @@ -82,3 +86,181 @@ except (TypeError, AttributeError): # Some of our exported symbols are builtins which we can't set attributes for. 
pass + +# ------ Module level client ------ +import typing as _t + +import httpx as _httpx + +from ._base_client import DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES + +api_key: str | None = None + +organization: str | None = None + +project: str | None = None + +base_url: str | _httpx.URL | None = None + +timeout: float | Timeout | None = DEFAULT_TIMEOUT + +max_retries: int = DEFAULT_MAX_RETRIES + +default_headers: _t.Mapping[str, str] | None = None + +default_query: _t.Mapping[str, object] | None = None + +http_client: _httpx.Client | None = None + + +class _ModuleClient(OpenAI): + # Note: we have to use type: ignores here as overriding class members + # with properties is technically unsafe but it is fine for our use case + + @property # type: ignore + @override + def api_key(self) -> str | None: + return api_key + + @api_key.setter # type: ignore + def api_key(self, value: str | None) -> None: # type: ignore + global api_key + + api_key = value + + @property # type: ignore + @override + def organization(self) -> str | None: + return organization + + @organization.setter # type: ignore + def organization(self, value: str | None) -> None: # type: ignore + global organization + + organization = value + + @property # type: ignore + @override + def project(self) -> str | None: + return project + + @project.setter # type: ignore + def project(self, value: str | None) -> None: # type: ignore + global project + + project = value + + @property + @override + def base_url(http://github.com/self) -> _httpx.URL: + if base_url is not None: + return _httpx.URL(base_url) + + return super().base_url + + @base_url.setter + def base_url(http://github.com/self,%20url:%20_httpx.URL%20|%20str) -> None: + super().base_url = url # type: ignore[misc] + + @property # type: ignore + @override + def timeout(self) -> float | Timeout | None: + return timeout + + @timeout.setter # type: ignore + def timeout(self, value: float | Timeout | None) -> None: # type: ignore + global timeout + + timeout = value + + @property # type: ignore + @override + def max_retries(self) -> int: + return max_retries + + @max_retries.setter # type: ignore + def max_retries(self, value: int) -> None: # type: ignore + global max_retries + + max_retries = value + + @property # type: ignore + @override + def _custom_headers(self) -> _t.Mapping[str, str] | None: + return default_headers + + @_custom_headers.setter # type: ignore + def _custom_headers(self, value: _t.Mapping[str, str] | None) -> None: # type: ignore + global default_headers + + default_headers = value + + @property # type: ignore + @override + def _custom_query(self) -> _t.Mapping[str, object] | None: + return default_query + + @_custom_query.setter # type: ignore + def _custom_query(self, value: _t.Mapping[str, object] | None) -> None: # type: ignore + global default_query + + default_query = value + + @property # type: ignore + @override + def _client(self) -> _httpx.Client: + return http_client or super()._client + + @_client.setter # type: ignore + def _client(self, value: _httpx.Client) -> None: # type: ignore + global http_client + + http_client = value + + +_client: OpenAI | None = None + + +def _load_client() -> OpenAI: # type: ignore[reportUnusedFunction] + global _client + + if _client is None: + _client = _ModuleClient( + api_key=api_key, + organization=organization, + project=project, + base_url=base_url, + timeout=timeout, + max_retries=max_retries, + default_headers=default_headers, + default_query=default_query, + http_client=http_client, + ) + return 
_client + + return _client + + +def _reset_client() -> None: # type: ignore[reportUnusedFunction] + global _client + + _client = None + + +from ._module_client import ( + beta as beta, + chat as chat, + audio as audio, + files as files, + client as client, + images as images, + models as models, + batches as batches, + uploads as uploads, + responses as responses, + embeddings as embeddings, + completions as completions, + fine_tuning as fine_tuning, + moderations as moderations, + vector_stores as vector_stores, +) diff --git a/src/openai/_module_client.py b/src/openai/_module_client.py new file mode 100644 index 0000000000..45b8b9d853 --- /dev/null +++ b/src/openai/_module_client.py @@ -0,0 +1,113 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import override + +from . import resources, _load_client +from ._utils import LazyProxy + + +class ChatProxy(LazyProxy[resources.Chat]): + @override + def __load__(self) -> resources.Chat: + return _load_client().chat + + +class BetaProxy(LazyProxy[resources.Beta]): + @override + def __load__(self) -> resources.Beta: + return _load_client().beta + + +class FilesProxy(LazyProxy[resources.Files]): + @override + def __load__(self) -> resources.Files: + return _load_client().files + + +class AudioProxy(LazyProxy[resources.Audio]): + @override + def __load__(self) -> resources.Audio: + return _load_client().audio + + +class ImagesProxy(LazyProxy[resources.Images]): + @override + def __load__(self) -> resources.Images: + return _load_client().images + + +class ModelsProxy(LazyProxy[resources.Models]): + @override + def __load__(self) -> resources.Models: + return _load_client().models + + +class ClientProxy(LazyProxy[resources.OpenAI]): + @override + def __load__(self) -> resources.OpenAI: + return _load_client().client + + +class BatchesProxy(LazyProxy[resources.Batches]): + @override + def __load__(self) -> resources.Batches: + return _load_client().batches + + +class UploadsProxy(LazyProxy[resources.Uploads]): + @override + def __load__(self) -> resources.Uploads: + return _load_client().uploads + + +class ResponsesProxy(LazyProxy[resources.Responses]): + @override + def __load__(self) -> resources.Responses: + return _load_client().responses + + +class EmbeddingsProxy(LazyProxy[resources.Embeddings]): + @override + def __load__(self) -> resources.Embeddings: + return _load_client().embeddings + + +class CompletionsProxy(LazyProxy[resources.Completions]): + @override + def __load__(self) -> resources.Completions: + return _load_client().completions + + +class ModerationsProxy(LazyProxy[resources.Moderations]): + @override + def __load__(self) -> resources.Moderations: + return _load_client().moderations + + +class FineTuningProxy(LazyProxy[resources.FineTuning]): + @override + def __load__(self) -> resources.FineTuning: + return _load_client().fine_tuning + + +class VectorStoresProxy(LazyProxy[resources.VectorStores]): + @override + def __load__(self) -> resources.VectorStores: + return _load_client().vector_stores + + +chat: resources.Chat = ChatProxy().__as_proxied__() +beta: resources.Beta = BetaProxy().__as_proxied__() +files: resources.Files = FilesProxy().__as_proxied__() +audio: resources.Audio = AudioProxy().__as_proxied__() +images: resources.Images = ImagesProxy().__as_proxied__() +models: resources.Models = ModelsProxy().__as_proxied__() +client: resources.OpenAI = ClientProxy().__as_proxied__() +batches: resources.Batches = BatchesProxy().__as_proxied__() +uploads: 
resources.Uploads = UploadsProxy().__as_proxied__() +responses: resources.Responses = ResponsesProxy().__as_proxied__() +embeddings: resources.Embeddings = EmbeddingsProxy().__as_proxied__() +completions: resources.Completions = CompletionsProxy().__as_proxied__() +moderations: resources.Moderations = ModerationsProxy().__as_proxied__() +fine_tuning: resources.FineTuning = FineTuningProxy().__as_proxied__() +vector_stores: resources.VectorStores = VectorStoresProxy().__as_proxied__() diff --git a/tests/test_module_client.py b/tests/test_module_client.py new file mode 100644 index 0000000000..5dc474e02d --- /dev/null +++ b/tests/test_module_client.py @@ -0,0 +1,89 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx +import pytest +from httpx import URL + +import openai +from openai import DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES + + +def reset_state() -> None: + openai._reset_client() + openai.api_key = None or "My API Key" + openai.organization = None + openai.project = None + openai.base_url = None + openai.timeout = DEFAULT_TIMEOUT + openai.max_retries = DEFAULT_MAX_RETRIES + openai.default_headers = None + openai.default_query = None + openai.http_client = None + + +@pytest.fixture(autouse=True) +def reset_state_fixture() -> None: + reset_state() + + +def test_base_url_option() -> None: + assert openai.base_url is None + assert openai.completions._client.base_url == URL("https://api.openai.com/v1/") + + openai.base_url = "http://foo.com/" + + assert openai.base_url == URL("http://foo.com/") + assert openai.completions._client.base_url == URL("http://foo.com/") + + +def test_timeout_option() -> None: + assert openai.timeout == openai.DEFAULT_TIMEOUT + assert openai.completions._client.timeout == openai.DEFAULT_TIMEOUT + + openai.timeout = 3 + + assert openai.timeout == 3 + assert openai.completions._client.timeout == 3 + + +def test_max_retries_option() -> None: + assert openai.max_retries == openai.DEFAULT_MAX_RETRIES + assert openai.completions._client.max_retries == openai.DEFAULT_MAX_RETRIES + + openai.max_retries = 1 + + assert openai.max_retries == 1 + assert openai.completions._client.max_retries == 1 + + +def test_default_headers_option() -> None: + assert openai.default_headers == None + + openai.default_headers = {"Foo": "Bar"} + + assert openai.default_headers["Foo"] == "Bar" + assert openai.completions._client.default_headers["Foo"] == "Bar" + + +def test_default_query_option() -> None: + assert openai.default_query is None + assert openai.completions._client._custom_query == {} + + openai.default_query = {"Foo": {"nested": 1}} + + assert openai.default_query["Foo"] == {"nested": 1} + assert openai.completions._client._custom_query["Foo"] == {"nested": 1} + + +def test_http_client_option() -> None: + assert openai.http_client is None + + original_http_client = openai.completions._client._client + assert original_http_client is not None + + new_client = httpx.Client() + openai.http_client = new_client + + assert openai.completions._client._client is new_client From 4e302b3acbec8178a71b82e6fc3343f7c61d2f37 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 12 Mar 2025 20:22:32 +0000 Subject: [PATCH 184/192] chore: fix module client (#2188) --- src/openai/__init__.py | 1 - src/openai/_module_client.py | 7 ------- 2 files changed, 8 deletions(-) diff --git a/src/openai/__init__.py 
b/src/openai/__init__.py index 7bf7c5660a..1107973aed 100644 --- a/src/openai/__init__.py +++ b/src/openai/__init__.py @@ -252,7 +252,6 @@ def _reset_client() -> None: # type: ignore[reportUnusedFunction] chat as chat, audio as audio, files as files, - client as client, images as images, models as models, batches as batches, diff --git a/src/openai/_module_client.py b/src/openai/_module_client.py index 45b8b9d853..e7d2657860 100644 --- a/src/openai/_module_client.py +++ b/src/openai/_module_client.py @@ -42,12 +42,6 @@ def __load__(self) -> resources.Models: return _load_client().models -class ClientProxy(LazyProxy[resources.OpenAI]): - @override - def __load__(self) -> resources.OpenAI: - return _load_client().client - - class BatchesProxy(LazyProxy[resources.Batches]): @override def __load__(self) -> resources.Batches: @@ -102,7 +96,6 @@ def __load__(self) -> resources.VectorStores: audio: resources.Audio = AudioProxy().__as_proxied__() images: resources.Images = ImagesProxy().__as_proxied__() models: resources.Models = ModelsProxy().__as_proxied__() -client: resources.OpenAI = ClientProxy().__as_proxied__() batches: resources.Batches = BatchesProxy().__as_proxied__() uploads: resources.Uploads = UploadsProxy().__as_proxied__() responses: resources.Responses = ResponsesProxy().__as_proxied__() From 899a10f017408343aefda4a6d8c587591d775180 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 13 Mar 2025 16:48:23 +0000 Subject: [PATCH 185/192] chore(internal): remove extra empty newlines (#2195) --- pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 5f2176755b..13d703bc96 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -153,7 +153,6 @@ reportImplicitOverride = true reportImportCycles = false reportPrivateUsage = false - [tool.ruff] line-length = 120 output-format = "grouped" From a7591e89c56d65bd26cf0ec9194a67815b91bcd7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 14 Mar 2025 15:21:04 +0000 Subject: [PATCH 186/192] chore(internal): bump rye to 0.44.0 (#2200) --- .devcontainer/Dockerfile | 2 +- .github/workflows/ci.yml | 6 +++--- .github/workflows/create-releases.yml | 2 +- .github/workflows/publish-pypi.yml | 2 +- requirements-dev.lock | 1 + requirements.lock | 1 + 6 files changed, 8 insertions(+), 6 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 55d20255c9..ff261bad78 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -3,7 +3,7 @@ FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT} USER vscode -RUN curl -sSf https://rye.astral.sh/get | RYE_VERSION="0.35.0" RYE_INSTALL_OPTION="--yes" bash +RUN curl -sSf https://rye.astral.sh/get | RYE_VERSION="0.44.0" RYE_INSTALL_OPTION="--yes" bash ENV PATH=/home/vscode/.rye/shims:$PATH RUN echo "[[ -d .venv ]] && source .venv/bin/activate || export PATH=\$PATH" >> /home/vscode/.bashrc diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 25b0c0286d..e1ae39d559 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,7 +22,7 @@ jobs: curl -sSf https://rye.astral.sh/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: - RYE_VERSION: '0.35.0' + RYE_VERSION: '0.44.0' RYE_INSTALL_OPTION: '--yes' - name: Install dependencies @@ -44,7 +44,7 @@ jobs: curl -sSf https://rye.astral.sh/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: - RYE_VERSION: '0.35.0' + 
RYE_VERSION: '0.44.0' RYE_INSTALL_OPTION: '--yes' - name: Bootstrap @@ -65,7 +65,7 @@ jobs: curl -sSf https://rye.astral.sh/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: - RYE_VERSION: '0.35.0' + RYE_VERSION: '0.44.0' RYE_INSTALL_OPTION: '--yes' - name: Install dependencies run: | diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml index 2a97049033..b3e1c679d4 100644 --- a/.github/workflows/create-releases.yml +++ b/.github/workflows/create-releases.yml @@ -28,7 +28,7 @@ jobs: curl -sSf https://rye.astral.sh/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: - RYE_VERSION: '0.35.0' + RYE_VERSION: '0.44.0' RYE_INSTALL_OPTION: '--yes' - name: Publish to PyPI diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index 44027a3c4c..7096ca9832 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -17,7 +17,7 @@ jobs: curl -sSf https://rye.astral.sh/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: - RYE_VERSION: '0.35.0' + RYE_VERSION: '0.44.0' RYE_INSTALL_OPTION: '--yes' - name: Publish to PyPI diff --git a/requirements-dev.lock b/requirements-dev.lock index 3e56c5090e..be626d274b 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -7,6 +7,7 @@ # all-features: true # with-sources: false # generate-hashes: false +# universal: false -e file:. annotated-types==0.6.0 diff --git a/requirements.lock b/requirements.lock index 749d24f2cd..c704fd8abd 100644 --- a/requirements.lock +++ b/requirements.lock @@ -7,6 +7,7 @@ # all-features: true # with-sources: false # generate-hashes: false +# universal: false -e file:. annotated-types==0.6.0 From f3f5279b0f543e41027319678db64d96e817099a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 14 Mar 2025 19:21:28 +0000 Subject: [PATCH 187/192] chore(internal): remove CI condition (#2203) --- .github/workflows/ci.yml | 2 -- .github/workflows/create-releases.yml | 39 --------------------------- .github/workflows/publish-pypi.yml | 8 ++++-- .github/workflows/release-doctor.yml | 1 - .release-please-manifest.json | 2 +- .stats.yml | 2 +- bin/check-release-environment | 4 --- pyproject.toml | 2 +- src/openai/_version.py | 2 +- 9 files changed, 10 insertions(+), 52 deletions(-) delete mode 100644 .github/workflows/create-releases.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e1ae39d559..d86fc0ea53 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,7 +12,6 @@ jobs: lint: name: lint runs-on: ubuntu-latest - if: github.repository == 'openai/openai-python' steps: - uses: actions/checkout@v4 @@ -34,7 +33,6 @@ jobs: test: name: test runs-on: ubuntu-latest - if: github.repository == 'openai/openai-python' steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml deleted file mode 100644 index b3e1c679d4..0000000000 --- a/.github/workflows/create-releases.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: Create releases -on: - schedule: - - cron: '0 5 * * *' # every day at 5am UTC - push: - branches: - - main - -jobs: - release: - name: release - if: github.ref == 'refs/heads/main' && github.repository == 'openai/openai-python' - runs-on: ubuntu-latest - environment: publish - - steps: - - uses: actions/checkout@v4 - - - uses: stainless-api/trigger-release-please@v1 - id: release - with: - repo: ${{ github.event.repository.full_name }} - stainless-api-key: 
${{ secrets.STAINLESS_API_KEY }} - - - name: Install Rye - if: ${{ steps.release.outputs.releases_created }} - run: | - curl -sSf https://rye.astral.sh/get | bash - echo "$HOME/.rye/shims" >> $GITHUB_PATH - env: - RYE_VERSION: '0.44.0' - RYE_INSTALL_OPTION: '--yes' - - - name: Publish to PyPI - if: ${{ steps.release.outputs.releases_created }} - run: | - bash ./bin/publish-pypi - env: - PYPI_TOKEN: ${{ secrets.OPENAI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index 7096ca9832..403b895b7e 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -1,9 +1,13 @@ -# workflow for re-running publishing to PyPI in case it fails for some reason -# you can run this workflow by navigating to https://www.github.com/openai/openai-python/actions/workflows/publish-pypi.yml +# This workflow is triggered when a GitHub release is created. +# It can also be run manually to re-publish to PyPI in case it failed for some reason. +# You can run this workflow by navigating to https://www.github.com/openai/openai-python/actions/workflows/publish-pypi.yml name: Publish PyPI on: workflow_dispatch: + release: + types: [published] + jobs: publish: name: publish diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index e078964a6f..445f626d93 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -19,5 +19,4 @@ jobs: run: | bash ./bin/check-release-environment env: - STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }} PYPI_TOKEN: ${{ secrets.OPENAI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 1f79fd2d11..6d3d57b7ab 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.30.1" + ".": "1.66.3" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index edc2aaf89f..53c73037d5 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml diff --git a/bin/check-release-environment b/bin/check-release-environment index 2cc5ad6352..5471b69edb 100644 --- a/bin/check-release-environment +++ b/bin/check-release-environment @@ -2,10 +2,6 @@ errors=() -if [ -z "${STAINLESS_API_KEY}" ]; then - errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.") -fi - if [ -z "${PYPI_TOKEN}" ]; then errors+=("The OPENAI_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") fi diff --git a/pyproject.toml b/pyproject.toml index 13d703bc96..0341fadab7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.30.1" +version = "1.66.3" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 83411041ae..6c4a192efc 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.30.1" # x-release-please-version +__version__ = "1.66.3" # x-release-please-version From e4d424dc1590da55ee91de7122a2b5b3e61e9adf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 14 Mar 2025 22:11:11 +0000 Subject: [PATCH 188/192] fix(types): handle more discriminated union shapes (#2206) --- src/openai/_models.py | 7 +++++-- tests/test_models.py | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/src/openai/_models.py b/src/openai/_models.py index c4401ff868..b51a1bf5f9 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -65,7 +65,7 @@ from ._constants import RAW_RESPONSE_HEADER if TYPE_CHECKING: - from pydantic_core.core_schema import ModelField, LiteralSchema, ModelFieldsSchema + from pydantic_core.core_schema import ModelField, ModelSchema, LiteralSchema, ModelFieldsSchema __all__ = ["BaseModel", "GenericModel"] @@ -646,15 +646,18 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, def _extract_field_schema_pv2(model: type[BaseModel], field_name: str) -> ModelField | None: schema = model.__pydantic_core_schema__ + if schema["type"] == "definitions": + schema = schema["schema"] + if schema["type"] != "model": return None + schema = cast("ModelSchema", schema) fields_schema = schema["schema"] if fields_schema["type"] != "model-fields": return None fields_schema = cast("ModelFieldsSchema", fields_schema) - field = fields_schema["fields"].get(field_name) if not field: return None diff --git a/tests/test_models.py b/tests/test_models.py index 30b17e3ac0..b9be1f3ea3 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -854,3 +854,35 @@ class Model(BaseModel): m = construct_type(value={"cls": "foo"}, type_=Model) assert isinstance(m, Model) assert isinstance(m.cls, str) + + +def test_discriminated_union_case() -> None: + class A(BaseModel): + type: Literal["a"] + + data: bool + + class B(BaseModel): + type: Literal["b"] + + data: List[Union[A, object]] + + class ModelA(BaseModel): + type: Literal["modelA"] + + data: int + + class ModelB(BaseModel): + type: Literal["modelB"] + + required: str + + data: Union[A, B] + + # when constructing ModelA | ModelB, value data doesn't match ModelB exactly - missing `required` + m = construct_type( + value={"type": "modelB", "data": {"type": "a", "data": True}}, + type_=cast(Any, Annotated[Union[ModelA, ModelB], PropertyInfo(discriminator="type")]), + ) + + assert isinstance(m, ModelB) From 3fb740a33a454f386ee1a13fa2b43097f10a288c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 17 Mar 2025 13:04:17 +0000 Subject: [PATCH 189/192] fix(ci): ensure pip is always available (#2207) --- bin/publish-pypi | 1 + 1 file changed, 1 insertion(+) diff --git a/bin/publish-pypi b/bin/publish-pypi index 05bfccbb71..ebebf91657 100644 --- a/bin/publish-pypi +++ b/bin/publish-pypi @@ -5,5 +5,6 @@ mkdir -p dist rye build --clean # Patching importlib-metadata version until upstream library version is updated # https://github.com/pypa/twine/issues/977#issuecomment-2189800841 +"$HOME/.rye/self/bin/python3" -m ensurepip "$HOME/.rye/self/bin/python3" -m pip install 'importlib-metadata==7.2.1' rye publish --yes --token=$PYPI_TOKEN From b0ae776665ffc5d941ed7fc7418b2560bf8c78aa Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 17 Mar 
2025 15:52:16 +0000 Subject: [PATCH 190/192] fix(ci): remove publishing patch (#2208) --- bin/publish-pypi | 4 ---- pyproject.toml | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/bin/publish-pypi b/bin/publish-pypi index ebebf91657..826054e924 100644 --- a/bin/publish-pypi +++ b/bin/publish-pypi @@ -3,8 +3,4 @@ set -eux mkdir -p dist rye build --clean -# Patching importlib-metadata version until upstream library version is updated -# https://github.com/pypa/twine/issues/977#issuecomment-2189800841 -"$HOME/.rye/self/bin/python3" -m ensurepip -"$HOME/.rye/self/bin/python3" -m pip install 'importlib-metadata==7.2.1' rye publish --yes --token=$PYPI_TOKEN diff --git a/pyproject.toml b/pyproject.toml index 0341fadab7..93a4f69782 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -88,7 +88,7 @@ typecheck = { chain = [ "typecheck:mypy" = "mypy ." [build-system] -requires = ["hatchling", "hatch-fancy-pypi-readme"] +requires = ["hatchling==1.26.3", "hatch-fancy-pypi-readme"] build-backend = "hatchling.build" [tool.hatch.build] From 810c32fe5d3c3b5257c346d696967cb8b0d6e59c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 17 Mar 2025 16:51:14 +0000 Subject: [PATCH 191/192] chore(internal): version bump (#2210) --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/openai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6d3d57b7ab..dac37ce406 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.66.3" + ".": "1.66.4" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 93a4f69782..585db285c3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.66.3" +version = "1.66.4" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 6c4a192efc..df2f60a7dc 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.66.3" # x-release-please-version +__version__ = "1.66.4" # x-release-please-version From 168200104d203c5b30101fc55c7d641ad9a55805 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 17:53:53 +0000 Subject: [PATCH 192/192] fix(api): correct some Responses types --- .stats.yml | 2 +- src/openai/resources/batches.py | 16 ++++++++-------- src/openai/types/batch_create_params.py | 9 +++++---- src/openai/types/chat/chat_completion_chunk.py | 7 +++++-- .../chat/chat_completion_content_part_param.py | 2 +- .../chat/chat_completion_stream_options_param.py | 7 +++++-- .../responses/response_function_tool_call.py | 6 +++--- .../response_function_tool_call_param.py | 6 +++--- src/openai/types/responses/response_usage.py | 13 ++++++++++++- src/openai/types/shared/reasoning.py | 2 +- src/openai/types/shared_params/reasoning.py | 6 +++--- tests/api_resources/test_batches.py | 16 ++++++++-------- 12 files changed, 55 insertions(+), 37 deletions(-) diff --git a/.stats.yml b/.stats.yml index 53c73037d5..1e04d7c268 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f763c1a35c8b9b02f1e31b9b2e09e21f98bfe8413e5079c86cbb07da2dd7779b.yml diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py index 7e7ec19ec2..b7a299be12 100644 --- a/src/openai/resources/batches.py +++ b/src/openai/resources/batches.py @@ -49,7 +49,7 @@ def create( self, *, completion_window: Literal["24h"], - endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"], + endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], input_file_id: str, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -67,9 +67,9 @@ def create( is supported. endpoint: The endpoint to be used for all requests in the batch. Currently - `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. - Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 - embedding inputs across all requests in the batch. + `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` + are supported. Note that `/v1/embeddings` batches are also restricted to a + maximum of 50,000 embedding inputs across all requests in the batch. input_file_id: The ID of an uploaded file that contains requests for the new batch. @@ -259,7 +259,7 @@ async def create( self, *, completion_window: Literal["24h"], - endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"], + endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], input_file_id: str, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -277,9 +277,9 @@ async def create( is supported. endpoint: The endpoint to be used for all requests in the batch. Currently - `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. 
- Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 - embedding inputs across all requests in the batch. + `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` + are supported. Note that `/v1/embeddings` batches are also restricted to a + maximum of 50,000 embedding inputs across all requests in the batch. input_file_id: The ID of an uploaded file that contains requests for the new batch. diff --git a/src/openai/types/batch_create_params.py b/src/openai/types/batch_create_params.py index e5be1d2bac..cc95afd3ba 100644 --- a/src/openai/types/batch_create_params.py +++ b/src/openai/types/batch_create_params.py @@ -17,12 +17,13 @@ class BatchCreateParams(TypedDict, total=False): Currently only `24h` is supported. """ - endpoint: Required[Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"]] + endpoint: Required[Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"]] """The endpoint to be used for all requests in the batch. - Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are - supported. Note that `/v1/embeddings` batches are also restricted to a maximum - of 50,000 embedding inputs across all requests in the batch. + Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and + `/v1/completions` are supported. Note that `/v1/embeddings` batches are also + restricted to a maximum of 50,000 embedding inputs across all requests in the + batch. """ input_file_id: Required[str] diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py index dede513f1e..31b9cb5456 100644 --- a/src/openai/types/chat/chat_completion_chunk.py +++ b/src/openai/types/chat/chat_completion_chunk.py @@ -142,6 +142,9 @@ class ChatCompletionChunk(BaseModel): """ An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. When present, it - contains a null value except for the last chunk which contains the token usage - statistics for the entire request. + contains a null value **except for the last chunk** which contains the token + usage statistics for the entire request. + + **NOTE:** If the stream is interrupted or cancelled, you may not receive the + final usage chunk which contains the total token usage for the request. """ diff --git a/src/openai/types/chat/chat_completion_content_part_param.py b/src/openai/types/chat/chat_completion_content_part_param.py index 1293c54312..cbedc853ba 100644 --- a/src/openai/types/chat/chat_completion_content_part_param.py +++ b/src/openai/types/chat/chat_completion_content_part_param.py @@ -22,7 +22,7 @@ class FileFile(TypedDict, total=False): file_id: str """The ID of an uploaded file to use as input.""" - file_name: str + filename: str """The name of the file, used when passing the file to the model as a string.""" diff --git a/src/openai/types/chat/chat_completion_stream_options_param.py b/src/openai/types/chat/chat_completion_stream_options_param.py index fbf7291821..471e0eba98 100644 --- a/src/openai/types/chat/chat_completion_stream_options_param.py +++ b/src/openai/types/chat/chat_completion_stream_options_param.py @@ -12,6 +12,9 @@ class ChatCompletionStreamOptionsParam(TypedDict, total=False): """If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire - request, and the `choices` field will always be an empty array. 
All other chunks - will also include a `usage` field, but with a null value. + request, and the `choices` field will always be an empty array. + + All other chunks will also include a `usage` field, but with a null value. + **NOTE:** If the stream is interrupted, you may not receive the final usage + chunk which contains the total token usage for the request. """ diff --git a/src/openai/types/responses/response_function_tool_call.py b/src/openai/types/responses/response_function_tool_call.py index 5d82906cb7..2a8482204e 100644 --- a/src/openai/types/responses/response_function_tool_call.py +++ b/src/openai/types/responses/response_function_tool_call.py @@ -9,9 +9,6 @@ class ResponseFunctionToolCall(BaseModel): - id: str - """The unique ID of the function tool call.""" - arguments: str """A JSON string of the arguments to pass to the function.""" @@ -24,6 +21,9 @@ class ResponseFunctionToolCall(BaseModel): type: Literal["function_call"] """The type of the function tool call. Always `function_call`.""" + id: Optional[str] = None + """The unique ID of the function tool call.""" + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None """The status of the item. diff --git a/src/openai/types/responses/response_function_tool_call_param.py b/src/openai/types/responses/response_function_tool_call_param.py index 51b947a764..eaa263cf67 100644 --- a/src/openai/types/responses/response_function_tool_call_param.py +++ b/src/openai/types/responses/response_function_tool_call_param.py @@ -8,9 +8,6 @@ class ResponseFunctionToolCallParam(TypedDict, total=False): - id: Required[str] - """The unique ID of the function tool call.""" - arguments: Required[str] """A JSON string of the arguments to pass to the function.""" @@ -23,6 +20,9 @@ class ResponseFunctionToolCallParam(TypedDict, total=False): type: Required[Literal["function_call"]] """The type of the function tool call. Always `function_call`.""" + id: str + """The unique ID of the function tool call.""" + status: Literal["in_progress", "completed", "incomplete"] """The status of the item. diff --git a/src/openai/types/responses/response_usage.py b/src/openai/types/responses/response_usage.py index ef631c5882..9ad36bd326 100644 --- a/src/openai/types/responses/response_usage.py +++ b/src/openai/types/responses/response_usage.py @@ -3,7 +3,15 @@ from ..._models import BaseModel -__all__ = ["ResponseUsage", "OutputTokensDetails"] +__all__ = ["ResponseUsage", "InputTokensDetails", "OutputTokensDetails"] + + +class InputTokensDetails(BaseModel): + cached_tokens: int + """The number of tokens that were retrieved from the cache. + + [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). + """ class OutputTokensDetails(BaseModel): @@ -15,6 +23,9 @@ class ResponseUsage(BaseModel): input_tokens: int """The number of input tokens.""" + input_tokens_details: InputTokensDetails + """A detailed breakdown of the input tokens.""" + output_tokens: int """The number of output tokens.""" diff --git a/src/openai/types/shared/reasoning.py b/src/openai/types/shared/reasoning.py index 50821a1727..78a396d738 100644 --- a/src/openai/types/shared/reasoning.py +++ b/src/openai/types/shared/reasoning.py @@ -20,7 +20,7 @@ class Reasoning(BaseModel): """ generate_summary: Optional[Literal["concise", "detailed"]] = None - """**o-series models only** + """**computer_use_preview only** A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. 
One of `concise` or diff --git a/src/openai/types/shared_params/reasoning.py b/src/openai/types/shared_params/reasoning.py index f2b5c5963a..2953b895c4 100644 --- a/src/openai/types/shared_params/reasoning.py +++ b/src/openai/types/shared_params/reasoning.py @@ -3,7 +3,7 @@ from __future__ import annotations from typing import Optional -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Literal, TypedDict from ..shared.reasoning_effort import ReasoningEffort @@ -11,7 +11,7 @@ class Reasoning(TypedDict, total=False): - effort: Required[Optional[ReasoningEffort]] + effort: Optional[ReasoningEffort] """**o-series models only** Constrains effort on reasoning for @@ -21,7 +21,7 @@ class Reasoning(TypedDict, total=False): """ generate_summary: Optional[Literal["concise", "detailed"]] - """**o-series models only** + """**computer_use_preview only** A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of `concise` or diff --git a/tests/api_resources/test_batches.py b/tests/api_resources/test_batches.py index 047b8bae12..02eade0963 100644 --- a/tests/api_resources/test_batches.py +++ b/tests/api_resources/test_batches.py @@ -22,7 +22,7 @@ class TestBatches: def test_method_create(self, client: OpenAI) -> None: batch = client.batches.create( completion_window="24h", - endpoint="/v1/chat/completions", + endpoint="/v1/responses", input_file_id="input_file_id", ) assert_matches_type(Batch, batch, path=["response"]) @@ -31,7 +31,7 @@ def test_method_create(self, client: OpenAI) -> None: def test_method_create_with_all_params(self, client: OpenAI) -> None: batch = client.batches.create( completion_window="24h", - endpoint="/v1/chat/completions", + endpoint="/v1/responses", input_file_id="input_file_id", metadata={"foo": "string"}, ) @@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: def test_raw_response_create(self, client: OpenAI) -> None: response = client.batches.with_raw_response.create( completion_window="24h", - endpoint="/v1/chat/completions", + endpoint="/v1/responses", input_file_id="input_file_id", ) @@ -54,7 +54,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: def test_streaming_response_create(self, client: OpenAI) -> None: with client.batches.with_streaming_response.create( completion_window="24h", - endpoint="/v1/chat/completions", + endpoint="/v1/responses", input_file_id="input_file_id", ) as response: assert not response.is_closed @@ -182,7 +182,7 @@ class TestAsyncBatches: async def test_method_create(self, async_client: AsyncOpenAI) -> None: batch = await async_client.batches.create( completion_window="24h", - endpoint="/v1/chat/completions", + endpoint="/v1/responses", input_file_id="input_file_id", ) assert_matches_type(Batch, batch, path=["response"]) @@ -191,7 +191,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: batch = await async_client.batches.create( completion_window="24h", - endpoint="/v1/chat/completions", + endpoint="/v1/responses", input_file_id="input_file_id", metadata={"foo": "string"}, ) @@ -201,7 +201,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.batches.with_raw_response.create( completion_window="24h", - 
endpoint="/v1/chat/completions", + endpoint="/v1/responses", input_file_id="input_file_id", ) @@ -214,7 +214,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.batches.with_streaming_response.create( completion_window="24h", - endpoint="/v1/chat/completions", + endpoint="/v1/responses", input_file_id="input_file_id", ) as response: assert not response.is_closed