diff --git a/.fernignore b/.fernignore
index 1fc3a34a..a222f8ab 100644
--- a/.fernignore
+++ b/.fernignore
@@ -1,13 +1,23 @@
# Specify files that shouldn't be modified by Fern
+# Ignore manually created SDK wrappers
src/elevenlabs/client.py
+src/elevenlabs/conversational_ai/conversation.py
+src/elevenlabs/conversational_ai/default_audio_interface.py
src/elevenlabs/play.py
src/elevenlabs/realtime_tts.py
-.github/workflows/ci.yml
-.github/workflows/tests.yml
+# Ignore CI files
+.github/
README.md
assets/
+# Ignore custom tests
tests/
+
+
+
+
+
+
diff --git a/.github/ISSUE_TEMPLATE/1-bug_report.yml b/.github/ISSUE_TEMPLATE/1-bug_report.yml
new file mode 100644
index 00000000..2f879fc0
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/1-bug_report.yml
@@ -0,0 +1,51 @@
+name: Bug report
+description: Create a bug report for the ElevenLabs Python SDK
+labels: [bug]
+body:
+ - type: markdown
+ attributes:
+ value: |
+ Thank you for reporting a bug with the **ElevenLabs Python SDK**. Please fill out the sections below to help us understand and resolve the issue.
+
+ **Note:** The ElevenLabs Python SDK is **auto-generated from [Fern](https://www.buildwithfern.com/)** and is a wrapper around our OpenAPI specification. Direct modifications to the SDK code may be overwritten in future releases.
+
+ - type: textarea
+ attributes:
+ label: Description
+ description: |
+ **Describe the bug in detail and provide clear steps to reproduce it.**
+
+ Include information such as:
+ - What you were trying to achieve.
+ - What happened instead.
+ - Any relevant parameters or configurations.
+ placeholder: |
+ **Steps to reproduce:**
+ 1. ...
+ 2. ...
+
+ **Expected behavior:**
+
+ **Actual behavior:**
+ validations:
+ required: true
+ - type: textarea
+ attributes:
+ label: Code example
+ description: Provide an example code snippet that has the problem (Make sure to **NOT** upload or expose your API key).
+ placeholder: |
+ ```python
+      from elevenlabs.client import ElevenLabs
+
+ # Your code here
+ ```
+ - type: textarea
+ attributes:
+ label: Additional context
+ description: |
+ Add any other context or screenshots about the problem here.
+ placeholder: |
+ - Related issues:
+ - Possible workaround:
+      - Logs:
+ - ...
diff --git a/.github/ISSUE_TEMPLATE/2-feature_request.yml b/.github/ISSUE_TEMPLATE/2-feature_request.yml
new file mode 100644
index 00000000..1f6384f4
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/2-feature_request.yml
@@ -0,0 +1,52 @@
+name: Feature Request
+description: Propose a new feature for the ElevenLabs Python SDK
+labels: [enhancement]
+body:
+ - type: markdown
+ attributes:
+ value: |
+ Thank you for taking the time to propose a new feature for the **ElevenLabs Python SDK**. Please provide detailed information below to help us understand your proposal.
+
+      **Note:** The ElevenLabs Python SDK is **auto-generated from [Fern](https://www.buildwithfern.com/)** and is a wrapper around our OpenAPI specification. Additions made directly to the SDK code may be overwritten in future releases. We recommend opening an issue to discuss your feature request so we can explore how it fits into our API.
+
+ - type: textarea
+ attributes:
+ label: Feature Description
+ description: A detailed description of the feature you are proposing for the SDK. Include information such as the problem it solves, how it would work, and any relevant APIs or modules it would involve.
+ placeholder: |
+ **Feature description:**
+
+ - What is the feature?
+ - What problem does it solve?
+ - How do you envision it working?
+ validations:
+ required: false
+ - type: textarea
+ attributes:
+ label: Use Case
+ description: Provide one or more use cases where this feature would be beneficial.
+ placeholder: |
+ **Use case:**
+
+ - Describe a scenario where this feature would be useful.
+ - type: textarea
+ attributes:
+ label: Alternatives Considered
+ description: Describe any alternative solutions or features you've considered.
+ placeholder: |
+ **Alternatives considered:**
+
+ - Other ways to solve the problem?
+ - Why do these alternatives fall short?
+ validations:
+ required: false
+ - type: textarea
+ attributes:
+ label: Additional Context
+ description: Any extra information, references, or screenshots that might help us understand your feature request.
+ placeholder: |
+ - Related issues or discussions:
+ - Relevant links:
+ - Screenshots or mockups:
+ validations:
+ required: false
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 00000000..86bca335
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: true
+contact_links:
+ - name: Ask a question (Discord)
+ url: https://discord.com/invite/elevenlabs
+    about: Please ask questions in our Discord community.
diff --git a/.github/SECURITY.md b/.github/SECURITY.md
new file mode 100644
index 00000000..129451f6
--- /dev/null
+++ b/.github/SECURITY.md
@@ -0,0 +1,9 @@
+# Reporting Security Issues
+
+If you believe you have found a security vulnerability, we encourage you to let us know right away.
+
+We will investigate all legitimate reports and do our best to quickly fix the problem.
+
+## Bug Bounty Program
+
+Please note that ElevenLabs does not offer cash-based rewards for vulnerability reports at this time.
diff --git a/.gitignore b/.gitignore
index 0da665fe..83bacf16 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,4 @@ dist/
__pycache__/
poetry.toml
.ruff_cache/
+.DS_Store
diff --git a/README.md b/README.md
index 884c8df7..32d915ad 100644
--- a/README.md
+++ b/README.md
@@ -20,104 +20,37 @@ Check out the [HTTP API documentation](https://elevenlabs.io/docs/api-reference)
pip install elevenlabs
```
-## v0.x to v1.x Migration Guide
-> The SDK was rewritten in v1 and is now programmatically generated from our OpenAPI spec. As part of this release
-> there are some breaking changes.
-
-
-### Client Instantiation
-The SDK now exports a client class that you must instantiate to call various
-endpoints in our API.
-
-```python
-from elevenlabs.client import ElevenLabs
-
-client = ElevenLabs(
- api_key="..." # Defaults to ELEVEN_API_KEY
-)
-```
-As part of this change, there is no longer a `set_api_key` and `get_api_key` method exported.
-
-### HTTPX
-The SDK now uses httpx under the hood. This allows us to export an async client in addition to
-a synchronous client. Note that you can pass in your own httpx client as well.
-
-```python
-from elevenlabs.client import AsyncElevenLabs
-
-client = AsyncElevenLabs(
- api_key="...", # Defaults to ELEVEN_API_KEY
- httpx_client=httpx.AsyncClient(...)
-)
-```
-
-### Removing Static Methods
-There are no longer static methods exposed directly on objects. For example,
-instead of `Models.from_api()` you can now do `client.models.get_all()`.
-
-The renames are specified below:
-
- `User.from_api()` -> `client.users.get()`
-
- `Models.from_api()` -> `client.models.get_all()`
-
- `Voices.from_api()` -> `client.voices.get_all()`
-
- `History.from_api()` -> `client.history.get_all()`
-
-
-### Exported functions
-The SDK no longer exports top level functions `generate`, `clone`, and `voices`. Instead,
-everything is now directly attached to the client instance.
-
-#### `generate` -> `client.generate`
-
-The generate method is a helper function that makes it easier to consume the
-text-to-speech APIs. If you'd rather access the raw APIs, simply use `client.text_to_speech`.
-
-#### `clone` -> `client.clone`
-
-The clone method is a helper function that wraps the voices add and
-get APIs. If you'd rather access the raw APIs, simply use `client.voices.add()`.
+## 🗣️ Usage
-#### `voice` -> `client.voices.get_all()`
+[](https://huggingface.co/spaces/elevenlabs/tts)
+[](https://colab.research.google.com/gist/flavioschneider/49468d728a816c6538fd2f56b3b50b96/elevenlabs-python.ipynb)
-To get all your voices, use `client.voices.get_all()`.
+### Main Models
-#### `play`, `stream` and `save`
+1. **Eleven Multilingual v2** (`eleven_multilingual_v2`)
-The SDK continues to export the `play`, `stream` and `save` methods. Under the hood, these methods
-use ffmpeg and mpv to play audio streams.
+ - Excels in stability, language diversity, and accent accuracy
+ - Supports 29 languages
+ - Recommended for most use cases
-```python
-from elevenlabs import play, stream, save
+2. **Eleven Turbo v2.5** (`eleven_turbo_v2_5`)
+ - High quality, lowest latency
+ - Ideal for developer use cases where speed is crucial
+ - Supports 32 languages
-# plays audio using ffmpeg
-play(audio)
-# streams audio using mpv
-stream(audio)
-# saves audio to file
-save(audio, "my-file.mp3")
-```
-
-
-## 🗣️ Usage
-[](https://huggingface.co/spaces/elevenlabs/tts)
-[](https://colab.research.google.com/gist/flavioschneider/49468d728a816c6538fd2f56b3b50b96/elevenlabs-python.ipynb)
-
-We support two main models: the newest `eleven_multilingual_v2`, a single foundational model supporting 29 languages including English, Chinese, Spanish, Hindi, Portuguese, French, German, Japanese, Arabic, Korean, Indonesian, Italian, Dutch, Turkish, Polish, Swedish, Filipino, Malay, Russian, Romanian, Ukrainian, Greek, Czech, Danish, Finnish, Bulgarian, Croatian, Slovak, and Tamil; and `eleven_monolingual_v1`, a low-latency model specifically trained for English speech.
+For more detailed information about these models and others, visit the [ElevenLabs Models documentation](https://elevenlabs.io/docs/speech-synthesis/models).
```py
from elevenlabs import play
from elevenlabs.client import ElevenLabs
client = ElevenLabs(
- api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY
+ api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY or ELEVENLABS_API_KEY
)
audio = client.generate(
text="Hello! 你好! Hola! नमस्ते! Bonjour! こんにちは! مرحبا! 안녕하세요! Ciao! Cześć! Привіт! வணக்கம்!",
- voice="Rachel",
+ voice="Brian",
model="eleven_multilingual_v2"
)
play(audio)
@@ -125,20 +58,19 @@ play(audio)
Play
- Don't forget to unmute the player!
-
-[audio (3).webm](https://github.com/elevenlabs/elevenlabs-python/assets/12028621/778fd3ed-0a3a-4d66-8f73-faee099dfdd6)
+🎧 **Try it out!** Want to hear our voices in action? Visit the [ElevenLabs Voice Lab](https://elevenlabs.io/voice-lab) to experiment with different voices, languages, and settings.
## 🗣️ Voices
List all your available voices with `voices()`.
+
```py
from elevenlabs.client import ElevenLabs
client = ElevenLabs(
- api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY
+ api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY or ELEVENLABS_API_KEY
)
response = client.voices.get_all()
@@ -146,28 +78,9 @@ audio = client.generate(text="Hello there!", voice=response.voices[0])
print(response.voices)
```
- Show output
-
-```py
-[
- Voice(
- voice_id='21m00Tcm4TlvDq8ikWAM',
- name='Rachel',
- category='premade',
- settings=None,
- ),
- Voice(
- voice_id='AZnzlk1XvdvUeBnXmlld',
- name='Domi',
- category='premade',
- settings=None,
- ),
-]
-```
-
-
+For information about the structure of the voices output, please refer to the [official ElevenLabs API documentation for Get Voices](https://elevenlabs.io/docs/api-reference/get-voices).
-Build a voice object with custom settings to personalize the voice style, or call
+Build a voice object with custom settings to personalize the voice style, or call
`client.voices.get_settings("your-voice-id")` to get the default settings for the voice.
```py
@@ -175,13 +88,13 @@ from elevenlabs import Voice, VoiceSettings, play
from elevenlabs.client import ElevenLabs
client = ElevenLabs(
- api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY
+ api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY or ELEVENLABS_API_KEY
)
audio = client.generate(
- text="Hello! My name is Bella.",
+ text="Hello! My name is Brian.",
voice=Voice(
- voice_id='EXAVITQu4vr4xnSDxMaL',
+ voice_id='nPczCjzI2devNBz1zQrb',
settings=VoiceSettings(stability=0.71, similarity_boost=0.5, style=0.0, use_speaker_boost=True)
)
)
@@ -200,7 +113,7 @@ from elevenlabs.client import ElevenLabs
from elevenlabs import play
client = ElevenLabs(
- api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY
+ api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY or ELEVENLABS_API_KEY
)
voice = client.clone(
@@ -223,7 +136,7 @@ from elevenlabs.client import ElevenLabs
from elevenlabs import stream
client = ElevenLabs(
- api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY
+ api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY or ELEVENLABS_API_KEY
)
audio_stream = client.generate(
@@ -235,16 +148,18 @@ stream(audio_stream)
```
Note that `generate` is a helper function. If you'd like to access
-the raw method, simply use `client.text_to_speech.convert_as_stream`.
+the raw method, simply use `client.text_to_speech.convert_as_stream`.
### Input streaming
+
Stream text chunks into audio as it's being generated, with <1s latency. Note: if chunks don't end with space or punctuation (" ", ".", "?", "!"), the stream will wait for more text.
+
```py
from elevenlabs.client import ElevenLabs
from elevenlabs import stream
client = ElevenLabs(
- api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY
+ api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY or ELEVENLABS_API_KEY
)
def text_stream():
@@ -253,8 +168,8 @@ def text_stream():
audio_stream = client.generate(
text=text_stream(),
- voice="Nicole",
- model="eleven_monolingual_v1",
+ voice="Brian",
+ model="eleven_multilingual_v2",
stream=True
)
@@ -262,11 +177,11 @@ stream(audio_stream)
```
Note that `generate` is a helper function. If you'd like to access
-the raw method, simply use `client.text_to_speech.convert_realtime`.
+the raw method, simply use `client.text_to_speech.convert_realtime`.
+## Async Client
-## Async Client
-Use `AsyncElevenLabs` if you want to make API calls asynchronously.
+Use `AsyncElevenLabs` if you want to make API calls asynchronously.
```python
import asyncio
@@ -274,7 +189,7 @@ import asyncio
from elevenlabs.client import AsyncElevenLabs
eleven = AsyncElevenLabs(
- api_key="MY_API_KEY" # Defaults to ELEVEN_API_KEY
+ api_key="MY_API_KEY" # Defaults to ELEVEN_API_KEY or ELEVENLABS_API_KEY
)
async def print_models() -> None:
@@ -284,19 +199,14 @@ async def print_models() -> None:
asyncio.run(print_models())
```
-## Elevenlabs module
-All of the ElevenLabs models are nested within the elevenlabs module.
-
-
-
## Languages Supported
-We support 29 languages and 100+ accents. Explore [all languages](https://elevenlabs.io/languages).
+We support 32 languages and 100+ accents. Explore [all languages](https://elevenlabs.io/languages).
## Contributing
-While we value open-source contributions to this SDK, this library is generated programmatically. Additions made directly to this library would have to be moved over to our generation code, otherwise they would be overwritten upon the next generated release. Feel free to open a PR as a proof of concept, but know that we will not be able to merge it as-is. We suggest opening an issue first to discuss with us!
+While we value open-source contributions to this SDK, this library is generated programmatically. Additions made directly to this library would have to be moved over to our generation code, otherwise they would be overwritten upon the next generated release. Feel free to open a PR as a proof of concept, but know that we will not be able to merge it as-is. We suggest opening an issue first to discuss with us!
On the other hand, contributions to the README are always very welcome!
diff --git a/assets/module.png b/assets/module.png
deleted file mode 100644
index f5f151d9..00000000
Binary files a/assets/module.png and /dev/null differ
diff --git a/poetry.lock b/poetry.lock
index f51b9619..5dc74a47 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -16,13 +16,13 @@ typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""}
[[package]]
name = "anyio"
-version = "4.4.0"
+version = "4.5.2"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.8"
files = [
- {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"},
- {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"},
+ {file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"},
+ {file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"},
]
[package.dependencies]
@@ -32,118 +32,133 @@ sniffio = ">=1.1"
typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
[package.extras]
-doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
-test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
-trio = ["trio (>=0.23)"]
+doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"]
+trio = ["trio (>=0.26.1)"]
[[package]]
name = "certifi"
-version = "2024.8.30"
+version = "2024.12.14"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
- {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"},
- {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"},
+ {file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"},
+ {file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"},
]
[[package]]
name = "charset-normalizer"
-version = "3.3.2"
+version = "3.4.0"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7.0"
files = [
- {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"},
- {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"},
- {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"},
- {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"},
- {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"},
- {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"},
- {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"},
- {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"},
- {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"},
- {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"},
- {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"},
- {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"},
- {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"},
- {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"},
- {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"},
- {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"},
- {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"},
- {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"},
- {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"},
- {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"},
- {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"},
- {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"},
- {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"},
- {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"},
- {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"},
- {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"},
- {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"},
- {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"},
- {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"},
- {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"},
- {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"},
- {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"},
- {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"},
- {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"},
- {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"},
- {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"},
- {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"},
- {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"},
- {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"},
- {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"},
- {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"},
- {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"},
- {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"},
- {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"},
- {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"},
- {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"},
- {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"},
- {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"},
- {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"},
- {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"},
- {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"},
- {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"},
- {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"},
- {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"},
- {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"},
- {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"},
- {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"},
- {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"},
- {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"},
- {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"},
- {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"},
- {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"},
- {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"},
- {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"},
- {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"},
- {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"},
- {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"},
- {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"},
- {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"},
- {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"},
- {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"},
- {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"},
- {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"},
- {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"},
- {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"},
- {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"},
- {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"},
- {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"},
- {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"},
- {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"},
- {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"},
- {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"},
- {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"},
- {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"},
- {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"},
- {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"},
- {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"},
- {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"},
- {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"},
- {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"},
+ {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"},
+ {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"},
]
[[package]]
@@ -184,13 +199,13 @@ files = [
[[package]]
name = "httpcore"
-version = "1.0.5"
+version = "1.0.7"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
- {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"},
- {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"},
+ {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"},
+ {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"},
]
[package.dependencies]
@@ -201,17 +216,17 @@ h11 = ">=0.13,<0.15"
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
-trio = ["trio (>=0.22.0,<0.26.0)"]
+trio = ["trio (>=0.22.0,<1.0)"]
[[package]]
name = "httpx"
-version = "0.27.2"
+version = "0.28.1"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
files = [
- {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"},
- {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"},
+ {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"},
+ {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"},
]
[package.dependencies]
@@ -219,7 +234,6 @@ anyio = "*"
certifi = "*"
httpcore = "==1.*"
idna = "*"
-sniffio = "*"
[package.extras]
brotli = ["brotli", "brotlicffi"]
@@ -312,13 +326,13 @@ files = [
[[package]]
name = "packaging"
-version = "24.1"
+version = "24.2"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
- {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
+ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
+ {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
]
[[package]]
@@ -336,24 +350,46 @@ files = [
dev = ["pre-commit", "tox"]
testing = ["pytest", "pytest-benchmark"]
+[[package]]
+name = "pyaudio"
+version = "0.2.14"
+description = "Cross-platform audio I/O with PortAudio"
+optional = true
+python-versions = "*"
+files = [
+ {file = "PyAudio-0.2.14-cp310-cp310-win32.whl", hash = "sha256:126065b5e82a1c03ba16e7c0404d8f54e17368836e7d2d92427358ad44fefe61"},
+ {file = "PyAudio-0.2.14-cp310-cp310-win_amd64.whl", hash = "sha256:2a166fc88d435a2779810dd2678354adc33499e9d4d7f937f28b20cc55893e83"},
+ {file = "PyAudio-0.2.14-cp311-cp311-win32.whl", hash = "sha256:506b32a595f8693811682ab4b127602d404df7dfc453b499c91a80d0f7bad289"},
+ {file = "PyAudio-0.2.14-cp311-cp311-win_amd64.whl", hash = "sha256:bbeb01d36a2f472ae5ee5e1451cacc42112986abe622f735bb870a5db77cf903"},
+ {file = "PyAudio-0.2.14-cp312-cp312-win32.whl", hash = "sha256:5fce4bcdd2e0e8c063d835dbe2860dac46437506af509353c7f8114d4bacbd5b"},
+ {file = "PyAudio-0.2.14-cp312-cp312-win_amd64.whl", hash = "sha256:12f2f1ba04e06ff95d80700a78967897a489c05e093e3bffa05a84ed9c0a7fa3"},
+ {file = "PyAudio-0.2.14-cp313-cp313-win32.whl", hash = "sha256:95328285b4dab57ea8c52a4a996cb52be6d629353315be5bfda403d15932a497"},
+ {file = "PyAudio-0.2.14-cp313-cp313-win_amd64.whl", hash = "sha256:692d8c1446f52ed2662120bcd9ddcb5aa2b71f38bda31e58b19fb4672fffba69"},
+ {file = "PyAudio-0.2.14-cp38-cp38-win32.whl", hash = "sha256:858caf35b05c26d8fc62f1efa2e8f53d5fa1a01164842bd622f70ddc41f55000"},
+ {file = "PyAudio-0.2.14-cp38-cp38-win_amd64.whl", hash = "sha256:2dac0d6d675fe7e181ba88f2de88d321059b69abd52e3f4934a8878e03a7a074"},
+ {file = "PyAudio-0.2.14-cp39-cp39-win32.whl", hash = "sha256:f745109634a7c19fa4d6b8b7d6967c3123d988c9ade0cd35d4295ee1acdb53e9"},
+ {file = "PyAudio-0.2.14-cp39-cp39-win_amd64.whl", hash = "sha256:009f357ee5aa6bc8eb19d69921cd30e98c42cddd34210615d592a71d09c4bd57"},
+ {file = "PyAudio-0.2.14.tar.gz", hash = "sha256:78dfff3879b4994d1f4fc6485646a57755c6ee3c19647a491f790a0895bd2f87"},
+]
+
+[package.extras]
+test = ["numpy"]
+
[[package]]
name = "pydantic"
-version = "2.9.2"
+version = "2.10.3"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"},
- {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"},
+ {file = "pydantic-2.10.3-py3-none-any.whl", hash = "sha256:be04d85bbc7b65651c5f8e6b9976ed9c6f41782a55524cef079a34a0bb82144d"},
+ {file = "pydantic-2.10.3.tar.gz", hash = "sha256:cb5ac360ce894ceacd69c403187900a02c4b20b693a9dd1d643e1effab9eadf9"},
]
[package.dependencies]
annotated-types = ">=0.6.0"
-pydantic-core = "2.23.4"
-typing-extensions = [
- {version = ">=4.12.2", markers = "python_version >= \"3.13\""},
- {version = ">=4.6.1", markers = "python_version < \"3.13\""},
-]
+pydantic-core = "2.27.1"
+typing-extensions = ">=4.12.2"
[package.extras]
email = ["email-validator (>=2.0.0)"]
@@ -361,100 +397,111 @@ timezone = ["tzdata"]
[[package]]
name = "pydantic-core"
-version = "2.23.4"
+version = "2.27.1"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"},
- {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"},
- {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"},
- {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"},
- {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"},
- {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"},
- {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"},
- {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"},
- {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"},
- {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"},
- {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"},
- {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"},
- {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"},
- {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"},
- {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"},
- {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"},
- {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"},
- {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"},
- {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"},
- {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"},
- {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"},
- {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"},
- {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"},
- {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"},
- {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"},
- {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"},
- {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"},
- {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"},
- {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"},
- {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"},
- {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"},
- {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"},
- {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"},
- {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"},
- {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"},
- {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"},
- {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"},
- {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"},
- {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"},
- {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"},
- {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"},
- {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"},
- {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"},
- {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"},
- {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"},
- {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"},
- {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"},
- {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"},
- {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"},
- {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"},
- {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"},
- {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"},
- {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"},
- {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"},
- {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"},
- {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"},
- {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"},
- {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"},
- {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"},
- {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"},
- {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"},
- {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"},
- {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"},
- {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"},
- {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"},
- {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"},
- {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"},
- {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"},
- {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"},
- {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"},
- {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"},
- {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"},
- {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"},
- {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"},
- {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"},
- {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"},
- {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"},
- {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"},
- {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"},
- {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"},
- {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"},
- {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"},
- {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"},
- {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"},
- {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"},
- {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"},
- {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"},
- {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"},
- {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"},
+ {file = "pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"},
+ {file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"},
+ {file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"},
+ {file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"},
+ {file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"},
+ {file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"},
+ {file = "pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"},
+ {file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"},
+ {file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"},
+ {file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"},
+ {file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"},
+ {file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"},
+ {file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"},
+ {file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"},
+ {file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"},
+ {file = "pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"},
]
[package.dependencies]
@@ -564,13 +611,13 @@ files = [
[[package]]
name = "six"
-version = "1.16.0"
+version = "1.17.0"
description = "Python 2 and 3 compatibility utilities"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
files = [
- {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
- {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"},
+ {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"},
]
[[package]]
@@ -586,24 +633,65 @@ files = [
[[package]]
name = "tomli"
-version = "2.0.1"
+version = "2.2.1"
description = "A lil' TOML parser"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
+files = [
+ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
+ {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
+ {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"},
+ {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"},
+ {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"},
+ {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"},
+ {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"},
+ {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"},
+ {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"},
+ {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"},
+ {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"},
+ {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"},
+ {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"},
+ {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"},
+ {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"},
+ {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"},
+ {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"},
+ {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"},
+ {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"},
+ {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"},
+ {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"},
+ {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"},
+ {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"},
+ {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"},
+ {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"},
+ {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"},
+ {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"},
+ {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"},
+ {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"},
+ {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"},
+ {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"},
+ {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"},
+]
+
+[[package]]
+name = "types-pyaudio"
+version = "0.2.16.20240516"
+description = "Typing stubs for pyaudio"
+optional = false
+python-versions = ">=3.8"
files = [
- {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
- {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
+ {file = "types-pyaudio-0.2.16.20240516.tar.gz", hash = "sha256:f1c419ccc78b00d26c6c1ae4fcb17f7e4f08af2c2b9b73b12fcbc4a4ffa3a2c7"},
+ {file = "types_pyaudio-0.2.16.20240516-py3-none-any.whl", hash = "sha256:40063f13ae15a422cbd4a2a783653eb3e1091bdd23fc7ab8ca3abc21ad0d13f8"},
]
[[package]]
name = "types-python-dateutil"
-version = "2.9.0.20240906"
+version = "2.9.0.20241206"
description = "Typing stubs for python-dateutil"
optional = false
python-versions = ">=3.8"
files = [
- {file = "types-python-dateutil-2.9.0.20240906.tar.gz", hash = "sha256:9706c3b68284c25adffc47319ecc7947e5bb86b3773f843c73906fd598bc176e"},
- {file = "types_python_dateutil-2.9.0.20240906-py3-none-any.whl", hash = "sha256:27c8cc2d058ccb14946eebcaaa503088f4f6dbc4fb6093d3d456a49aef2753f6"},
+ {file = "types_python_dateutil-2.9.0.20241206-py3-none-any.whl", hash = "sha256:e248a4bc70a486d3e3ec84d0dc30eec3a5f979d6e7ee4123ae043eedbb987f53"},
+ {file = "types_python_dateutil-2.9.0.20241206.tar.gz", hash = "sha256:18f493414c26ffba692a72369fea7a154c502646301ebfe3d56a04b3767284cb"},
]
[[package]]
@@ -636,100 +724,103 @@ zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "websockets"
-version = "13.0.1"
+version = "13.1"
description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)"
optional = false
python-versions = ">=3.8"
files = [
- {file = "websockets-13.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1841c9082a3ba4a05ea824cf6d99570a6a2d8849ef0db16e9c826acb28089e8f"},
- {file = "websockets-13.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c5870b4a11b77e4caa3937142b650fbbc0914a3e07a0cf3131f35c0587489c1c"},
- {file = "websockets-13.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f1d3d1f2eb79fe7b0fb02e599b2bf76a7619c79300fc55f0b5e2d382881d4f7f"},
- {file = "websockets-13.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15c7d62ee071fa94a2fc52c2b472fed4af258d43f9030479d9c4a2de885fd543"},
- {file = "websockets-13.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6724b554b70d6195ba19650fef5759ef11346f946c07dbbe390e039bcaa7cc3d"},
- {file = "websockets-13.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56a952fa2ae57a42ba7951e6b2605e08a24801a4931b5644dfc68939e041bc7f"},
- {file = "websockets-13.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:17118647c0ea14796364299e942c330d72acc4b248e07e639d34b75067b3cdd8"},
- {file = "websockets-13.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64a11aae1de4c178fa653b07d90f2fb1a2ed31919a5ea2361a38760192e1858b"},
- {file = "websockets-13.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0617fd0b1d14309c7eab6ba5deae8a7179959861846cbc5cb528a7531c249448"},
- {file = "websockets-13.0.1-cp310-cp310-win32.whl", hash = "sha256:11f9976ecbc530248cf162e359a92f37b7b282de88d1d194f2167b5e7ad80ce3"},
- {file = "websockets-13.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:c3c493d0e5141ec055a7d6809a28ac2b88d5b878bb22df8c621ebe79a61123d0"},
- {file = "websockets-13.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:699ba9dd6a926f82a277063603fc8d586b89f4cb128efc353b749b641fcddda7"},
- {file = "websockets-13.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cf2fae6d85e5dc384bf846f8243ddaa9197f3a1a70044f59399af001fd1f51d4"},
- {file = "websockets-13.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:52aed6ef21a0f1a2a5e310fb5c42d7555e9c5855476bbd7173c3aa3d8a0302f2"},
- {file = "websockets-13.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8eb2b9a318542153674c6e377eb8cb9ca0fc011c04475110d3477862f15d29f0"},
- {file = "websockets-13.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5df891c86fe68b2c38da55b7aea7095beca105933c697d719f3f45f4220a5e0e"},
- {file = "websockets-13.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fac2d146ff30d9dd2fcf917e5d147db037a5c573f0446c564f16f1f94cf87462"},
- {file = "websockets-13.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b8ac5b46fd798bbbf2ac6620e0437c36a202b08e1f827832c4bf050da081b501"},
- {file = "websockets-13.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:46af561eba6f9b0848b2c9d2427086cabadf14e0abdd9fde9d72d447df268418"},
- {file = "websockets-13.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b5a06d7f60bc2fc378a333978470dfc4e1415ee52f5f0fce4f7853eb10c1e9df"},
- {file = "websockets-13.0.1-cp311-cp311-win32.whl", hash = "sha256:556e70e4f69be1082e6ef26dcb70efcd08d1850f5d6c5f4f2bcb4e397e68f01f"},
- {file = "websockets-13.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:67494e95d6565bf395476e9d040037ff69c8b3fa356a886b21d8422ad86ae075"},
- {file = "websockets-13.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f9c9e258e3d5efe199ec23903f5da0eeaad58cf6fccb3547b74fd4750e5ac47a"},
- {file = "websockets-13.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6b41a1b3b561f1cba8321fb32987552a024a8f67f0d05f06fcf29f0090a1b956"},
- {file = "websockets-13.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f73e676a46b0fe9426612ce8caeca54c9073191a77c3e9d5c94697aef99296af"},
- {file = "websockets-13.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f613289f4a94142f914aafad6c6c87903de78eae1e140fa769a7385fb232fdf"},
- {file = "websockets-13.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f52504023b1480d458adf496dc1c9e9811df4ba4752f0bc1f89ae92f4f07d0c"},
- {file = "websockets-13.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:139add0f98206cb74109faf3611b7783ceafc928529c62b389917a037d4cfdf4"},
- {file = "websockets-13.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:47236c13be337ef36546004ce8c5580f4b1150d9538b27bf8a5ad8edf23ccfab"},
- {file = "websockets-13.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c44ca9ade59b2e376612df34e837013e2b273e6c92d7ed6636d0556b6f4db93d"},
- {file = "websockets-13.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9bbc525f4be3e51b89b2a700f5746c2a6907d2e2ef4513a8daafc98198b92237"},
- {file = "websockets-13.0.1-cp312-cp312-win32.whl", hash = "sha256:3624fd8664f2577cf8de996db3250662e259bfbc870dd8ebdcf5d7c6ac0b5185"},
- {file = "websockets-13.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0513c727fb8adffa6d9bf4a4463b2bade0186cbd8c3604ae5540fae18a90cb99"},
- {file = "websockets-13.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1ee4cc030a4bdab482a37462dbf3ffb7e09334d01dd37d1063be1136a0d825fa"},
- {file = "websockets-13.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dbb0b697cc0655719522406c059eae233abaa3243821cfdfab1215d02ac10231"},
- {file = "websockets-13.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:acbebec8cb3d4df6e2488fbf34702cbc37fc39ac7abf9449392cefb3305562e9"},
- {file = "websockets-13.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63848cdb6fcc0bf09d4a155464c46c64ffdb5807ede4fb251da2c2692559ce75"},
- {file = "websockets-13.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:872afa52a9f4c414d6955c365b6588bc4401272c629ff8321a55f44e3f62b553"},
- {file = "websockets-13.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05e70fec7c54aad4d71eae8e8cab50525e899791fc389ec6f77b95312e4e9920"},
- {file = "websockets-13.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e82db3756ccb66266504f5a3de05ac6b32f287faacff72462612120074103329"},
- {file = "websockets-13.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4e85f46ce287f5c52438bb3703d86162263afccf034a5ef13dbe4318e98d86e7"},
- {file = "websockets-13.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f3fea72e4e6edb983908f0db373ae0732b275628901d909c382aae3b592589f2"},
- {file = "websockets-13.0.1-cp313-cp313-win32.whl", hash = "sha256:254ecf35572fca01a9f789a1d0f543898e222f7b69ecd7d5381d8d8047627bdb"},
- {file = "websockets-13.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:ca48914cdd9f2ccd94deab5bcb5ac98025a5ddce98881e5cce762854a5de330b"},
- {file = "websockets-13.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b74593e9acf18ea5469c3edaa6b27fa7ecf97b30e9dabd5a94c4c940637ab96e"},
- {file = "websockets-13.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:132511bfd42e77d152c919147078460c88a795af16b50e42a0bd14f0ad71ddd2"},
- {file = "websockets-13.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:165bedf13556f985a2aa064309baa01462aa79bf6112fbd068ae38993a0e1f1b"},
- {file = "websockets-13.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e801ca2f448850685417d723ec70298feff3ce4ff687c6f20922c7474b4746ae"},
- {file = "websockets-13.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30d3a1f041360f029765d8704eae606781e673e8918e6b2c792e0775de51352f"},
- {file = "websockets-13.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67648f5e50231b5a7f6d83b32f9c525e319f0ddc841be0de64f24928cd75a603"},
- {file = "websockets-13.0.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:4f0426d51c8f0926a4879390f53c7f5a855e42d68df95fff6032c82c888b5f36"},
- {file = "websockets-13.0.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ef48e4137e8799998a343706531e656fdec6797b80efd029117edacb74b0a10a"},
- {file = "websockets-13.0.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:249aab278810bee585cd0d4de2f08cfd67eed4fc75bde623be163798ed4db2eb"},
- {file = "websockets-13.0.1-cp38-cp38-win32.whl", hash = "sha256:06c0a667e466fcb56a0886d924b5f29a7f0886199102f0a0e1c60a02a3751cb4"},
- {file = "websockets-13.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1f3cf6d6ec1142412d4535adabc6bd72a63f5f148c43fe559f06298bc21953c9"},
- {file = "websockets-13.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1fa082ea38d5de51dd409434edc27c0dcbd5fed2b09b9be982deb6f0508d25bc"},
- {file = "websockets-13.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4a365bcb7be554e6e1f9f3ed64016e67e2fa03d7b027a33e436aecf194febb63"},
- {file = "websockets-13.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:10a0dc7242215d794fb1918f69c6bb235f1f627aaf19e77f05336d147fce7c37"},
- {file = "websockets-13.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59197afd478545b1f73367620407b0083303569c5f2d043afe5363676f2697c9"},
- {file = "websockets-13.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d20516990d8ad557b5abeb48127b8b779b0b7e6771a265fa3e91767596d7d97"},
- {file = "websockets-13.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1a2e272d067030048e1fe41aa1ec8cfbbaabce733b3d634304fa2b19e5c897f"},
- {file = "websockets-13.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ad327ac80ba7ee61da85383ca8822ff808ab5ada0e4a030d66703cc025b021c4"},
- {file = "websockets-13.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:518f90e6dd089d34eaade01101fd8a990921c3ba18ebbe9b0165b46ebff947f0"},
- {file = "websockets-13.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:68264802399aed6fe9652e89761031acc734fc4c653137a5911c2bfa995d6d6d"},
- {file = "websockets-13.0.1-cp39-cp39-win32.whl", hash = "sha256:a5dc0c42ded1557cc7c3f0240b24129aefbad88af4f09346164349391dea8e58"},
- {file = "websockets-13.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:b448a0690ef43db5ef31b3a0d9aea79043882b4632cfc3eaab20105edecf6097"},
- {file = "websockets-13.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:faef9ec6354fe4f9a2c0bbb52fb1ff852effc897e2a4501e25eb3a47cb0a4f89"},
- {file = "websockets-13.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:03d3f9ba172e0a53e37fa4e636b86cc60c3ab2cfee4935e66ed1d7acaa4625ad"},
- {file = "websockets-13.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d450f5a7a35662a9b91a64aefa852f0c0308ee256122f5218a42f1d13577d71e"},
- {file = "websockets-13.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f55b36d17ac50aa8a171b771e15fbe1561217510c8768af3d546f56c7576cdc"},
- {file = "websockets-13.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14b9c006cac63772b31abbcd3e3abb6228233eec966bf062e89e7fa7ae0b7333"},
- {file = "websockets-13.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b79915a1179a91f6c5f04ece1e592e2e8a6bd245a0e45d12fd56b2b59e559a32"},
- {file = "websockets-13.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f40de079779acbcdbb6ed4c65af9f018f8b77c5ec4e17a4b737c05c2db554491"},
- {file = "websockets-13.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80e4ba642fc87fa532bac07e5ed7e19d56940b6af6a8c61d4429be48718a380f"},
- {file = "websockets-13.0.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a02b0161c43cc9e0232711eff846569fad6ec836a7acab16b3cf97b2344c060"},
- {file = "websockets-13.0.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6aa74a45d4cdc028561a7d6ab3272c8b3018e23723100b12e58be9dfa5a24491"},
- {file = "websockets-13.0.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00fd961943b6c10ee6f0b1130753e50ac5dcd906130dcd77b0003c3ab797d026"},
- {file = "websockets-13.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d93572720d781331fb10d3da9ca1067817d84ad1e7c31466e9f5e59965618096"},
- {file = "websockets-13.0.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:71e6e5a3a3728886caee9ab8752e8113670936a193284be9d6ad2176a137f376"},
- {file = "websockets-13.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c4a6343e3b0714e80da0b0893543bf9a5b5fa71b846ae640e56e9abc6fbc4c83"},
- {file = "websockets-13.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a678532018e435396e37422a95e3ab87f75028ac79570ad11f5bf23cd2a7d8c"},
- {file = "websockets-13.0.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6716c087e4aa0b9260c4e579bb82e068f84faddb9bfba9906cb87726fa2e870"},
- {file = "websockets-13.0.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e33505534f3f673270dd67f81e73550b11de5b538c56fe04435d63c02c3f26b5"},
- {file = "websockets-13.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:acab3539a027a85d568c2573291e864333ec9d912675107d6efceb7e2be5d980"},
- {file = "websockets-13.0.1-py3-none-any.whl", hash = "sha256:b80f0c51681c517604152eb6a572f5a9378f877763231fddb883ba2f968e8817"},
- {file = "websockets-13.0.1.tar.gz", hash = "sha256:4d6ece65099411cfd9a48d13701d7438d9c34f479046b34c50ff60bb8834e43e"},
+ {file = "websockets-13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f48c749857f8fb598fb890a75f540e3221d0976ed0bf879cf3c7eef34151acee"},
+ {file = "websockets-13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c7e72ce6bda6fb9409cc1e8164dd41d7c91466fb599eb047cfda72fe758a34a7"},
+ {file = "websockets-13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f779498eeec470295a2b1a5d97aa1bc9814ecd25e1eb637bd9d1c73a327387f6"},
+ {file = "websockets-13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676df3fe46956fbb0437d8800cd5f2b6d41143b6e7e842e60554398432cf29b"},
+ {file = "websockets-13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7affedeb43a70351bb811dadf49493c9cfd1ed94c9c70095fd177e9cc1541fa"},
+ {file = "websockets-13.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1971e62d2caa443e57588e1d82d15f663b29ff9dfe7446d9964a4b6f12c1e700"},
+ {file = "websockets-13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5f2e75431f8dc4a47f31565a6e1355fb4f2ecaa99d6b89737527ea917066e26c"},
+ {file = "websockets-13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58cf7e75dbf7e566088b07e36ea2e3e2bd5676e22216e4cad108d4df4a7402a0"},
+ {file = "websockets-13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c90d6dec6be2c7d03378a574de87af9b1efea77d0c52a8301dd831ece938452f"},
+ {file = "websockets-13.1-cp310-cp310-win32.whl", hash = "sha256:730f42125ccb14602f455155084f978bd9e8e57e89b569b4d7f0f0c17a448ffe"},
+ {file = "websockets-13.1-cp310-cp310-win_amd64.whl", hash = "sha256:5993260f483d05a9737073be197371940c01b257cc45ae3f1d5d7adb371b266a"},
+ {file = "websockets-13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:61fc0dfcda609cda0fc9fe7977694c0c59cf9d749fbb17f4e9483929e3c48a19"},
+ {file = "websockets-13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ceec59f59d092c5007e815def4ebb80c2de330e9588e101cf8bd94c143ec78a5"},
+ {file = "websockets-13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1dca61c6db1166c48b95198c0b7d9c990b30c756fc2923cc66f68d17dc558fd"},
+ {file = "websockets-13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308e20f22c2c77f3f39caca508e765f8725020b84aa963474e18c59accbf4c02"},
+ {file = "websockets-13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d516c325e6540e8a57b94abefc3459d7dab8ce52ac75c96cad5549e187e3a7"},
+ {file = "websockets-13.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c6e35319b46b99e168eb98472d6c7d8634ee37750d7693656dc766395df096"},
+ {file = "websockets-13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5f9fee94ebafbc3117c30be1844ed01a3b177bb6e39088bc6b2fa1dc15572084"},
+ {file = "websockets-13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7c1e90228c2f5cdde263253fa5db63e6653f1c00e7ec64108065a0b9713fa1b3"},
+ {file = "websockets-13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6548f29b0e401eea2b967b2fdc1c7c7b5ebb3eeb470ed23a54cd45ef078a0db9"},
+ {file = "websockets-13.1-cp311-cp311-win32.whl", hash = "sha256:c11d4d16e133f6df8916cc5b7e3e96ee4c44c936717d684a94f48f82edb7c92f"},
+ {file = "websockets-13.1-cp311-cp311-win_amd64.whl", hash = "sha256:d04f13a1d75cb2b8382bdc16ae6fa58c97337253826dfe136195b7f89f661557"},
+ {file = "websockets-13.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9d75baf00138f80b48f1eac72ad1535aac0b6461265a0bcad391fc5aba875cfc"},
+ {file = "websockets-13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9b6f347deb3dcfbfde1c20baa21c2ac0751afaa73e64e5b693bb2b848efeaa49"},
+ {file = "websockets-13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de58647e3f9c42f13f90ac7e5f58900c80a39019848c5547bc691693098ae1bd"},
+ {file = "websockets-13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1b54689e38d1279a51d11e3467dd2f3a50f5f2e879012ce8f2d6943f00e83f0"},
+ {file = "websockets-13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf1781ef73c073e6b0f90af841aaf98501f975d306bbf6221683dd594ccc52b6"},
+ {file = "websockets-13.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d23b88b9388ed85c6faf0e74d8dec4f4d3baf3ecf20a65a47b836d56260d4b9"},
+ {file = "websockets-13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3c78383585f47ccb0fcf186dcb8a43f5438bd7d8f47d69e0b56f71bf431a0a68"},
+ {file = "websockets-13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d6d300f8ec35c24025ceb9b9019ae9040c1ab2f01cddc2bcc0b518af31c75c14"},
+ {file = "websockets-13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9dcaf8b0cc72a392760bb8755922c03e17a5a54e08cca58e8b74f6902b433cf"},
+ {file = "websockets-13.1-cp312-cp312-win32.whl", hash = "sha256:2f85cf4f2a1ba8f602298a853cec8526c2ca42a9a4b947ec236eaedb8f2dc80c"},
+ {file = "websockets-13.1-cp312-cp312-win_amd64.whl", hash = "sha256:38377f8b0cdeee97c552d20cf1865695fcd56aba155ad1b4ca8779a5b6ef4ac3"},
+ {file = "websockets-13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a9ab1e71d3d2e54a0aa646ab6d4eebfaa5f416fe78dfe4da2839525dc5d765c6"},
+ {file = "websockets-13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b9d7439d7fab4dce00570bb906875734df13d9faa4b48e261c440a5fec6d9708"},
+ {file = "websockets-13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327b74e915cf13c5931334c61e1a41040e365d380f812513a255aa804b183418"},
+ {file = "websockets-13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:325b1ccdbf5e5725fdcb1b0e9ad4d2545056479d0eee392c291c1bf76206435a"},
+ {file = "websockets-13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:346bee67a65f189e0e33f520f253d5147ab76ae42493804319b5716e46dddf0f"},
+ {file = "websockets-13.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a0fa841646320ec0d3accdff5b757b06e2e5c86ba32af2e0815c96c7a603c5"},
+ {file = "websockets-13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:18503d2c5f3943e93819238bf20df71982d193f73dcecd26c94514f417f6b135"},
+ {file = "websockets-13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9cd1af7e18e5221d2878378fbc287a14cd527fdd5939ed56a18df8a31136bb2"},
+ {file = "websockets-13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:70c5be9f416aa72aab7a2a76c90ae0a4fe2755c1816c153c1a2bcc3333ce4ce6"},
+ {file = "websockets-13.1-cp313-cp313-win32.whl", hash = "sha256:624459daabeb310d3815b276c1adef475b3e6804abaf2d9d2c061c319f7f187d"},
+ {file = "websockets-13.1-cp313-cp313-win_amd64.whl", hash = "sha256:c518e84bb59c2baae725accd355c8dc517b4a3ed8db88b4bc93c78dae2974bf2"},
+ {file = "websockets-13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c7934fd0e920e70468e676fe7f1b7261c1efa0d6c037c6722278ca0228ad9d0d"},
+ {file = "websockets-13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:149e622dc48c10ccc3d2760e5f36753db9cacf3ad7bc7bbbfd7d9c819e286f23"},
+ {file = "websockets-13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a569eb1b05d72f9bce2ebd28a1ce2054311b66677fcd46cf36204ad23acead8c"},
+ {file = "websockets-13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95df24ca1e1bd93bbca51d94dd049a984609687cb2fb08a7f2c56ac84e9816ea"},
+ {file = "websockets-13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8dbb1bf0c0a4ae8b40bdc9be7f644e2f3fb4e8a9aca7145bfa510d4a374eeb7"},
+ {file = "websockets-13.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:035233b7531fb92a76beefcbf479504db8c72eb3bff41da55aecce3a0f729e54"},
+ {file = "websockets-13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e4450fc83a3df53dec45922b576e91e94f5578d06436871dce3a6be38e40f5db"},
+ {file = "websockets-13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:463e1c6ec853202dd3657f156123d6b4dad0c546ea2e2e38be2b3f7c5b8e7295"},
+ {file = "websockets-13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6d6855bbe70119872c05107e38fbc7f96b1d8cb047d95c2c50869a46c65a8e96"},
+ {file = "websockets-13.1-cp38-cp38-win32.whl", hash = "sha256:204e5107f43095012b00f1451374693267adbb832d29966a01ecc4ce1db26faf"},
+ {file = "websockets-13.1-cp38-cp38-win_amd64.whl", hash = "sha256:485307243237328c022bc908b90e4457d0daa8b5cf4b3723fd3c4a8012fce4c6"},
+ {file = "websockets-13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9b37c184f8b976f0c0a231a5f3d6efe10807d41ccbe4488df8c74174805eea7d"},
+ {file = "websockets-13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:163e7277e1a0bd9fb3c8842a71661ad19c6aa7bb3d6678dc7f89b17fbcc4aeb7"},
+ {file = "websockets-13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4b889dbd1342820cc210ba44307cf75ae5f2f96226c0038094455a96e64fb07a"},
+ {file = "websockets-13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:586a356928692c1fed0eca68b4d1c2cbbd1ca2acf2ac7e7ebd3b9052582deefa"},
+ {file = "websockets-13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7bd6abf1e070a6b72bfeb71049d6ad286852e285f146682bf30d0296f5fbadfa"},
+ {file = "websockets-13.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2aad13a200e5934f5a6767492fb07151e1de1d6079c003ab31e1823733ae79"},
+ {file = "websockets-13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:df01aea34b6e9e33572c35cd16bae5a47785e7d5c8cb2b54b2acdb9678315a17"},
+ {file = "websockets-13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e54affdeb21026329fb0744ad187cf812f7d3c2aa702a5edb562b325191fcab6"},
+ {file = "websockets-13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ef8aa8bdbac47f4968a5d66462a2a0935d044bf35c0e5a8af152d58516dbeb5"},
+ {file = "websockets-13.1-cp39-cp39-win32.whl", hash = "sha256:deeb929efe52bed518f6eb2ddc00cc496366a14c726005726ad62c2dd9017a3c"},
+ {file = "websockets-13.1-cp39-cp39-win_amd64.whl", hash = "sha256:7c65ffa900e7cc958cd088b9a9157a8141c991f8c53d11087e6fb7277a03f81d"},
+ {file = "websockets-13.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5dd6da9bec02735931fccec99d97c29f47cc61f644264eb995ad6c0c27667238"},
+ {file = "websockets-13.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2510c09d8e8df777177ee3d40cd35450dc169a81e747455cc4197e63f7e7bfe5"},
+ {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1c3cf67185543730888b20682fb186fc8d0fa6f07ccc3ef4390831ab4b388d9"},
+ {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcc03c8b72267e97b49149e4863d57c2d77f13fae12066622dc78fe322490fe6"},
+ {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:004280a140f220c812e65f36944a9ca92d766b6cc4560be652a0a3883a79ed8a"},
+ {file = "websockets-13.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e2620453c075abeb0daa949a292e19f56de518988e079c36478bacf9546ced23"},
+ {file = "websockets-13.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9156c45750b37337f7b0b00e6248991a047be4aa44554c9886fe6bdd605aab3b"},
+ {file = "websockets-13.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80c421e07973a89fbdd93e6f2003c17d20b69010458d3a8e37fb47874bd67d51"},
+ {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82d0ba76371769d6a4e56f7e83bb8e81846d17a6190971e38b5de108bde9b0d7"},
+ {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9875a0143f07d74dc5e1ded1c4581f0d9f7ab86c78994e2ed9e95050073c94d"},
+ {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11e38ad8922c7961447f35c7b17bffa15de4d17c70abd07bfbe12d6faa3e027"},
+ {file = "websockets-13.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4059f790b6ae8768471cddb65d3c4fe4792b0ab48e154c9f0a04cefaabcd5978"},
+ {file = "websockets-13.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:25c35bf84bf7c7369d247f0b8cfa157f989862c49104c5cf85cb5436a641d93e"},
+ {file = "websockets-13.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:83f91d8a9bb404b8c2c41a707ac7f7f75b9442a0a876df295de27251a856ad09"},
+ {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a43cfdcddd07f4ca2b1afb459824dd3c6d53a51410636a2c7fc97b9a8cf4842"},
+ {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a2ef1381632a2f0cb4efeff34efa97901c9fbc118e01951ad7cfc10601a9bb"},
+ {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459bf774c754c35dbb487360b12c5727adab887f1622b8aed5755880a21c4a20"},
+ {file = "websockets-13.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:95858ca14a9f6fa8413d29e0a585b31b278388aa775b8a81fa24830123874678"},
+ {file = "websockets-13.1-py3-none-any.whl", hash = "sha256:a9a396a6ad26130cdae92ae10c36af09d9bfe6cafe69670fd3b6da9b07b4044f"},
+ {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"},
]
+[extras]
+pyaudio = ["pyaudio"]
+
[metadata]
lock-version = "2.0"
python-versions = "^3.8"
-content-hash = "a53420244251981fe047bbb97d6005fffb6b63447718cc640562750fffcc8c75"
+content-hash = "af57dd0aacaa752d61d29db9f958f2d8d0950d51ab868c925a2a973689de5ff7"
diff --git a/pyproject.toml b/pyproject.toml
index c9d0bb26..a4d95ad1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,9 @@
+[project]
+name = "elevenlabs"
+
[tool.poetry]
name = "elevenlabs"
-version = "1.9.0"
+version = "1.50.3"
description = ""
readme = "README.md"
authors = []
@@ -34,6 +37,7 @@ Repository = 'https://github.com/elevenlabs/elevenlabs-python'
[tool.poetry.dependencies]
python = "^3.8"
httpx = ">=0.21.2"
+pyaudio = { version = ">=0.2.14", optional = true}
pydantic = ">= 1.9.2"
pydantic-core = "^2.18.2"
requests = ">=2.20"
@@ -47,6 +51,7 @@ pytest-asyncio = "^0.23.5"
python-dateutil = "^2.9.0"
types-python-dateutil = "^2.9.0.20240316"
ruff = "^0.5.6"
+types-pyaudio = "^0.2.16.20240516"
[tool.pytest.ini_options]
testpaths = [ "tests" ]
@@ -62,3 +67,6 @@ line-length = 120
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
+
+[tool.poetry.extras]
+pyaudio=["pyaudio"]
diff --git a/reference.md b/reference.md
index d5a0de04..3b867cd7 100644
--- a/reference.md
+++ b/reference.md
@@ -32,10 +32,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.history.get_all(
- page_size=1,
- voice_id="pMsXgVXv3BLzUgSXRplE",
-)
+client.history.get_all()
```
@@ -75,6 +72,22 @@ client.history.get_all(
-
+**search:** `typing.Optional[str]` — search term used for filtering
+
+
+
+
+
+-
+
+**source:** `typing.Optional[HistoryGetAllRequestSource]` — Source of the generated history item
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -120,7 +133,7 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.history.get(
- history_item_id="ja9xsmfGhxYcymxGcOGB",
+ history_item_id="HISTORY_ITEM_ID",
)
```
@@ -190,7 +203,7 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.history.delete(
- history_item_id="ja9xsmfGhxYcymxGcOGB",
+ history_item_id="HISTORY_ITEM_ID",
)
```
@@ -260,7 +273,7 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.history.get_audio(
- history_item_id="ja9xsmfGhxYcymxGcOGB",
+ history_item_id="HISTORY_ITEM_ID",
)
```
@@ -285,7 +298,7 @@ client.history.get_audio(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
@@ -330,7 +343,7 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.history.download(
- history_item_ids=["ja9xsmfGhxYcymxGcOGB"],
+ history_item_ids=["HISTORY_ITEM_ID"],
)
```
@@ -409,9 +422,7 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.text_to_sound_effects.convert(
- text="string",
- duration_seconds=1.1,
- prompt_influence=1.1,
+ text="Spacious braam suitable for high-impact movie trailer moments",
)
```
@@ -452,7 +463,7 @@ client.text_to_sound_effects.convert(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
@@ -465,147 +476,7 @@ client.text_to_sound_effects.convert(
## AudioIsolation
-client.audio_isolation.audio_isolation(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-Removes background noise from audio
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from elevenlabs import ElevenLabs
-
-client = ElevenLabs(
- api_key="YOUR_API_KEY",
-)
-client.audio_isolation.audio_isolation()
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**audio:** `from __future__ import annotations
-
-core.File` — See core.File for more documentation
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
-client.audio_isolation.audio_isolation_stream(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-Removes background noise from audio and streams the result
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from elevenlabs import ElevenLabs
-
-client = ElevenLabs(
- api_key="YOUR_API_KEY",
-)
-client.audio_isolation.audio_isolation_stream()
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**audio:** `from __future__ import annotations
-
-core.File` — See core.File for more documentation
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
-## Samples
+## samples
client.samples.delete(...)
-
@@ -639,8 +510,8 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.samples.delete(
- voice_id="ja9xsmfGhxYcymxGcOGB",
- sample_id="pMsXgVXv3BLzUgSXRplE",
+ voice_id="VOICE_ID",
+ sample_id="SAMPLE_ID",
)
```
@@ -718,8 +589,8 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.samples.get_audio(
- voice_id="ja9xsmfGhxYcymxGcOGB",
- sample_id="pMsXgVXv3BLzUgSXRplE",
+ voice_id="VOICE_ID",
+ sample_id="SAMPLE_ID",
)
```
@@ -752,7 +623,7 @@ client.samples.get_audio(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
@@ -792,21 +663,16 @@ Converts text into speech using a voice of your choice and returns audio.
-
```python
-from elevenlabs import ElevenLabs, VoiceSettings
+from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.text_to_speech.convert(
- voice_id="pMsXgVXv3BLzUgSXRplE",
- optimize_streaming_latency="0",
- output_format="mp3_22050_32",
- text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”",
- voice_settings=VoiceSettings(
- stability=0.1,
- similarity_boost=0.3,
- style=0.2,
- ),
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ text="The first move is what sets everything in motion.",
+ model_id="eleven_multilingual_v2",
)
```
@@ -847,7 +713,16 @@ client.text_to_speech.convert(
-
-**optimize_streaming_latency:** `typing.Optional[OptimizeStreamingLatency]` — You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+**optimize_streaming_latency:** `typing.Optional[int]`
+
+You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+0 - default mode (no latency optimizations)
+1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+3 - max latency optimizations
+4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates).
+
+Defaults to None.
@@ -895,7 +770,7 @@ client.text_to_speech.convert(
-
-**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
@@ -935,7 +810,23 @@ client.text_to_speech.convert(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**use_pvc_as_ivc:** `typing.Optional[bool]` — If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
+
+
+
+
+
+-
+
+**apply_text_normalization:** `typing.Optional[BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
@@ -980,8 +871,10 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.text_to_speech.convert_with_timestamps(
- voice_id="21m00Tcm4TlvDq8ikWAM",
- text="text",
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ text="The first move is what sets everything in motion.",
+ model_id="eleven_multilingual_v2",
)
```
@@ -1022,7 +915,16 @@ client.text_to_speech.convert_with_timestamps(
-
-**optimize_streaming_latency:** `typing.Optional[OptimizeStreamingLatency]` — You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+**optimize_streaming_latency:** `typing.Optional[int]`
+
+You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+0 - default mode (no latency optimizations)
+1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+3 - max latency optimizations
+4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates).
+
+Defaults to None.
@@ -1070,7 +972,7 @@ client.text_to_speech.convert_with_timestamps(
-
-**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
@@ -1110,6 +1012,24 @@ client.text_to_speech.convert_with_timestamps(
-
+**use_pvc_as_ivc:** `typing.Optional[bool]` — If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
+
+
+
+
+
+-
+
+**apply_text_normalization:** `typing.Optional[
+ BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization
+]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -1149,21 +1069,16 @@ Converts text into speech using a voice of your choice and returns audio as an a
-
```python
-from elevenlabs import ElevenLabs, VoiceSettings
+from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.text_to_speech.convert_as_stream(
- voice_id="pMsXgVXv3BLzUgSXRplE",
- optimize_streaming_latency="0",
- output_format="mp3_22050_32",
- text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”",
- voice_settings=VoiceSettings(
- stability=0.1,
- similarity_boost=0.3,
- style=0.2,
- ),
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ text="The first move is what sets everything in motion.",
+ model_id="eleven_multilingual_v2",
)
```
@@ -1204,7 +1119,16 @@ client.text_to_speech.convert_as_stream(
-
-**optimize_streaming_latency:** `typing.Optional[OptimizeStreamingLatency]` — You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+**optimize_streaming_latency:** `typing.Optional[int]`
+
+You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+0 - default mode (no latency optimizations)
+1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+3 - max latency optimizations
+4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates).
+
+Defaults to None.
@@ -1252,7 +1176,7 @@ client.text_to_speech.convert_as_stream(
-
-**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
@@ -1292,7 +1216,25 @@ client.text_to_speech.convert_as_stream(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**use_pvc_as_ivc:** `typing.Optional[bool]` — If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
+
+
+
+
+
+-
+
+**apply_text_normalization:** `typing.Optional[
+ BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization
+]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
@@ -1336,10 +1278,14 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.text_to_speech.stream_with_timestamps(
- voice_id="21m00Tcm4TlvDq8ikWAM",
- text="text",
+response = client.text_to_speech.stream_with_timestamps(
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ text="The first move is what sets everything in motion.",
+ model_id="eleven_multilingual_v2",
)
+for chunk in response:
+    print(chunk)
```
@@ -1379,7 +1325,16 @@ client.text_to_speech.stream_with_timestamps(
-
-**optimize_streaming_latency:** `typing.Optional[OptimizeStreamingLatency]` — You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+**optimize_streaming_latency:** `typing.Optional[int]`
+
+You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+0 - default mode (no latency optimizations)
+1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+3 - max latency optimizations
+4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates).
+
+Defaults to None.
@@ -1427,7 +1382,7 @@ client.text_to_speech.stream_with_timestamps(
-
-**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
@@ -1467,6 +1422,24 @@ client.text_to_speech.stream_with_timestamps(
-
+**use_pvc_as_ivc:** `typing.Optional[bool]` — If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
+
+
+
+
+
+-
+
+**apply_text_normalization:** `typing.Optional[
+ BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization
+]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -1513,10 +1486,9 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.speech_to_speech.convert(
- voice_id="string",
- enable_logging=True,
- optimize_streaming_latency="0",
- output_format="mp3_22050_32",
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ model_id="eleven_multilingual_sts_v2",
)
```
@@ -1559,7 +1531,16 @@ core.File` — See core.File for more documentation
-
-**optimize_streaming_latency:** `typing.Optional[OptimizeStreamingLatency]` — You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+**optimize_streaming_latency:** `typing.Optional[int]`
+
+You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+0 - default mode (no latency optimizations)
+1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+3 - max latency optimizations
+4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates).
+
+Defaults to None.
@@ -1591,7 +1572,7 @@ core.File` — See core.File for more documentation
-
-**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
@@ -1599,7 +1580,15 @@ core.File` — See core.File for more documentation
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**remove_background_noise:** `typing.Optional[bool]` — If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
@@ -1644,10 +1633,9 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.speech_to_speech.convert_as_stream(
- voice_id="string",
- enable_logging="0",
- optimize_streaming_latency="mp3_22050_32",
- output_format="string",
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ model_id="eleven_multilingual_sts_v2",
)
```
@@ -1682,7 +1670,7 @@ core.File` — See core.File for more documentation
-
-**enable_logging:** `typing.Optional[OptimizeStreamingLatency]` — You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+**enable_logging:** `typing.Optional[bool]` — When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
@@ -1690,7 +1678,16 @@ core.File` — See core.File for more documentation
-
-**optimize_streaming_latency:** `typing.Optional[OutputFormat]` — The output format of the generated audio.
+**optimize_streaming_latency:** `typing.Optional[int]`
+
+You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+0 - default mode (no latency optimizations)
+1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+3 - max latency optimizations
+4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates).
+
+Defaults to None.
@@ -1698,20 +1695,7 @@ core.File` — See core.File for more documentation
-
-**output_format:** `typing.Optional[str]`
-
-Output format of the generated audio. Must be one of:
-mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps.
-mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps.
-mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps.
-mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps.
-mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps.
-mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above.
-pcm_16000 - PCM format (S16LE) with 16kHz sample rate.
-pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate.
-pcm_24000 - PCM format (S16LE) with 24kHz sample rate.
-pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above.
-ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs.
+**output_format:** `typing.Optional[OutputFormat]` — The output format of the generated audio.
@@ -1735,7 +1719,7 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
-
-**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
@@ -1743,7 +1727,15 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**remove_background_noise:** `typing.Optional[bool]` — If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
@@ -1878,7 +1870,7 @@ client.voice_generation.generate(
-
-**accent:** `str` — Category code corresponding to the accent of the generated voice. Possible values: american, british, african, australian, indian.
+**accent:** `str` — Category code corresponding to the accent of the generated voice. Possible values: british, american, african, australian, indian.
@@ -1910,7 +1902,7 @@ client.voice_generation.generate(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
@@ -1998,6 +1990,219 @@ client.voice_generation.create_a_previously_generated_voice(
-
+**played_not_selected_voice_ids:** `typing.Optional[typing.Sequence[str]]` — List of voice ids that the user has played but not selected. Used for RLHF.
+
+
+
+
+
+-
+
+**labels:** `typing.Optional[typing.Dict[str, str]]` — Optional, metadata to add to the created voice. Defaults to None.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## TextToVoice
+client.text_to_voice.create_previews(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Generate a custom voice based on voice description. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. If you like a voice preview and want to create the voice, call /v1/text-to-voice/create-voice-from-preview with the generated_voice_id to create the voice.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.text_to_voice.create_previews(
+ voice_description="A sassy little squeaky mouse",
+ text="Every act of kindness, no matter how small, carries value and can make a difference, as no gesture of goodwill is ever wasted.",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**voice_description:** `str` — Description to use for the created voice.
+
+
+
+
+
+-
+
+**text:** `str` — Text to generate, text length has to be between 100 and 1000.
+
+
+
+
+
+-
+
+**output_format:** `typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat]`
+
+Output format of the generated audio. Must be one of:
+mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps.
+mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps.
+mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps.
+mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps.
+mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps.
+mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above.
+pcm_16000 - PCM format (S16LE) with 16kHz sample rate.
+pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate.
+pcm_24000 - PCM format (S16LE) with 24kHz sample rate.
+pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above.
+ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs.
+
+
+
+
+
+-
+
+**auto_generate_text:** `typing.Optional[bool]` — Whether to automatically generate a text suitable for the voice description.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.text_to_voice.create_voice_from_preview(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Create a voice from a previously generated voice preview. This endpoint should be called after you have fetched a generated_voice_id using /v1/text-to-voice/create-previews.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.text_to_voice.create_voice_from_preview(
+ voice_name="Little squeaky mouse",
+ voice_description="A sassy little squeaky mouse",
+ generated_voice_id="37HceQefKmEi3bGovXjL",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**voice_name:** `str` — Name to use for the created voice.
+
+
+
+
+
+-
+
+**voice_description:** `str` — Description to use for the created voice.
+
+
+
+
+
+-
+
+**generated_voice_id:** `str` — The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if you don't have one yet.
+
+
+
+
+
+-
+
**labels:** `typing.Optional[typing.Dict[str, str]]` — Optional, metadata to add to the created voice. Defaults to None.
@@ -2006,6 +2211,14 @@ client.voice_generation.create_a_previously_generated_voice(
-
+**played_not_selected_voice_ids:** `typing.Optional[typing.Sequence[str]]` — List of voice ids that the user has played but not selected. Used for RLHF.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2301,7 +2514,7 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.voices.get_settings(
- voice_id="2EiwWnXFnvU5JabPnv8n",
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
)
```
@@ -2371,7 +2584,7 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.voices.get(
- voice_id="29vD33N1CtxCmqQRPOHJ",
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
)
```
@@ -2449,7 +2662,7 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.voices.delete(
- voice_id="29vD33N1CtxCmqQRPOHJ",
+ voice_id="VOICE_ID",
)
```
@@ -2519,7 +2732,7 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.voices.edit_settings(
- voice_id="29vD33N1CtxCmqQRPOHJ",
+ voice_id="VOICE_ID",
request=VoiceSettings(
stability=0.1,
similarity_boost=0.3,
@@ -2637,6 +2850,14 @@ typing.List[core.File]` — See core.File for more documentation
-
+**remove_background_noise:** `typing.Optional[bool]` — If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse.
+
+
+
+
+
+-
+
**description:** `typing.Optional[str]` — How would you describe the voice?
@@ -2698,7 +2919,7 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.voices.edit(
- voice_id="JBFqnCBsd6RMkjVDRZzb",
+ voice_id="VOICE_ID",
name="George",
)
@@ -2742,6 +2963,14 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-
+**remove_background_noise:** `typing.Optional[bool]` — If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse.
+
+
+
+
+
+-
+
**description:** `typing.Optional[str]` — How would you describe the voice?
@@ -2846,14 +3075,6 @@ client.voices.add_sharing_voice(
-
-**xi_app_check_token:** `typing.Optional[str]` — Your app check token.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3198,7 +3419,7 @@ client.voices.get_a_profile_page(
-## Projects
+## projects
client.projects.get_all()
-
@@ -3366,9 +3587,9 @@ typing.Optional[core.File]` — See core.File for more documentation
Output quality of the generated audio. Must be one of:
standard - standard output format, 128kbps with 44.1kHz sample rate.
-high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the character cost by 20%.
-ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the character cost by 50%.
-ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the character cost by 100%.
+high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the credit cost by 20%.
+ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the credit cost by 50%.
+ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the credit cost by 100%.
@@ -3393,7 +3614,7 @@ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate
-
-**isbn_number:** `typing.Optional[str]` — An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download.
+**description:** `typing.Optional[str]` — An optional description of the project.
@@ -3401,7 +3622,7 @@ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate
-
-**acx_volume_normalization:** `typing.Optional[bool]` — [Deprecated] When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
+**genres:** `typing.Optional[typing.List[str]]` — An optional list of genres associated with the project.
@@ -3409,7 +3630,7 @@ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate
-
-**volume_normalization:** `typing.Optional[bool]` — When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
+**target_audience:** `typing.Optional[ProjectsAddRequestTargetAudience]` — An optional target audience of the project.
@@ -3417,7 +3638,7 @@ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate
-
-**pronunciation_dictionary_locators:** `typing.Optional[typing.List[str]]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
+**language:** `typing.Optional[str]` — An optional language of the project. Two-letter language code (ISO 639-1).
@@ -3425,12 +3646,84 @@ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**content_type:** `typing.Optional[str]` — An optional content type of the project.
-
-
+
+
+-
+
+**original_publication_date:** `typing.Optional[str]` — An optional original publication date of the project, in the format YYYY-MM-DD or YYYY.
+
+
+
+
+
+-
+
+**mature_content:** `typing.Optional[bool]` — An optional mature content of the project.
+
+
+
+
+
+-
+
+**isbn_number:** `typing.Optional[str]` — An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download.
+
+
+
+
+
+-
+
+**acx_volume_normalization:** `typing.Optional[bool]` — [Deprecated] When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
+
+
+
+
+
+-
+
+**volume_normalization:** `typing.Optional[bool]` — When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
+
+
+
+
+
+-
+
+**pronunciation_dictionary_locators:** `typing.Optional[typing.List[str]]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
+
+
+
+
+
+-
+
+**fiction:** `typing.Optional[ProjectsAddRequestFiction]` — An optional fiction of the project.
+
+
+
+
+
+-
+
+**quality_check_on:** `typing.Optional[bool]` — Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
@@ -3624,6 +3917,14 @@ client.projects.edit_basic_project_info(
-
+**quality_check_on:** `typing.Optional[bool]` — Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3846,7 +4147,7 @@ client.projects.get_snapshots(
-client.projects.stream_audio(...)
+client.projects.stream_archive(...)
-
@@ -3858,7 +4159,7 @@ client.projects.get_snapshots(
-
-Stream the audio from a project snapshot.
+Streams archive with project audio.
@@ -3878,10 +4179,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.projects.stream_audio(
- project_id="string",
- project_snapshot_id="string",
- convert_to_mpeg=True,
+client.projects.stream_archive(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+ project_snapshot_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -3914,14 +4214,6 @@ client.projects.stream_audio(
-
-**convert_to_mpeg:** `typing.Optional[bool]` — Whether to convert the audio to mpeg format.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3934,7 +4226,7 @@ client.projects.stream_audio(
-client.projects.stream_archive(...)
+client.projects.update_pronunciation_dictionaries(...)
-
@@ -3946,7 +4238,7 @@ client.projects.stream_audio(
-
-Streams archive with project audio.
+Updates the set of pronunciation dictionaries acting on a project. This will automatically mark text within this project as requiring reconverting where the new dictionary would apply or the old one no longer does.
@@ -3961,14 +4253,19 @@ Streams archive with project audio.
-
```python
-from elevenlabs import ElevenLabs
+from elevenlabs import ElevenLabs, PronunciationDictionaryVersionLocator
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.projects.stream_archive(
+client.projects.update_pronunciation_dictionaries(
project_id="21m00Tcm4TlvDq8ikWAM",
- project_snapshot_id="21m00Tcm4TlvDq8ikWAM",
+ pronunciation_dictionary_locators=[
+ PronunciationDictionaryVersionLocator(
+ pronunciation_dictionary_id="pronunciation_dictionary_id",
+ version_id="version_id",
+ )
+ ],
)
```
@@ -3993,7 +4290,7 @@ client.projects.stream_archive(
-
-**project_snapshot_id:** `str` — The project_snapshot_id of the project snapshot. You can query GET /v1/projects/{project_id}/snapshots to list all available snapshots for a project.
+**pronunciation_dictionary_locators:** `typing.Sequence[PronunciationDictionaryVersionLocator]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
@@ -4013,7 +4310,8 @@ client.projects.stream_archive(
-client.projects.update_pronunciation_dictionaries(...)
+## Chapters
+client.chapters.get_all(...)
-
@@ -4025,7 +4323,7 @@ client.projects.stream_archive(
-
-Updates the set of pronunciation dictionaries acting on a project. This will automatically mark text within this project as requiring reconverting where the new dictionary would apply or the old one no longer does.
+Returns a list of your chapters for a project, together with their metadata.
@@ -4040,19 +4338,13 @@ Updates the set of pronunciation dictionaries acting on a project. This will aut
-
```python
-from elevenlabs import ElevenLabs, PronunciationDictionaryVersionLocator
+from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.projects.update_pronunciation_dictionaries(
+client.chapters.get_all(
project_id="21m00Tcm4TlvDq8ikWAM",
- pronunciation_dictionary_locators=[
- PronunciationDictionaryVersionLocator(
- pronunciation_dictionary_id="pronunciation_dictionary_id",
- version_id="version_id",
- )
- ],
)
```
@@ -4077,14 +4369,6 @@ client.projects.update_pronunciation_dictionaries(
-
-**pronunciation_dictionary_locators:** `typing.Sequence[PronunciationDictionaryVersionLocator]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -4097,8 +4381,7 @@ client.projects.update_pronunciation_dictionaries(
-## Chapters
-client.chapters.get_all(...)
+client.chapters.get(...)
-
@@ -4110,7 +4393,7 @@ client.projects.update_pronunciation_dictionaries(
-
-Returns a list of your chapters for a project together and its metadata.
+Returns information about a specific chapter.
@@ -4130,8 +4413,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.chapters.get_all(
+client.chapters.get(
project_id="21m00Tcm4TlvDq8ikWAM",
+ chapter_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -4156,6 +4440,14 @@ client.chapters.get_all(
-
+**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -4168,7 +4460,7 @@ client.chapters.get_all(
-client.chapters.get(...)
+client.chapters.delete(...)
-
@@ -4180,7 +4472,7 @@ client.chapters.get_all(
-
-Returns information about a specific chapter.
+Delete a chapter by its chapter_id.
@@ -4200,7 +4492,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.chapters.get(
+client.chapters.delete(
project_id="21m00Tcm4TlvDq8ikWAM",
chapter_id="21m00Tcm4TlvDq8ikWAM",
)
@@ -4247,7 +4539,7 @@ client.chapters.get(
-client.chapters.delete(...)
+client.chapters.create(...)
-
@@ -4259,7 +4551,7 @@ client.chapters.get(
-
-Delete a chapter by its chapter_id.
+Creates a new chapter either as blank or from a URL.
@@ -4279,9 +4571,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.chapters.delete(
+client.chapters.create(
project_id="21m00Tcm4TlvDq8ikWAM",
- chapter_id="21m00Tcm4TlvDq8ikWAM",
+ name="name",
)
```
@@ -4306,7 +4598,15 @@ client.chapters.delete(
-
-**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
+**name:** `str` — The name of the chapter, used for identification only.
+
+
+
+
+
+-
+
+**from_url:** `typing.Optional[str]` — An optional URL from which we will extract content to initialize the project. If this is set, 'from_document' must be null. If neither 'from_url' nor 'from_document' is provided we will initialize the project as blank.
@@ -4713,69 +5013,15 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**drop_background_audio:** `typing.Optional[bool]` — An advanced setting. Whether to drop background audio from the final dub. This can improve dub quality where it's known that audio shouldn't have a background track such as for speeches or monologues.
-
-
-
-
-
-
-
-
-
-
-client.dubbing.get_dubbing_project_metadata(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-Returns metadata about a dubbing project, including whether it's still in progress or not
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from elevenlabs import ElevenLabs
-
-client = ElevenLabs(
- api_key="YOUR_API_KEY",
-)
-client.dubbing.get_dubbing_project_metadata(
- dubbing_id="dubbing_id",
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
-**dubbing_id:** `str` — ID of the dubbing project.
+**use_profanity_filter:** `typing.Optional[bool]` — [BETA] Whether transcripts should have profanities censored with the word '[censored]'
@@ -4795,7 +5041,7 @@ client.dubbing.get_dubbing_project_metadata(
-client.dubbing.delete_dubbing_project(...)
+client.dubbing.get_dubbing_project_metadata(...)
-
@@ -4807,7 +5053,7 @@ client.dubbing.get_dubbing_project_metadata(
-
-Deletes a dubbing project.
+Returns metadata about a dubbing project, including whether it's still in progress or not
@@ -4827,7 +5073,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.dubbing.delete_dubbing_project(
+client.dubbing.get_dubbing_project_metadata(
dubbing_id="dubbing_id",
)
@@ -4865,7 +5111,7 @@ client.dubbing.delete_dubbing_project(
-client.dubbing.get_dubbed_file(...)
+client.dubbing.delete_dubbing_project(...)
-
@@ -4877,7 +5123,7 @@ client.dubbing.delete_dubbing_project(
-
-Returns dubbed file as a streamed file. Videos will be returned in MP4 format and audio only dubs will be returned in MP3.
+Deletes a dubbing project.
@@ -4897,9 +5143,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.dubbing.get_dubbed_file(
- dubbing_id="string",
- language_code="string",
+client.dubbing.delete_dubbing_project(
+ dubbing_id="dubbing_id",
)
```
@@ -4924,14 +5169,6 @@ client.dubbing.get_dubbed_file(
-
-**language_code:** `str` — ID of the language.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -5011,9 +5248,7 @@ client.dubbing.get_transcript_for_dub(
-
-**format_type:** `typing.Optional[
- GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType
-]` — Format to use for the subtitle file, either 'srt' or 'webvtt'
+**format_type:** `typing.Optional[DubbingGetTranscriptForDubRequestFormatType]` — Format to use for the subtitle file, either 'srt' or 'webvtt'
@@ -5033,7 +5268,7 @@ client.dubbing.get_transcript_for_dub(
-## Models
+## models
client.models.get_all()
-
@@ -5268,7 +5503,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-Returns the characters usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis.
+Returns the credit usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis.
@@ -5331,7 +5566,7 @@ client.usage.get_characters_usage_metrics(
-
-**breakdown_type:** `typing.Optional[UsageGetCharactersUsageMetricsRequestBreakdownType]` — How to break down the information. Cannot be "user" if include_workspace_metrics is False.
+**breakdown_type:** `typing.Optional[BreakdownTypes]` — How to break down the information. Cannot be "user" if include_workspace_metrics is False.
@@ -5448,7 +5683,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(...)
+client.pronunciation_dictionary.add_rules(...)
-
@@ -5483,7 +5718,7 @@ from elevenlabs.pronunciation_dictionary import (
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(
+client.pronunciation_dictionary.add_rules(
pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM",
rules=[
PronunciationDictionaryRule_Phoneme(
@@ -5540,7 +5775,7 @@ List of pronunciation rules. Rule can be either:
-client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(...)
+client.pronunciation_dictionary.remove_rules(...)
-
@@ -5572,7 +5807,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
+client.pronunciation_dictionary.remove_rules(
pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM",
rule_strings=["rule_strings"],
)
@@ -6073,3 +6308,1731 @@ client.workspace.update_member(
+## ConversationalAi
+client.conversational_ai.get_signed_url(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a signed url to start a conversation with an agent that requires authorization
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.get_signed_url(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**agent_id:** `str` — The id of the agent you're taking the action on.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.create_agent(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Create an agent from a config object
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ConversationalConfig, ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.create_agent(
+ conversation_config=ConversationalConfig(),
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**conversation_config:** `ConversationalConfig` — Conversation configuration for an agent
+
+
+
+
+
+-
+
+**platform_settings:** `typing.Optional[AgentPlatformSettings]` — Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — A name to make the agent easier to find
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.get_agent(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Retrieve config for an agent
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.get_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.delete_agent(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete an agent
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.delete_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.update_agent(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Patches an Agent settings
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.update_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
+
+
+
+
+
+-
+
+**conversation_config:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Conversation configuration for an agent
+
+
+
+
+
+-
+
+**platform_settings:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
+
+
+
+
+
+-
+
+**secrets:** `typing.Optional[
+ typing.Sequence[
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem
+ ]
+]` — A list of secrets for the agent. Can be used to add new secrets or update and delete the existing ones
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — A name to make the agent easier to find
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.get_agent_widget(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Retrieve the widget configuration for an agent
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.get_agent_widget(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
+
+
+
+
+
+-
+
+**conversation_signature:** `typing.Optional[str]` — An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.get_agent_link(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get the current link used to share the agent with others
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.get_agent_link(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.post_agent_avatar(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Sets the avatar for an agent displayed in the widget
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.post_agent_avatar(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
+
+
+
+
+
+-
+
+**avatar_file:** `from __future__ import annotations
+
+core.File` — See core.File for more documentation
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.get_agent_knowledge_base_document_by_id(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get details about a specific document making up the agent's knowledge base
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.get_agent_knowledge_base_document_by_id(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ documentation_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
+
+
+
+
+
+-
+
+**documentation_id:** `str` — The id of a document from the agent's knowledge base. This is returned on document addition.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.add_agent_secret(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Uploads a file or references a webpage for the agent to use as part of its knowledge base
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.add_agent_secret(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ name="name",
+ secret_value="secret_value",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
+
+
+
+
+
+-
+
+**name:** `str` — A name to help identify a particular agent secret
+
+
+
+
+
+-
+
+**secret_value:** `str` — A value to be encrypted and used by the agent
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.add_to_agent_knowledge_base(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Uploads a file or references a webpage for the agent to use as part of its knowledge base
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.add_to_agent_knowledge_base(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
+
+
+
+
+
+-
+
+**url:** `typing.Optional[str]` — URL to a page of documentation that the agent will have access to in order to interact with users.
+
+
+
+
+
+-
+
+**file:** `from __future__ import annotations
+
+typing.Optional[core.File]` — See core.File for more documentation
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.get_agents(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Returns a page of your agents and their metadata.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.get_agents()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
+
+
+
+
+
+-
+
+**page_size:** `typing.Optional[int]` — How many Agents to return at maximum. Can not exceed 100, defaults to 30.
+
+
+
+
+
+-
+
+**search:** `typing.Optional[str]` — Search by agent name.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.get_conversations(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get all conversations of agents that user owns. With option to restrict to a specific agent.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.get_conversations(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
+
+
+
+
+
+-
+
+**agent_id:** `typing.Optional[str]` — The id of the agent you're taking the action on.
+
+
+
+
+
+-
+
+**call_successful:** `typing.Optional[EvaluationSuccessResult]` — The result of the success evaluation
+
+
+
+
+
+-
+
+**page_size:** `typing.Optional[int]` — How many conversations to return at maximum. Can not exceed 100, defaults to 30.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.get_conversation(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get the details of a particular conversation
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.get_conversation(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**conversation_id:** `str` — The id of the conversation you're taking the action on.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.delete_conversation(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete a particular conversation
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.delete_conversation(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**conversation_id:** `str` — The id of the conversation you're taking the action on.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.get_conversation_audio(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get the audio recording of a particular conversation
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.get_conversation_audio(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**conversation_id:** `str` — The id of the conversation you're taking the action on.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.post_conversation_feedback(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Send the feedback for the given conversation
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.post_conversation_feedback(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+ feedback="like",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**conversation_id:** `str` — The id of the conversation you're taking the action on.
+
+
+
+
+
+-
+
+**feedback:** `UserFeedbackScore` — Either 'like' or 'dislike' to indicate the feedback for the conversation.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.create_phone_number(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Import Phone Number from Twilio configuration
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.create_phone_number(
+ phone_number="phone_number",
+ label="label",
+ sid="sid",
+ token="token",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**phone_number:** `str` — Phone number
+
+
+
+
+
+-
+
+**label:** `str` — Label for the phone number
+
+
+
+
+
+-
+
+**sid:** `str` — Twilio Account SID
+
+
+
+
+
+-
+
+**token:** `str` — Twilio Token
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.get_phone_number(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Retrieve Phone Number details by ID
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.get_phone_number(
+ phone_number_id="TeaqRRdTcIfIu2i7BYfT",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**phone_number_id:** `str` — The id of a phone number. This is returned on phone number creation.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.delete_phone_number(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete Phone Number by ID
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.delete_phone_number(
+ phone_number_id="TeaqRRdTcIfIu2i7BYfT",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**phone_number_id:** `str` — The id of a phone number. This is returned on phone number creation.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.update_phone_number(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update Phone Number details by ID
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.update_phone_number(
+ phone_number_id="TeaqRRdTcIfIu2i7BYfT",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**phone_number_id:** `str` — The id of a phone number. This is returned on phone number creation.
+
+
+
+
+
+-
+
+**agent_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.conversational_ai.get_phone_numbers()
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Retrieve all Phone Numbers
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.get_phone_numbers()
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/elevenlabs/__init__.py b/src/elevenlabs/__init__.py
index 8ca7e4ed..a25daaea 100644
--- a/src/elevenlabs/__init__.py
+++ b/src/elevenlabs/__init__.py
@@ -2,101 +2,226 @@
from .types import (
Accent,
+ AddAgentSecretResponseModel,
+ AddChapterResponseModel,
+ AddKnowledgeBaseResponseModel,
AddProjectResponseModel,
AddPronunciationDictionaryResponseModel,
AddPronunciationDictionaryRulesResponseModel,
+ AddVoiceIvcResponseModel,
AddVoiceResponseModel,
Age,
+ AgentBan,
+ AgentConfig,
+ AgentConfigOverride,
+ AgentConfigOverrideConfig,
+ AgentMetadataResponseModel,
+ AgentPlatformSettings,
+ AgentSummaryResponseModel,
+ AllowlistItem,
+ ArrayJsonSchemaProperty,
+ ArrayJsonSchemaPropertyItems,
+ AsrConversationalConfig,
+ AsrInputFormat,
+ AsrProvider,
+ AsrQuality,
AudioNativeCreateProjectResponseModel,
- AudioNativeGetEmbedCodeResponseModel,
- AudioOutput,
- Category,
+ AuthSettings,
+ AuthorizationMethod,
+ BanReasonType,
+ BreakdownTypes,
ChapterResponse,
ChapterSnapshotResponse,
ChapterSnapshotsResponse,
ChapterState,
ChapterStatisticsResponse,
- CloseConnection,
+ ClientEvent,
+ ClientToolConfig,
+ ConvAiNewSecretConfig,
+ ConvAiSecretLocator,
+ ConvAiStoredSecretConfig,
+ ConversationChargingCommonModel,
+ ConversationConfig,
+ ConversationConfigClientOverride,
+ ConversationConfigClientOverrideConfig,
+ ConversationHistoryAnalysisCommonModel,
+ ConversationHistoryEvaluationCriteriaResultCommonModel,
+ ConversationHistoryFeedbackCommonModel,
+ ConversationHistoryMetadataCommonModel,
+ ConversationHistoryTranscriptCommonModel,
+ ConversationHistoryTranscriptCommonModelRole,
+ ConversationHistoryTranscriptToolCallCommonModel,
+ ConversationHistoryTranscriptToolResultCommonModel,
+ ConversationInitiationClientData,
+ ConversationInitiationClientDataConfig,
+ ConversationSignedUrlResponseModel,
+ ConversationSummaryResponseModel,
+ ConversationSummaryResponseModelStatus,
+ ConversationTokenDbModel,
+ ConversationTokenPurpose,
+ ConversationalConfig,
+ CreateAgentResponseModel,
+ CreatePhoneNumberResponseModel,
Currency,
+ CustomLlm,
+ DataCollectionResultCommonModel,
DoDubbingResponse,
DubbingMetadataResponse,
EditProjectResponseModel,
+ EmbedConfig,
+ EmbedConfigAvatar,
+ EmbedConfigAvatar_Image,
+ EmbedConfigAvatar_Orb,
+ EmbedConfigAvatar_Url,
+ EmbedVariant,
+ EvaluationSettings,
+ EvaluationSuccessResult,
ExtendedSubscriptionResponseModelBillingPeriod,
ExtendedSubscriptionResponseModelCharacterRefreshPeriod,
+ ExtendedSubscriptionResponseModelCurrency,
FeedbackItem,
FineTuningResponse,
FineTuningResponseModelStateValue,
Gender,
- GenerationConfig,
+ GetAgentEmbedResponseModel,
+ GetAgentLinkResponseModel,
+ GetAgentResponseModel,
+ GetAgentsPageResponseModel,
GetChaptersResponse,
+ GetConversationResponseModel,
+ GetConversationResponseModelStatus,
+ GetConversationsPageResponseModel,
+ GetKnowledgeBaseReponseModel,
+ GetKnowledgeBaseReponseModelType,
GetLibraryVoicesResponse,
+ GetPhoneNumberResponseModel,
GetProjectsResponse,
GetPronunciationDictionariesMetadataResponseModel,
GetPronunciationDictionaryMetadataResponse,
GetSpeechHistoryResponse,
GetVoicesResponse,
- History,
HistoryAlignmentResponseModel,
HistoryAlignmentsResponseModel,
HistoryItem,
HttpValidationError,
- InitializeConnection,
+ ImageAvatar,
Invoice,
+ KnowledgeBaseLocator,
+ KnowledgeBaseLocatorType,
LanguageResponse,
LibraryVoiceResponse,
+ LibraryVoiceResponseModelCategory,
+ LiteralJsonSchemaProperty,
+ LiteralJsonSchemaPropertyType,
+ Llm,
ManualVerificationFileResponse,
ManualVerificationResponse,
Model,
- NormalizedAlignment,
- OptimizeStreamingLatency,
+ ModelRatesResponseModel,
+ ModelResponseModelConcurrencyGroup,
+ ModerationStatusResponseModel,
+ ModerationStatusResponseModelSafetyStatus,
+ ModerationStatusResponseModelWarningStatus,
+ ObjectJsonSchemaProperty,
+ ObjectJsonSchemaPropertyPropertiesValue,
+ OrbAvatar,
OutputFormat,
+ PhoneNumberAgentInfo,
+ PostAgentAvatarResponseModel,
+ PrivacyConfig,
ProfilePageResponseModel,
+ ProjectCreationMetaResponseModel,
+ ProjectCreationMetaResponseModelStatus,
+ ProjectCreationMetaResponseModelType,
ProjectExtendedResponseModel,
+ ProjectExtendedResponseModelAccessLevel,
+ ProjectExtendedResponseModelApplyTextNormalization,
+ ProjectExtendedResponseModelFiction,
+ ProjectExtendedResponseModelQualityPreset,
+ ProjectExtendedResponseModelTargetAudience,
ProjectResponse,
+ ProjectResponseModelAccessLevel,
+ ProjectResponseModelFiction,
+ ProjectResponseModelTargetAudience,
ProjectSnapshotResponse,
ProjectSnapshotUploadResponseModel,
+ ProjectSnapshotUploadResponseModelStatus,
ProjectSnapshotsResponse,
ProjectState,
+ PromptAgent,
+ PromptAgentOverride,
+ PromptAgentOverrideConfig,
+ PromptAgentToolsItem,
+ PromptAgentToolsItem_Client,
+ PromptAgentToolsItem_Webhook,
+ PromptEvaluationCriteria,
PronunciationDictionaryAliasRuleRequestModel,
PronunciationDictionaryPhonemeRuleRequestModel,
PronunciationDictionaryVersionLocator,
- RealtimeVoiceSettings,
+ PronunciationDictionaryVersionResponseModel,
+ PydanticPronunciationDictionaryVersionLocator,
+ QueryParamsJsonSchema,
+ ReaderResourceResponseModel,
+ ReaderResourceResponseModelResourceType,
RecordingResponse,
RemovePronunciationDictionaryRulesResponseModel,
ReviewStatus,
- SendText,
- Source,
+ Safety,
+ SafetyEvaluation,
+ SafetyRule,
SpeechHistoryItemResponse,
+ SpeechHistoryItemResponseModelSource,
SpeechHistoryItemResponseModelVoiceCategory,
- SsoProviderResponseModel,
- SsoProviderResponseModelProviderType,
- Status,
Subscription,
SubscriptionResponse,
SubscriptionResponseModelBillingPeriod,
SubscriptionResponseModelCharacterRefreshPeriod,
+ SubscriptionResponseModelCurrency,
SubscriptionStatus,
+ TelephonyProvider,
TextToSpeechAsStreamRequest,
+ TtsConversationalConfig,
+ TtsConversationalConfigOverride,
+ TtsConversationalConfigOverrideConfig,
+ TtsConversationalModel,
+ TtsOptimizeStreamingLatency,
+ TtsOutputFormat,
+ TurnConfig,
+ TurnMode,
+ UrlAvatar,
UsageCharactersResponseModel,
User,
+ UserFeedback,
+ UserFeedbackScore,
ValidationError,
ValidationErrorLocItem,
VerificationAttemptResponse,
Voice,
VoiceGenerationParameterOptionResponse,
VoiceGenerationParameterResponse,
+ VoicePreviewResponseModel,
+ VoicePreviewsResponseModel,
+ VoiceResponseModelCategory,
VoiceResponseModelSafetyControl,
VoiceSample,
VoiceSettings,
+ VoiceSharingModerationCheckResponseModel,
VoiceSharingResponse,
+ VoiceSharingResponseModelCategory,
VoiceSharingState,
VoiceVerificationResponse,
+ WebhookToolApiSchemaConfig,
+ WebhookToolApiSchemaConfigMethod,
+ WebhookToolApiSchemaConfigRequestHeadersValue,
+ WebhookToolConfig,
+ WidgetFeedbackMode,
)
from .errors import UnprocessableEntityError
from . import (
audio_isolation,
audio_native,
chapters,
+ conversational_ai,
dubbing,
history,
models,
@@ -106,6 +231,7 @@
speech_to_speech,
text_to_sound_effects,
text_to_speech,
+ text_to_voice,
usage,
user,
voice_generation,
@@ -113,82 +239,205 @@
workspace,
)
from .client import AsyncElevenLabs, ElevenLabs
-from .dubbing import GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType
+from .conversational_ai import (
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem,
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New,
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored,
+)
+from .dubbing import DubbingGetTranscriptForDubRequestFormatType
from .environment import ElevenLabsEnvironment
+from .history import HistoryGetAllRequestSource
from .play import play, save, stream
+from .projects import ProjectsAddRequestFiction, ProjectsAddRequestTargetAudience
from .pronunciation_dictionary import (
PronunciationDictionaryAddFromFileRequestWorkspaceAccess,
PronunciationDictionaryRule,
PronunciationDictionaryRule_Alias,
PronunciationDictionaryRule_Phoneme,
)
-from .text_to_speech import SendMessage
-from .usage import UsageGetCharactersUsageMetricsRequestBreakdownType
+from .text_to_speech import (
+ BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization,
+ BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization,
+ BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization,
+ BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization,
+ TextToSpeechStreamWithTimestampsResponse,
+ TextToSpeechStreamWithTimestampsResponseAlignment,
+ TextToSpeechStreamWithTimestampsResponseNormalizedAlignment,
+)
+from .text_to_voice import TextToVoiceCreatePreviewsRequestOutputFormat
from .version import __version__
from .workspace import BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole
__all__ = [
"Accent",
+ "AddAgentSecretResponseModel",
+ "AddChapterResponseModel",
+ "AddKnowledgeBaseResponseModel",
"AddProjectResponseModel",
"AddPronunciationDictionaryResponseModel",
"AddPronunciationDictionaryRulesResponseModel",
+ "AddVoiceIvcResponseModel",
"AddVoiceResponseModel",
"Age",
+ "AgentBan",
+ "AgentConfig",
+ "AgentConfigOverride",
+ "AgentConfigOverrideConfig",
+ "AgentMetadataResponseModel",
+ "AgentPlatformSettings",
+ "AgentSummaryResponseModel",
+ "AllowlistItem",
+ "ArrayJsonSchemaProperty",
+ "ArrayJsonSchemaPropertyItems",
+ "AsrConversationalConfig",
+ "AsrInputFormat",
+ "AsrProvider",
+ "AsrQuality",
"AsyncElevenLabs",
"AudioNativeCreateProjectResponseModel",
- "AudioNativeGetEmbedCodeResponseModel",
- "AudioOutput",
+ "AuthSettings",
+ "AuthorizationMethod",
+ "BanReasonType",
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem",
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New",
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored",
+ "BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization",
+ "BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization",
+ "BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization",
+ "BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization",
"BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole",
- "Category",
+ "BreakdownTypes",
"ChapterResponse",
"ChapterSnapshotResponse",
"ChapterSnapshotsResponse",
"ChapterState",
"ChapterStatisticsResponse",
- "CloseConnection",
+ "ClientEvent",
+ "ClientToolConfig",
+ "ConvAiNewSecretConfig",
+ "ConvAiSecretLocator",
+ "ConvAiStoredSecretConfig",
+ "ConversationChargingCommonModel",
+ "ConversationConfig",
+ "ConversationConfigClientOverride",
+ "ConversationConfigClientOverrideConfig",
+ "ConversationHistoryAnalysisCommonModel",
+ "ConversationHistoryEvaluationCriteriaResultCommonModel",
+ "ConversationHistoryFeedbackCommonModel",
+ "ConversationHistoryMetadataCommonModel",
+ "ConversationHistoryTranscriptCommonModel",
+ "ConversationHistoryTranscriptCommonModelRole",
+ "ConversationHistoryTranscriptToolCallCommonModel",
+ "ConversationHistoryTranscriptToolResultCommonModel",
+ "ConversationInitiationClientData",
+ "ConversationInitiationClientDataConfig",
+ "ConversationSignedUrlResponseModel",
+ "ConversationSummaryResponseModel",
+ "ConversationSummaryResponseModelStatus",
+ "ConversationTokenDbModel",
+ "ConversationTokenPurpose",
+ "ConversationalConfig",
+ "CreateAgentResponseModel",
+ "CreatePhoneNumberResponseModel",
"Currency",
+ "CustomLlm",
+ "DataCollectionResultCommonModel",
"DoDubbingResponse",
+ "DubbingGetTranscriptForDubRequestFormatType",
"DubbingMetadataResponse",
"EditProjectResponseModel",
"ElevenLabs",
"ElevenLabsEnvironment",
+ "EmbedConfig",
+ "EmbedConfigAvatar",
+ "EmbedConfigAvatar_Image",
+ "EmbedConfigAvatar_Orb",
+ "EmbedConfigAvatar_Url",
+ "EmbedVariant",
+ "EvaluationSettings",
+ "EvaluationSuccessResult",
"ExtendedSubscriptionResponseModelBillingPeriod",
"ExtendedSubscriptionResponseModelCharacterRefreshPeriod",
+ "ExtendedSubscriptionResponseModelCurrency",
"FeedbackItem",
"FineTuningResponse",
"FineTuningResponseModelStateValue",
"Gender",
- "GenerationConfig",
+ "GetAgentEmbedResponseModel",
+ "GetAgentLinkResponseModel",
+ "GetAgentResponseModel",
+ "GetAgentsPageResponseModel",
"GetChaptersResponse",
+ "GetConversationResponseModel",
+ "GetConversationResponseModelStatus",
+ "GetConversationsPageResponseModel",
+ "GetKnowledgeBaseReponseModel",
+ "GetKnowledgeBaseReponseModelType",
"GetLibraryVoicesResponse",
+ "GetPhoneNumberResponseModel",
"GetProjectsResponse",
"GetPronunciationDictionariesMetadataResponseModel",
"GetPronunciationDictionaryMetadataResponse",
"GetSpeechHistoryResponse",
- "GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType",
"GetVoicesResponse",
- "History",
"HistoryAlignmentResponseModel",
"HistoryAlignmentsResponseModel",
+ "HistoryGetAllRequestSource",
"HistoryItem",
"HttpValidationError",
- "InitializeConnection",
+ "ImageAvatar",
"Invoice",
+ "KnowledgeBaseLocator",
+ "KnowledgeBaseLocatorType",
"LanguageResponse",
"LibraryVoiceResponse",
+ "LibraryVoiceResponseModelCategory",
+ "LiteralJsonSchemaProperty",
+ "LiteralJsonSchemaPropertyType",
+ "Llm",
"ManualVerificationFileResponse",
"ManualVerificationResponse",
"Model",
- "NormalizedAlignment",
- "OptimizeStreamingLatency",
+ "ModelRatesResponseModel",
+ "ModelResponseModelConcurrencyGroup",
+ "ModerationStatusResponseModel",
+ "ModerationStatusResponseModelSafetyStatus",
+ "ModerationStatusResponseModelWarningStatus",
+ "ObjectJsonSchemaProperty",
+ "ObjectJsonSchemaPropertyPropertiesValue",
+ "OrbAvatar",
"OutputFormat",
+ "PhoneNumberAgentInfo",
+ "PostAgentAvatarResponseModel",
+ "PrivacyConfig",
"ProfilePageResponseModel",
+ "ProjectCreationMetaResponseModel",
+ "ProjectCreationMetaResponseModelStatus",
+ "ProjectCreationMetaResponseModelType",
"ProjectExtendedResponseModel",
+ "ProjectExtendedResponseModelAccessLevel",
+ "ProjectExtendedResponseModelApplyTextNormalization",
+ "ProjectExtendedResponseModelFiction",
+ "ProjectExtendedResponseModelQualityPreset",
+ "ProjectExtendedResponseModelTargetAudience",
"ProjectResponse",
+ "ProjectResponseModelAccessLevel",
+ "ProjectResponseModelFiction",
+ "ProjectResponseModelTargetAudience",
"ProjectSnapshotResponse",
"ProjectSnapshotUploadResponseModel",
+ "ProjectSnapshotUploadResponseModelStatus",
"ProjectSnapshotsResponse",
"ProjectState",
+ "ProjectsAddRequestFiction",
+ "ProjectsAddRequestTargetAudience",
+ "PromptAgent",
+ "PromptAgentOverride",
+ "PromptAgentOverrideConfig",
+ "PromptAgentToolsItem",
+ "PromptAgentToolsItem_Client",
+ "PromptAgentToolsItem_Webhook",
+ "PromptEvaluationCriteria",
"PronunciationDictionaryAddFromFileRequestWorkspaceAccess",
"PronunciationDictionaryAliasRuleRequestModel",
"PronunciationDictionaryPhonemeRuleRequestModel",
@@ -196,44 +445,73 @@
"PronunciationDictionaryRule_Alias",
"PronunciationDictionaryRule_Phoneme",
"PronunciationDictionaryVersionLocator",
- "RealtimeVoiceSettings",
+ "PronunciationDictionaryVersionResponseModel",
+ "PydanticPronunciationDictionaryVersionLocator",
+ "QueryParamsJsonSchema",
+ "ReaderResourceResponseModel",
+ "ReaderResourceResponseModelResourceType",
"RecordingResponse",
"RemovePronunciationDictionaryRulesResponseModel",
"ReviewStatus",
- "SendMessage",
- "SendText",
- "Source",
+ "Safety",
+ "SafetyEvaluation",
+ "SafetyRule",
"SpeechHistoryItemResponse",
+ "SpeechHistoryItemResponseModelSource",
"SpeechHistoryItemResponseModelVoiceCategory",
- "SsoProviderResponseModel",
- "SsoProviderResponseModelProviderType",
- "Status",
"Subscription",
"SubscriptionResponse",
"SubscriptionResponseModelBillingPeriod",
"SubscriptionResponseModelCharacterRefreshPeriod",
+ "SubscriptionResponseModelCurrency",
"SubscriptionStatus",
+ "TelephonyProvider",
"TextToSpeechAsStreamRequest",
+ "TextToSpeechStreamWithTimestampsResponse",
+ "TextToSpeechStreamWithTimestampsResponseAlignment",
+ "TextToSpeechStreamWithTimestampsResponseNormalizedAlignment",
+ "TextToVoiceCreatePreviewsRequestOutputFormat",
+ "TtsConversationalConfig",
+ "TtsConversationalConfigOverride",
+ "TtsConversationalConfigOverrideConfig",
+ "TtsConversationalModel",
+ "TtsOptimizeStreamingLatency",
+ "TtsOutputFormat",
+ "TurnConfig",
+ "TurnMode",
"UnprocessableEntityError",
+ "UrlAvatar",
"UsageCharactersResponseModel",
- "UsageGetCharactersUsageMetricsRequestBreakdownType",
"User",
+ "UserFeedback",
+ "UserFeedbackScore",
"ValidationError",
"ValidationErrorLocItem",
"VerificationAttemptResponse",
"Voice",
"VoiceGenerationParameterOptionResponse",
"VoiceGenerationParameterResponse",
+ "VoicePreviewResponseModel",
+ "VoicePreviewsResponseModel",
+ "VoiceResponseModelCategory",
"VoiceResponseModelSafetyControl",
"VoiceSample",
"VoiceSettings",
+ "VoiceSharingModerationCheckResponseModel",
"VoiceSharingResponse",
+ "VoiceSharingResponseModelCategory",
"VoiceSharingState",
"VoiceVerificationResponse",
+ "WebhookToolApiSchemaConfig",
+ "WebhookToolApiSchemaConfigMethod",
+ "WebhookToolApiSchemaConfigRequestHeadersValue",
+ "WebhookToolConfig",
+ "WidgetFeedbackMode",
"__version__",
"audio_isolation",
"audio_native",
"chapters",
+ "conversational_ai",
"dubbing",
"history",
"models",
@@ -246,6 +524,7 @@
"stream",
"text_to_sound_effects",
"text_to_speech",
+ "text_to_voice",
"usage",
"user",
"voice_generation",
diff --git a/src/elevenlabs/audio_isolation/client.py b/src/elevenlabs/audio_isolation/client.py
index 2e831108..b52b3203 100644
--- a/src/elevenlabs/audio_isolation/client.py
+++ b/src/elevenlabs/audio_isolation/client.py
@@ -31,21 +31,12 @@ def audio_isolation(
See core.File for more documentation
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
typing.Iterator[bytes]
Successful Response
-
- Examples
- --------
- from elevenlabs import ElevenLabs
-
- client = ElevenLabs(
- api_key="YOUR_API_KEY",
- )
- client.audio_isolation.audio_isolation()
"""
with self._client_wrapper.httpx_client.stream(
"v1/audio-isolation",
@@ -59,7 +50,8 @@ def audio_isolation(
) as _response:
try:
if 200 <= _response.status_code < 300:
- for _chunk in _response.iter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ for _chunk in _response.iter_bytes(chunk_size=_chunk_size):
yield _chunk
return
_response.read()
@@ -90,21 +82,12 @@ def audio_isolation_stream(
See core.File for more documentation
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
typing.Iterator[bytes]
Successful Response
-
- Examples
- --------
- from elevenlabs import ElevenLabs
-
- client = ElevenLabs(
- api_key="YOUR_API_KEY",
- )
- client.audio_isolation.audio_isolation_stream()
"""
with self._client_wrapper.httpx_client.stream(
"v1/audio-isolation/stream",
@@ -118,7 +101,8 @@ def audio_isolation_stream(
) as _response:
try:
if 200 <= _response.status_code < 300:
- for _chunk in _response.iter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ for _chunk in _response.iter_bytes(chunk_size=_chunk_size):
yield _chunk
return
_response.read()
@@ -154,29 +138,12 @@ async def audio_isolation(
See core.File for more documentation
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
typing.AsyncIterator[bytes]
Successful Response
-
- Examples
- --------
- import asyncio
-
- from elevenlabs import AsyncElevenLabs
-
- client = AsyncElevenLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- await client.audio_isolation.audio_isolation()
-
-
- asyncio.run(main())
"""
async with self._client_wrapper.httpx_client.stream(
"v1/audio-isolation",
@@ -190,7 +157,8 @@ async def main() -> None:
) as _response:
try:
if 200 <= _response.status_code < 300:
- async for _chunk in _response.aiter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
yield _chunk
return
await _response.aread()
@@ -221,29 +189,12 @@ async def audio_isolation_stream(
See core.File for more documentation
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
typing.AsyncIterator[bytes]
Successful Response
-
- Examples
- --------
- import asyncio
-
- from elevenlabs import AsyncElevenLabs
-
- client = AsyncElevenLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- await client.audio_isolation.audio_isolation_stream()
-
-
- asyncio.run(main())
"""
async with self._client_wrapper.httpx_client.stream(
"v1/audio-isolation/stream",
@@ -257,7 +208,8 @@ async def main() -> None:
) as _response:
try:
if 200 <= _response.status_code < 300:
- async for _chunk in _response.aiter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
yield _chunk
return
await _response.aread()
diff --git a/src/elevenlabs/audio_native/client.py b/src/elevenlabs/audio_native/client.py
index e0f7768e..fa998d8d 100644
--- a/src/elevenlabs/audio_native/client.py
+++ b/src/elevenlabs/audio_native/client.py
@@ -24,17 +24,17 @@ def create(
self,
*,
name: str,
- image: typing.Optional[str] = None,
- author: typing.Optional[str] = None,
- title: typing.Optional[str] = None,
- small: typing.Optional[bool] = None,
- text_color: typing.Optional[str] = None,
- background_color: typing.Optional[str] = None,
- sessionization: typing.Optional[int] = None,
- voice_id: typing.Optional[str] = None,
- model_id: typing.Optional[str] = None,
- file: typing.Optional[core.File] = None,
- auto_convert: typing.Optional[bool] = None,
+ image: typing.Optional[str] = OMIT,
+ author: typing.Optional[str] = OMIT,
+ title: typing.Optional[str] = OMIT,
+ small: typing.Optional[bool] = OMIT,
+ text_color: typing.Optional[str] = OMIT,
+ background_color: typing.Optional[str] = OMIT,
+ sessionization: typing.Optional[int] = OMIT,
+ voice_id: typing.Optional[str] = OMIT,
+ model_id: typing.Optional[str] = OMIT,
+ file: typing.Optional[core.File] = OMIT,
+ auto_convert: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AudioNativeCreateProjectResponseModel:
"""
@@ -152,17 +152,17 @@ async def create(
self,
*,
name: str,
- image: typing.Optional[str] = None,
- author: typing.Optional[str] = None,
- title: typing.Optional[str] = None,
- small: typing.Optional[bool] = None,
- text_color: typing.Optional[str] = None,
- background_color: typing.Optional[str] = None,
- sessionization: typing.Optional[int] = None,
- voice_id: typing.Optional[str] = None,
- model_id: typing.Optional[str] = None,
- file: typing.Optional[core.File] = None,
- auto_convert: typing.Optional[bool] = None,
+ image: typing.Optional[str] = OMIT,
+ author: typing.Optional[str] = OMIT,
+ title: typing.Optional[str] = OMIT,
+ small: typing.Optional[bool] = OMIT,
+ text_color: typing.Optional[str] = OMIT,
+ background_color: typing.Optional[str] = OMIT,
+ sessionization: typing.Optional[int] = OMIT,
+ voice_id: typing.Optional[str] = OMIT,
+ model_id: typing.Optional[str] = OMIT,
+ file: typing.Optional[core.File] = OMIT,
+ auto_convert: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AudioNativeCreateProjectResponseModel:
"""
diff --git a/src/elevenlabs/base_client.py b/src/elevenlabs/base_client.py
index 977ed580..dd5a5dbf 100644
--- a/src/elevenlabs/base_client.py
+++ b/src/elevenlabs/base_client.py
@@ -12,6 +12,7 @@
from .text_to_speech.client import TextToSpeechClient
from .speech_to_speech.client import SpeechToSpeechClient
from .voice_generation.client import VoiceGenerationClient
+from .text_to_voice.client import TextToVoiceClient
from .user.client import UserClient
from .voices.client import VoicesClient
from .projects.client import ProjectsClient
@@ -22,6 +23,7 @@
from .usage.client import UsageClient
from .pronunciation_dictionary.client import PronunciationDictionaryClient
from .workspace.client import WorkspaceClient
+from .conversational_ai.client import ConversationalAiClient
from .core.client_wrapper import AsyncClientWrapper
from .history.client import AsyncHistoryClient
from .text_to_sound_effects.client import AsyncTextToSoundEffectsClient
@@ -30,6 +32,7 @@
from .text_to_speech.client import AsyncTextToSpeechClient
from .speech_to_speech.client import AsyncSpeechToSpeechClient
from .voice_generation.client import AsyncVoiceGenerationClient
+from .text_to_voice.client import AsyncTextToVoiceClient
from .user.client import AsyncUserClient
from .voices.client import AsyncVoicesClient
from .projects.client import AsyncProjectsClient
@@ -40,6 +43,7 @@
from .usage.client import AsyncUsageClient
from .pronunciation_dictionary.client import AsyncPronunciationDictionaryClient
from .workspace.client import AsyncWorkspaceClient
+from .conversational_ai.client import AsyncConversationalAiClient
class BaseElevenLabs:
@@ -84,7 +88,7 @@ def __init__(
*,
base_url: typing.Optional[str] = None,
environment: ElevenLabsEnvironment = ElevenLabsEnvironment.PRODUCTION,
- api_key: typing.Optional[str] = os.getenv("ELEVEN_API_KEY"),
+ api_key: typing.Optional[str] = os.getenv("ELEVENLABS_API_KEY") or os.getenv("ELEVEN_API_KEY"),
timeout: typing.Optional[float] = None,
follow_redirects: typing.Optional[bool] = True,
httpx_client: typing.Optional[httpx.Client] = None,
@@ -107,6 +111,7 @@ def __init__(
self.text_to_speech = TextToSpeechClient(client_wrapper=self._client_wrapper)
self.speech_to_speech = SpeechToSpeechClient(client_wrapper=self._client_wrapper)
self.voice_generation = VoiceGenerationClient(client_wrapper=self._client_wrapper)
+ self.text_to_voice = TextToVoiceClient(client_wrapper=self._client_wrapper)
self.user = UserClient(client_wrapper=self._client_wrapper)
self.voices = VoicesClient(client_wrapper=self._client_wrapper)
self.projects = ProjectsClient(client_wrapper=self._client_wrapper)
@@ -117,6 +122,7 @@ def __init__(
self.usage = UsageClient(client_wrapper=self._client_wrapper)
self.pronunciation_dictionary = PronunciationDictionaryClient(client_wrapper=self._client_wrapper)
self.workspace = WorkspaceClient(client_wrapper=self._client_wrapper)
+ self.conversational_ai = ConversationalAiClient(client_wrapper=self._client_wrapper)
class AsyncBaseElevenLabs:
@@ -161,7 +167,7 @@ def __init__(
*,
base_url: typing.Optional[str] = None,
environment: ElevenLabsEnvironment = ElevenLabsEnvironment.PRODUCTION,
- api_key: typing.Optional[str] = os.getenv("ELEVEN_API_KEY"),
+ api_key: typing.Optional[str] = os.getenv("ELEVENLABS_API_KEY") or os.getenv("ELEVEN_API_KEY"),
timeout: typing.Optional[float] = None,
follow_redirects: typing.Optional[bool] = True,
httpx_client: typing.Optional[httpx.AsyncClient] = None,
@@ -184,6 +190,7 @@ def __init__(
self.text_to_speech = AsyncTextToSpeechClient(client_wrapper=self._client_wrapper)
self.speech_to_speech = AsyncSpeechToSpeechClient(client_wrapper=self._client_wrapper)
self.voice_generation = AsyncVoiceGenerationClient(client_wrapper=self._client_wrapper)
+ self.text_to_voice = AsyncTextToVoiceClient(client_wrapper=self._client_wrapper)
self.user = AsyncUserClient(client_wrapper=self._client_wrapper)
self.voices = AsyncVoicesClient(client_wrapper=self._client_wrapper)
self.projects = AsyncProjectsClient(client_wrapper=self._client_wrapper)
@@ -194,6 +201,7 @@ def __init__(
self.usage = AsyncUsageClient(client_wrapper=self._client_wrapper)
self.pronunciation_dictionary = AsyncPronunciationDictionaryClient(client_wrapper=self._client_wrapper)
self.workspace = AsyncWorkspaceClient(client_wrapper=self._client_wrapper)
+ self.conversational_ai = AsyncConversationalAiClient(client_wrapper=self._client_wrapper)
def _get_base_url(https://github.com/*,%20base_url:%20typing.Optional[str]%20=%20None,%20environment:%20ElevenLabsEnvironment) -> str:
diff --git a/src/elevenlabs/chapters/client.py b/src/elevenlabs/chapters/client.py
index af4de411..9c064995 100644
--- a/src/elevenlabs/chapters/client.py
+++ b/src/elevenlabs/chapters/client.py
@@ -11,6 +11,7 @@
from json.decoder import JSONDecodeError
from ..core.api_error import ApiError
from ..types.chapter_response import ChapterResponse
+from ..types.add_chapter_response_model import AddChapterResponseModel
from ..types.chapter_snapshots_response import ChapterSnapshotsResponse
from ..core.client_wrapper import AsyncClientWrapper
@@ -207,6 +208,85 @@ def delete(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def create(
+ self,
+ project_id: str,
+ *,
+ name: str,
+ from_url: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AddChapterResponseModel:
+ """
+ Creates a new chapter either as blank or from a URL.
+
+ Parameters
+ ----------
+ project_id : str
+ The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+
+ name : str
+ The name of the chapter, used for identification only.
+
+ from_url : typing.Optional[str]
+ An optional URL from which we will extract content to initialize the project. If this is set, 'from_document' must be null. If neither 'from_url' nor 'from_document' is provided we will initialize the project as blank.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AddChapterResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.chapters.create(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+ name="name",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/projects/{jsonable_encoder(project_id)}/chapters/add",
+ method="POST",
+ json={
+ "name": name,
+ "from_url": from_url,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ AddChapterResponseModel,
+ construct_type(
+ type_=AddChapterResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
def convert(
self, project_id: str, chapter_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> typing.Optional[typing.Any]:
@@ -385,6 +465,9 @@ def stream_snapshot(
json={
"convert_to_mpeg": convert_to_mpeg,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -620,6 +703,93 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ async def create(
+ self,
+ project_id: str,
+ *,
+ name: str,
+ from_url: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AddChapterResponseModel:
+ """
+ Creates a new chapter either as blank or from a URL.
+
+ Parameters
+ ----------
+ project_id : str
+ The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+
+ name : str
+ The name of the chapter, used for identification only.
+
+ from_url : typing.Optional[str]
+ An optional URL from which we will extract content to initialize the project. If this is set, 'from_document' must be null. If neither 'from_url' nor 'from_document' is provided we will initialize the project as blank.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AddChapterResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.chapters.create(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+ name="name",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/projects/{jsonable_encoder(project_id)}/chapters/add",
+ method="POST",
+ json={
+ "name": name,
+ "from_url": from_url,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ AddChapterResponseModel,
+ construct_type(
+ type_=AddChapterResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
async def convert(
self, project_id: str, chapter_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> typing.Optional[typing.Any]:
@@ -822,6 +992,9 @@ async def main() -> None:
json={
"convert_to_mpeg": convert_to_mpeg,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
diff --git a/src/elevenlabs/client.py b/src/elevenlabs/client.py
index a0dac65d..75fd3abc 100644
--- a/src/elevenlabs/client.py
+++ b/src/elevenlabs/client.py
@@ -19,7 +19,7 @@
DEFAULT_VOICE = Voice(
voice_id="EXAVITQu4vr4xnSDxMaL",
- name="Rachel",
+ name="Sarah",
settings=VoiceSettings(
stability=0.71, similarity_boost=0.5, style=0.0, use_speaker_boost=True
),
@@ -68,7 +68,7 @@ def __init__(
*,
base_url: typing.Optional[str] = None,
environment: ElevenLabsEnvironment = ElevenLabsEnvironment.PRODUCTION,
- api_key: typing.Optional[str] = os.getenv("ELEVEN_API_KEY"),
+ api_key: typing.Optional[str] = os.getenv("ELEVENLABS_API_KEY") or os.getenv("ELEVEN_API_KEY"),
timeout: typing.Optional[float] = 60,
httpx_client: typing.Optional[httpx.Client] = None
):
@@ -122,7 +122,7 @@ def generate(
text: Union[str, Iterator[str]],
voice: Union[VoiceId, VoiceName, Voice] = DEFAULT_VOICE,
voice_settings: typing.Optional[VoiceSettings] = DEFAULT_VOICE.settings,
- model: Union[ModelId, Model] = "eleven_monolingual_v1",
+ model: Union[ModelId, Model] = "eleven_multilingual_v2",
optimize_streaming_latency: typing.Optional[int] = 0,
stream: bool = False,
output_format: Optional[OutputFormat] = "mp3_44100_128",
@@ -134,7 +134,7 @@ def generate(
"""
- text: Union[str, Iterator[str]]. The string or stream of strings that will get converted into speech.
- - voice: str. A voice id, name, or voice response. Defaults to the Rachel voice.
+ - voice: str. A voice id, name, or voice response. Defaults to the Sarah voice.
- model: typing.Optional[str]. Identifier of the model that will be used, you can query them using GET /v1/models.
The model needs to have support for text to speech, you can check this using the
@@ -302,7 +302,7 @@ async def generate(
text: str,
voice: Union[VoiceId, VoiceName, Voice] = DEFAULT_VOICE,
voice_settings: typing.Optional[VoiceSettings] = DEFAULT_VOICE.settings,
- model: Union[ModelId, Model] = "eleven_monolingual_v1",
+ model: Union[ModelId, Model] = "eleven_multilingual_v2",
optimize_streaming_latency: typing.Optional[int] = 0,
stream: bool = False,
output_format: Optional[OutputFormat] = "mp3_44100_128",
diff --git a/src/elevenlabs/conversational_ai/__init__.py b/src/elevenlabs/conversational_ai/__init__.py
new file mode 100644
index 00000000..a05e4b59
--- /dev/null
+++ b/src/elevenlabs/conversational_ai/__init__.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .types import (
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem,
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New,
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored,
+)
+
+__all__ = [
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem",
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New",
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored",
+]
diff --git a/src/elevenlabs/conversational_ai/client.py b/src/elevenlabs/conversational_ai/client.py
new file mode 100644
index 00000000..d3686ba5
--- /dev/null
+++ b/src/elevenlabs/conversational_ai/client.py
@@ -0,0 +1,3216 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..core.client_wrapper import SyncClientWrapper
+from ..core.request_options import RequestOptions
+from ..types.conversation_signed_url_response_model import ConversationSignedUrlResponseModel
+from ..core.unchecked_base_model import construct_type
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.http_validation_error import HttpValidationError
+from json.decoder import JSONDecodeError
+from ..core.api_error import ApiError
+from ..types.conversational_config import ConversationalConfig
+from ..types.agent_platform_settings import AgentPlatformSettings
+from ..types.create_agent_response_model import CreateAgentResponseModel
+from ..core.serialization import convert_and_respect_annotation_metadata
+from ..types.get_agent_response_model import GetAgentResponseModel
+from ..core.jsonable_encoder import jsonable_encoder
+from .types.body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item import (
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem,
+)
+from ..types.get_agent_embed_response_model import GetAgentEmbedResponseModel
+from ..types.get_agent_link_response_model import GetAgentLinkResponseModel
+from .. import core
+from ..types.post_agent_avatar_response_model import PostAgentAvatarResponseModel
+from ..types.get_knowledge_base_reponse_model import GetKnowledgeBaseReponseModel
+from ..types.add_agent_secret_response_model import AddAgentSecretResponseModel
+from ..types.add_knowledge_base_response_model import AddKnowledgeBaseResponseModel
+from ..types.get_agents_page_response_model import GetAgentsPageResponseModel
+from ..types.evaluation_success_result import EvaluationSuccessResult
+from ..types.get_conversations_page_response_model import GetConversationsPageResponseModel
+from ..types.get_conversation_response_model import GetConversationResponseModel
+from ..types.user_feedback_score import UserFeedbackScore
+from ..types.create_phone_number_response_model import CreatePhoneNumberResponseModel
+from ..types.get_phone_number_response_model import GetPhoneNumberResponseModel
+from ..core.client_wrapper import AsyncClientWrapper
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class ConversationalAiClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def get_signed_url(
+ self, *, agent_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> ConversationSignedUrlResponseModel:
+ """
+ Get a signed url to start a conversation with an agent that requires authorization
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of the agent you're taking the action on.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ConversationSignedUrlResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_signed_url(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/convai/conversation/get_signed_url",
+ method="GET",
+ params={
+ "agent_id": agent_id,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ ConversationSignedUrlResponseModel,
+ construct_type(
+ type_=ConversationSignedUrlResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def create_agent(
+ self,
+ *,
+ conversation_config: ConversationalConfig,
+ platform_settings: typing.Optional[AgentPlatformSettings] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateAgentResponseModel:
+ """
+ Create an agent from a config object
+
+ Parameters
+ ----------
+ conversation_config : ConversationalConfig
+ Conversation configuration for an agent
+
+ platform_settings : typing.Optional[AgentPlatformSettings]
+ Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
+
+ name : typing.Optional[str]
+ A name to make the agent easier to find
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateAgentResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ConversationalConfig, ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.create_agent(
+ conversation_config=ConversationalConfig(),
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/convai/agents/create",
+ method="POST",
+ json={
+ "conversation_config": convert_and_respect_annotation_metadata(
+ object_=conversation_config, annotation=ConversationalConfig, direction="write"
+ ),
+ "platform_settings": convert_and_respect_annotation_metadata(
+ object_=platform_settings, annotation=AgentPlatformSettings, direction="write"
+ ),
+ "name": name,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ CreateAgentResponseModel,
+ construct_type(
+ type_=CreateAgentResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_agent(
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetAgentResponseModel:
+ """
+ Retrieve config for an agent
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentResponseModel,
+ construct_type(
+ type_=GetAgentResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_agent(
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.Dict[str, str]:
+ """
+ Delete an agent
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Dict[str, str]
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.delete_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Dict[str, str],
+ construct_type(
+ type_=typing.Dict[str, str], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_agent(
+ self,
+ agent_id: str,
+ *,
+ conversation_config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ platform_settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ secrets: typing.Optional[
+ typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem]
+ ] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetAgentResponseModel:
+ """
+ Patches an Agent settings
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ conversation_config : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Conversation configuration for an agent
+
+ platform_settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
+
+ secrets : typing.Optional[typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem]]
+ A list of secrets for the agent. Can be used to add new secrets or update and delete the existing ones
+
+ name : typing.Optional[str]
+ A name to make the agent easier to find
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.update_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}",
+ method="PATCH",
+ json={
+ "conversation_config": conversation_config,
+ "platform_settings": platform_settings,
+ "secrets": convert_and_respect_annotation_metadata(
+ object_=secrets,
+ annotation=typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem],
+ direction="write",
+ ),
+ "name": name,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentResponseModel,
+ construct_type(
+ type_=GetAgentResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_agent_widget(
+ self,
+ agent_id: str,
+ *,
+ conversation_signature: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetAgentEmbedResponseModel:
+ """
+ Retrieve the widget configuration for an agent
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ conversation_signature : typing.Optional[str]
+ An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentEmbedResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_agent_widget(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/widget",
+ method="GET",
+ params={
+ "conversation_signature": conversation_signature,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentEmbedResponseModel,
+ construct_type(
+ type_=GetAgentEmbedResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_agent_link(
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetAgentLinkResponseModel:
+ """
+ Get the current link used to share the agent with others
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentLinkResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_agent_link(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/link",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentLinkResponseModel,
+ construct_type(
+ type_=GetAgentLinkResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_agent_avatar(
+ self, agent_id: str, *, avatar_file: core.File, request_options: typing.Optional[RequestOptions] = None
+ ) -> PostAgentAvatarResponseModel:
+ """
+ Sets the avatar for an agent displayed in the widget
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ avatar_file : core.File
+ See core.File for more documentation
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PostAgentAvatarResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.post_agent_avatar(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/avatar",
+ method="POST",
+ data={},
+ files={
+ "avatar_file": avatar_file,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ PostAgentAvatarResponseModel,
+ construct_type(
+ type_=PostAgentAvatarResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_agent_knowledge_base_document_by_id(
+ self, agent_id: str, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetKnowledgeBaseReponseModel:
+ """
+ Get details about a specific documentation making up the agent's knowledge base
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ documentation_id : str
+ The id of a document from the agent's knowledge base. This is returned on document addition.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetKnowledgeBaseReponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_agent_knowledge_base_document_by_id(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ documentation_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/knowledge-base/{jsonable_encoder(documentation_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetKnowledgeBaseReponseModel,
+ construct_type(
+ type_=GetKnowledgeBaseReponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def add_agent_secret(
+ self, agent_id: str, *, name: str, secret_value: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AddAgentSecretResponseModel:
+ """
+ Uploads a file or reference a webpage for the agent to use as part of it's knowledge base
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ name : str
+ A name to help identify a particular agent secret
+
+ secret_value : str
+ A value to be encrypted and used by the agent
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AddAgentSecretResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.add_agent_secret(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ name="name",
+ secret_value="secret_value",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/add-secret",
+ method="POST",
+ json={
+ "name": name,
+ "secret_value": secret_value,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ AddAgentSecretResponseModel,
+ construct_type(
+ type_=AddAgentSecretResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def add_to_agent_knowledge_base(
+ self,
+ agent_id: str,
+ *,
+ url: typing.Optional[str] = OMIT,
+ file: typing.Optional[core.File] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AddKnowledgeBaseResponseModel:
+ """
+ Uploads a file or reference a webpage for the agent to use as part of it's knowledge base
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ url : typing.Optional[str]
+ URL to a page of documentation that the agent will have access to in order to interact with users.
+
+ file : typing.Optional[core.File]
+ See core.File for more documentation
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AddKnowledgeBaseResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.add_to_agent_knowledge_base(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/add-to-knowledge-base",
+ method="POST",
+ data={
+ "url": url,
+ },
+ files={
+ "file": file,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ AddKnowledgeBaseResponseModel,
+ construct_type(
+ type_=AddKnowledgeBaseResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_agents(
+ self,
+ *,
+ cursor: typing.Optional[str] = None,
+ page_size: typing.Optional[int] = None,
+ search: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetAgentsPageResponseModel:
+ """
+ Returns a page of your agents and their metadata.
+
+ Parameters
+ ----------
+ cursor : typing.Optional[str]
+ Used for fetching next page. Cursor is returned in the response.
+
+ page_size : typing.Optional[int]
+ How many Agents to return at maximum. Can not exceed 100, defaults to 30.
+
+ search : typing.Optional[str]
+ Search by agents name.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentsPageResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_agents()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/convai/agents",
+ method="GET",
+ params={
+ "cursor": cursor,
+ "page_size": page_size,
+ "search": search,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentsPageResponseModel,
+ construct_type(
+ type_=GetAgentsPageResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_conversations(
+ self,
+ *,
+ cursor: typing.Optional[str] = None,
+ agent_id: typing.Optional[str] = None,
+ call_successful: typing.Optional[EvaluationSuccessResult] = None,
+ page_size: typing.Optional[int] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetConversationsPageResponseModel:
+ """
+ Get all conversations of agents that user owns. With option to restrict to a specific agent.
+
+ Parameters
+ ----------
+ cursor : typing.Optional[str]
+ Used for fetching next page. Cursor is returned in the response.
+
+ agent_id : typing.Optional[str]
+ The id of the agent you're taking the action on.
+
+ call_successful : typing.Optional[EvaluationSuccessResult]
+ The result of the success evaluation
+
+ page_size : typing.Optional[int]
+ How many conversations to return at maximum. Can not exceed 100, defaults to 30.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetConversationsPageResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_conversations(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/convai/conversations",
+ method="GET",
+ params={
+ "cursor": cursor,
+ "agent_id": agent_id,
+ "call_successful": call_successful,
+ "page_size": page_size,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetConversationsPageResponseModel,
+ construct_type(
+ type_=GetConversationsPageResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_conversation(
+ self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetConversationResponseModel:
+ """
+ Get the details of a particular conversation
+
+ Parameters
+ ----------
+ conversation_id : str
+ The id of the conversation you're taking the action on.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetConversationResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_conversation(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/conversations/{jsonable_encoder(conversation_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetConversationResponseModel,
+ construct_type(
+ type_=GetConversationResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_conversation(
+ self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.Optional[typing.Any]:
+ """
+ Delete a particular conversation
+
+ Parameters
+ ----------
+ conversation_id : str
+ The id of the conversation you're taking the action on.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Optional[typing.Any]
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.delete_conversation(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/conversations/{jsonable_encoder(conversation_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Optional[typing.Any],
+ construct_type(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_conversation_audio(
+ self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Get the audio recording of a particular conversation
+
+ Parameters
+ ----------
+ conversation_id : str
+ The id of the conversation you're taking the action on.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_conversation_audio(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/conversations/{jsonable_encoder(conversation_id)}/audio",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_conversation_feedback(
+ self,
+ conversation_id: str,
+ *,
+ feedback: UserFeedbackScore,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Optional[typing.Any]:
+ """
+ Send the feedback for the given conversation
+
+ Parameters
+ ----------
+ conversation_id : str
+ The id of the conversation you're taking the action on.
+
+ feedback : UserFeedbackScore
+ Either 'like' or 'dislike' to indicate the feedback for the conversation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Optional[typing.Any]
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.post_conversation_feedback(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+ feedback="like",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/conversations/{jsonable_encoder(conversation_id)}/feedback",
+ method="POST",
+ json={
+ "feedback": feedback,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Optional[typing.Any],
+ construct_type(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def create_phone_number(
+ self,
+ *,
+ phone_number: str,
+ label: str,
+ sid: str,
+ token: str,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreatePhoneNumberResponseModel:
+ """
+ Import Phone Number from Twilio configuration
+
+ Parameters
+ ----------
+ phone_number : str
+ Phone number
+
+ label : str
+ Label for the phone number
+
+ sid : str
+ Twilio Account SID
+
+ token : str
+ Twilio Token
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreatePhoneNumberResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.create_phone_number(
+ phone_number="phone_number",
+ label="label",
+ sid="sid",
+ token="token",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/convai/phone-numbers/create",
+ method="POST",
+ json={
+ "phone_number": phone_number,
+ "label": label,
+ "sid": sid,
+ "token": token,
+ "provider": "twilio",
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ CreatePhoneNumberResponseModel,
+ construct_type(
+ type_=CreatePhoneNumberResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_phone_number(
+ self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetPhoneNumberResponseModel:
+ """
+ Retrieve Phone Number details by ID
+
+ Parameters
+ ----------
+ phone_number_id : str
+ The id of an agent. This is returned on agent creation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetPhoneNumberResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_phone_number(
+ phone_number_id="TeaqRRdTcIfIu2i7BYfT",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetPhoneNumberResponseModel,
+ construct_type(
+ type_=GetPhoneNumberResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_phone_number(
+ self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.Optional[typing.Any]:
+ """
+ Delete Phone Number by ID
+
+ Parameters
+ ----------
+ phone_number_id : str
+ The id of an agent. This is returned on agent creation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Optional[typing.Any]
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.delete_phone_number(
+ phone_number_id="TeaqRRdTcIfIu2i7BYfT",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Optional[typing.Any],
+ construct_type(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_phone_number(
+ self,
+ phone_number_id: str,
+ *,
+ agent_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetPhoneNumberResponseModel:
+ """
+ Update Phone Number details by ID
+
+ Parameters
+ ----------
+ phone_number_id : str
+ The id of an agent. This is returned on agent creation.
+
+ agent_id : typing.Optional[str]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetPhoneNumberResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.update_phone_number(
+ phone_number_id="TeaqRRdTcIfIu2i7BYfT",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}",
+ method="PATCH",
+ json={
+ "agent_id": agent_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetPhoneNumberResponseModel,
+ construct_type(
+ type_=GetPhoneNumberResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_phone_numbers(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[GetPhoneNumberResponseModel]:
+ """
+ Retrieve all Phone Numbers
+
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[GetPhoneNumberResponseModel]
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_phone_numbers()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/convai/phone-numbers/",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.List[GetPhoneNumberResponseModel],
+ construct_type(
+ type_=typing.List[GetPhoneNumberResponseModel], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncConversationalAiClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        # Keep a reference to the shared async client wrapper; every endpoint
        # method issues its HTTP requests through this wrapper's httpx client.
        self._client_wrapper = client_wrapper
+
+ async def get_signed_url(
+ self, *, agent_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> ConversationSignedUrlResponseModel:
+ """
+ Get a signed url to start a conversation with an agent with an agent that requires authorization
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of the agent you're taking the action on.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ConversationSignedUrlResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_signed_url(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/convai/conversation/get_signed_url",
+ method="GET",
+ params={
+ "agent_id": agent_id,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ ConversationSignedUrlResponseModel,
+ construct_type(
+ type_=ConversationSignedUrlResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_agent(
+ self,
+ *,
+ conversation_config: ConversationalConfig,
+ platform_settings: typing.Optional[AgentPlatformSettings] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateAgentResponseModel:
+ """
+ Create an agent from a config object
+
+ Parameters
+ ----------
+ conversation_config : ConversationalConfig
+ Conversation configuration for an agent
+
+ platform_settings : typing.Optional[AgentPlatformSettings]
+ Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
+
+ name : typing.Optional[str]
+ A name to make the agent easier to find
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateAgentResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs, ConversationalConfig
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.create_agent(
+ conversation_config=ConversationalConfig(),
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/convai/agents/create",
+ method="POST",
+ json={
+ "conversation_config": convert_and_respect_annotation_metadata(
+ object_=conversation_config, annotation=ConversationalConfig, direction="write"
+ ),
+ "platform_settings": convert_and_respect_annotation_metadata(
+ object_=platform_settings, annotation=AgentPlatformSettings, direction="write"
+ ),
+ "name": name,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ CreateAgentResponseModel,
+ construct_type(
+ type_=CreateAgentResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_agent(
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetAgentResponseModel:
+ """
+ Retrieve config for an agent
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentResponseModel,
+ construct_type(
+ type_=GetAgentResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_agent(
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.Dict[str, str]:
+ """
+ Delete an agent
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Dict[str, str]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.delete_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Dict[str, str],
+ construct_type(
+ type_=typing.Dict[str, str], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_agent(
+ self,
+ agent_id: str,
+ *,
+ conversation_config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ platform_settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ secrets: typing.Optional[
+ typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem]
+ ] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetAgentResponseModel:
+ """
+ Patches an Agent settings
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ conversation_config : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Conversation configuration for an agent
+
+ platform_settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
+
+ secrets : typing.Optional[typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem]]
+ A list of secrets for the agent. Can be used to add new secrets or update and delete the existing ones
+
+ name : typing.Optional[str]
+ A name to make the agent easier to find
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.update_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}",
+ method="PATCH",
+ json={
+ "conversation_config": conversation_config,
+ "platform_settings": platform_settings,
+ "secrets": convert_and_respect_annotation_metadata(
+ object_=secrets,
+ annotation=typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem],
+ direction="write",
+ ),
+ "name": name,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentResponseModel,
+ construct_type(
+ type_=GetAgentResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_agent_widget(
+ self,
+ agent_id: str,
+ *,
+ conversation_signature: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetAgentEmbedResponseModel:
+ """
+ Retrieve the widget configuration for an agent
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ conversation_signature : typing.Optional[str]
+ An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentEmbedResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_agent_widget(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/widget",
+ method="GET",
+ params={
+ "conversation_signature": conversation_signature,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentEmbedResponseModel,
+ construct_type(
+ type_=GetAgentEmbedResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_agent_link(
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetAgentLinkResponseModel:
+ """
+ Get the current link used to share the agent with others
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentLinkResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_agent_link(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/link",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentLinkResponseModel,
+ construct_type(
+ type_=GetAgentLinkResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def post_agent_avatar(
+ self, agent_id: str, *, avatar_file: core.File, request_options: typing.Optional[RequestOptions] = None
+ ) -> PostAgentAvatarResponseModel:
+ """
+ Sets the avatar for an agent displayed in the widget
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ avatar_file : core.File
+ See core.File for more documentation
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PostAgentAvatarResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.post_agent_avatar(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/avatar",
+ method="POST",
+ data={},
+ files={
+ "avatar_file": avatar_file,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ PostAgentAvatarResponseModel,
+ construct_type(
+ type_=PostAgentAvatarResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_agent_knowledge_base_document_by_id(
+ self, agent_id: str, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetKnowledgeBaseReponseModel:
+ """
+ Get details about a specific documentation making up the agent's knowledge base
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ documentation_id : str
+ The id of a document from the agent's knowledge base. This is returned on document addition.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetKnowledgeBaseReponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_agent_knowledge_base_document_by_id(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ documentation_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/knowledge-base/{jsonable_encoder(documentation_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetKnowledgeBaseReponseModel,
+ construct_type(
+ type_=GetKnowledgeBaseReponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def add_agent_secret(
+ self, agent_id: str, *, name: str, secret_value: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AddAgentSecretResponseModel:
+ """
+        Creates a new secret for the agent
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ name : str
+ A name to help identify a particular agent secret
+
+ secret_value : str
+ A value to be encrypted and used by the agent
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AddAgentSecretResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.add_agent_secret(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ name="name",
+ secret_value="secret_value",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/add-secret",
+ method="POST",
+ json={
+ "name": name,
+ "secret_value": secret_value,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ AddAgentSecretResponseModel,
+ construct_type(
+ type_=AddAgentSecretResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def add_to_agent_knowledge_base(
+ self,
+ agent_id: str,
+ *,
+ url: typing.Optional[str] = OMIT,
+ file: typing.Optional[core.File] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AddKnowledgeBaseResponseModel:
+ """
+        Uploads a file or reference a webpage for the agent to use as part of its knowledge base
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ url : typing.Optional[str]
+ URL to a page of documentation that the agent will have access to in order to interact with users.
+
+ file : typing.Optional[core.File]
+ See core.File for more documentation
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AddKnowledgeBaseResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.add_to_agent_knowledge_base(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/add-to-knowledge-base",
+ method="POST",
+ data={
+ "url": url,
+ },
+ files={
+ "file": file,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ AddKnowledgeBaseResponseModel,
+ construct_type(
+ type_=AddKnowledgeBaseResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_agents(
+ self,
+ *,
+ cursor: typing.Optional[str] = None,
+ page_size: typing.Optional[int] = None,
+ search: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetAgentsPageResponseModel:
+ """
+ Returns a page of your agents and their metadata.
+
+ Parameters
+ ----------
+ cursor : typing.Optional[str]
+ Used for fetching next page. Cursor is returned in the response.
+
+ page_size : typing.Optional[int]
+ How many Agents to return at maximum. Can not exceed 100, defaults to 30.
+
+ search : typing.Optional[str]
+            Search by agent name.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentsPageResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_agents()
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/convai/agents",
+ method="GET",
+ params={
+ "cursor": cursor,
+ "page_size": page_size,
+ "search": search,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentsPageResponseModel,
+ construct_type(
+ type_=GetAgentsPageResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_conversations(
+ self,
+ *,
+ cursor: typing.Optional[str] = None,
+ agent_id: typing.Optional[str] = None,
+ call_successful: typing.Optional[EvaluationSuccessResult] = None,
+ page_size: typing.Optional[int] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetConversationsPageResponseModel:
+ """
+ Get all conversations of agents that user owns. With option to restrict to a specific agent.
+
+ Parameters
+ ----------
+ cursor : typing.Optional[str]
+ Used for fetching next page. Cursor is returned in the response.
+
+ agent_id : typing.Optional[str]
+ The id of the agent you're taking the action on.
+
+ call_successful : typing.Optional[EvaluationSuccessResult]
+ The result of the success evaluation
+
+ page_size : typing.Optional[int]
+ How many conversations to return at maximum. Can not exceed 100, defaults to 30.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetConversationsPageResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_conversations(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/convai/conversations",
+ method="GET",
+ params={
+ "cursor": cursor,
+ "agent_id": agent_id,
+ "call_successful": call_successful,
+ "page_size": page_size,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetConversationsPageResponseModel,
+ construct_type(
+ type_=GetConversationsPageResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_conversation(
+ self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetConversationResponseModel:
+ """
+ Get the details of a particular conversation
+
+ Parameters
+ ----------
+ conversation_id : str
+ The id of the conversation you're taking the action on.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetConversationResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_conversation(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/conversations/{jsonable_encoder(conversation_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetConversationResponseModel,
+ construct_type(
+ type_=GetConversationResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_conversation(
+ self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.Optional[typing.Any]:
+ """
+ Delete a particular conversation
+
+ Parameters
+ ----------
+ conversation_id : str
+ The id of the conversation you're taking the action on.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Optional[typing.Any]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.delete_conversation(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/conversations/{jsonable_encoder(conversation_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Optional[typing.Any],
+ construct_type(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_conversation_audio(
+ self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Get the audio recording of a particular conversation
+
+ Parameters
+ ----------
+ conversation_id : str
+ The id of the conversation you're taking the action on.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_conversation_audio(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/conversations/{jsonable_encoder(conversation_id)}/audio",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def post_conversation_feedback(
+ self,
+ conversation_id: str,
+ *,
+ feedback: UserFeedbackScore,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Optional[typing.Any]:
+ """
+ Send the feedback for the given conversation
+
+ Parameters
+ ----------
+ conversation_id : str
+ The id of the conversation you're taking the action on.
+
+ feedback : UserFeedbackScore
+ Either 'like' or 'dislike' to indicate the feedback for the conversation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Optional[typing.Any]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.post_conversation_feedback(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+ feedback="like",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/conversations/{jsonable_encoder(conversation_id)}/feedback",
+ method="POST",
+ json={
+ "feedback": feedback,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Optional[typing.Any],
+ construct_type(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_phone_number(
+ self,
+ *,
+ phone_number: str,
+ label: str,
+ sid: str,
+ token: str,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreatePhoneNumberResponseModel:
+ """
+ Import Phone Number from Twilio configuration
+
+ Parameters
+ ----------
+ phone_number : str
+ Phone number
+
+ label : str
+ Label for the phone number
+
+ sid : str
+ Twilio Account SID
+
+ token : str
+ Twilio Token
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreatePhoneNumberResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.create_phone_number(
+ phone_number="phone_number",
+ label="label",
+ sid="sid",
+ token="token",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/convai/phone-numbers/create",
+ method="POST",
+ json={
+ "phone_number": phone_number,
+ "label": label,
+ "sid": sid,
+ "token": token,
+ "provider": "twilio",
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ CreatePhoneNumberResponseModel,
+ construct_type(
+ type_=CreatePhoneNumberResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_phone_number(
+ self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetPhoneNumberResponseModel:
+ """
+ Retrieve Phone Number details by ID
+
+ Parameters
+ ----------
+ phone_number_id : str
+ The id of an agent. This is returned on agent creation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetPhoneNumberResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_phone_number(
+ phone_number_id="TeaqRRdTcIfIu2i7BYfT",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetPhoneNumberResponseModel,
+ construct_type(
+ type_=GetPhoneNumberResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_phone_number(
+ self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.Optional[typing.Any]:
+ """
+ Delete Phone Number by ID
+
+ Parameters
+ ----------
+ phone_number_id : str
+ The id of an agent. This is returned on agent creation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Optional[typing.Any]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.delete_phone_number(
+ phone_number_id="TeaqRRdTcIfIu2i7BYfT",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Optional[typing.Any],
+ construct_type(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_phone_number(
+ self,
+ phone_number_id: str,
+ *,
+ agent_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetPhoneNumberResponseModel:
+ """
+ Update Phone Number details by ID
+
+ Parameters
+ ----------
+ phone_number_id : str
+ The id of an agent. This is returned on agent creation.
+
+ agent_id : typing.Optional[str]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetPhoneNumberResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.update_phone_number(
+ phone_number_id="TeaqRRdTcIfIu2i7BYfT",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}",
+ method="PATCH",
+ json={
+ "agent_id": agent_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetPhoneNumberResponseModel,
+ construct_type(
+ type_=GetPhoneNumberResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_phone_numbers(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[GetPhoneNumberResponseModel]:
+ """
+ Retrieve all Phone Numbers
+
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[GetPhoneNumberResponseModel]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_phone_numbers()
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/convai/phone-numbers/",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.List[GetPhoneNumberResponseModel],
+ construct_type(
+ type_=typing.List[GetPhoneNumberResponseModel], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/elevenlabs/conversational_ai/conversation.py b/src/elevenlabs/conversational_ai/conversation.py
new file mode 100644
index 00000000..5678e634
--- /dev/null
+++ b/src/elevenlabs/conversational_ai/conversation.py
@@ -0,0 +1,365 @@
+from abc import ABC, abstractmethod
+import base64
+import json
+import threading
+from typing import Callable, Optional, Awaitable, Union, Any
+import asyncio
+from concurrent.futures import ThreadPoolExecutor
+
+from websockets.sync.client import connect
+
+from ..base_client import BaseElevenLabs
+
+
class AudioInterface(ABC):
    """AudioInterface provides an abstraction for handling audio input and output."""

    @abstractmethod
    def start(self, input_callback: Callable[[bytes], None]):
        """Starts the audio interface.

        Called one time before the conversation starts.
        The `input_callback` should be called regularly with input audio chunks from
        the user. The audio should be in 16-bit PCM mono format at 16kHz. Recommended
        chunk size is 4000 samples (250 milliseconds).
        """
        pass

    @abstractmethod
    def stop(self):
        """Stops the audio interface.

        Called one time after the conversation ends. Should clean up any resources
        used by the audio interface and stop any audio streams. Do not call the
        `input_callback` from `start` after this method is called.
        """
        pass

    @abstractmethod
    def output(self, audio: bytes):
        """Output audio to the user.

        The `audio` input is in 16-bit PCM mono format at 16kHz. Implementations can
        choose to do additional buffering. This method should return quickly and not
        block the calling thread.
        """
        pass

    @abstractmethod
    def interrupt(self):
        """Interruption signal to stop any audio output.

        User has interrupted the agent and all previously buffered audio output should
        be stopped.
        """
        pass
+
+
class ClientTools:
    """Handles registration and execution of client-side tools that can be called by the agent.

    Supports both synchronous and asynchronous tools running in a dedicated event loop,
    ensuring non-blocking operation of the main conversation thread.
    """

    def __init__(self):
        # Maps tool name -> (handler, is_async). Guarded by `self.lock`.
        self.tools: dict[str, tuple[Union[Callable[[dict], Any], Callable[[dict], Awaitable[Any]]], bool]] = {}
        self.lock = threading.Lock()
        self._loop = None
        self._thread = None
        self._running = threading.Event()
        # Synchronous handlers run here so they never block the event loop.
        self.thread_pool = ThreadPoolExecutor()

    def start(self):
        """Start the event loop in a separate thread for handling async operations."""
        if self._running.is_set():
            return

        def run_event_loop():
            self._loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self._loop)
            self._running.set()
            try:
                self._loop.run_forever()
            finally:
                self._running.clear()
                self._loop.close()
                self._loop = None

        self._thread = threading.Thread(target=run_event_loop, daemon=True, name="ClientTools-EventLoop")
        self._thread.start()
        # Block until the loop thread has signalled readiness, so callers can
        # immediately schedule work after start() returns.
        self._running.wait()

    def stop(self):
        """Gracefully stop the event loop and clean up resources."""
        if self._loop and self._running.is_set():
            self._loop.call_soon_threadsafe(self._loop.stop)
            if self._thread is not None:
                self._thread.join()
        self.thread_pool.shutdown(wait=False)

    def register(
        self,
        tool_name: str,
        handler: Union[Callable[[dict], Any], Callable[[dict], Awaitable[Any]]],
        is_async: bool = False,
    ) -> None:
        """Register a new tool that can be called by the AI agent.

        Args:
            tool_name: Unique identifier for the tool
            handler: Function that implements the tool's logic
            is_async: Whether the handler is an async function

        Raises:
            ValueError: If the handler is not callable or the name is taken.
        """
        with self.lock:
            if not callable(handler):
                raise ValueError("Handler must be callable")
            if tool_name in self.tools:
                raise ValueError(f"Tool '{tool_name}' is already registered")
            self.tools[tool_name] = (handler, is_async)

    async def handle(self, tool_name: str, parameters: dict) -> Any:
        """Execute a registered tool with the given parameters.

        Returns the result of the tool execution.

        Raises:
            ValueError: If no tool is registered under `tool_name`.
        """
        with self.lock:
            if tool_name not in self.tools:
                raise ValueError(f"Tool '{tool_name}' is not registered")
            handler, is_async = self.tools[tool_name]

        if is_async:
            return await handler(parameters)
        else:
            # Fix: use get_running_loop() — get_event_loop() is deprecated inside
            # coroutines since Python 3.10 and could bind the wrong loop.
            return await asyncio.get_running_loop().run_in_executor(self.thread_pool, handler, parameters)

    def execute_tool(self, tool_name: str, parameters: dict, callback: Callable[[dict], None]):
        """Execute a tool and send its result via the provided callback.

        This method is non-blocking and handles both sync and async tools.
        The callback is invoked on the event-loop thread with a
        `client_tool_result` payload.

        Raises:
            RuntimeError: If `start()` has not been called (loop not running).
        """
        if not self._running.is_set():
            raise RuntimeError("ClientTools event loop is not running")

        async def _execute_and_callback():
            try:
                result = await self.handle(tool_name, parameters)
                response = {
                    "type": "client_tool_result",
                    "tool_call_id": parameters.get("tool_call_id"),
                    # Fix: preserve falsy-but-valid results (0, False, "", []);
                    # only substitute the default message when the tool returned None.
                    "result": result if result is not None else f"Client tool: {tool_name} called successfully.",
                    "is_error": False,
                }
            except Exception as e:
                response = {
                    "type": "client_tool_result",
                    "tool_call_id": parameters.get("tool_call_id"),
                    "result": str(e),
                    "is_error": True,
                }
            callback(response)

        asyncio.run_coroutine_threadsafe(_execute_and_callback(), self._loop)
+
+
class ConversationConfig:
    """Configuration options for the Conversation.

    Attributes:
        extra_body: Extra fields merged into the custom-LLM request body.
        conversation_config_override: Per-session overrides of the agent's
            conversation configuration.
    """

    def __init__(
        self,
        extra_body: Optional[dict] = None,
        conversation_config_override: Optional[dict] = None,
    ):
        # Falsy (None or empty) inputs are normalized to fresh empty dicts.
        self.extra_body = extra_body if extra_body else {}
        self.conversation_config_override = (
            conversation_config_override if conversation_config_override else {}
        )
+
+
class Conversation:
    """Live Conversational AI session over a websocket.

    Runs the network loop on a background thread; audio I/O is delegated to
    the supplied `AudioInterface` and client-side tools to `ClientTools`.
    """

    client: BaseElevenLabs
    agent_id: str
    requires_auth: bool
    config: ConversationConfig
    audio_interface: AudioInterface
    client_tools: Optional[ClientTools]
    callback_agent_response: Optional[Callable[[str], None]]
    callback_agent_response_correction: Optional[Callable[[str, str], None]]
    callback_user_transcript: Optional[Callable[[str], None]]
    callback_latency_measurement: Optional[Callable[[int], None]]

    # Background thread running `_run`; None until start_session().
    _thread: Optional[threading.Thread]
    # Set by end_session() to make the network loop exit.
    _should_stop: threading.Event
    # Assigned once from the conversation_initiation_metadata message.
    _conversation_id: Optional[str]
    # Highest interruption event id seen; older audio events are dropped.
    _last_interrupt_id: int

    def __init__(
        self,
        client: BaseElevenLabs,
        agent_id: str,
        *,
        requires_auth: bool,
        audio_interface: AudioInterface,
        config: Optional[ConversationConfig] = None,
        client_tools: Optional[ClientTools] = None,
        callback_agent_response: Optional[Callable[[str], None]] = None,
        callback_agent_response_correction: Optional[Callable[[str, str], None]] = None,
        callback_user_transcript: Optional[Callable[[str], None]] = None,
        callback_latency_measurement: Optional[Callable[[int], None]] = None,
    ):
        """Conversational AI session.

        BETA: This API is subject to change without regard to backwards compatibility.

        Args:
            client: The ElevenLabs client to use for the conversation.
            agent_id: The ID of the agent to converse with.
            requires_auth: Whether the agent requires authentication.
            audio_interface: The audio interface to use for input and output.
            client_tools: The client tools to use for the conversation.
            callback_agent_response: Callback for agent responses.
            callback_agent_response_correction: Callback for agent response corrections.
                First argument is the original response (previously given to
                callback_agent_response), second argument is the corrected response.
            callback_user_transcript: Callback for user transcripts.
            callback_latency_measurement: Callback for latency measurements (in milliseconds).
        """

        self.client = client
        self.agent_id = agent_id
        self.requires_auth = requires_auth
        self.audio_interface = audio_interface
        self.callback_agent_response = callback_agent_response
        self.config = config or ConversationConfig()
        self.client_tools = client_tools or ClientTools()
        self.callback_agent_response_correction = callback_agent_response_correction
        self.callback_user_transcript = callback_user_transcript
        self.callback_latency_measurement = callback_latency_measurement

        # The tools event loop is started eagerly so tool calls can be
        # dispatched as soon as the session begins.
        self.client_tools.start()

        self._thread = None
        self._should_stop = threading.Event()
        self._conversation_id = None
        self._last_interrupt_id = 0

    def start_session(self):
        """Starts the conversation session.

        Will run in background thread until `end_session` is called.
        """
        # Authenticated agents need a signed URL fetched via the REST API.
        ws_url = self._get_signed_url() if self.requires_auth else self._get_wss_url()
        self._thread = threading.Thread(target=self._run, args=(ws_url,))
        self._thread.start()

    def end_session(self):
        """Ends the conversation session and cleans up resources."""
        # Stop audio and tools first, then signal the network loop to exit.
        self.audio_interface.stop()
        self.client_tools.stop()
        self._should_stop.set()

    def wait_for_session_end(self) -> Optional[str]:
        """Waits for the conversation session to end.

        You must call `end_session` before calling this method, otherwise it will block.

        Returns the conversation ID, if available.
        """
        if not self._thread:
            raise RuntimeError("Session not started.")
        self._thread.join()
        return self._conversation_id

    def _run(self, ws_url: str):
        """Network loop: connect, announce config, then pump messages until stopped."""
        with connect(ws_url) as ws:
            ws.send(
                json.dumps(
                    {
                        "type": "conversation_initiation_client_data",
                        "custom_llm_extra_body": self.config.extra_body,
                        "conversation_config_override": self.config.conversation_config_override,
                    }
                )
            )

            def input_callback(audio):
                # Microphone chunks are base64-encoded and forwarded as-is.
                ws.send(
                    json.dumps(
                        {
                            "user_audio_chunk": base64.b64encode(audio).decode(),
                        }
                    )
                )

            self.audio_interface.start(input_callback)
            while not self._should_stop.is_set():
                try:
                    # Short recv timeout so the stop flag is noticed promptly.
                    # NOTE(review): relies on ws.recv raising TimeoutError on
                    # timeout (websockets sync API) — confirm on upgrade.
                    message = json.loads(ws.recv(timeout=0.5))
                    if self._should_stop.is_set():
                        return
                    self._handle_message(message, ws)
                except TimeoutError:
                    pass

    def _handle_message(self, message, ws):
        """Dispatch one server message to the matching callback/handler."""
        if message["type"] == "conversation_initiation_metadata":
            event = message["conversation_initiation_metadata_event"]
            # The server sends this exactly once per session.
            assert self._conversation_id is None
            self._conversation_id = event["conversation_id"]

        elif message["type"] == "audio":
            event = message["audio_event"]
            # Drop audio that predates the most recent interruption.
            if int(event["event_id"]) <= self._last_interrupt_id:
                return
            audio = base64.b64decode(event["audio_base_64"])
            self.audio_interface.output(audio)
        elif message["type"] == "agent_response":
            if self.callback_agent_response:
                event = message["agent_response_event"]
                self.callback_agent_response(event["agent_response"].strip())
        elif message["type"] == "agent_response_correction":
            if self.callback_agent_response_correction:
                event = message["agent_response_correction_event"]
                self.callback_agent_response_correction(
                    event["original_agent_response"].strip(), event["corrected_agent_response"].strip()
                )
        elif message["type"] == "user_transcript":
            if self.callback_user_transcript:
                event = message["user_transcription_event"]
                self.callback_user_transcript(event["user_transcript"].strip())
        elif message["type"] == "interruption":
            event = message["interruption_event"]
            self._last_interrupt_id = int(event["event_id"])
            self.audio_interface.interrupt()
        elif message["type"] == "ping":
            event = message["ping_event"]
            # Echo the event id back so the server can measure round-trips.
            ws.send(
                json.dumps(
                    {
                        "type": "pong",
                        "event_id": event["event_id"],
                    }
                )
            )
            # NOTE(review): assumes "ping_ms" is always present in ping_event
            # (missing key would raise KeyError) — confirm against protocol.
            if self.callback_latency_measurement and event["ping_ms"]:
                self.callback_latency_measurement(int(event["ping_ms"]))
        elif message["type"] == "client_tool_call":
            tool_call = message.get("client_tool_call", {})
            tool_name = tool_call.get("tool_name")
            parameters = {"tool_call_id": tool_call["tool_call_id"], **tool_call.get("parameters", {})}

            def send_response(response):
                # Tool results arrive from the tools event loop; skip the send
                # if the session has been shut down in the meantime.
                if not self._should_stop.is_set():
                    ws.send(json.dumps(response))

            self.client_tools.execute_tool(tool_name, parameters, send_response)
        else:
            pass  # Ignore all other message types.

    def _get_wss_url(https://github.com/self):
        """Build the unauthenticated websocket URL from the client's base URL."""
        base_url = self.client._client_wrapper._base_url
        # Replace http(s) with ws(s).
        base_ws_url = base_url.replace("http", "ws", 1)  # First occurrence only.
        return f"{base_ws_url}/v1/convai/conversation?agent_id={self.agent_id}"

    def _get_signed_url(https://github.com/self):
        """Fetch a short-lived signed websocket URL for agents requiring auth."""
        response = self.client.conversational_ai.get_signed_url(https://github.com/agent_id=self.agent_id)
        return response.signed_url
diff --git a/src/elevenlabs/conversational_ai/default_audio_interface.py b/src/elevenlabs/conversational_ai/default_audio_interface.py
new file mode 100644
index 00000000..b1660d85
--- /dev/null
+++ b/src/elevenlabs/conversational_ai/default_audio_interface.py
@@ -0,0 +1,83 @@
+from typing import Callable
+import queue
+import threading
+
+from .conversation import AudioInterface
+
+
class DefaultAudioInterface(AudioInterface):
    """Default `AudioInterface` backed by PyAudio: 16 kHz mono 16-bit PCM in and out."""

    INPUT_FRAMES_PER_BUFFER = 4000  # 250ms @ 16kHz
    OUTPUT_FRAMES_PER_BUFFER = 1000  # 62.5ms @ 16kHz

    def __init__(self):
        # PyAudio is an optional dependency; fail with a clear message if absent.
        try:
            import pyaudio
        except ImportError:
            raise ImportError("To use DefaultAudioInterface you must install pyaudio.")
        self.pyaudio = pyaudio

    def start(self, input_callback: Callable[[bytes], None]):
        """Open input/output streams and begin capturing and playing audio."""
        # Audio input is using callbacks from pyaudio which we simply pass through.
        self.input_callback = input_callback

        # Audio output is buffered so we can handle interruptions.
        # Start a separate thread to handle writing to the output stream.
        self.output_queue: queue.Queue[bytes] = queue.Queue()
        self.should_stop = threading.Event()
        self.output_thread = threading.Thread(target=self._output_thread)

        self.p = self.pyaudio.PyAudio()
        self.in_stream = self.p.open(
            format=self.pyaudio.paInt16,
            channels=1,
            rate=16000,
            input=True,
            stream_callback=self._in_callback,
            frames_per_buffer=self.INPUT_FRAMES_PER_BUFFER,
            start=True,
        )
        self.out_stream = self.p.open(
            format=self.pyaudio.paInt16,
            channels=1,
            rate=16000,
            output=True,
            frames_per_buffer=self.OUTPUT_FRAMES_PER_BUFFER,
            start=True,
        )

        self.output_thread.start()

    def stop(self):
        """Stop the output thread, close both streams, and release PyAudio."""
        self.should_stop.set()
        self.output_thread.join()
        self.in_stream.stop_stream()
        self.in_stream.close()
        self.out_stream.close()
        self.p.terminate()

    def output(self, audio: bytes):
        # Non-blocking: the dedicated output thread drains the queue.
        self.output_queue.put(audio)

    def interrupt(self):
        # Clear the output queue to stop any audio that is currently playing.
        # Note: We can't atomically clear the whole queue, but we are doing
        # it from the message handling thread so no new audio will be added
        # while we are clearing.
        try:
            while True:
                _ = self.output_queue.get(block=False)
        except queue.Empty:
            pass

    def _output_thread(self):
        # Drain the queue with a short timeout so `should_stop` is polled
        # roughly every 250ms even when no audio is arriving.
        while not self.should_stop.is_set():
            try:
                audio = self.output_queue.get(timeout=0.25)
                self.out_stream.write(audio)
            except queue.Empty:
                pass

    def _in_callback(self, in_data, frame_count, time_info, status):
        # NOTE(review): runs on PyAudio's capture thread — `input_callback`
        # must be thread-safe; confirm with the consumer.
        if self.input_callback:
            self.input_callback(in_data)
        return (None, self.pyaudio.paContinue)
diff --git a/src/elevenlabs/conversational_ai/types/__init__.py b/src/elevenlabs/conversational_ai/types/__init__.py
new file mode 100644
index 00000000..3d467b3a
--- /dev/null
+++ b/src/elevenlabs/conversational_ai/types/__init__.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item import (
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem,
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New,
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored,
+)
+
+__all__ = [
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem",
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New",
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored",
+]
diff --git a/src/elevenlabs/conversational_ai/types/body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item.py b/src/elevenlabs/conversational_ai/types/body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item.py
new file mode 100644
index 00000000..ffcbbd74
--- /dev/null
+++ b/src/elevenlabs/conversational_ai/types/body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ...core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ...core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+import typing_extensions
+from ...core.unchecked_base_model import UnionMetadata
+
+
class BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New(UncheckedBaseModel):
    """Secret variant that creates a new secret inline (discriminator: type == "new")."""

    type: typing.Literal["new"] = "new"
    name: str
    value: str

    if IS_PYDANTIC_V2:
        # Pydantic v2 config: immutable model that tolerates unknown fields.
        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
    else:

        class Config:
            frozen = True
            smart_union = True
            extra = pydantic.Extra.allow
+
+
class BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored(UncheckedBaseModel):
    """Secret variant that references an existing stored secret (discriminator: type == "stored")."""

    type: typing.Literal["stored"] = "stored"
    secret_id: str
    name: str

    if IS_PYDANTIC_V2:
        # Pydantic v2 config: immutable model that tolerates unknown fields.
        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
    else:

        class Config:
            frozen = True
            smart_union = True
            extra = pydantic.Extra.allow
+
+
# Discriminated union over the `type` field: "new" creates a secret inline,
# "stored" references an existing secret by id.
BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem = typing_extensions.Annotated[
    typing.Union[
        BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New,
        BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored,
    ],
    UnionMetadata(discriminant="type"),
]
diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py
index 6b0813a8..2a164bdb 100644
--- a/src/elevenlabs/core/client_wrapper.py
+++ b/src/elevenlabs/core/client_wrapper.py
@@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "elevenlabs",
- "X-Fern-SDK-Version": "1.9.0",
+ "X-Fern-SDK-Version": "1.50.3",
}
if self._api_key is not None:
headers["xi-api-key"] = self._api_key
@@ -41,9 +41,9 @@ def __init__(
super().__init__(api_key=api_key, base_url=base_url, timeout=timeout)
self.httpx_client = HttpClient(
httpx_client=httpx_client,
- base_headers=self.get_headers(),
- base_timeout=self.get_timeout(),
- base_url=self.get_base_url(),
+ base_headers=self.get_headers,
+ base_timeout=self.get_timeout,
+ base_url=self.get_base_url,
)
@@ -59,7 +59,7 @@ def __init__(
super().__init__(api_key=api_key, base_url=base_url, timeout=timeout)
self.httpx_client = AsyncHttpClient(
httpx_client=httpx_client,
- base_headers=self.get_headers(),
- base_timeout=self.get_timeout(),
- base_url=self.get_base_url(),
+ base_headers=self.get_headers,
+ base_timeout=self.get_timeout,
+ base_url=self.get_base_url,
)
diff --git a/src/elevenlabs/core/file.py b/src/elevenlabs/core/file.py
index b4cbba30..44b0d27c 100644
--- a/src/elevenlabs/core/file.py
+++ b/src/elevenlabs/core/file.py
@@ -43,20 +43,25 @@ def convert_file_dict_to_httpx_tuples(
return httpx_tuples
-def with_content_type(*, file: File, content_type: str) -> File:
- """ """
+def with_content_type(*, file: File, default_content_type: str) -> File:
+ """
+ This function resolves to the file's content type, if provided, and defaults
+ to the default_content_type value if not.
+ """
if isinstance(file, tuple):
if len(file) == 2:
filename, content = cast(Tuple[Optional[str], FileContent], file) # type: ignore
- return (filename, content, content_type)
+ return (filename, content, default_content_type)
elif len(file) == 3:
- filename, content, _ = cast(Tuple[Optional[str], FileContent, Optional[str]], file) # type: ignore
- return (filename, content, content_type)
+ filename, content, file_content_type = cast(Tuple[Optional[str], FileContent, Optional[str]], file) # type: ignore
+ out_content_type = file_content_type or default_content_type
+ return (filename, content, out_content_type)
elif len(file) == 4:
- filename, content, _, headers = cast( # type: ignore
+ filename, content, file_content_type, headers = cast( # type: ignore
Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]], file
)
- return (filename, content, content_type, headers)
+ out_content_type = file_content_type or default_content_type
+ return (filename, content, out_content_type, headers)
else:
raise ValueError(f"Unexpected tuple length: {len(file)}")
- return (None, file, content_type)
+ return (None, file, default_content_type)
diff --git a/src/elevenlabs/core/http_client.py b/src/elevenlabs/core/http_client.py
index b07401b5..1a1a1311 100644
--- a/src/elevenlabs/core/http_client.py
+++ b/src/elevenlabs/core/http_client.py
@@ -152,9 +152,9 @@ def __init__(
self,
*,
httpx_client: httpx.Client,
- base_timeout: typing.Optional[float],
- base_headers: typing.Dict[str, str],
- base_url: typing.Optional[str] = None,
+ base_timeout: typing.Callable[[], typing.Optional[float]],
+ base_headers: typing.Callable[[], typing.Dict[str, str]],
+ base_url: typing.Optional[typing.Callable[[], str]] = None,
):
self.base_url = base_url
self.base_timeout = base_timeout
@@ -162,7 +162,10 @@ def __init__(
self.httpx_client = httpx_client
def get_base_url(https://github.com/self,%20maybe_base_url:%20typing.Optional[str]) -> str:
- base_url = self.base_url if maybe_base_url is None else maybe_base_url
+ base_url = maybe_base_url
+ if self.base_url is not None and base_url is None:
+ base_url = self.base_url()
+
if base_url is None:
raise ValueError("A base_url is required to make this request, please provide one and try again.")
return base_url
@@ -187,7 +190,7 @@ def request(
timeout = (
request_options.get("timeout_in_seconds")
if request_options is not None and request_options.get("timeout_in_seconds") is not None
- else self.base_timeout
+ else self.base_timeout()
)
json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)
@@ -198,7 +201,7 @@ def request(
headers=jsonable_encoder(
remove_none_from_dict(
{
- **self.base_headers,
+ **self.base_headers(),
**(headers if headers is not None else {}),
**(request_options.get("additional_headers", {}) or {} if request_options is not None else {}),
}
@@ -224,7 +227,11 @@ def request(
json=json_body,
data=data_body,
content=content,
- files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None,
+ files=(
+ convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit))
+ if (files is not None and files is not omit)
+ else None
+ ),
timeout=timeout,
)
@@ -269,7 +276,7 @@ def stream(
timeout = (
request_options.get("timeout_in_seconds")
if request_options is not None and request_options.get("timeout_in_seconds") is not None
- else self.base_timeout
+ else self.base_timeout()
)
json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)
@@ -280,7 +287,7 @@ def stream(
headers=jsonable_encoder(
remove_none_from_dict(
{
- **self.base_headers,
+ **self.base_headers(),
**(headers if headers is not None else {}),
**(request_options.get("additional_headers", {}) if request_options is not None else {}),
}
@@ -306,7 +313,11 @@ def stream(
json=json_body,
data=data_body,
content=content,
- files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None,
+ files=(
+ convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit))
+ if (files is not None and files is not omit)
+ else None
+ ),
timeout=timeout,
) as stream:
yield stream
@@ -317,9 +328,9 @@ def __init__(
self,
*,
httpx_client: httpx.AsyncClient,
- base_timeout: typing.Optional[float],
- base_headers: typing.Dict[str, str],
- base_url: typing.Optional[str] = None,
+ base_timeout: typing.Callable[[], typing.Optional[float]],
+ base_headers: typing.Callable[[], typing.Dict[str, str]],
+ base_url: typing.Optional[typing.Callable[[], str]] = None,
):
self.base_url = base_url
self.base_timeout = base_timeout
@@ -327,7 +338,10 @@ def __init__(
self.httpx_client = httpx_client
def get_base_url(https://github.com/self,%20maybe_base_url:%20typing.Optional[str]) -> str:
- base_url = self.base_url if maybe_base_url is None else maybe_base_url
+ base_url = maybe_base_url
+ if self.base_url is not None and base_url is None:
+ base_url = self.base_url()
+
if base_url is None:
raise ValueError("A base_url is required to make this request, please provide one and try again.")
return base_url
@@ -352,7 +366,7 @@ async def request(
timeout = (
request_options.get("timeout_in_seconds")
if request_options is not None and request_options.get("timeout_in_seconds") is not None
- else self.base_timeout
+ else self.base_timeout()
)
json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)
@@ -364,7 +378,7 @@ async def request(
headers=jsonable_encoder(
remove_none_from_dict(
{
- **self.base_headers,
+ **self.base_headers(),
**(headers if headers is not None else {}),
**(request_options.get("additional_headers", {}) or {} if request_options is not None else {}),
}
@@ -390,7 +404,11 @@ async def request(
json=json_body,
data=data_body,
content=content,
- files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None,
+ files=(
+ convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit))
+ if files is not None
+ else None
+ ),
timeout=timeout,
)
@@ -434,7 +452,7 @@ async def stream(
timeout = (
request_options.get("timeout_in_seconds")
if request_options is not None and request_options.get("timeout_in_seconds") is not None
- else self.base_timeout
+ else self.base_timeout()
)
json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)
@@ -445,7 +463,7 @@ async def stream(
headers=jsonable_encoder(
remove_none_from_dict(
{
- **self.base_headers,
+ **self.base_headers(),
**(headers if headers is not None else {}),
**(request_options.get("additional_headers", {}) if request_options is not None else {}),
}
@@ -471,7 +489,11 @@ async def stream(
json=json_body,
data=data_body,
content=content,
- files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None,
+ files=(
+ convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit))
+ if files is not None
+ else None
+ ),
timeout=timeout,
) as stream:
yield stream
diff --git a/src/elevenlabs/core/pydantic_utilities.py b/src/elevenlabs/core/pydantic_utilities.py
index c14b4828..ee8f0e41 100644
--- a/src/elevenlabs/core/pydantic_utilities.py
+++ b/src/elevenlabs/core/pydantic_utilities.py
@@ -152,7 +152,7 @@ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
)
else:
- _fields_set = self.__fields_set__
+ _fields_set = self.__fields_set__.copy()
fields = _get_model_fields(self.__class__)
for name, field in fields.items():
@@ -162,9 +162,12 @@ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
# If the default values are non-null act like they've been set
# This effectively allows exclude_unset to work like exclude_none where
# the latter passes through intentionally set none values.
- if default != None:
+ if default is not None or ("exclude_unset" in kwargs and not kwargs["exclude_unset"]):
_fields_set.add(name)
+ if default is not None:
+ self.__fields_set__.add(name)
+
kwargs_with_defaults_exclude_unset_include_fields: typing.Any = {
"by_alias": True,
"exclude_unset": True,
@@ -177,13 +180,33 @@ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
return convert_and_respect_annotation_metadata(object_=dict_dump, annotation=self.__class__, direction="write")
+def _union_list_of_pydantic_dicts(
+ source: typing.List[typing.Any], destination: typing.List[typing.Any]
+) -> typing.List[typing.Any]:
+ converted_list: typing.List[typing.Any] = []
+ for i, item in enumerate(source):
+ destination_value = destination[i] # type: ignore
+ if isinstance(item, dict):
+ converted_list.append(deep_union_pydantic_dicts(item, destination_value))
+ elif isinstance(item, list):
+ converted_list.append(_union_list_of_pydantic_dicts(item, destination_value))
+ else:
+ converted_list.append(item)
+ return converted_list
+
+
def deep_union_pydantic_dicts(
source: typing.Dict[str, typing.Any], destination: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
for key, value in source.items():
+ node = destination.setdefault(key, {})
if isinstance(value, dict):
- node = destination.setdefault(key, {})
deep_union_pydantic_dicts(value, node)
+ # Note: we do not do this same processing for sets given we do not have sets of models
+ # and given the sets are unordered, the processing of the set and matching objects would
+ # be non-trivial.
+ elif isinstance(value, list):
+ destination[key] = _union_list_of_pydantic_dicts(value, node)
else:
destination[key] = value
diff --git a/src/elevenlabs/core/request_options.py b/src/elevenlabs/core/request_options.py
index d0bf0dbc..1b388044 100644
--- a/src/elevenlabs/core/request_options.py
+++ b/src/elevenlabs/core/request_options.py
@@ -23,6 +23,8 @@ class RequestOptions(typing.TypedDict, total=False):
- additional_query_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's query parameters dict
- additional_body_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's body parameters dict
+
+ - chunk_size: int. The size, in bytes, to process each chunk of data being streamed back within the response. This equates to leveraging `chunk_size` within `requests` or `httpx`, and is only leveraged for file downloads.
"""
timeout_in_seconds: NotRequired[int]
@@ -30,3 +32,4 @@ class RequestOptions(typing.TypedDict, total=False):
additional_headers: NotRequired[typing.Dict[str, typing.Any]]
additional_query_parameters: NotRequired[typing.Dict[str, typing.Any]]
additional_body_parameters: NotRequired[typing.Dict[str, typing.Any]]
+ chunk_size: NotRequired[int]
diff --git a/src/elevenlabs/core/serialization.py b/src/elevenlabs/core/serialization.py
index 5605f1b6..cb5dcbf9 100644
--- a/src/elevenlabs/core/serialization.py
+++ b/src/elevenlabs/core/serialization.py
@@ -71,6 +71,24 @@ def convert_and_respect_annotation_metadata(
if typing_extensions.is_typeddict(clean_type) and isinstance(object_, typing.Mapping):
return _convert_mapping(object_, clean_type, direction)
+ if (
+ typing_extensions.get_origin(clean_type) == typing.Dict
+ or typing_extensions.get_origin(clean_type) == dict
+ or clean_type == typing.Dict
+ ) and isinstance(object_, typing.Dict):
+ key_type = typing_extensions.get_args(clean_type)[0]
+ value_type = typing_extensions.get_args(clean_type)[1]
+
+ return {
+ key: convert_and_respect_annotation_metadata(
+ object_=value,
+ annotation=annotation,
+ inner_type=value_type,
+ direction=direction,
+ )
+ for key, value in object_.items()
+ }
+
# If you're iterating on a string, do not bother to coerce it to a sequence.
if not isinstance(object_, str):
if (
diff --git a/src/elevenlabs/dubbing/__init__.py b/src/elevenlabs/dubbing/__init__.py
index 6e3590aa..54967ecf 100644
--- a/src/elevenlabs/dubbing/__init__.py
+++ b/src/elevenlabs/dubbing/__init__.py
@@ -1,5 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
-from .types import GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType
+from .types import DubbingGetTranscriptForDubRequestFormatType
-__all__ = ["GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType"]
+__all__ = ["DubbingGetTranscriptForDubRequestFormatType"]
diff --git a/src/elevenlabs/dubbing/client.py b/src/elevenlabs/dubbing/client.py
index 3226750a..97be0722 100644
--- a/src/elevenlabs/dubbing/client.py
+++ b/src/elevenlabs/dubbing/client.py
@@ -12,9 +12,7 @@
from ..core.api_error import ApiError
from ..types.dubbing_metadata_response import DubbingMetadataResponse
from ..core.jsonable_encoder import jsonable_encoder
-from .types.get_transcript_for_dub_v_1_dubbing_dubbing_id_transcript_language_code_get_request_format_type import (
- GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType,
-)
+from .types.dubbing_get_transcript_for_dub_request_format_type import DubbingGetTranscriptForDubRequestFormatType
from ..core.client_wrapper import AsyncClientWrapper
# this is used as the default value for optional parameters
@@ -29,15 +27,17 @@ def dub_a_video_or_an_audio_file(
self,
*,
target_lang: str,
- file: typing.Optional[core.File] = None,
- name: typing.Optional[str] = None,
- source_url: typing.Optional[str] = None,
- source_lang: typing.Optional[str] = None,
- num_speakers: typing.Optional[int] = None,
- watermark: typing.Optional[bool] = None,
- start_time: typing.Optional[int] = None,
- end_time: typing.Optional[int] = None,
- highest_resolution: typing.Optional[bool] = None,
+ file: typing.Optional[core.File] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ source_url: typing.Optional[str] = OMIT,
+ source_lang: typing.Optional[str] = OMIT,
+ num_speakers: typing.Optional[int] = OMIT,
+ watermark: typing.Optional[bool] = OMIT,
+ start_time: typing.Optional[int] = OMIT,
+ end_time: typing.Optional[int] = OMIT,
+ highest_resolution: typing.Optional[bool] = OMIT,
+ drop_background_audio: typing.Optional[bool] = OMIT,
+ use_profanity_filter: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> DoDubbingResponse:
"""
@@ -75,6 +75,12 @@ def dub_a_video_or_an_audio_file(
highest_resolution : typing.Optional[bool]
Whether to use the highest resolution available.
+ drop_background_audio : typing.Optional[bool]
+ An advanced setting. Whether to drop background audio from the final dub. This can improve dub quality where it's known that audio shouldn't have a background track such as for speeches or monologues.
+
+ use_profanity_filter : typing.Optional[bool]
+ [BETA] Whether transcripts should have profanities censored with the words '[censored]'
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -107,6 +113,8 @@ def dub_a_video_or_an_audio_file(
"start_time": start_time,
"end_time": end_time,
"highest_resolution": highest_resolution,
+ "drop_background_audio": drop_background_audio,
+ "use_profanity_filter": use_profanity_filter,
},
files={
"file": file,
@@ -271,24 +279,12 @@ def get_dubbed_file(
ID of the language.
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
typing.Iterator[bytes]
Successful Response
-
- Examples
- --------
- from elevenlabs import ElevenLabs
-
- client = ElevenLabs(
- api_key="YOUR_API_KEY",
- )
- client.dubbing.get_dubbed_file(
- dubbing_id="string",
- language_code="string",
- )
"""
with self._client_wrapper.httpx_client.stream(
f"v1/dubbing/{jsonable_encoder(dubbing_id)}/audio/{jsonable_encoder(language_code)}",
@@ -297,7 +293,8 @@ def get_dubbed_file(
) as _response:
try:
if 200 <= _response.status_code < 300:
- for _chunk in _response.iter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ for _chunk in _response.iter_bytes(chunk_size=_chunk_size):
yield _chunk
return
_response.read()
@@ -321,9 +318,7 @@ def get_transcript_for_dub(
dubbing_id: str,
language_code: str,
*,
- format_type: typing.Optional[
- GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType
- ] = None,
+ format_type: typing.Optional[DubbingGetTranscriptForDubRequestFormatType] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Optional[typing.Any]:
"""
@@ -337,7 +332,7 @@ def get_transcript_for_dub(
language_code : str
ID of the language.
- format_type : typing.Optional[GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType]
+ format_type : typing.Optional[DubbingGetTranscriptForDubRequestFormatType]
Format to use for the subtitle file, either 'srt' or 'webvtt'
request_options : typing.Optional[RequestOptions]
@@ -401,15 +396,17 @@ async def dub_a_video_or_an_audio_file(
self,
*,
target_lang: str,
- file: typing.Optional[core.File] = None,
- name: typing.Optional[str] = None,
- source_url: typing.Optional[str] = None,
- source_lang: typing.Optional[str] = None,
- num_speakers: typing.Optional[int] = None,
- watermark: typing.Optional[bool] = None,
- start_time: typing.Optional[int] = None,
- end_time: typing.Optional[int] = None,
- highest_resolution: typing.Optional[bool] = None,
+ file: typing.Optional[core.File] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ source_url: typing.Optional[str] = OMIT,
+ source_lang: typing.Optional[str] = OMIT,
+ num_speakers: typing.Optional[int] = OMIT,
+ watermark: typing.Optional[bool] = OMIT,
+ start_time: typing.Optional[int] = OMIT,
+ end_time: typing.Optional[int] = OMIT,
+ highest_resolution: typing.Optional[bool] = OMIT,
+ drop_background_audio: typing.Optional[bool] = OMIT,
+ use_profanity_filter: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> DoDubbingResponse:
"""
@@ -447,6 +444,12 @@ async def dub_a_video_or_an_audio_file(
highest_resolution : typing.Optional[bool]
Whether to use the highest resolution available.
+ drop_background_audio : typing.Optional[bool]
+ An advanced setting. Whether to drop background audio from the final dub. This can improve dub quality where it's known that audio shouldn't have a background track such as for speeches or monologues.
+
+ use_profanity_filter : typing.Optional[bool]
+ [BETA] Whether transcripts should have profanities censored with the words '[censored]'
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -487,6 +490,8 @@ async def main() -> None:
"start_time": start_time,
"end_time": end_time,
"highest_resolution": highest_resolution,
+ "drop_background_audio": drop_background_audio,
+ "use_profanity_filter": use_profanity_filter,
},
files={
"file": file,
@@ -667,32 +672,12 @@ async def get_dubbed_file(
ID of the language.
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
typing.AsyncIterator[bytes]
Successful Response
-
- Examples
- --------
- import asyncio
-
- from elevenlabs import AsyncElevenLabs
-
- client = AsyncElevenLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- await client.dubbing.get_dubbed_file(
- dubbing_id="string",
- language_code="string",
- )
-
-
- asyncio.run(main())
"""
async with self._client_wrapper.httpx_client.stream(
f"v1/dubbing/{jsonable_encoder(dubbing_id)}/audio/{jsonable_encoder(language_code)}",
@@ -701,7 +686,8 @@ async def main() -> None:
) as _response:
try:
if 200 <= _response.status_code < 300:
- async for _chunk in _response.aiter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
yield _chunk
return
await _response.aread()
@@ -725,9 +711,7 @@ async def get_transcript_for_dub(
dubbing_id: str,
language_code: str,
*,
- format_type: typing.Optional[
- GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType
- ] = None,
+ format_type: typing.Optional[DubbingGetTranscriptForDubRequestFormatType] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Optional[typing.Any]:
"""
@@ -741,7 +725,7 @@ async def get_transcript_for_dub(
language_code : str
ID of the language.
- format_type : typing.Optional[GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType]
+ format_type : typing.Optional[DubbingGetTranscriptForDubRequestFormatType]
Format to use for the subtitle file, either 'srt' or 'webvtt'
request_options : typing.Optional[RequestOptions]
diff --git a/src/elevenlabs/dubbing/types/__init__.py b/src/elevenlabs/dubbing/types/__init__.py
index 6692b1f7..0c667fb2 100644
--- a/src/elevenlabs/dubbing/types/__init__.py
+++ b/src/elevenlabs/dubbing/types/__init__.py
@@ -1,7 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
-from .get_transcript_for_dub_v_1_dubbing_dubbing_id_transcript_language_code_get_request_format_type import (
- GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType,
-)
+from .dubbing_get_transcript_for_dub_request_format_type import DubbingGetTranscriptForDubRequestFormatType
-__all__ = ["GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType"]
+__all__ = ["DubbingGetTranscriptForDubRequestFormatType"]
diff --git a/src/elevenlabs/dubbing/types/dubbing_get_transcript_for_dub_request_format_type.py b/src/elevenlabs/dubbing/types/dubbing_get_transcript_for_dub_request_format_type.py
new file mode 100644
index 00000000..ed4d2ea2
--- /dev/null
+++ b/src/elevenlabs/dubbing/types/dubbing_get_transcript_for_dub_request_format_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+DubbingGetTranscriptForDubRequestFormatType = typing.Union[typing.Literal["srt", "webvtt"], typing.Any]
diff --git a/src/elevenlabs/dubbing/types/get_transcript_for_dub_v_1_dubbing_dubbing_id_transcript_language_code_get_request_format_type.py b/src/elevenlabs/dubbing/types/get_transcript_for_dub_v_1_dubbing_dubbing_id_transcript_language_code_get_request_format_type.py
deleted file mode 100644
index 7c651c60..00000000
--- a/src/elevenlabs/dubbing/types/get_transcript_for_dub_v_1_dubbing_dubbing_id_transcript_language_code_get_request_format_type.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType = typing.Union[
- typing.Literal["srt", "webvtt"], typing.Any
-]
diff --git a/src/elevenlabs/environment.py b/src/elevenlabs/environment.py
index dc68f9bc..37557cbf 100644
--- a/src/elevenlabs/environment.py
+++ b/src/elevenlabs/environment.py
@@ -5,3 +5,4 @@
class ElevenLabsEnvironment(enum.Enum):
PRODUCTION = "https://api.elevenlabs.io/"
+ PRODUCTION_US = "https://api.us.elevenlabs.io/"
diff --git a/src/elevenlabs/history/__init__.py b/src/elevenlabs/history/__init__.py
index f3ea2659..5c94f169 100644
--- a/src/elevenlabs/history/__init__.py
+++ b/src/elevenlabs/history/__init__.py
@@ -1,2 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
+from .types import HistoryGetAllRequestSource
+
+__all__ = ["HistoryGetAllRequestSource"]
diff --git a/src/elevenlabs/history/client.py b/src/elevenlabs/history/client.py
index ce259e3f..fdc84c72 100644
--- a/src/elevenlabs/history/client.py
+++ b/src/elevenlabs/history/client.py
@@ -2,6 +2,7 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
+from .types.history_get_all_request_source import HistoryGetAllRequestSource
from ..core.request_options import RequestOptions
from ..types.get_speech_history_response import GetSpeechHistoryResponse
from ..core.unchecked_base_model import construct_type
@@ -27,6 +28,8 @@ def get_all(
page_size: typing.Optional[int] = None,
start_after_history_item_id: typing.Optional[str] = None,
voice_id: typing.Optional[str] = None,
+ search: typing.Optional[str] = None,
+ source: typing.Optional[HistoryGetAllRequestSource] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> GetSpeechHistoryResponse:
"""
@@ -43,6 +46,12 @@ def get_all(
voice_id : typing.Optional[str]
Voice ID to be filtered for, you can use GET https://api.elevenlabs.io/v1/voices to receive a list of voices and their IDs.
+ search : typing.Optional[str]
+ search term used for filtering
+
+ source : typing.Optional[HistoryGetAllRequestSource]
+ Source of the generated history item
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -58,10 +67,7 @@ def get_all(
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
- client.history.get_all(
- page_size=1,
- voice_id="pMsXgVXv3BLzUgSXRplE",
- )
+ client.history.get_all()
"""
_response = self._client_wrapper.httpx_client.request(
"v1/history",
@@ -70,6 +76,8 @@ def get_all(
"page_size": page_size,
"start_after_history_item_id": start_after_history_item_id,
"voice_id": voice_id,
+ "search": search,
+ "source": source,
},
request_options=request_options,
)
@@ -124,7 +132,7 @@ def get(
api_key="YOUR_API_KEY",
)
client.history.get(
- history_item_id="ja9xsmfGhxYcymxGcOGB",
+ history_item_id="HISTORY_ITEM_ID",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -183,7 +191,7 @@ def delete(
api_key="YOUR_API_KEY",
)
client.history.delete(
- history_item_id="ja9xsmfGhxYcymxGcOGB",
+ history_item_id="HISTORY_ITEM_ID",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -227,7 +235,7 @@ def get_audio(
History item ID to be used, you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs.
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
@@ -242,7 +250,7 @@ def get_audio(
api_key="YOUR_API_KEY",
)
client.history.get_audio(
- history_item_id="ja9xsmfGhxYcymxGcOGB",
+ history_item_id="HISTORY_ITEM_ID",
)
"""
with self._client_wrapper.httpx_client.stream(
@@ -252,7 +260,8 @@ def get_audio(
) as _response:
try:
if 200 <= _response.status_code < 300:
- for _chunk in _response.iter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ for _chunk in _response.iter_bytes(chunk_size=_chunk_size):
yield _chunk
return
_response.read()
@@ -304,7 +313,7 @@ def download(
api_key="YOUR_API_KEY",
)
client.history.download(
- history_item_ids=["ja9xsmfGhxYcymxGcOGB"],
+ history_item_ids=["HISTORY_ITEM_ID"],
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -314,6 +323,9 @@ def download(
"history_item_ids": history_item_ids,
"output_format": output_format,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -346,6 +358,8 @@ async def get_all(
page_size: typing.Optional[int] = None,
start_after_history_item_id: typing.Optional[str] = None,
voice_id: typing.Optional[str] = None,
+ search: typing.Optional[str] = None,
+ source: typing.Optional[HistoryGetAllRequestSource] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> GetSpeechHistoryResponse:
"""
@@ -362,6 +376,12 @@ async def get_all(
voice_id : typing.Optional[str]
Voice ID to be filtered for, you can use GET https://api.elevenlabs.io/v1/voices to receive a list of voices and their IDs.
+ search : typing.Optional[str]
+ search term used for filtering
+
+ source : typing.Optional[HistoryGetAllRequestSource]
+ Source of the generated history item
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -382,10 +402,7 @@ async def get_all(
async def main() -> None:
- await client.history.get_all(
- page_size=1,
- voice_id="pMsXgVXv3BLzUgSXRplE",
- )
+ await client.history.get_all()
asyncio.run(main())
@@ -397,6 +414,8 @@ async def main() -> None:
"page_size": page_size,
"start_after_history_item_id": start_after_history_item_id,
"voice_id": voice_id,
+ "search": search,
+ "source": source,
},
request_options=request_options,
)
@@ -456,7 +475,7 @@ async def get(
async def main() -> None:
await client.history.get(
- history_item_id="ja9xsmfGhxYcymxGcOGB",
+ history_item_id="HISTORY_ITEM_ID",
)
@@ -523,7 +542,7 @@ async def delete(
async def main() -> None:
await client.history.delete(
- history_item_id="ja9xsmfGhxYcymxGcOGB",
+ history_item_id="HISTORY_ITEM_ID",
)
@@ -570,7 +589,7 @@ async def get_audio(
History item ID to be used, you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs.
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
@@ -590,7 +609,7 @@ async def get_audio(
async def main() -> None:
await client.history.get_audio(
- history_item_id="ja9xsmfGhxYcymxGcOGB",
+ history_item_id="HISTORY_ITEM_ID",
)
@@ -603,7 +622,8 @@ async def main() -> None:
) as _response:
try:
if 200 <= _response.status_code < 300:
- async for _chunk in _response.aiter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
yield _chunk
return
await _response.aread()
@@ -660,7 +680,7 @@ async def download(
async def main() -> None:
await client.history.download(
- history_item_ids=["ja9xsmfGhxYcymxGcOGB"],
+ history_item_ids=["HISTORY_ITEM_ID"],
)
@@ -673,6 +693,9 @@ async def main() -> None:
"history_item_ids": history_item_ids,
"output_format": output_format,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
diff --git a/src/elevenlabs/history/types/__init__.py b/src/elevenlabs/history/types/__init__.py
new file mode 100644
index 00000000..c1e50696
--- /dev/null
+++ b/src/elevenlabs/history/types/__init__.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .history_get_all_request_source import HistoryGetAllRequestSource
+
+__all__ = ["HistoryGetAllRequestSource"]
diff --git a/src/elevenlabs/history/types/history_get_all_request_source.py b/src/elevenlabs/history/types/history_get_all_request_source.py
new file mode 100644
index 00000000..fc4371db
--- /dev/null
+++ b/src/elevenlabs/history/types/history_get_all_request_source.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+HistoryGetAllRequestSource = typing.Union[typing.Literal["TTS", "STS"], typing.Any]
diff --git a/src/elevenlabs/projects/__init__.py b/src/elevenlabs/projects/__init__.py
index f3ea2659..749f44bc 100644
--- a/src/elevenlabs/projects/__init__.py
+++ b/src/elevenlabs/projects/__init__.py
@@ -1,2 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
+from .types import ProjectsAddRequestFiction, ProjectsAddRequestTargetAudience
+
+__all__ = ["ProjectsAddRequestFiction", "ProjectsAddRequestTargetAudience"]
diff --git a/src/elevenlabs/projects/client.py b/src/elevenlabs/projects/client.py
index 6f8ca56f..e0917f60 100644
--- a/src/elevenlabs/projects/client.py
+++ b/src/elevenlabs/projects/client.py
@@ -10,6 +10,8 @@
from json.decoder import JSONDecodeError
from ..core.api_error import ApiError
from .. import core
+from .types.projects_add_request_target_audience import ProjectsAddRequestTargetAudience
+from .types.projects_add_request_fiction import ProjectsAddRequestFiction
from ..types.add_project_response_model import AddProjectResponseModel
from ..types.project_extended_response_model import ProjectExtendedResponseModel
from ..core.jsonable_encoder import jsonable_encoder
@@ -86,15 +88,24 @@ def add(
default_title_voice_id: str,
default_paragraph_voice_id: str,
default_model_id: str,
- from_url: typing.Optional[str] = None,
- from_document: typing.Optional[core.File] = None,
- quality_preset: typing.Optional[str] = None,
- title: typing.Optional[str] = None,
- author: typing.Optional[str] = None,
- isbn_number: typing.Optional[str] = None,
- acx_volume_normalization: typing.Optional[bool] = None,
- volume_normalization: typing.Optional[bool] = None,
- pronunciation_dictionary_locators: typing.Optional[typing.List[str]] = None,
+ from_url: typing.Optional[str] = OMIT,
+ from_document: typing.Optional[core.File] = OMIT,
+ quality_preset: typing.Optional[str] = OMIT,
+ title: typing.Optional[str] = OMIT,
+ author: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ genres: typing.Optional[typing.List[str]] = OMIT,
+ target_audience: typing.Optional[ProjectsAddRequestTargetAudience] = OMIT,
+ language: typing.Optional[str] = OMIT,
+ content_type: typing.Optional[str] = OMIT,
+ original_publication_date: typing.Optional[str] = OMIT,
+ mature_content: typing.Optional[bool] = OMIT,
+ isbn_number: typing.Optional[str] = OMIT,
+ acx_volume_normalization: typing.Optional[bool] = OMIT,
+ volume_normalization: typing.Optional[bool] = OMIT,
+ pronunciation_dictionary_locators: typing.Optional[typing.List[str]] = OMIT,
+ fiction: typing.Optional[ProjectsAddRequestFiction] = OMIT,
+ quality_check_on: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AddProjectResponseModel:
"""
@@ -123,9 +134,9 @@ def add(
quality_preset : typing.Optional[str]
Output quality of the generated audio. Must be one of:
standard - standard output format, 128kbps with 44.1kHz sample rate.
- high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the character cost by 20%.
- ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the character cost by 50%.
- ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the character cost by 100%.
+ high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the credit cost by 20%.
+ ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the credit cost by 50%.
+ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the credit cost by 100%.
title : typing.Optional[str]
@@ -134,6 +145,27 @@ def add(
author : typing.Optional[str]
An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download.
+ description : typing.Optional[str]
+ An optional description of the project.
+
+ genres : typing.Optional[typing.List[str]]
+ An optional list of genres associated with the project.
+
+ target_audience : typing.Optional[ProjectsAddRequestTargetAudience]
+ An optional target audience of the project.
+
+ language : typing.Optional[str]
+ An optional language of the project. Two-letter language code (ISO 639-1).
+
+ content_type : typing.Optional[str]
+ An optional content type of the project.
+
+ original_publication_date : typing.Optional[str]
+ An optional original publication date of the project, in the format YYYY-MM-DD or YYYY.
+
+ mature_content : typing.Optional[bool]
+ An optional mature content of the project.
+
isbn_number : typing.Optional[str]
An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download.
@@ -146,6 +178,12 @@ def add(
pronunciation_dictionary_locators : typing.Optional[typing.List[str]]
A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
+ fiction : typing.Optional[ProjectsAddRequestFiction]
+ An optional fiction of the project.
+
+ quality_check_on : typing.Optional[bool]
+ Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -180,10 +218,19 @@ def add(
"quality_preset": quality_preset,
"title": title,
"author": author,
+ "description": description,
+ "genres": genres,
+ "target_audience": target_audience,
+ "language": language,
+ "content_type": content_type,
+ "original_publication_date": original_publication_date,
+ "mature_content": mature_content,
"isbn_number": isbn_number,
"acx_volume_normalization": acx_volume_normalization,
"volume_normalization": volume_normalization,
"pronunciation_dictionary_locators": pronunciation_dictionary_locators,
+ "fiction": fiction,
+ "quality_check_on": quality_check_on,
},
files={
"from_document": from_document,
@@ -285,6 +332,7 @@ def edit_basic_project_info(
author: typing.Optional[str] = OMIT,
isbn_number: typing.Optional[str] = OMIT,
volume_normalization: typing.Optional[bool] = OMIT,
+ quality_check_on: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> EditProjectResponseModel:
"""
@@ -316,6 +364,9 @@ def edit_basic_project_info(
volume_normalization : typing.Optional[bool]
When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
+ quality_check_on : typing.Optional[bool]
+ Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -349,6 +400,10 @@ def edit_basic_project_info(
"author": author,
"isbn_number": isbn_number,
"volume_normalization": volume_normalization,
+ "quality_check_on": quality_check_on,
+ },
+ headers={
+ "content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
@@ -577,25 +632,12 @@ def stream_audio(
Whether to convert the audio to mpeg format.
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
typing.Iterator[bytes]
Successful Response
-
- Examples
- --------
- from elevenlabs import ElevenLabs
-
- client = ElevenLabs(
- api_key="YOUR_API_KEY",
- )
- client.projects.stream_audio(
- project_id="string",
- project_snapshot_id="string",
- convert_to_mpeg=True,
- )
"""
with self._client_wrapper.httpx_client.stream(
f"v1/projects/{jsonable_encoder(project_id)}/snapshots/{jsonable_encoder(project_snapshot_id)}/stream",
@@ -603,12 +645,16 @@ def stream_audio(
json={
"convert_to_mpeg": convert_to_mpeg,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
) as _response:
try:
if 200 <= _response.status_code < 300:
- for _chunk in _response.iter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ for _chunk in _response.iter_bytes(chunk_size=_chunk_size):
yield _chunk
return
_response.read()
@@ -736,6 +782,9 @@ def update_pronunciation_dictionaries(
direction="write",
),
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -835,15 +884,24 @@ async def add(
default_title_voice_id: str,
default_paragraph_voice_id: str,
default_model_id: str,
- from_url: typing.Optional[str] = None,
- from_document: typing.Optional[core.File] = None,
- quality_preset: typing.Optional[str] = None,
- title: typing.Optional[str] = None,
- author: typing.Optional[str] = None,
- isbn_number: typing.Optional[str] = None,
- acx_volume_normalization: typing.Optional[bool] = None,
- volume_normalization: typing.Optional[bool] = None,
- pronunciation_dictionary_locators: typing.Optional[typing.List[str]] = None,
+ from_url: typing.Optional[str] = OMIT,
+ from_document: typing.Optional[core.File] = OMIT,
+ quality_preset: typing.Optional[str] = OMIT,
+ title: typing.Optional[str] = OMIT,
+ author: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ genres: typing.Optional[typing.List[str]] = OMIT,
+ target_audience: typing.Optional[ProjectsAddRequestTargetAudience] = OMIT,
+ language: typing.Optional[str] = OMIT,
+ content_type: typing.Optional[str] = OMIT,
+ original_publication_date: typing.Optional[str] = OMIT,
+ mature_content: typing.Optional[bool] = OMIT,
+ isbn_number: typing.Optional[str] = OMIT,
+ acx_volume_normalization: typing.Optional[bool] = OMIT,
+ volume_normalization: typing.Optional[bool] = OMIT,
+ pronunciation_dictionary_locators: typing.Optional[typing.List[str]] = OMIT,
+ fiction: typing.Optional[ProjectsAddRequestFiction] = OMIT,
+ quality_check_on: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AddProjectResponseModel:
"""
@@ -872,9 +930,9 @@ async def add(
quality_preset : typing.Optional[str]
Output quality of the generated audio. Must be one of:
standard - standard output format, 128kbps with 44.1kHz sample rate.
- high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the character cost by 20%.
- ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the character cost by 50%.
- ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the character cost by 100%.
+ high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the credit cost by 20%.
+ ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the credit cost by 50%.
+ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the credit cost by 100%.
title : typing.Optional[str]
@@ -883,6 +941,27 @@ async def add(
author : typing.Optional[str]
An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download.
+ description : typing.Optional[str]
+ An optional description of the project.
+
+ genres : typing.Optional[typing.List[str]]
+ An optional list of genres associated with the project.
+
+ target_audience : typing.Optional[ProjectsAddRequestTargetAudience]
+ An optional target audience of the project.
+
+ language : typing.Optional[str]
+ An optional language of the project. Two-letter language code (ISO 639-1).
+
+ content_type : typing.Optional[str]
+ An optional content type of the project.
+
+ original_publication_date : typing.Optional[str]
+ An optional original publication date of the project, in the format YYYY-MM-DD or YYYY.
+
+ mature_content : typing.Optional[bool]
+ An optional mature content of the project.
+
isbn_number : typing.Optional[str]
An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download.
@@ -895,6 +974,12 @@ async def add(
pronunciation_dictionary_locators : typing.Optional[typing.List[str]]
A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
+ fiction : typing.Optional[ProjectsAddRequestFiction]
+ An optional fiction of the project.
+
+ quality_check_on : typing.Optional[bool]
+ Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -937,10 +1022,19 @@ async def main() -> None:
"quality_preset": quality_preset,
"title": title,
"author": author,
+ "description": description,
+ "genres": genres,
+ "target_audience": target_audience,
+ "language": language,
+ "content_type": content_type,
+ "original_publication_date": original_publication_date,
+ "mature_content": mature_content,
"isbn_number": isbn_number,
"acx_volume_normalization": acx_volume_normalization,
"volume_normalization": volume_normalization,
"pronunciation_dictionary_locators": pronunciation_dictionary_locators,
+ "fiction": fiction,
+ "quality_check_on": quality_check_on,
},
files={
"from_document": from_document,
@@ -1050,6 +1144,7 @@ async def edit_basic_project_info(
author: typing.Optional[str] = OMIT,
isbn_number: typing.Optional[str] = OMIT,
volume_normalization: typing.Optional[bool] = OMIT,
+ quality_check_on: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> EditProjectResponseModel:
"""
@@ -1081,6 +1176,9 @@ async def edit_basic_project_info(
volume_normalization : typing.Optional[bool]
When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
+ quality_check_on : typing.Optional[bool]
+ Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1122,6 +1220,10 @@ async def main() -> None:
"author": author,
"isbn_number": isbn_number,
"volume_normalization": volume_normalization,
+ "quality_check_on": quality_check_on,
+ },
+ headers={
+ "content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
@@ -1374,33 +1476,12 @@ async def stream_audio(
Whether to convert the audio to mpeg format.
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
typing.AsyncIterator[bytes]
Successful Response
-
- Examples
- --------
- import asyncio
-
- from elevenlabs import AsyncElevenLabs
-
- client = AsyncElevenLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- await client.projects.stream_audio(
- project_id="string",
- project_snapshot_id="string",
- convert_to_mpeg=True,
- )
-
-
- asyncio.run(main())
"""
async with self._client_wrapper.httpx_client.stream(
f"v1/projects/{jsonable_encoder(project_id)}/snapshots/{jsonable_encoder(project_snapshot_id)}/stream",
@@ -1408,12 +1489,16 @@ async def main() -> None:
json={
"convert_to_mpeg": convert_to_mpeg,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
) as _response:
try:
if 200 <= _response.status_code < 300:
- async for _chunk in _response.aiter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
yield _chunk
return
await _response.aread()
@@ -1557,6 +1642,9 @@ async def main() -> None:
direction="write",
),
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
diff --git a/src/elevenlabs/projects/types/__init__.py b/src/elevenlabs/projects/types/__init__.py
new file mode 100644
index 00000000..e0531cef
--- /dev/null
+++ b/src/elevenlabs/projects/types/__init__.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .projects_add_request_fiction import ProjectsAddRequestFiction
+from .projects_add_request_target_audience import ProjectsAddRequestTargetAudience
+
+__all__ = ["ProjectsAddRequestFiction", "ProjectsAddRequestTargetAudience"]
diff --git a/src/elevenlabs/projects/types/projects_add_request_fiction.py b/src/elevenlabs/projects/types/projects_add_request_fiction.py
new file mode 100644
index 00000000..a5232ff3
--- /dev/null
+++ b/src/elevenlabs/projects/types/projects_add_request_fiction.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectsAddRequestFiction = typing.Union[typing.Literal["fiction", "non-fiction"], typing.Any]
diff --git a/src/elevenlabs/projects/types/projects_add_request_target_audience.py b/src/elevenlabs/projects/types/projects_add_request_target_audience.py
new file mode 100644
index 00000000..74c8b589
--- /dev/null
+++ b/src/elevenlabs/projects/types/projects_add_request_target_audience.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectsAddRequestTargetAudience = typing.Union[
+ typing.Literal["children", "young adult", "adult", "all ages"], typing.Any
+]
diff --git a/src/elevenlabs/pronunciation_dictionary/client.py b/src/elevenlabs/pronunciation_dictionary/client.py
index 84a4c4c9..8efb5601 100644
--- a/src/elevenlabs/pronunciation_dictionary/client.py
+++ b/src/elevenlabs/pronunciation_dictionary/client.py
@@ -36,9 +36,9 @@ def add_from_file(
self,
*,
name: str,
- file: typing.Optional[core.File] = None,
- description: typing.Optional[str] = None,
- workspace_access: typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess] = None,
+ file: typing.Optional[core.File] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ workspace_access: typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AddPronunciationDictionaryResponseModel:
"""
@@ -115,7 +115,7 @@ def add_from_file(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def add_rules_to_the_pronunciation_dictionary(
+ def add_rules(
self,
pronunciation_dictionary_id: str,
*,
@@ -153,7 +153,7 @@ def add_rules_to_the_pronunciation_dictionary(
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
- client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(
+ client.pronunciation_dictionary.add_rules(
pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM",
rules=[
PronunciationDictionaryRule_Phoneme(
@@ -172,6 +172,9 @@ def add_rules_to_the_pronunciation_dictionary(
object_=rules, annotation=typing.Sequence[PronunciationDictionaryRule], direction="write"
),
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -199,7 +202,7 @@ def add_rules_to_the_pronunciation_dictionary(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def remove_rules_from_the_pronunciation_dictionary(
+ def remove_rules(
self,
pronunciation_dictionary_id: str,
*,
@@ -232,7 +235,7 @@ def remove_rules_from_the_pronunciation_dictionary(
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
- client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
+ client.pronunciation_dictionary.remove_rules(
pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM",
rule_strings=["rule_strings"],
)
@@ -243,6 +246,9 @@ def remove_rules_from_the_pronunciation_dictionary(
json={
"rule_strings": rule_strings,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -465,9 +471,9 @@ async def add_from_file(
self,
*,
name: str,
- file: typing.Optional[core.File] = None,
- description: typing.Optional[str] = None,
- workspace_access: typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess] = None,
+ file: typing.Optional[core.File] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ workspace_access: typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AddPronunciationDictionaryResponseModel:
"""
@@ -552,7 +558,7 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def add_rules_to_the_pronunciation_dictionary(
+ async def add_rules(
self,
pronunciation_dictionary_id: str,
*,
@@ -595,7 +601,7 @@ async def add_rules_to_the_pronunciation_dictionary(
async def main() -> None:
- await client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(
+ await client.pronunciation_dictionary.add_rules(
pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM",
rules=[
PronunciationDictionaryRule_Phoneme(
@@ -617,6 +623,9 @@ async def main() -> None:
object_=rules, annotation=typing.Sequence[PronunciationDictionaryRule], direction="write"
),
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -644,7 +653,7 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def remove_rules_from_the_pronunciation_dictionary(
+ async def remove_rules(
self,
pronunciation_dictionary_id: str,
*,
@@ -682,7 +691,7 @@ async def remove_rules_from_the_pronunciation_dictionary(
async def main() -> None:
- await client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
+ await client.pronunciation_dictionary.remove_rules(
pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM",
rule_strings=["rule_strings"],
)
@@ -696,6 +705,9 @@ async def main() -> None:
json={
"rule_strings": rule_strings,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
diff --git a/src/elevenlabs/realtime_tts.py b/src/elevenlabs/realtime_tts.py
index 146431d7..f4d4bc58 100644
--- a/src/elevenlabs/realtime_tts.py
+++ b/src/elevenlabs/realtime_tts.py
@@ -9,6 +9,7 @@
from websockets.sync.client import connect
from .core.api_error import ApiError
+from .core.client_wrapper import SyncClientWrapper
from .core.jsonable_encoder import jsonable_encoder
from .core.remove_none_from_dict import remove_none_from_dict
from .core.request_options import RequestOptions
@@ -39,6 +40,9 @@ def text_chunker(chunks: typing.Iterator[str]) -> typing.Iterator[str]:
class RealtimeTextToSpeechClient(TextToSpeechClient):
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ super().__init__(client_wrapper=client_wrapper)
+ self._ws_base_url = urllib.parse.urlparse(self._client_wrapper.get_base_url())._replace(scheme="wss").geturl()
def convert_realtime(
self,
@@ -88,7 +92,7 @@ def get_text() -> typing.Iterator[str]:
"""
with connect(
urllib.parse.urljoin(
- "wss://api.elevenlabs.io/",
+ self._ws_base_url,
f"v1/text-to-speech/{jsonable_encoder(voice_id)}/stream-input?model_id={model_id}&output_format={output_format}"
),
additional_headers=jsonable_encoder(
diff --git a/src/elevenlabs/samples/client.py b/src/elevenlabs/samples/client.py
index 37a35981..96b8df90 100644
--- a/src/elevenlabs/samples/client.py
+++ b/src/elevenlabs/samples/client.py
@@ -46,8 +46,8 @@ def delete(
api_key="YOUR_API_KEY",
)
client.samples.delete(
- voice_id="ja9xsmfGhxYcymxGcOGB",
- sample_id="pMsXgVXv3BLzUgSXRplE",
+ voice_id="VOICE_ID",
+ sample_id="SAMPLE_ID",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -94,7 +94,7 @@ def get_audio(
Sample ID to be used, you can use GET https://api.elevenlabs.io/v1/voices/{voice_id} to list all the available samples for a voice.
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
@@ -109,8 +109,8 @@ def get_audio(
api_key="YOUR_API_KEY",
)
client.samples.get_audio(
- voice_id="ja9xsmfGhxYcymxGcOGB",
- sample_id="pMsXgVXv3BLzUgSXRplE",
+ voice_id="VOICE_ID",
+ sample_id="SAMPLE_ID",
)
"""
with self._client_wrapper.httpx_client.stream(
@@ -120,7 +120,8 @@ def get_audio(
) as _response:
try:
if 200 <= _response.status_code < 300:
- for _chunk in _response.iter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ for _chunk in _response.iter_bytes(chunk_size=_chunk_size):
yield _chunk
return
_response.read()
@@ -179,8 +180,8 @@ async def delete(
async def main() -> None:
await client.samples.delete(
- voice_id="ja9xsmfGhxYcymxGcOGB",
- sample_id="pMsXgVXv3BLzUgSXRplE",
+ voice_id="VOICE_ID",
+ sample_id="SAMPLE_ID",
)
@@ -230,7 +231,7 @@ async def get_audio(
Sample ID to be used, you can use GET https://api.elevenlabs.io/v1/voices/{voice_id} to list all the available samples for a voice.
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
@@ -250,8 +251,8 @@ async def get_audio(
async def main() -> None:
await client.samples.get_audio(
- voice_id="ja9xsmfGhxYcymxGcOGB",
- sample_id="pMsXgVXv3BLzUgSXRplE",
+ voice_id="VOICE_ID",
+ sample_id="SAMPLE_ID",
)
@@ -264,7 +265,8 @@ async def main() -> None:
) as _response:
try:
if 200 <= _response.status_code < 300:
- async for _chunk in _response.aiter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
yield _chunk
return
await _response.aread()
diff --git a/src/elevenlabs/speech_to_speech/client.py b/src/elevenlabs/speech_to_speech/client.py
index ecd7cd4e..baa0c9fe 100644
--- a/src/elevenlabs/speech_to_speech/client.py
+++ b/src/elevenlabs/speech_to_speech/client.py
@@ -3,7 +3,6 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
from .. import core
-from ..types.optimize_streaming_latency import OptimizeStreamingLatency
from ..types.output_format import OutputFormat
from ..core.request_options import RequestOptions
from ..core.jsonable_encoder import jsonable_encoder
@@ -28,11 +27,12 @@ def convert(
*,
audio: core.File,
enable_logging: typing.Optional[bool] = None,
- optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None,
+ optimize_streaming_latency: typing.Optional[int] = None,
output_format: typing.Optional[OutputFormat] = None,
- model_id: typing.Optional[str] = None,
- voice_settings: typing.Optional[str] = None,
- seed: typing.Optional[int] = None,
+ model_id: typing.Optional[str] = OMIT,
+ voice_settings: typing.Optional[str] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ remove_background_noise: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[bytes]:
"""
@@ -49,8 +49,15 @@ def convert(
enable_logging : typing.Optional[bool]
When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
- optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency]
- You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+ optimize_streaming_latency : typing.Optional[int]
+ You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+ 0 - default mode (no latency optimizations)
+ 1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+ 2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+ 3 - max latency optimizations
+ 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates).
+
+ Defaults to None.
output_format : typing.Optional[OutputFormat]
The output format of the generated audio.
@@ -62,10 +69,13 @@ def convert(
Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string.
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
+
+ remove_background_noise : typing.Optional[bool]
+ If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
@@ -80,10 +90,9 @@ def convert(
api_key="YOUR_API_KEY",
)
client.speech_to_speech.convert(
- voice_id="string",
- enable_logging=True,
- optimize_streaming_latency="0",
- output_format="mp3_22050_32",
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ model_id="eleven_multilingual_sts_v2",
)
"""
with self._client_wrapper.httpx_client.stream(
@@ -98,6 +107,7 @@ def convert(
"model_id": model_id,
"voice_settings": voice_settings,
"seed": seed,
+ "remove_background_noise": remove_background_noise,
},
files={
"audio": audio,
@@ -107,7 +117,8 @@ def convert(
) as _response:
try:
if 200 <= _response.status_code < 300:
- for _chunk in _response.iter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ for _chunk in _response.iter_bytes(chunk_size=_chunk_size):
yield _chunk
return
_response.read()
@@ -131,12 +142,13 @@ def convert_as_stream(
voice_id: str,
*,
audio: core.File,
- enable_logging: typing.Optional[OptimizeStreamingLatency] = None,
- optimize_streaming_latency: typing.Optional[OutputFormat] = None,
- output_format: typing.Optional[str] = None,
- model_id: typing.Optional[str] = None,
- voice_settings: typing.Optional[str] = None,
- seed: typing.Optional[int] = None,
+ enable_logging: typing.Optional[bool] = None,
+ optimize_streaming_latency: typing.Optional[int] = None,
+ output_format: typing.Optional[OutputFormat] = None,
+ model_id: typing.Optional[str] = OMIT,
+ voice_settings: typing.Optional[str] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ remove_background_noise: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[bytes]:
"""
@@ -150,25 +162,21 @@ def convert_as_stream(
audio : core.File
See core.File for more documentation
- enable_logging : typing.Optional[OptimizeStreamingLatency]
- You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+ enable_logging : typing.Optional[bool]
+ When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
- optimize_streaming_latency : typing.Optional[OutputFormat]
- The output format of the generated audio.
+ optimize_streaming_latency : typing.Optional[int]
+ You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+ 0 - default mode (no latency optimizations)
+ 1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+ 2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+ 3 - max latency optimizations
+ 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates).
- output_format : typing.Optional[str]
- Output format of the generated audio. Must be one of:
- mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps.
- mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps.
- mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps.
- mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps.
- mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps.
- mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above.
- pcm_16000 - PCM format (S16LE) with 16kHz sample rate.
- pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate.
- pcm_24000 - PCM format (S16LE) with 24kHz sample rate.
- pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above.
- ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs.
+ Defaults to None.
+
+ output_format : typing.Optional[OutputFormat]
+ The output format of the generated audio.
model_id : typing.Optional[str]
Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property.
@@ -177,10 +185,13 @@ def convert_as_stream(
Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string.
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
+
+ remove_background_noise : typing.Optional[bool]
+ If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
@@ -195,10 +206,9 @@ def convert_as_stream(
api_key="YOUR_API_KEY",
)
client.speech_to_speech.convert_as_stream(
- voice_id="string",
- enable_logging="0",
- optimize_streaming_latency="mp3_22050_32",
- output_format="string",
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ model_id="eleven_multilingual_sts_v2",
)
"""
with self._client_wrapper.httpx_client.stream(
@@ -213,6 +223,7 @@ def convert_as_stream(
"model_id": model_id,
"voice_settings": voice_settings,
"seed": seed,
+ "remove_background_noise": remove_background_noise,
},
files={
"audio": audio,
@@ -222,7 +233,8 @@ def convert_as_stream(
) as _response:
try:
if 200 <= _response.status_code < 300:
- for _chunk in _response.iter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ for _chunk in _response.iter_bytes(chunk_size=_chunk_size):
yield _chunk
return
_response.read()
@@ -252,11 +264,12 @@ async def convert(
*,
audio: core.File,
enable_logging: typing.Optional[bool] = None,
- optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None,
+ optimize_streaming_latency: typing.Optional[int] = None,
output_format: typing.Optional[OutputFormat] = None,
- model_id: typing.Optional[str] = None,
- voice_settings: typing.Optional[str] = None,
- seed: typing.Optional[int] = None,
+ model_id: typing.Optional[str] = OMIT,
+ voice_settings: typing.Optional[str] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ remove_background_noise: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[bytes]:
"""
@@ -273,8 +286,15 @@ async def convert(
enable_logging : typing.Optional[bool]
When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
- optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency]
- You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+ optimize_streaming_latency : typing.Optional[int]
+ You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+ 0 - default mode (no latency optimizations)
+ 1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+ 2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+ 3 - max latency optimizations
+ 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates).
+
+ Defaults to None.
output_format : typing.Optional[OutputFormat]
The output format of the generated audio.
@@ -286,10 +306,13 @@ async def convert(
Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string.
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
+
+ remove_background_noise : typing.Optional[bool]
+ If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
@@ -309,10 +332,9 @@ async def convert(
async def main() -> None:
await client.speech_to_speech.convert(
- voice_id="string",
- enable_logging=True,
- optimize_streaming_latency="0",
- output_format="mp3_22050_32",
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ model_id="eleven_multilingual_sts_v2",
)
@@ -330,6 +352,7 @@ async def main() -> None:
"model_id": model_id,
"voice_settings": voice_settings,
"seed": seed,
+ "remove_background_noise": remove_background_noise,
},
files={
"audio": audio,
@@ -339,7 +362,8 @@ async def main() -> None:
) as _response:
try:
if 200 <= _response.status_code < 300:
- async for _chunk in _response.aiter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
yield _chunk
return
await _response.aread()
@@ -363,12 +387,13 @@ async def convert_as_stream(
voice_id: str,
*,
audio: core.File,
- enable_logging: typing.Optional[OptimizeStreamingLatency] = None,
- optimize_streaming_latency: typing.Optional[OutputFormat] = None,
- output_format: typing.Optional[str] = None,
- model_id: typing.Optional[str] = None,
- voice_settings: typing.Optional[str] = None,
- seed: typing.Optional[int] = None,
+ enable_logging: typing.Optional[bool] = None,
+ optimize_streaming_latency: typing.Optional[int] = None,
+ output_format: typing.Optional[OutputFormat] = None,
+ model_id: typing.Optional[str] = OMIT,
+ voice_settings: typing.Optional[str] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ remove_background_noise: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[bytes]:
"""
@@ -382,25 +407,21 @@ async def convert_as_stream(
audio : core.File
See core.File for more documentation
- enable_logging : typing.Optional[OptimizeStreamingLatency]
- You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+ enable_logging : typing.Optional[bool]
+ When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
- optimize_streaming_latency : typing.Optional[OutputFormat]
- The output format of the generated audio.
+ optimize_streaming_latency : typing.Optional[int]
+ You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+ 0 - default mode (no latency optimizations)
+ 1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+ 2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+ 3 - max latency optimizations
+ 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates).
- output_format : typing.Optional[str]
- Output format of the generated audio. Must be one of:
- mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps.
- mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps.
- mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps.
- mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps.
- mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps.
- mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above.
- pcm_16000 - PCM format (S16LE) with 16kHz sample rate.
- pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate.
- pcm_24000 - PCM format (S16LE) with 24kHz sample rate.
- pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above.
- ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs.
+ Defaults to None.
+
+ output_format : typing.Optional[OutputFormat]
+ The output format of the generated audio.
model_id : typing.Optional[str]
Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property.
@@ -409,10 +430,13 @@ async def convert_as_stream(
Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string.
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
+
+ remove_background_noise : typing.Optional[bool]
+ If set, this will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
@@ -432,10 +456,9 @@ async def convert_as_stream(
async def main() -> None:
await client.speech_to_speech.convert_as_stream(
- voice_id="string",
- enable_logging="0",
- optimize_streaming_latency="mp3_22050_32",
- output_format="string",
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ model_id="eleven_multilingual_sts_v2",
)
@@ -453,6 +476,7 @@ async def main() -> None:
"model_id": model_id,
"voice_settings": voice_settings,
"seed": seed,
+ "remove_background_noise": remove_background_noise,
},
files={
"audio": audio,
@@ -462,7 +486,8 @@ async def main() -> None:
) as _response:
try:
if 200 <= _response.status_code < 300:
- async for _chunk in _response.aiter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
yield _chunk
return
await _response.aread()
diff --git a/src/elevenlabs/text_to_sound_effects/client.py b/src/elevenlabs/text_to_sound_effects/client.py
index b9eb26df..f562fef6 100644
--- a/src/elevenlabs/text_to_sound_effects/client.py
+++ b/src/elevenlabs/text_to_sound_effects/client.py
@@ -41,7 +41,7 @@ def convert(
A higher prompt influence makes your generation follow the prompt more closely while also making generations less variable. Must be a value between 0 and 1. Defaults to 0.3.
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
@@ -56,9 +56,7 @@ def convert(
api_key="YOUR_API_KEY",
)
client.text_to_sound_effects.convert(
- text="string",
- duration_seconds=1.1,
- prompt_influence=1.1,
+ text="Spacious braam suitable for high-impact movie trailer moments",
)
"""
with self._client_wrapper.httpx_client.stream(
@@ -69,12 +67,16 @@ def convert(
"duration_seconds": duration_seconds,
"prompt_influence": prompt_influence,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
) as _response:
try:
if 200 <= _response.status_code < 300:
- for _chunk in _response.iter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ for _chunk in _response.iter_bytes(chunk_size=_chunk_size):
yield _chunk
return
_response.read()
@@ -121,7 +123,7 @@ async def convert(
A higher prompt influence makes your generation follow the prompt more closely while also making generations less variable. Must be a value between 0 and 1. Defaults to 0.3.
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
@@ -141,9 +143,7 @@ async def convert(
async def main() -> None:
await client.text_to_sound_effects.convert(
- text="string",
- duration_seconds=1.1,
- prompt_influence=1.1,
+ text="Spacious braam suitable for high-impact movie trailer moments",
)
@@ -157,12 +157,16 @@ async def main() -> None:
"duration_seconds": duration_seconds,
"prompt_influence": prompt_influence,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
) as _response:
try:
if 200 <= _response.status_code < 300:
- async for _chunk in _response.aiter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
yield _chunk
return
await _response.aread()
diff --git a/src/elevenlabs/text_to_speech/__init__.py b/src/elevenlabs/text_to_speech/__init__.py
index 518f9a32..2ec7be88 100644
--- a/src/elevenlabs/text_to_speech/__init__.py
+++ b/src/elevenlabs/text_to_speech/__init__.py
@@ -1,5 +1,21 @@
# This file was auto-generated by Fern from our API Definition.
-from .types import SendMessage
+from .types import (
+ BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization,
+ BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization,
+ BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization,
+ BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization,
+ TextToSpeechStreamWithTimestampsResponse,
+ TextToSpeechStreamWithTimestampsResponseAlignment,
+ TextToSpeechStreamWithTimestampsResponseNormalizedAlignment,
+)
-__all__ = ["SendMessage"]
+__all__ = [
+ "BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization",
+ "BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization",
+ "BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization",
+ "BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization",
+ "TextToSpeechStreamWithTimestampsResponse",
+ "TextToSpeechStreamWithTimestampsResponseAlignment",
+ "TextToSpeechStreamWithTimestampsResponseNormalizedAlignment",
+]
diff --git a/src/elevenlabs/text_to_speech/client.py b/src/elevenlabs/text_to_speech/client.py
index cadd2287..d608dd16 100644
--- a/src/elevenlabs/text_to_speech/client.py
+++ b/src/elevenlabs/text_to_speech/client.py
@@ -2,10 +2,12 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
-from ..types.optimize_streaming_latency import OptimizeStreamingLatency
from ..types.output_format import OutputFormat
from ..types.voice_settings import VoiceSettings
from ..types.pronunciation_dictionary_version_locator import PronunciationDictionaryVersionLocator
+from .types.body_text_to_speech_v_1_text_to_speech_voice_id_post_apply_text_normalization import (
+ BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization,
+)
from ..core.request_options import RequestOptions
from ..core.jsonable_encoder import jsonable_encoder
from ..core.serialization import convert_and_respect_annotation_metadata
@@ -14,6 +16,17 @@
from ..core.unchecked_base_model import construct_type
from json.decoder import JSONDecodeError
from ..core.api_error import ApiError
+from .types.body_text_to_speech_with_timestamps_v_1_text_to_speech_voice_id_with_timestamps_post_apply_text_normalization import (
+ BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization,
+)
+from .types.body_text_to_speech_streaming_v_1_text_to_speech_voice_id_stream_post_apply_text_normalization import (
+ BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization,
+)
+from .types.body_text_to_speech_streaming_with_timestamps_v_1_text_to_speech_voice_id_stream_with_timestamps_post_apply_text_normalization import (
+ BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization,
+)
+from .types.text_to_speech_stream_with_timestamps_response import TextToSpeechStreamWithTimestampsResponse
+import json
from ..core.client_wrapper import AsyncClientWrapper
# this is used as the default value for optional parameters
@@ -30,7 +43,7 @@ def convert(
*,
text: str,
enable_logging: typing.Optional[bool] = None,
- optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None,
+ optimize_streaming_latency: typing.Optional[int] = None,
output_format: typing.Optional[OutputFormat] = None,
model_id: typing.Optional[str] = OMIT,
language_code: typing.Optional[str] = OMIT,
@@ -43,6 +56,10 @@ def convert(
next_text: typing.Optional[str] = OMIT,
previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
+ use_pvc_as_ivc: typing.Optional[bool] = OMIT,
+ apply_text_normalization: typing.Optional[
+ BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization
+ ] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[bytes]:
"""
@@ -59,8 +76,15 @@ def convert(
enable_logging : typing.Optional[bool]
When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
- optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency]
- You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+ optimize_streaming_latency : typing.Optional[int]
+ You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+ 0 - default mode (no latency optimizations)
+ 1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+ 2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+ 3 - max latency optimizations
+ 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).
+
+ Defaults to None.
output_format : typing.Optional[OutputFormat]
The output format of the generated audio.
@@ -78,7 +102,7 @@ def convert(
A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
previous_text : typing.Optional[str]
The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
@@ -92,8 +116,14 @@ def convert(
next_request_ids : typing.Optional[typing.Sequence[str]]
A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+ use_pvc_as_ivc : typing.Optional[bool]
+ If true, we won't use the PVC version of the voice for the generation but rather the IVC version. This is a temporary workaround for higher latency in PVC versions.
+
+ apply_text_normalization : typing.Optional[BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization]
+ This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
+
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
@@ -102,21 +132,16 @@ def convert(
Examples
--------
- from elevenlabs import ElevenLabs, VoiceSettings
+ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.text_to_speech.convert(
- voice_id="pMsXgVXv3BLzUgSXRplE",
- optimize_streaming_latency="0",
- output_format="mp3_22050_32",
- text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”",
- voice_settings=VoiceSettings(
- stability=0.1,
- similarity_boost=0.3,
- style=0.2,
- ),
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ text="The first move is what sets everything in motion.",
+ model_id="eleven_multilingual_v2",
)
"""
with self._client_wrapper.httpx_client.stream(
@@ -144,13 +169,19 @@ def convert(
"next_text": next_text,
"previous_request_ids": previous_request_ids,
"next_request_ids": next_request_ids,
+ "use_pvc_as_ivc": use_pvc_as_ivc,
+ "apply_text_normalization": apply_text_normalization,
+ },
+ headers={
+ "content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
) as _response:
try:
if 200 <= _response.status_code < 300:
- audio_data = b''.join(chunk for chunk in _response.iter_bytes())
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ audio_data = b''.join(chunk for chunk in _response.iter_bytes(chunk_size=_chunk_size))
request_id = _response.headers.get("request-id")
return audio_data, request_id
_response.read()
@@ -175,7 +206,7 @@ def convert_with_timestamps(
*,
text: str,
enable_logging: typing.Optional[bool] = None,
- optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None,
+ optimize_streaming_latency: typing.Optional[int] = None,
output_format: typing.Optional[OutputFormat] = None,
model_id: typing.Optional[str] = OMIT,
language_code: typing.Optional[str] = OMIT,
@@ -188,6 +219,10 @@ def convert_with_timestamps(
next_text: typing.Optional[str] = OMIT,
previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
+ use_pvc_as_ivc: typing.Optional[bool] = OMIT,
+ apply_text_normalization: typing.Optional[
+ BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization
+ ] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Optional[typing.Any]:
"""
@@ -204,8 +239,15 @@ def convert_with_timestamps(
enable_logging : typing.Optional[bool]
When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
- optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency]
- You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+ optimize_streaming_latency : typing.Optional[int]
+ You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+ 0 - default mode (no latency optimizations)
+ 1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+ 2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+ 3 - max latency optimizations
+ 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).
+
+ Defaults to None.
output_format : typing.Optional[OutputFormat]
The output format of the generated audio.
@@ -223,7 +265,7 @@ def convert_with_timestamps(
A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
previous_text : typing.Optional[str]
The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
@@ -237,6 +279,12 @@ def convert_with_timestamps(
next_request_ids : typing.Optional[typing.Sequence[str]]
A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+ use_pvc_as_ivc : typing.Optional[bool]
+ If true, we won't use the PVC version of the voice for the generation but rather the IVC version. This is a temporary workaround for higher latency in PVC versions.
+
+ apply_text_normalization : typing.Optional[BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization]
+ This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -253,8 +301,10 @@ def convert_with_timestamps(
api_key="YOUR_API_KEY",
)
client.text_to_speech.convert_with_timestamps(
- voice_id="21m00Tcm4TlvDq8ikWAM",
- text="text",
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ text="The first move is what sets everything in motion.",
+ model_id="eleven_multilingual_v2",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -282,6 +332,11 @@ def convert_with_timestamps(
"next_text": next_text,
"previous_request_ids": previous_request_ids,
"next_request_ids": next_request_ids,
+ "use_pvc_as_ivc": use_pvc_as_ivc,
+ "apply_text_normalization": apply_text_normalization,
+ },
+ headers={
+ "content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
@@ -318,7 +373,7 @@ def convert_as_stream(
*,
text: str,
enable_logging: typing.Optional[bool] = None,
- optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None,
+ optimize_streaming_latency: typing.Optional[int] = None,
output_format: typing.Optional[OutputFormat] = None,
model_id: typing.Optional[str] = OMIT,
language_code: typing.Optional[str] = OMIT,
@@ -331,6 +386,10 @@ def convert_as_stream(
next_text: typing.Optional[str] = OMIT,
previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
+ use_pvc_as_ivc: typing.Optional[bool] = OMIT,
+ apply_text_normalization: typing.Optional[
+ BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization
+ ] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[bytes]:
"""
@@ -347,8 +406,15 @@ def convert_as_stream(
enable_logging : typing.Optional[bool]
When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
- optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency]
- You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+ optimize_streaming_latency : typing.Optional[int]
+ You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+ 0 - default mode (no latency optimizations)
+ 1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+ 2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+ 3 - max latency optimizations
+ 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).
+
+ Defaults to None.
output_format : typing.Optional[OutputFormat]
The output format of the generated audio.
@@ -366,7 +432,7 @@ def convert_as_stream(
A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
previous_text : typing.Optional[str]
The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
@@ -380,8 +446,14 @@ def convert_as_stream(
next_request_ids : typing.Optional[typing.Sequence[str]]
A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+ use_pvc_as_ivc : typing.Optional[bool]
+ If true, we won't use the PVC version of the voice for the generation but rather the IVC version. This is a temporary workaround for higher latency in PVC versions.
+
+ apply_text_normalization : typing.Optional[BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization]
+ This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
+
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
@@ -390,21 +462,16 @@ def convert_as_stream(
Examples
--------
- from elevenlabs import ElevenLabs, VoiceSettings
+ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.text_to_speech.convert_as_stream(
- voice_id="pMsXgVXv3BLzUgSXRplE",
- optimize_streaming_latency="0",
- output_format="mp3_22050_32",
- text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”",
- voice_settings=VoiceSettings(
- stability=0.1,
- similarity_boost=0.3,
- style=0.2,
- ),
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ text="The first move is what sets everything in motion.",
+ model_id="eleven_multilingual_v2",
)
"""
with self._client_wrapper.httpx_client.stream(
@@ -432,13 +499,19 @@ def convert_as_stream(
"next_text": next_text,
"previous_request_ids": previous_request_ids,
"next_request_ids": next_request_ids,
+ "use_pvc_as_ivc": use_pvc_as_ivc,
+ "apply_text_normalization": apply_text_normalization,
+ },
+ headers={
+ "content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
) as _response:
try:
if 200 <= _response.status_code < 300:
- for _chunk in _response.iter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ for _chunk in _response.iter_bytes(chunk_size=_chunk_size):
yield _chunk
return
_response.read()
@@ -463,7 +536,7 @@ def stream_with_timestamps(
*,
text: str,
enable_logging: typing.Optional[bool] = None,
- optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None,
+ optimize_streaming_latency: typing.Optional[int] = None,
output_format: typing.Optional[OutputFormat] = None,
model_id: typing.Optional[str] = OMIT,
language_code: typing.Optional[str] = OMIT,
@@ -476,8 +549,12 @@ def stream_with_timestamps(
next_text: typing.Optional[str] = OMIT,
previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
+ use_pvc_as_ivc: typing.Optional[bool] = OMIT,
+ apply_text_normalization: typing.Optional[
+ BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization
+ ] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> typing.Iterator[TextToSpeechStreamWithTimestampsResponse]:
"""
Converts text into speech using a voice of your choice and returns a stream of JSONs containing audio as a base64 encoded string together with information on when which character was spoken.
@@ -492,8 +569,15 @@ def stream_with_timestamps(
enable_logging : typing.Optional[bool]
When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
- optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency]
- You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+ optimize_streaming_latency : typing.Optional[int]
+ You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+ 0 - default mode (no latency optimizations)
+ 1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+ 2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+ 3 - max latency optimizations
+ 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).
+
+ Defaults to None.
output_format : typing.Optional[OutputFormat]
The output format of the generated audio.
@@ -511,7 +595,7 @@ def stream_with_timestamps(
A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
previous_text : typing.Optional[str]
The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
@@ -525,12 +609,19 @@ def stream_with_timestamps(
next_request_ids : typing.Optional[typing.Sequence[str]]
A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+ use_pvc_as_ivc : typing.Optional[bool]
+ If true, we won't use the PVC version of the voice for the generation but rather the IVC version. This is a temporary workaround for higher latency in PVC versions.
+
+ apply_text_normalization : typing.Optional[BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization]
+ This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
- Returns
- -------
- None
+ Yields
+ ------
+ typing.Iterator[TextToSpeechStreamWithTimestampsResponse]
+ Stream of JSON objects containing audio chunks and character timing information
Examples
--------
@@ -539,12 +630,16 @@ def stream_with_timestamps(
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
- client.text_to_speech.stream_with_timestamps(
- voice_id="21m00Tcm4TlvDq8ikWAM",
- text="text",
+ response = client.text_to_speech.stream_with_timestamps(
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ text="The first move is what sets everything in motion.",
+ model_id="eleven_multilingual_v2",
)
+ for chunk in response:
+ print(chunk)
"""
- _response = self._client_wrapper.httpx_client.request(
+ with self._client_wrapper.httpx_client.stream(
f"v1/text-to-speech/{jsonable_encoder(voice_id)}/stream/with-timestamps",
method="POST",
params={
@@ -569,27 +664,46 @@ def stream_with_timestamps(
"next_text": next_text,
"previous_request_ids": previous_request_ids,
"next_request_ids": next_request_ids,
+ "use_pvc_as_ivc": use_pvc_as_ivc,
+ "apply_text_normalization": apply_text_normalization,
+ },
+ headers={
+ "content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
- )
- try:
- if 200 <= _response.status_code < 300:
- return
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
+ ) as _response:
+ try:
+ if 200 <= _response.status_code < 300:
+ for _text in _response.iter_lines():
+ try:
+ if len(_text) == 0:
+ continue
+ yield typing.cast(
+ TextToSpeechStreamWithTimestampsResponse,
+ construct_type(
+ type_=TextToSpeechStreamWithTimestampsResponse, # type: ignore
+ object_=json.loads(_text),
+ ),
+ )
+ except:
+ pass
+ return
+ _response.read()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
)
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
class AsyncTextToSpeechClient:
@@ -602,7 +716,7 @@ async def convert(
*,
text: str,
enable_logging: typing.Optional[bool] = None,
- optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None,
+ optimize_streaming_latency: typing.Optional[int] = None,
output_format: typing.Optional[OutputFormat] = None,
model_id: typing.Optional[str] = OMIT,
language_code: typing.Optional[str] = OMIT,
@@ -615,6 +729,10 @@ async def convert(
next_text: typing.Optional[str] = OMIT,
previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
+ use_pvc_as_ivc: typing.Optional[bool] = OMIT,
+ apply_text_normalization: typing.Optional[
+ BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization
+ ] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[bytes]:
"""
@@ -631,8 +749,15 @@ async def convert(
enable_logging : typing.Optional[bool]
When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
- optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency]
- You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+ optimize_streaming_latency : typing.Optional[int]
+ You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+ 0 - default mode (no latency optimizations)
+ 1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+ 2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+ 3 - max latency optimizations
+ 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).
+
+ Defaults to None.
output_format : typing.Optional[OutputFormat]
The output format of the generated audio.
@@ -650,7 +775,7 @@ async def convert(
A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
previous_text : typing.Optional[str]
The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
@@ -664,8 +789,14 @@ async def convert(
next_request_ids : typing.Optional[typing.Sequence[str]]
A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+ use_pvc_as_ivc : typing.Optional[bool]
+ If true, we won't use the PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
+
+ apply_text_normalization : typing.Optional[BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization]
+ This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
+
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
@@ -676,7 +807,7 @@ async def convert(
--------
import asyncio
- from elevenlabs import AsyncElevenLabs, VoiceSettings
+ from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
@@ -685,15 +816,10 @@ async def convert(
async def main() -> None:
await client.text_to_speech.convert(
- voice_id="pMsXgVXv3BLzUgSXRplE",
- optimize_streaming_latency="0",
- output_format="mp3_22050_32",
- text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”",
- voice_settings=VoiceSettings(
- stability=0.1,
- similarity_boost=0.3,
- style=0.2,
- ),
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ text="The first move is what sets everything in motion.",
+ model_id="eleven_multilingual_v2",
)
@@ -724,13 +850,19 @@ async def main() -> None:
"next_text": next_text,
"previous_request_ids": previous_request_ids,
"next_request_ids": next_request_ids,
+ "use_pvc_as_ivc": use_pvc_as_ivc,
+ "apply_text_normalization": apply_text_normalization,
+ },
+ headers={
+ "content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
) as _response:
try:
if 200 <= _response.status_code < 300:
- async for _chunk in _response.aiter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
yield _chunk
return
await _response.aread()
@@ -755,7 +887,7 @@ async def convert_with_timestamps(
*,
text: str,
enable_logging: typing.Optional[bool] = None,
- optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None,
+ optimize_streaming_latency: typing.Optional[int] = None,
output_format: typing.Optional[OutputFormat] = None,
model_id: typing.Optional[str] = OMIT,
language_code: typing.Optional[str] = OMIT,
@@ -768,6 +900,10 @@ async def convert_with_timestamps(
next_text: typing.Optional[str] = OMIT,
previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
+ use_pvc_as_ivc: typing.Optional[bool] = OMIT,
+ apply_text_normalization: typing.Optional[
+ BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization
+ ] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Optional[typing.Any]:
"""
@@ -784,8 +920,15 @@ async def convert_with_timestamps(
enable_logging : typing.Optional[bool]
When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
- optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency]
- You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+ optimize_streaming_latency : typing.Optional[int]
+ You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+ 0 - default mode (no latency optimizations)
+ 1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+ 2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+ 3 - max latency optimizations
+ 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).
+
+ Defaults to None.
output_format : typing.Optional[OutputFormat]
The output format of the generated audio.
@@ -803,7 +946,7 @@ async def convert_with_timestamps(
A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
previous_text : typing.Optional[str]
The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
@@ -817,6 +960,12 @@ async def convert_with_timestamps(
next_request_ids : typing.Optional[typing.Sequence[str]]
A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+ use_pvc_as_ivc : typing.Optional[bool]
+ If true, we won't use the PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
+
+ apply_text_normalization : typing.Optional[BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization]
+ This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -838,8 +987,10 @@ async def convert_with_timestamps(
async def main() -> None:
await client.text_to_speech.convert_with_timestamps(
- voice_id="21m00Tcm4TlvDq8ikWAM",
- text="text",
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ text="The first move is what sets everything in motion.",
+ model_id="eleven_multilingual_v2",
)
@@ -870,6 +1021,11 @@ async def main() -> None:
"next_text": next_text,
"previous_request_ids": previous_request_ids,
"next_request_ids": next_request_ids,
+ "use_pvc_as_ivc": use_pvc_as_ivc,
+ "apply_text_normalization": apply_text_normalization,
+ },
+ headers={
+ "content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
@@ -904,7 +1060,7 @@ async def convert_as_stream(
*,
text: str,
enable_logging: typing.Optional[bool] = None,
- optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None,
+ optimize_streaming_latency: typing.Optional[int] = None,
output_format: typing.Optional[OutputFormat] = None,
model_id: typing.Optional[str] = OMIT,
language_code: typing.Optional[str] = OMIT,
@@ -917,6 +1073,10 @@ async def convert_as_stream(
next_text: typing.Optional[str] = OMIT,
previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
+ use_pvc_as_ivc: typing.Optional[bool] = OMIT,
+ apply_text_normalization: typing.Optional[
+ BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization
+ ] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[bytes]:
"""
@@ -933,8 +1093,15 @@ async def convert_as_stream(
enable_logging : typing.Optional[bool]
When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
- optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency]
- You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+ optimize_streaming_latency : typing.Optional[int]
+ You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+ 0 - default mode (no latency optimizations)
+ 1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+ 2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+ 3 - max latency optimizations
+ 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).
+
+ Defaults to None.
output_format : typing.Optional[OutputFormat]
The output format of the generated audio.
@@ -952,7 +1119,7 @@ async def convert_as_stream(
A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
previous_text : typing.Optional[str]
The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
@@ -966,8 +1133,14 @@ async def convert_as_stream(
next_request_ids : typing.Optional[typing.Sequence[str]]
A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+ use_pvc_as_ivc : typing.Optional[bool]
+ If true, we won't use the PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
+
+ apply_text_normalization : typing.Optional[BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization]
+ This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
+
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
@@ -978,7 +1151,7 @@ async def convert_as_stream(
--------
import asyncio
- from elevenlabs import AsyncElevenLabs, VoiceSettings
+ from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
@@ -987,15 +1160,10 @@ async def convert_as_stream(
async def main() -> None:
await client.text_to_speech.convert_as_stream(
- voice_id="pMsXgVXv3BLzUgSXRplE",
- optimize_streaming_latency="0",
- output_format="mp3_22050_32",
- text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”",
- voice_settings=VoiceSettings(
- stability=0.1,
- similarity_boost=0.3,
- style=0.2,
- ),
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ text="The first move is what sets everything in motion.",
+ model_id="eleven_multilingual_v2",
)
@@ -1026,13 +1194,19 @@ async def main() -> None:
"next_text": next_text,
"previous_request_ids": previous_request_ids,
"next_request_ids": next_request_ids,
+ "use_pvc_as_ivc": use_pvc_as_ivc,
+ "apply_text_normalization": apply_text_normalization,
+ },
+ headers={
+ "content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
) as _response:
try:
if 200 <= _response.status_code < 300:
- async for _chunk in _response.aiter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
yield _chunk
return
await _response.aread()
@@ -1057,7 +1231,7 @@ async def stream_with_timestamps(
*,
text: str,
enable_logging: typing.Optional[bool] = None,
- optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None,
+ optimize_streaming_latency: typing.Optional[int] = None,
output_format: typing.Optional[OutputFormat] = None,
model_id: typing.Optional[str] = OMIT,
language_code: typing.Optional[str] = OMIT,
@@ -1070,8 +1244,12 @@ async def stream_with_timestamps(
next_text: typing.Optional[str] = OMIT,
previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
+ use_pvc_as_ivc: typing.Optional[bool] = OMIT,
+ apply_text_normalization: typing.Optional[
+ BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization
+ ] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> typing.AsyncIterator[TextToSpeechStreamWithTimestampsResponse]:
"""
Converts text into speech using a voice of your choice and returns a stream of JSONs containing audio as a base64 encoded string together with information on when which character was spoken.
@@ -1086,8 +1264,15 @@ async def stream_with_timestamps(
enable_logging : typing.Optional[bool]
When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
- optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency]
- You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
+ optimize_streaming_latency : typing.Optional[int]
+ You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+ 0 - default mode (no latency optimizations)
+ 1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+ 2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+ 3 - max latency optimizations
+ 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).
+
+ Defaults to None.
output_format : typing.Optional[OutputFormat]
The output format of the generated audio.
@@ -1105,7 +1290,7 @@ async def stream_with_timestamps(
A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
previous_text : typing.Optional[str]
The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
@@ -1119,12 +1304,19 @@ async def stream_with_timestamps(
next_request_ids : typing.Optional[typing.Sequence[str]]
A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+ use_pvc_as_ivc : typing.Optional[bool]
+ If true, we won't use the PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
+
+ apply_text_normalization : typing.Optional[BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization]
+ This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
- Returns
- -------
- None
+ Yields
+ ------
+ typing.AsyncIterator[TextToSpeechStreamWithTimestampsResponse]
+ Stream of JSON objects containing audio chunks and character timing information
Examples
--------
@@ -1138,15 +1330,19 @@ async def stream_with_timestamps(
async def main() -> None:
- await client.text_to_speech.stream_with_timestamps(
- voice_id="21m00Tcm4TlvDq8ikWAM",
- text="text",
+ response = await client.text_to_speech.stream_with_timestamps(
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ text="The first move is what sets everything in motion.",
+ model_id="eleven_multilingual_v2",
)
+ async for chunk in response:
+ print(chunk)
asyncio.run(main())
"""
- _response = await self._client_wrapper.httpx_client.request(
+ async with self._client_wrapper.httpx_client.stream(
f"v1/text-to-speech/{jsonable_encoder(voice_id)}/stream/with-timestamps",
method="POST",
params={
@@ -1171,24 +1367,43 @@ async def main() -> None:
"next_text": next_text,
"previous_request_ids": previous_request_ids,
"next_request_ids": next_request_ids,
+ "use_pvc_as_ivc": use_pvc_as_ivc,
+ "apply_text_normalization": apply_text_normalization,
+ },
+ headers={
+ "content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
- )
- try:
- if 200 <= _response.status_code < 300:
- return
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
+ ) as _response:
+ try:
+ if 200 <= _response.status_code < 300:
+ async for _text in _response.aiter_lines():
+ try:
+ if len(_text) == 0:
+ continue
+ yield typing.cast(
+ TextToSpeechStreamWithTimestampsResponse,
+ construct_type(
+ type_=TextToSpeechStreamWithTimestampsResponse, # type: ignore
+ object_=json.loads(_text),
+ ),
+ )
+ except:
+ pass
+ return
+ await _response.aread()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
)
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/elevenlabs/text_to_speech/types/__init__.py b/src/elevenlabs/text_to_speech/types/__init__.py
index d770d9b9..527c7242 100644
--- a/src/elevenlabs/text_to_speech/types/__init__.py
+++ b/src/elevenlabs/text_to_speech/types/__init__.py
@@ -1,5 +1,29 @@
# This file was auto-generated by Fern from our API Definition.
-from .send_message import SendMessage
+from .body_text_to_speech_streaming_v_1_text_to_speech_voice_id_stream_post_apply_text_normalization import (
+ BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization,
+)
+from .body_text_to_speech_streaming_with_timestamps_v_1_text_to_speech_voice_id_stream_with_timestamps_post_apply_text_normalization import (
+ BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization,
+)
+from .body_text_to_speech_v_1_text_to_speech_voice_id_post_apply_text_normalization import (
+ BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization,
+)
+from .body_text_to_speech_with_timestamps_v_1_text_to_speech_voice_id_with_timestamps_post_apply_text_normalization import (
+ BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization,
+)
+from .text_to_speech_stream_with_timestamps_response import TextToSpeechStreamWithTimestampsResponse
+from .text_to_speech_stream_with_timestamps_response_alignment import TextToSpeechStreamWithTimestampsResponseAlignment
+from .text_to_speech_stream_with_timestamps_response_normalized_alignment import (
+ TextToSpeechStreamWithTimestampsResponseNormalizedAlignment,
+)
-__all__ = ["SendMessage"]
+__all__ = [
+ "BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization",
+ "BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization",
+ "BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization",
+ "BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization",
+ "TextToSpeechStreamWithTimestampsResponse",
+ "TextToSpeechStreamWithTimestampsResponseAlignment",
+ "TextToSpeechStreamWithTimestampsResponseNormalizedAlignment",
+]
diff --git a/src/elevenlabs/text_to_speech/types/body_text_to_speech_streaming_v_1_text_to_speech_voice_id_stream_post_apply_text_normalization.py b/src/elevenlabs/text_to_speech/types/body_text_to_speech_streaming_v_1_text_to_speech_voice_id_stream_post_apply_text_normalization.py
new file mode 100644
index 00000000..42f98101
--- /dev/null
+++ b/src/elevenlabs/text_to_speech/types/body_text_to_speech_streaming_v_1_text_to_speech_voice_id_stream_post_apply_text_normalization.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization = typing.Union[
+ typing.Literal["auto", "on", "off"], typing.Any
+]
diff --git a/src/elevenlabs/text_to_speech/types/body_text_to_speech_streaming_with_timestamps_v_1_text_to_speech_voice_id_stream_with_timestamps_post_apply_text_normalization.py b/src/elevenlabs/text_to_speech/types/body_text_to_speech_streaming_with_timestamps_v_1_text_to_speech_voice_id_stream_with_timestamps_post_apply_text_normalization.py
new file mode 100644
index 00000000..7cbbc3b7
--- /dev/null
+++ b/src/elevenlabs/text_to_speech/types/body_text_to_speech_streaming_with_timestamps_v_1_text_to_speech_voice_id_stream_with_timestamps_post_apply_text_normalization.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization = (
+ typing.Union[typing.Literal["auto", "on", "off"], typing.Any]
+)
diff --git a/src/elevenlabs/text_to_speech/types/body_text_to_speech_v_1_text_to_speech_voice_id_post_apply_text_normalization.py b/src/elevenlabs/text_to_speech/types/body_text_to_speech_v_1_text_to_speech_voice_id_post_apply_text_normalization.py
new file mode 100644
index 00000000..42f873cf
--- /dev/null
+++ b/src/elevenlabs/text_to_speech/types/body_text_to_speech_v_1_text_to_speech_voice_id_post_apply_text_normalization.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization = typing.Union[
+ typing.Literal["auto", "on", "off"], typing.Any
+]
diff --git a/src/elevenlabs/text_to_speech/types/body_text_to_speech_with_timestamps_v_1_text_to_speech_voice_id_with_timestamps_post_apply_text_normalization.py b/src/elevenlabs/text_to_speech/types/body_text_to_speech_with_timestamps_v_1_text_to_speech_voice_id_with_timestamps_post_apply_text_normalization.py
new file mode 100644
index 00000000..b2e12a10
--- /dev/null
+++ b/src/elevenlabs/text_to_speech/types/body_text_to_speech_with_timestamps_v_1_text_to_speech_voice_id_with_timestamps_post_apply_text_normalization.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization = typing.Union[
+ typing.Literal["auto", "on", "off"], typing.Any
+]
diff --git a/src/elevenlabs/text_to_speech/types/send_message.py b/src/elevenlabs/text_to_speech/types/send_message.py
deleted file mode 100644
index d4d0d409..00000000
--- a/src/elevenlabs/text_to_speech/types/send_message.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from ...types.initialize_connection import InitializeConnection
-from ...types.send_text import SendText
-from ...types.close_connection import CloseConnection
-
-SendMessage = typing.Union[InitializeConnection, SendText, CloseConnection]
diff --git a/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response.py b/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response.py
new file mode 100644
index 00000000..07c25399
--- /dev/null
+++ b/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ...core.unchecked_base_model import UncheckedBaseModel
+import typing_extensions
+import typing
+from ...core.serialization import FieldMetadata
+import pydantic
+from .text_to_speech_stream_with_timestamps_response_alignment import TextToSpeechStreamWithTimestampsResponseAlignment
+from .text_to_speech_stream_with_timestamps_response_normalized_alignment import (
+ TextToSpeechStreamWithTimestampsResponseNormalizedAlignment,
+)
+from ...core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class TextToSpeechStreamWithTimestampsResponse(UncheckedBaseModel):
+ audio_base_64: typing_extensions.Annotated[typing.Optional[str], FieldMetadata(alias="audio_base64")] = (
+ pydantic.Field(default=None)
+ )
+ """
+ Base64 encoded audio chunk
+ """
+
+ alignment: typing.Optional[TextToSpeechStreamWithTimestampsResponseAlignment] = None
+ normalized_alignment: typing.Optional[TextToSpeechStreamWithTimestampsResponseNormalizedAlignment] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_alignment.py b/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_alignment.py
new file mode 100644
index 00000000..f8230552
--- /dev/null
+++ b/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_alignment.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ...core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+from ...core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class TextToSpeechStreamWithTimestampsResponseAlignment(UncheckedBaseModel):
+ characters: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of individual characters from the input text
+ """
+
+ character_start_times_seconds: typing.Optional[typing.List[float]] = pydantic.Field(default=None)
+ """
+ Array of start times (in seconds) for each character
+ """
+
+ character_end_times_seconds: typing.Optional[typing.List[float]] = pydantic.Field(default=None)
+ """
+ Array of end times (in seconds) for each character
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_normalized_alignment.py b/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_normalized_alignment.py
new file mode 100644
index 00000000..2982e649
--- /dev/null
+++ b/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_normalized_alignment.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ...core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+from ...core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class TextToSpeechStreamWithTimestampsResponseNormalizedAlignment(UncheckedBaseModel):
+ characters: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of individual characters from the normalized text
+ """
+
+ character_start_times_seconds: typing.Optional[typing.List[float]] = pydantic.Field(default=None)
+ """
+ Array of start times (in seconds) for each normalized character
+ """
+
+ character_end_times_seconds: typing.Optional[typing.List[float]] = pydantic.Field(default=None)
+ """
+ Array of end times (in seconds) for each normalized character
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/text_to_voice/__init__.py b/src/elevenlabs/text_to_voice/__init__.py
new file mode 100644
index 00000000..1a606e5d
--- /dev/null
+++ b/src/elevenlabs/text_to_voice/__init__.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .types import TextToVoiceCreatePreviewsRequestOutputFormat
+
+__all__ = ["TextToVoiceCreatePreviewsRequestOutputFormat"]
diff --git a/src/elevenlabs/text_to_voice/client.py b/src/elevenlabs/text_to_voice/client.py
new file mode 100644
index 00000000..ce03c69f
--- /dev/null
+++ b/src/elevenlabs/text_to_voice/client.py
@@ -0,0 +1,421 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..core.client_wrapper import SyncClientWrapper
+from .types.text_to_voice_create_previews_request_output_format import TextToVoiceCreatePreviewsRequestOutputFormat
+from ..core.request_options import RequestOptions
+from ..types.voice_previews_response_model import VoicePreviewsResponseModel
+from ..core.unchecked_base_model import construct_type
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.http_validation_error import HttpValidationError
+from json.decoder import JSONDecodeError
+from ..core.api_error import ApiError
+from ..types.voice import Voice
+from ..core.client_wrapper import AsyncClientWrapper
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class TextToVoiceClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def create_previews(
+ self,
+ *,
+ voice_description: str,
+ text: str,
+ output_format: typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat] = None,
+ auto_generate_text: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> VoicePreviewsResponseModel:
+ """
+        Generate a custom voice based on voice description. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. If you like a voice preview and want to create the voice, call /v1/text-to-voice/create-voice-from-preview with the generated_voice_id to create the voice.
+
+ Parameters
+ ----------
+ voice_description : str
+ Description to use for the created voice.
+
+ text : str
+ Text to generate, text length has to be between 100 and 1000.
+
+ output_format : typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat]
+ Output format of the generated audio. Must be one of:
+ mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps.
+ mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps.
+ mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps.
+ mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps.
+ mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps.
+ mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above.
+ pcm_16000 - PCM format (S16LE) with 16kHz sample rate.
+ pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate.
+ pcm_24000 - PCM format (S16LE) with 24kHz sample rate.
+ pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above.
+ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs.
+
+ auto_generate_text : typing.Optional[bool]
+ Whether to automatically generate a text suitable for the voice description.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ VoicePreviewsResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.text_to_voice.create_previews(
+ voice_description="A sassy little squeaky mouse",
+ text="Every act of kindness, no matter how small, carries value and can make a difference, as no gesture of goodwill is ever wasted.",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/text-to-voice/create-previews",
+ method="POST",
+ params={
+ "output_format": output_format,
+ },
+ json={
+ "voice_description": voice_description,
+ "text": text,
+ "auto_generate_text": auto_generate_text,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ VoicePreviewsResponseModel,
+ construct_type(
+ type_=VoicePreviewsResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def create_voice_from_preview(
+ self,
+ *,
+ voice_name: str,
+ voice_description: str,
+ generated_voice_id: str,
+ labels: typing.Optional[typing.Dict[str, str]] = OMIT,
+ played_not_selected_voice_ids: typing.Optional[typing.Sequence[str]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> Voice:
+ """
+ Create a voice from previously generated voice preview. This endpoint should be called after you fetched a generated_voice_id using /v1/text-to-voice/create-previews.
+
+ Parameters
+ ----------
+ voice_name : str
+ Name to use for the created voice.
+
+ voice_description : str
+ Description to use for the created voice.
+
+ generated_voice_id : str
+            The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if you don't have one yet.
+
+ labels : typing.Optional[typing.Dict[str, str]]
+ Optional, metadata to add to the created voice. Defaults to None.
+
+ played_not_selected_voice_ids : typing.Optional[typing.Sequence[str]]
+ List of voice ids that the user has played but not selected. Used for RLHF.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ Voice
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.text_to_voice.create_voice_from_preview(
+ voice_name="Little squeaky mouse",
+ voice_description="A sassy little squeaky mouse",
+ generated_voice_id="37HceQefKmEi3bGovXjL",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/text-to-voice/create-voice-from-preview",
+ method="POST",
+ json={
+ "voice_name": voice_name,
+ "voice_description": voice_description,
+ "generated_voice_id": generated_voice_id,
+ "labels": labels,
+ "played_not_selected_voice_ids": played_not_selected_voice_ids,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ Voice,
+ construct_type(
+ type_=Voice, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncTextToVoiceClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def create_previews(
+ self,
+ *,
+ voice_description: str,
+ text: str,
+ output_format: typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat] = None,
+ auto_generate_text: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> VoicePreviewsResponseModel:
+ """
+        Generate a custom voice based on voice description. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. If you like a voice preview and want to create the voice, call /v1/text-to-voice/create-voice-from-preview with the generated_voice_id to create the voice.
+
+ Parameters
+ ----------
+ voice_description : str
+ Description to use for the created voice.
+
+ text : str
+ Text to generate, text length has to be between 100 and 1000.
+
+ output_format : typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat]
+ Output format of the generated audio. Must be one of:
+ mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps.
+ mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps.
+ mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps.
+ mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps.
+ mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps.
+ mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above.
+ pcm_16000 - PCM format (S16LE) with 16kHz sample rate.
+ pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate.
+ pcm_24000 - PCM format (S16LE) with 24kHz sample rate.
+ pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above.
+ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs.
+
+ auto_generate_text : typing.Optional[bool]
+ Whether to automatically generate a text suitable for the voice description.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ VoicePreviewsResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.text_to_voice.create_previews(
+ voice_description="A sassy little squeaky mouse",
+ text="Every act of kindness, no matter how small, carries value and can make a difference, as no gesture of goodwill is ever wasted.",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/text-to-voice/create-previews",
+ method="POST",
+ params={
+ "output_format": output_format,
+ },
+ json={
+ "voice_description": voice_description,
+ "text": text,
+ "auto_generate_text": auto_generate_text,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ VoicePreviewsResponseModel,
+ construct_type(
+ type_=VoicePreviewsResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_voice_from_preview(
+ self,
+ *,
+ voice_name: str,
+ voice_description: str,
+ generated_voice_id: str,
+ labels: typing.Optional[typing.Dict[str, str]] = OMIT,
+ played_not_selected_voice_ids: typing.Optional[typing.Sequence[str]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> Voice:
+ """
+ Create a voice from previously generated voice preview. This endpoint should be called after you fetched a generated_voice_id using /v1/text-to-voice/create-previews.
+
+ Parameters
+ ----------
+ voice_name : str
+ Name to use for the created voice.
+
+ voice_description : str
+ Description to use for the created voice.
+
+ generated_voice_id : str
+            The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if you don't have one yet.
+
+ labels : typing.Optional[typing.Dict[str, str]]
+ Optional, metadata to add to the created voice. Defaults to None.
+
+ played_not_selected_voice_ids : typing.Optional[typing.Sequence[str]]
+ List of voice ids that the user has played but not selected. Used for RLHF.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ Voice
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.text_to_voice.create_voice_from_preview(
+ voice_name="Little squeaky mouse",
+ voice_description="A sassy little squeaky mouse",
+ generated_voice_id="37HceQefKmEi3bGovXjL",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/text-to-voice/create-voice-from-preview",
+ method="POST",
+ json={
+ "voice_name": voice_name,
+ "voice_description": voice_description,
+ "generated_voice_id": generated_voice_id,
+ "labels": labels,
+ "played_not_selected_voice_ids": played_not_selected_voice_ids,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ Voice,
+ construct_type(
+ type_=Voice, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/elevenlabs/text_to_voice/types/__init__.py b/src/elevenlabs/text_to_voice/types/__init__.py
new file mode 100644
index 00000000..39c033b9
--- /dev/null
+++ b/src/elevenlabs/text_to_voice/types/__init__.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .text_to_voice_create_previews_request_output_format import TextToVoiceCreatePreviewsRequestOutputFormat
+
+__all__ = ["TextToVoiceCreatePreviewsRequestOutputFormat"]
diff --git a/src/elevenlabs/text_to_voice/types/text_to_voice_create_previews_request_output_format.py b/src/elevenlabs/text_to_voice/types/text_to_voice_create_previews_request_output_format.py
new file mode 100644
index 00000000..6e6980fc
--- /dev/null
+++ b/src/elevenlabs/text_to_voice/types/text_to_voice_create_previews_request_output_format.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TextToVoiceCreatePreviewsRequestOutputFormat = typing.Union[
+ typing.Literal[
+ "mp3_22050_32",
+ "mp3_44100_32",
+ "mp3_44100_64",
+ "mp3_44100_96",
+ "mp3_44100_128",
+ "mp3_44100_192",
+ "pcm_16000",
+ "pcm_22050",
+ "pcm_24000",
+ "pcm_44100",
+ "ulaw_8000",
+ ],
+ typing.Any,
+]
diff --git a/src/elevenlabs/types/__init__.py b/src/elevenlabs/types/__init__.py
index 3cfa2ee3..d2284c1d 100644
--- a/src/elevenlabs/types/__init__.py
+++ b/src/elevenlabs/types/__init__.py
@@ -1,187 +1,437 @@
# This file was auto-generated by Fern from our API Definition.
from .accent import Accent
+from .add_agent_secret_response_model import AddAgentSecretResponseModel
+from .add_chapter_response_model import AddChapterResponseModel
+from .add_knowledge_base_response_model import AddKnowledgeBaseResponseModel
from .add_project_response_model import AddProjectResponseModel
from .add_pronunciation_dictionary_response_model import AddPronunciationDictionaryResponseModel
from .add_pronunciation_dictionary_rules_response_model import AddPronunciationDictionaryRulesResponseModel
+from .add_voice_ivc_response_model import AddVoiceIvcResponseModel
from .add_voice_response_model import AddVoiceResponseModel
from .age import Age
+from .agent_ban import AgentBan
+from .agent_config import AgentConfig
+from .agent_config_override import AgentConfigOverride
+from .agent_config_override_config import AgentConfigOverrideConfig
+from .agent_metadata_response_model import AgentMetadataResponseModel
+from .agent_platform_settings import AgentPlatformSettings
+from .agent_summary_response_model import AgentSummaryResponseModel
+from .allowlist_item import AllowlistItem
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .array_json_schema_property_items import ArrayJsonSchemaPropertyItems
+from .asr_conversational_config import AsrConversationalConfig
+from .asr_input_format import AsrInputFormat
+from .asr_provider import AsrProvider
+from .asr_quality import AsrQuality
from .audio_native_create_project_response_model import AudioNativeCreateProjectResponseModel
-from .audio_native_get_embed_code_response_model import AudioNativeGetEmbedCodeResponseModel
-from .audio_output import AudioOutput
-from .category import Category
+from .auth_settings import AuthSettings
+from .authorization_method import AuthorizationMethod
+from .ban_reason_type import BanReasonType
+from .breakdown_types import BreakdownTypes
from .chapter_response import ChapterResponse
from .chapter_snapshot_response import ChapterSnapshotResponse
from .chapter_snapshots_response import ChapterSnapshotsResponse
from .chapter_state import ChapterState
from .chapter_statistics_response import ChapterStatisticsResponse
-from .close_connection import CloseConnection
+from .client_event import ClientEvent
+from .client_tool_config import ClientToolConfig
+from .conv_ai_new_secret_config import ConvAiNewSecretConfig
+from .conv_ai_secret_locator import ConvAiSecretLocator
+from .conv_ai_stored_secret_config import ConvAiStoredSecretConfig
+from .conversation_charging_common_model import ConversationChargingCommonModel
+from .conversation_config import ConversationConfig
+from .conversation_config_client_override import ConversationConfigClientOverride
+from .conversation_config_client_override_config import ConversationConfigClientOverrideConfig
+from .conversation_history_analysis_common_model import ConversationHistoryAnalysisCommonModel
+from .conversation_history_evaluation_criteria_result_common_model import (
+ ConversationHistoryEvaluationCriteriaResultCommonModel,
+)
+from .conversation_history_feedback_common_model import ConversationHistoryFeedbackCommonModel
+from .conversation_history_metadata_common_model import ConversationHistoryMetadataCommonModel
+from .conversation_history_transcript_common_model import ConversationHistoryTranscriptCommonModel
+from .conversation_history_transcript_common_model_role import ConversationHistoryTranscriptCommonModelRole
+from .conversation_history_transcript_tool_call_common_model import ConversationHistoryTranscriptToolCallCommonModel
+from .conversation_history_transcript_tool_result_common_model import ConversationHistoryTranscriptToolResultCommonModel
+from .conversation_initiation_client_data import ConversationInitiationClientData
+from .conversation_initiation_client_data_config import ConversationInitiationClientDataConfig
+from .conversation_signed_url_response_model import ConversationSignedUrlResponseModel
+from .conversation_summary_response_model import ConversationSummaryResponseModel
+from .conversation_summary_response_model_status import ConversationSummaryResponseModelStatus
+from .conversation_token_db_model import ConversationTokenDbModel
+from .conversation_token_purpose import ConversationTokenPurpose
+from .conversational_config import ConversationalConfig
+from .create_agent_response_model import CreateAgentResponseModel
+from .create_phone_number_response_model import CreatePhoneNumberResponseModel
from .currency import Currency
+from .custom_llm import CustomLlm
+from .data_collection_result_common_model import DataCollectionResultCommonModel
from .do_dubbing_response import DoDubbingResponse
from .dubbing_metadata_response import DubbingMetadataResponse
from .edit_project_response_model import EditProjectResponseModel
+from .embed_config import EmbedConfig
+from .embed_config_avatar import (
+ EmbedConfigAvatar,
+ EmbedConfigAvatar_Image,
+ EmbedConfigAvatar_Orb,
+ EmbedConfigAvatar_Url,
+)
+from .embed_variant import EmbedVariant
+from .evaluation_settings import EvaluationSettings
+from .evaluation_success_result import EvaluationSuccessResult
from .extended_subscription_response_model_billing_period import ExtendedSubscriptionResponseModelBillingPeriod
from .extended_subscription_response_model_character_refresh_period import (
ExtendedSubscriptionResponseModelCharacterRefreshPeriod,
)
+from .extended_subscription_response_model_currency import ExtendedSubscriptionResponseModelCurrency
from .feedback_item import FeedbackItem
from .fine_tuning_response import FineTuningResponse
from .fine_tuning_response_model_state_value import FineTuningResponseModelStateValue
from .gender import Gender
-from .generation_config import GenerationConfig
+from .get_agent_embed_response_model import GetAgentEmbedResponseModel
+from .get_agent_link_response_model import GetAgentLinkResponseModel
+from .get_agent_response_model import GetAgentResponseModel
+from .get_agents_page_response_model import GetAgentsPageResponseModel
from .get_chapters_response import GetChaptersResponse
+from .get_conversation_response_model import GetConversationResponseModel
+from .get_conversation_response_model_status import GetConversationResponseModelStatus
+from .get_conversations_page_response_model import GetConversationsPageResponseModel
+from .get_knowledge_base_reponse_model import GetKnowledgeBaseReponseModel
+from .get_knowledge_base_reponse_model_type import GetKnowledgeBaseReponseModelType
from .get_library_voices_response import GetLibraryVoicesResponse
+from .get_phone_number_response_model import GetPhoneNumberResponseModel
from .get_projects_response import GetProjectsResponse
from .get_pronunciation_dictionaries_metadata_response_model import GetPronunciationDictionariesMetadataResponseModel
from .get_pronunciation_dictionary_metadata_response import GetPronunciationDictionaryMetadataResponse
from .get_speech_history_response import GetSpeechHistoryResponse
from .get_voices_response import GetVoicesResponse
-from .history import History
from .history_alignment_response_model import HistoryAlignmentResponseModel
from .history_alignments_response_model import HistoryAlignmentsResponseModel
from .history_item import HistoryItem
from .http_validation_error import HttpValidationError
-from .initialize_connection import InitializeConnection
+from .image_avatar import ImageAvatar
from .invoice import Invoice
+from .knowledge_base_locator import KnowledgeBaseLocator
+from .knowledge_base_locator_type import KnowledgeBaseLocatorType
from .language_response import LanguageResponse
from .library_voice_response import LibraryVoiceResponse
+from .library_voice_response_model_category import LibraryVoiceResponseModelCategory
+from .literal_json_schema_property import LiteralJsonSchemaProperty
+from .literal_json_schema_property_type import LiteralJsonSchemaPropertyType
+from .llm import Llm
from .manual_verification_file_response import ManualVerificationFileResponse
from .manual_verification_response import ManualVerificationResponse
from .model import Model
-from .normalized_alignment import NormalizedAlignment
-from .optimize_streaming_latency import OptimizeStreamingLatency
+from .model_rates_response_model import ModelRatesResponseModel
+from .model_response_model_concurrency_group import ModelResponseModelConcurrencyGroup
+from .moderation_status_response_model import ModerationStatusResponseModel
+from .moderation_status_response_model_safety_status import ModerationStatusResponseModelSafetyStatus
+from .moderation_status_response_model_warning_status import ModerationStatusResponseModelWarningStatus
+from .object_json_schema_property import ObjectJsonSchemaProperty
+from .object_json_schema_property_properties_value import ObjectJsonSchemaPropertyPropertiesValue
+from .orb_avatar import OrbAvatar
from .output_format import OutputFormat
+from .phone_number_agent_info import PhoneNumberAgentInfo
+from .post_agent_avatar_response_model import PostAgentAvatarResponseModel
+from .privacy_config import PrivacyConfig
from .profile_page_response_model import ProfilePageResponseModel
+from .project_creation_meta_response_model import ProjectCreationMetaResponseModel
+from .project_creation_meta_response_model_status import ProjectCreationMetaResponseModelStatus
+from .project_creation_meta_response_model_type import ProjectCreationMetaResponseModelType
from .project_extended_response_model import ProjectExtendedResponseModel
+from .project_extended_response_model_access_level import ProjectExtendedResponseModelAccessLevel
+from .project_extended_response_model_apply_text_normalization import ProjectExtendedResponseModelApplyTextNormalization
+from .project_extended_response_model_fiction import ProjectExtendedResponseModelFiction
+from .project_extended_response_model_quality_preset import ProjectExtendedResponseModelQualityPreset
+from .project_extended_response_model_target_audience import ProjectExtendedResponseModelTargetAudience
from .project_response import ProjectResponse
+from .project_response_model_access_level import ProjectResponseModelAccessLevel
+from .project_response_model_fiction import ProjectResponseModelFiction
+from .project_response_model_target_audience import ProjectResponseModelTargetAudience
from .project_snapshot_response import ProjectSnapshotResponse
from .project_snapshot_upload_response_model import ProjectSnapshotUploadResponseModel
+from .project_snapshot_upload_response_model_status import ProjectSnapshotUploadResponseModelStatus
from .project_snapshots_response import ProjectSnapshotsResponse
from .project_state import ProjectState
+from .prompt_agent import PromptAgent
+from .prompt_agent_override import PromptAgentOverride
+from .prompt_agent_override_config import PromptAgentOverrideConfig
+from .prompt_agent_tools_item import PromptAgentToolsItem, PromptAgentToolsItem_Client, PromptAgentToolsItem_Webhook
+from .prompt_evaluation_criteria import PromptEvaluationCriteria
from .pronunciation_dictionary_alias_rule_request_model import PronunciationDictionaryAliasRuleRequestModel
from .pronunciation_dictionary_phoneme_rule_request_model import PronunciationDictionaryPhonemeRuleRequestModel
from .pronunciation_dictionary_version_locator import PronunciationDictionaryVersionLocator
-from .realtime_voice_settings import RealtimeVoiceSettings
+from .pronunciation_dictionary_version_response_model import PronunciationDictionaryVersionResponseModel
+from .pydantic_pronunciation_dictionary_version_locator import PydanticPronunciationDictionaryVersionLocator
+from .query_params_json_schema import QueryParamsJsonSchema
+from .reader_resource_response_model import ReaderResourceResponseModel
+from .reader_resource_response_model_resource_type import ReaderResourceResponseModelResourceType
from .recording_response import RecordingResponse
from .remove_pronunciation_dictionary_rules_response_model import RemovePronunciationDictionaryRulesResponseModel
from .review_status import ReviewStatus
-from .send_text import SendText
-from .source import Source
+from .safety import Safety
+from .safety_evaluation import SafetyEvaluation
+from .safety_rule import SafetyRule
from .speech_history_item_response import SpeechHistoryItemResponse
+from .speech_history_item_response_model_source import SpeechHistoryItemResponseModelSource
from .speech_history_item_response_model_voice_category import SpeechHistoryItemResponseModelVoiceCategory
-from .sso_provider_response_model import SsoProviderResponseModel
-from .sso_provider_response_model_provider_type import SsoProviderResponseModelProviderType
-from .status import Status
from .subscription import Subscription
from .subscription_response import SubscriptionResponse
from .subscription_response_model_billing_period import SubscriptionResponseModelBillingPeriod
from .subscription_response_model_character_refresh_period import SubscriptionResponseModelCharacterRefreshPeriod
+from .subscription_response_model_currency import SubscriptionResponseModelCurrency
from .subscription_status import SubscriptionStatus
+from .telephony_provider import TelephonyProvider
from .text_to_speech_as_stream_request import TextToSpeechAsStreamRequest
+from .tts_conversational_config import TtsConversationalConfig
+from .tts_conversational_config_override import TtsConversationalConfigOverride
+from .tts_conversational_config_override_config import TtsConversationalConfigOverrideConfig
+from .tts_conversational_model import TtsConversationalModel
+from .tts_optimize_streaming_latency import TtsOptimizeStreamingLatency
+from .tts_output_format import TtsOutputFormat
+from .turn_config import TurnConfig
+from .turn_mode import TurnMode
+from .url_avatar import UrlAvatar
from .usage_characters_response_model import UsageCharactersResponseModel
from .user import User
+from .user_feedback import UserFeedback
+from .user_feedback_score import UserFeedbackScore
from .validation_error import ValidationError
from .validation_error_loc_item import ValidationErrorLocItem
from .verification_attempt_response import VerificationAttemptResponse
from .voice import Voice
from .voice_generation_parameter_option_response import VoiceGenerationParameterOptionResponse
from .voice_generation_parameter_response import VoiceGenerationParameterResponse
+from .voice_preview_response_model import VoicePreviewResponseModel
+from .voice_previews_response_model import VoicePreviewsResponseModel
+from .voice_response_model_category import VoiceResponseModelCategory
from .voice_response_model_safety_control import VoiceResponseModelSafetyControl
from .voice_sample import VoiceSample
from .voice_settings import VoiceSettings
+from .voice_sharing_moderation_check_response_model import VoiceSharingModerationCheckResponseModel
from .voice_sharing_response import VoiceSharingResponse
+from .voice_sharing_response_model_category import VoiceSharingResponseModelCategory
from .voice_sharing_state import VoiceSharingState
from .voice_verification_response import VoiceVerificationResponse
+from .webhook_tool_api_schema_config import WebhookToolApiSchemaConfig
+from .webhook_tool_api_schema_config_method import WebhookToolApiSchemaConfigMethod
+from .webhook_tool_api_schema_config_request_headers_value import WebhookToolApiSchemaConfigRequestHeadersValue
+from .webhook_tool_config import WebhookToolConfig
+from .widget_feedback_mode import WidgetFeedbackMode
__all__ = [
"Accent",
+ "AddAgentSecretResponseModel",
+ "AddChapterResponseModel",
+ "AddKnowledgeBaseResponseModel",
"AddProjectResponseModel",
"AddPronunciationDictionaryResponseModel",
"AddPronunciationDictionaryRulesResponseModel",
+ "AddVoiceIvcResponseModel",
"AddVoiceResponseModel",
"Age",
+ "AgentBan",
+ "AgentConfig",
+ "AgentConfigOverride",
+ "AgentConfigOverrideConfig",
+ "AgentMetadataResponseModel",
+ "AgentPlatformSettings",
+ "AgentSummaryResponseModel",
+ "AllowlistItem",
+ "ArrayJsonSchemaProperty",
+ "ArrayJsonSchemaPropertyItems",
+ "AsrConversationalConfig",
+ "AsrInputFormat",
+ "AsrProvider",
+ "AsrQuality",
"AudioNativeCreateProjectResponseModel",
- "AudioNativeGetEmbedCodeResponseModel",
- "AudioOutput",
- "Category",
+ "AuthSettings",
+ "AuthorizationMethod",
+ "BanReasonType",
+ "BreakdownTypes",
"ChapterResponse",
"ChapterSnapshotResponse",
"ChapterSnapshotsResponse",
"ChapterState",
"ChapterStatisticsResponse",
- "CloseConnection",
+ "ClientEvent",
+ "ClientToolConfig",
+ "ConvAiNewSecretConfig",
+ "ConvAiSecretLocator",
+ "ConvAiStoredSecretConfig",
+ "ConversationChargingCommonModel",
+ "ConversationConfig",
+ "ConversationConfigClientOverride",
+ "ConversationConfigClientOverrideConfig",
+ "ConversationHistoryAnalysisCommonModel",
+ "ConversationHistoryEvaluationCriteriaResultCommonModel",
+ "ConversationHistoryFeedbackCommonModel",
+ "ConversationHistoryMetadataCommonModel",
+ "ConversationHistoryTranscriptCommonModel",
+ "ConversationHistoryTranscriptCommonModelRole",
+ "ConversationHistoryTranscriptToolCallCommonModel",
+ "ConversationHistoryTranscriptToolResultCommonModel",
+ "ConversationInitiationClientData",
+ "ConversationInitiationClientDataConfig",
+ "ConversationSignedUrlResponseModel",
+ "ConversationSummaryResponseModel",
+ "ConversationSummaryResponseModelStatus",
+ "ConversationTokenDbModel",
+ "ConversationTokenPurpose",
+ "ConversationalConfig",
+ "CreateAgentResponseModel",
+ "CreatePhoneNumberResponseModel",
"Currency",
+ "CustomLlm",
+ "DataCollectionResultCommonModel",
"DoDubbingResponse",
"DubbingMetadataResponse",
"EditProjectResponseModel",
+ "EmbedConfig",
+ "EmbedConfigAvatar",
+ "EmbedConfigAvatar_Image",
+ "EmbedConfigAvatar_Orb",
+ "EmbedConfigAvatar_Url",
+ "EmbedVariant",
+ "EvaluationSettings",
+ "EvaluationSuccessResult",
"ExtendedSubscriptionResponseModelBillingPeriod",
"ExtendedSubscriptionResponseModelCharacterRefreshPeriod",
+ "ExtendedSubscriptionResponseModelCurrency",
"FeedbackItem",
"FineTuningResponse",
"FineTuningResponseModelStateValue",
"Gender",
- "GenerationConfig",
+ "GetAgentEmbedResponseModel",
+ "GetAgentLinkResponseModel",
+ "GetAgentResponseModel",
+ "GetAgentsPageResponseModel",
"GetChaptersResponse",
+ "GetConversationResponseModel",
+ "GetConversationResponseModelStatus",
+ "GetConversationsPageResponseModel",
+ "GetKnowledgeBaseReponseModel",
+ "GetKnowledgeBaseReponseModelType",
"GetLibraryVoicesResponse",
+ "GetPhoneNumberResponseModel",
"GetProjectsResponse",
"GetPronunciationDictionariesMetadataResponseModel",
"GetPronunciationDictionaryMetadataResponse",
"GetSpeechHistoryResponse",
"GetVoicesResponse",
- "History",
"HistoryAlignmentResponseModel",
"HistoryAlignmentsResponseModel",
"HistoryItem",
"HttpValidationError",
- "InitializeConnection",
+ "ImageAvatar",
"Invoice",
+ "KnowledgeBaseLocator",
+ "KnowledgeBaseLocatorType",
"LanguageResponse",
"LibraryVoiceResponse",
+ "LibraryVoiceResponseModelCategory",
+ "LiteralJsonSchemaProperty",
+ "LiteralJsonSchemaPropertyType",
+ "Llm",
"ManualVerificationFileResponse",
"ManualVerificationResponse",
"Model",
- "NormalizedAlignment",
- "OptimizeStreamingLatency",
+ "ModelRatesResponseModel",
+ "ModelResponseModelConcurrencyGroup",
+ "ModerationStatusResponseModel",
+ "ModerationStatusResponseModelSafetyStatus",
+ "ModerationStatusResponseModelWarningStatus",
+ "ObjectJsonSchemaProperty",
+ "ObjectJsonSchemaPropertyPropertiesValue",
+ "OrbAvatar",
"OutputFormat",
+ "PhoneNumberAgentInfo",
+ "PostAgentAvatarResponseModel",
+ "PrivacyConfig",
"ProfilePageResponseModel",
+ "ProjectCreationMetaResponseModel",
+ "ProjectCreationMetaResponseModelStatus",
+ "ProjectCreationMetaResponseModelType",
"ProjectExtendedResponseModel",
+ "ProjectExtendedResponseModelAccessLevel",
+ "ProjectExtendedResponseModelApplyTextNormalization",
+ "ProjectExtendedResponseModelFiction",
+ "ProjectExtendedResponseModelQualityPreset",
+ "ProjectExtendedResponseModelTargetAudience",
"ProjectResponse",
+ "ProjectResponseModelAccessLevel",
+ "ProjectResponseModelFiction",
+ "ProjectResponseModelTargetAudience",
"ProjectSnapshotResponse",
"ProjectSnapshotUploadResponseModel",
+ "ProjectSnapshotUploadResponseModelStatus",
"ProjectSnapshotsResponse",
"ProjectState",
+ "PromptAgent",
+ "PromptAgentOverride",
+ "PromptAgentOverrideConfig",
+ "PromptAgentToolsItem",
+ "PromptAgentToolsItem_Client",
+ "PromptAgentToolsItem_Webhook",
+ "PromptEvaluationCriteria",
"PronunciationDictionaryAliasRuleRequestModel",
"PronunciationDictionaryPhonemeRuleRequestModel",
"PronunciationDictionaryVersionLocator",
- "RealtimeVoiceSettings",
+ "PronunciationDictionaryVersionResponseModel",
+ "PydanticPronunciationDictionaryVersionLocator",
+ "QueryParamsJsonSchema",
+ "ReaderResourceResponseModel",
+ "ReaderResourceResponseModelResourceType",
"RecordingResponse",
"RemovePronunciationDictionaryRulesResponseModel",
"ReviewStatus",
- "SendText",
- "Source",
+ "Safety",
+ "SafetyEvaluation",
+ "SafetyRule",
"SpeechHistoryItemResponse",
+ "SpeechHistoryItemResponseModelSource",
"SpeechHistoryItemResponseModelVoiceCategory",
- "SsoProviderResponseModel",
- "SsoProviderResponseModelProviderType",
- "Status",
"Subscription",
"SubscriptionResponse",
"SubscriptionResponseModelBillingPeriod",
"SubscriptionResponseModelCharacterRefreshPeriod",
+ "SubscriptionResponseModelCurrency",
"SubscriptionStatus",
+ "TelephonyProvider",
"TextToSpeechAsStreamRequest",
+ "TtsConversationalConfig",
+ "TtsConversationalConfigOverride",
+ "TtsConversationalConfigOverrideConfig",
+ "TtsConversationalModel",
+ "TtsOptimizeStreamingLatency",
+ "TtsOutputFormat",
+ "TurnConfig",
+ "TurnMode",
+ "UrlAvatar",
"UsageCharactersResponseModel",
"User",
+ "UserFeedback",
+ "UserFeedbackScore",
"ValidationError",
"ValidationErrorLocItem",
"VerificationAttemptResponse",
"Voice",
"VoiceGenerationParameterOptionResponse",
"VoiceGenerationParameterResponse",
+ "VoicePreviewResponseModel",
+ "VoicePreviewsResponseModel",
+ "VoiceResponseModelCategory",
"VoiceResponseModelSafetyControl",
"VoiceSample",
"VoiceSettings",
+ "VoiceSharingModerationCheckResponseModel",
"VoiceSharingResponse",
+ "VoiceSharingResponseModelCategory",
"VoiceSharingState",
"VoiceVerificationResponse",
+ "WebhookToolApiSchemaConfig",
+ "WebhookToolApiSchemaConfigMethod",
+ "WebhookToolApiSchemaConfigRequestHeadersValue",
+ "WebhookToolConfig",
+ "WidgetFeedbackMode",
]
diff --git a/src/elevenlabs/types/sso_provider_response_model.py b/src/elevenlabs/types/add_agent_secret_response_model.py
similarity index 66%
rename from src/elevenlabs/types/sso_provider_response_model.py
rename to src/elevenlabs/types/add_agent_secret_response_model.py
index ee158271..88687d8b 100644
--- a/src/elevenlabs/types/sso_provider_response_model.py
+++ b/src/elevenlabs/types/add_agent_secret_response_model.py
@@ -1,16 +1,14 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
-from .sso_provider_response_model_provider_type import SsoProviderResponseModelProviderType
-import typing
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
import pydantic
-class SsoProviderResponseModel(UncheckedBaseModel):
- provider_type: SsoProviderResponseModelProviderType
- provider_id: str
- domains: typing.List[str]
+class AddAgentSecretResponseModel(UncheckedBaseModel):
+ id: str
+ name: str
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/add_chapter_response_model.py b/src/elevenlabs/types/add_chapter_response_model.py
new file mode 100644
index 00000000..c6401365
--- /dev/null
+++ b/src/elevenlabs/types/add_chapter_response_model.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .chapter_response import ChapterResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class AddChapterResponseModel(UncheckedBaseModel):
+ chapter: ChapterResponse
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/add_knowledge_base_response_model.py b/src/elevenlabs/types/add_knowledge_base_response_model.py
new file mode 100644
index 00000000..e9105cb4
--- /dev/null
+++ b/src/elevenlabs/types/add_knowledge_base_response_model.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class AddKnowledgeBaseResponseModel(UncheckedBaseModel):
+ id: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/add_voice_ivc_response_model.py b/src/elevenlabs/types/add_voice_ivc_response_model.py
new file mode 100644
index 00000000..0c85c00e
--- /dev/null
+++ b/src/elevenlabs/types/add_voice_ivc_response_model.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class AddVoiceIvcResponseModel(UncheckedBaseModel):
+ voice_id: str
+ requires_verification: bool
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/agent_ban.py b/src/elevenlabs/types/agent_ban.py
new file mode 100644
index 00000000..ac7027b6
--- /dev/null
+++ b/src/elevenlabs/types/agent_ban.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .ban_reason_type import BanReasonType
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentBan(UncheckedBaseModel):
+ at_unix: int
+ reason: typing.Optional[str] = None
+ reason_type: BanReasonType
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/agent_config.py b/src/elevenlabs/types/agent_config.py
new file mode 100644
index 00000000..e7ff782c
--- /dev/null
+++ b/src/elevenlabs/types/agent_config.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .object_json_schema_property import ObjectJsonSchemaProperty
+import typing
+from .prompt_agent import PromptAgent
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentConfig(UncheckedBaseModel):
+ prompt: typing.Optional[PromptAgent] = None
+ first_message: typing.Optional[str] = None
+ language: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+update_forward_refs(ArrayJsonSchemaProperty, AgentConfig=AgentConfig)
+update_forward_refs(ObjectJsonSchemaProperty, AgentConfig=AgentConfig)
diff --git a/src/elevenlabs/types/agent_config_override.py b/src/elevenlabs/types/agent_config_override.py
new file mode 100644
index 00000000..a6f959fd
--- /dev/null
+++ b/src/elevenlabs/types/agent_config_override.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .prompt_agent_override import PromptAgentOverride
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentConfigOverride(UncheckedBaseModel):
+ prompt: typing.Optional[PromptAgentOverride] = None
+ first_message: typing.Optional[str] = None
+ language: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/agent_config_override_config.py b/src/elevenlabs/types/agent_config_override_config.py
new file mode 100644
index 00000000..bdf830b3
--- /dev/null
+++ b/src/elevenlabs/types/agent_config_override_config.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .prompt_agent_override_config import PromptAgentOverrideConfig
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentConfigOverrideConfig(UncheckedBaseModel):
+ prompt: typing.Optional[PromptAgentOverrideConfig] = None
+ first_message: typing.Optional[bool] = None
+ language: typing.Optional[bool] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/agent_metadata_response_model.py b/src/elevenlabs/types/agent_metadata_response_model.py
new file mode 100644
index 00000000..3609829e
--- /dev/null
+++ b/src/elevenlabs/types/agent_metadata_response_model.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class AgentMetadataResponseModel(UncheckedBaseModel):
+ created_at_unix_secs: int
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/agent_platform_settings.py b/src/elevenlabs/types/agent_platform_settings.py
new file mode 100644
index 00000000..ac146a58
--- /dev/null
+++ b/src/elevenlabs/types/agent_platform_settings.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .auth_settings import AuthSettings
+from .evaluation_settings import EvaluationSettings
+from .embed_config import EmbedConfig
+from .literal_json_schema_property import LiteralJsonSchemaProperty
+from .conversation_initiation_client_data_config import ConversationInitiationClientDataConfig
+from .agent_ban import AgentBan
+from .safety import Safety
+from .privacy_config import PrivacyConfig
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentPlatformSettings(UncheckedBaseModel):
+ auth: typing.Optional[AuthSettings] = None
+ evaluation: typing.Optional[EvaluationSettings] = None
+ widget: typing.Optional[EmbedConfig] = None
+ data_collection: typing.Optional[typing.Dict[str, LiteralJsonSchemaProperty]] = None
+ overrides: typing.Optional[ConversationInitiationClientDataConfig] = None
+ ban: typing.Optional[AgentBan] = None
+ safety: typing.Optional[Safety] = None
+ privacy: typing.Optional[PrivacyConfig] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/agent_summary_response_model.py b/src/elevenlabs/types/agent_summary_response_model.py
new file mode 100644
index 00000000..91ec68bb
--- /dev/null
+++ b/src/elevenlabs/types/agent_summary_response_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class AgentSummaryResponseModel(UncheckedBaseModel):
+ agent_id: str
+ name: str
+ created_at_unix_secs: int
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/allowlist_item.py b/src/elevenlabs/types/allowlist_item.py
new file mode 100644
index 00000000..3e10d4b7
--- /dev/null
+++ b/src/elevenlabs/types/allowlist_item.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class AllowlistItem(UncheckedBaseModel):
+ hostname: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/array_json_schema_property.py b/src/elevenlabs/types/array_json_schema_property.py
new file mode 100644
index 00000000..bc694678
--- /dev/null
+++ b/src/elevenlabs/types/array_json_schema_property.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class ArrayJsonSchemaProperty(UncheckedBaseModel):
+ type: typing.Optional[typing.Literal["array"]] = None
+ items: "ArrayJsonSchemaPropertyItems"
+ description: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .object_json_schema_property import ObjectJsonSchemaProperty # noqa: E402
+from .array_json_schema_property_items import ArrayJsonSchemaPropertyItems # noqa: E402
+
+update_forward_refs(ObjectJsonSchemaProperty, ArrayJsonSchemaProperty=ArrayJsonSchemaProperty)
+update_forward_refs(ArrayJsonSchemaProperty)
diff --git a/src/elevenlabs/types/array_json_schema_property_items.py b/src/elevenlabs/types/array_json_schema_property_items.py
new file mode 100644
index 00000000..ed27a106
--- /dev/null
+++ b/src/elevenlabs/types/array_json_schema_property_items.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .literal_json_schema_property import LiteralJsonSchemaProperty
+import typing
+
+if typing.TYPE_CHECKING:
+ from .object_json_schema_property import ObjectJsonSchemaProperty
+ from .array_json_schema_property import ArrayJsonSchemaProperty
+ArrayJsonSchemaPropertyItems = typing.Union[
+ LiteralJsonSchemaProperty, "ObjectJsonSchemaProperty", "ArrayJsonSchemaProperty"
+]
diff --git a/src/elevenlabs/types/asr_conversational_config.py b/src/elevenlabs/types/asr_conversational_config.py
new file mode 100644
index 00000000..125c8335
--- /dev/null
+++ b/src/elevenlabs/types/asr_conversational_config.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .asr_quality import AsrQuality
+from .asr_provider import AsrProvider
+from .asr_input_format import AsrInputFormat
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AsrConversationalConfig(UncheckedBaseModel):
+ quality: typing.Optional[AsrQuality] = None
+ provider: typing.Optional[AsrProvider] = None
+ user_input_audio_format: typing.Optional[AsrInputFormat] = None
+ keywords: typing.Optional[typing.List[str]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/asr_input_format.py b/src/elevenlabs/types/asr_input_format.py
new file mode 100644
index 00000000..5d0623d4
--- /dev/null
+++ b/src/elevenlabs/types/asr_input_format.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AsrInputFormat = typing.Union[
+ typing.Literal["pcm_16000", "pcm_22050", "pcm_24000", "pcm_44100", "ulaw_8000"], typing.Any
+]
diff --git a/src/elevenlabs/types/source.py b/src/elevenlabs/types/asr_provider.py
similarity index 55%
rename from src/elevenlabs/types/source.py
rename to src/elevenlabs/types/asr_provider.py
index d5e1b55e..af99d4a7 100644
--- a/src/elevenlabs/types/source.py
+++ b/src/elevenlabs/types/asr_provider.py
@@ -2,4 +2,4 @@
import typing
-Source = typing.Union[typing.Literal["TTS", "STS"], typing.Any]
+AsrProvider = typing.Literal["elevenlabs"]
diff --git a/src/elevenlabs/types/history.py b/src/elevenlabs/types/asr_quality.py
similarity index 67%
rename from src/elevenlabs/types/history.py
rename to src/elevenlabs/types/asr_quality.py
index b9532340..b0f39063 100644
--- a/src/elevenlabs/types/history.py
+++ b/src/elevenlabs/types/asr_quality.py
@@ -2,4 +2,4 @@
import typing
-History = typing.Optional[typing.Any]
+AsrQuality = typing.Literal["high"]
diff --git a/src/elevenlabs/types/audio_output.py b/src/elevenlabs/types/audio_output.py
deleted file mode 100644
index ac0c7c13..00000000
--- a/src/elevenlabs/types/audio_output.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-import typing
-import pydantic
-import typing_extensions
-from ..core.serialization import FieldMetadata
-from .normalized_alignment import NormalizedAlignment
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class AudioOutput(UncheckedBaseModel):
- audio: typing.Optional[str] = pydantic.Field(default=None)
- """
- A generated partial audio chunk, encoded using the selected output_format, by default this
- is MP3 encoded as a base64 string.
- """
-
- is_final: typing_extensions.Annotated[typing.Optional[bool], FieldMetadata(alias="isFinal")] = pydantic.Field(
- default=None
- )
- """
- Indicates if the generation is complete. If set to `True`, `audio` will be null.
- """
-
- normalized_alignment: typing_extensions.Annotated[
- typing.Optional[NormalizedAlignment], FieldMetadata(alias="normalizedAlignment")
- ] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/auth_settings.py b/src/elevenlabs/types/auth_settings.py
new file mode 100644
index 00000000..f673dd81
--- /dev/null
+++ b/src/elevenlabs/types/auth_settings.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .allowlist_item import AllowlistItem
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AuthSettings(UncheckedBaseModel):
+ enable_auth: typing.Optional[bool] = None
+ allowlist: typing.Optional[typing.List[AllowlistItem]] = None
+ shareable_token: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/authorization_method.py b/src/elevenlabs/types/authorization_method.py
new file mode 100644
index 00000000..7605e0df
--- /dev/null
+++ b/src/elevenlabs/types/authorization_method.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AuthorizationMethod = typing.Union[
+ typing.Literal["public", "authorization_header", "signed_url", "shareable_link"], typing.Any
+]
diff --git a/src/elevenlabs/types/ban_reason_type.py b/src/elevenlabs/types/ban_reason_type.py
new file mode 100644
index 00000000..81accd21
--- /dev/null
+++ b/src/elevenlabs/types/ban_reason_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+BanReasonType = typing.Union[typing.Literal["safety", "manual"], typing.Any]
diff --git a/src/elevenlabs/types/breakdown_types.py b/src/elevenlabs/types/breakdown_types.py
new file mode 100644
index 00000000..cc299406
--- /dev/null
+++ b/src/elevenlabs/types/breakdown_types.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+BreakdownTypes = typing.Union[
+ typing.Literal["none", "voice", "user", "api_keys", "all_api_keys", "product_type", "model", "resource"], typing.Any
+]
diff --git a/src/elevenlabs/types/category.py b/src/elevenlabs/types/category.py
deleted file mode 100644
index eca957fd..00000000
--- a/src/elevenlabs/types/category.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-Category = typing.Union[typing.Literal["generated", "professional", "high_quality", "famous"], typing.Any]
diff --git a/src/elevenlabs/types/chapter_response.py b/src/elevenlabs/types/chapter_response.py
index 1d3b652b..192804d6 100644
--- a/src/elevenlabs/types/chapter_response.py
+++ b/src/elevenlabs/types/chapter_response.py
@@ -1,21 +1,22 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
from .chapter_state import ChapterState
from .chapter_statistics_response import ChapterStatisticsResponse
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
import pydantic
class ChapterResponse(UncheckedBaseModel):
chapter_id: str
name: str
- last_conversion_date_unix: int
- conversion_progress: float
+ last_conversion_date_unix: typing.Optional[int] = None
+ conversion_progress: typing.Optional[float] = None
can_be_downloaded: bool
state: ChapterState
- statistics: ChapterStatisticsResponse
+ statistics: typing.Optional[ChapterStatisticsResponse] = None
+ last_conversion_error: typing.Optional[str] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/client_event.py b/src/elevenlabs/types/client_event.py
new file mode 100644
index 00000000..5152c634
--- /dev/null
+++ b/src/elevenlabs/types/client_event.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ClientEvent = typing.Union[
+ typing.Literal[
+ "conversation_initiation_metadata",
+ "asr_initiation_metadata",
+ "ping",
+ "audio",
+ "interruption",
+ "user_transcript",
+ "agent_response",
+ "agent_response_correction",
+ "client_tool_call",
+ "internal_vad_score",
+ "internal_turn_probability",
+ "internal_tentative_agent_response",
+ ],
+ typing.Any,
+]
diff --git a/src/elevenlabs/types/client_tool_config.py b/src/elevenlabs/types/client_tool_config.py
new file mode 100644
index 00000000..72762e08
--- /dev/null
+++ b/src/elevenlabs/types/client_tool_config.py
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .object_json_schema_property import ObjectJsonSchemaProperty
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class ClientToolConfig(UncheckedBaseModel):
+ """
+ A client tool is one that sends an event to the user's client to trigger something client side
+ """
+
+ name: str
+ description: str
+ parameters: typing.Optional[ObjectJsonSchemaProperty] = None
+ expects_response: typing.Optional[bool] = None
+ response_timeout_secs: typing.Optional[int] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+update_forward_refs(ArrayJsonSchemaProperty, ClientToolConfig=ClientToolConfig)
+update_forward_refs(ObjectJsonSchemaProperty, ClientToolConfig=ClientToolConfig)
diff --git a/src/elevenlabs/types/conv_ai_new_secret_config.py b/src/elevenlabs/types/conv_ai_new_secret_config.py
new file mode 100644
index 00000000..4276a25e
--- /dev/null
+++ b/src/elevenlabs/types/conv_ai_new_secret_config.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ConvAiNewSecretConfig(UncheckedBaseModel):
+ name: str
+ value: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/close_connection.py b/src/elevenlabs/types/conv_ai_secret_locator.py
similarity index 78%
rename from src/elevenlabs/types/close_connection.py
rename to src/elevenlabs/types/conv_ai_secret_locator.py
index 42a428f2..9aa49a80 100644
--- a/src/elevenlabs/types/close_connection.py
+++ b/src/elevenlabs/types/conv_ai_secret_locator.py
@@ -1,17 +1,18 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
import typing
import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-class CloseConnection(UncheckedBaseModel):
- text: typing.Literal[""] = pydantic.Field(default="")
+class ConvAiSecretLocator(UncheckedBaseModel):
"""
- End the stream with an empty string
+ Used to reference a secret from the agent's secret store.
"""
+ secret_id: str
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
diff --git a/src/elevenlabs/types/conv_ai_stored_secret_config.py b/src/elevenlabs/types/conv_ai_stored_secret_config.py
new file mode 100644
index 00000000..316c978d
--- /dev/null
+++ b/src/elevenlabs/types/conv_ai_stored_secret_config.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ConvAiStoredSecretConfig(UncheckedBaseModel):
+ secret_id: str
+ name: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_charging_common_model.py b/src/elevenlabs/types/conversation_charging_common_model.py
new file mode 100644
index 00000000..cfbf5468
--- /dev/null
+++ b/src/elevenlabs/types/conversation_charging_common_model.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationChargingCommonModel(UncheckedBaseModel):
+ dev_discount: typing.Optional[bool] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_config.py b/src/elevenlabs/types/conversation_config.py
new file mode 100644
index 00000000..d0e80ee1
--- /dev/null
+++ b/src/elevenlabs/types/conversation_config.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .client_event import ClientEvent
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationConfig(UncheckedBaseModel):
+ max_duration_seconds: typing.Optional[int] = None
+ client_events: typing.Optional[typing.List[ClientEvent]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_config_client_override.py b/src/elevenlabs/types/conversation_config_client_override.py
new file mode 100644
index 00000000..d8a89f24
--- /dev/null
+++ b/src/elevenlabs/types/conversation_config_client_override.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .agent_config_override import AgentConfigOverride
+from .tts_conversational_config_override import TtsConversationalConfigOverride
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationConfigClientOverride(UncheckedBaseModel):
+ agent: typing.Optional[AgentConfigOverride] = None
+ tts: typing.Optional[TtsConversationalConfigOverride] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_config_client_override_config.py b/src/elevenlabs/types/conversation_config_client_override_config.py
new file mode 100644
index 00000000..69071e68
--- /dev/null
+++ b/src/elevenlabs/types/conversation_config_client_override_config.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .agent_config_override_config import AgentConfigOverrideConfig
+from .tts_conversational_config_override_config import TtsConversationalConfigOverrideConfig
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationConfigClientOverrideConfig(UncheckedBaseModel):
+ agent: typing.Optional[AgentConfigOverrideConfig] = None
+ tts: typing.Optional[TtsConversationalConfigOverrideConfig] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_history_analysis_common_model.py b/src/elevenlabs/types/conversation_history_analysis_common_model.py
new file mode 100644
index 00000000..cfbbe147
--- /dev/null
+++ b/src/elevenlabs/types/conversation_history_analysis_common_model.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conversation_history_evaluation_criteria_result_common_model import (
+ ConversationHistoryEvaluationCriteriaResultCommonModel,
+)
+from .data_collection_result_common_model import DataCollectionResultCommonModel
+from .evaluation_success_result import EvaluationSuccessResult
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationHistoryAnalysisCommonModel(UncheckedBaseModel):
+ evaluation_criteria_results: typing.Optional[
+ typing.Dict[str, ConversationHistoryEvaluationCriteriaResultCommonModel]
+ ] = None
+ data_collection_results: typing.Optional[typing.Dict[str, DataCollectionResultCommonModel]] = None
+ call_successful: EvaluationSuccessResult
+ transcript_summary: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_history_evaluation_criteria_result_common_model.py b/src/elevenlabs/types/conversation_history_evaluation_criteria_result_common_model.py
new file mode 100644
index 00000000..af659a80
--- /dev/null
+++ b/src/elevenlabs/types/conversation_history_evaluation_criteria_result_common_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .evaluation_success_result import EvaluationSuccessResult
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ConversationHistoryEvaluationCriteriaResultCommonModel(UncheckedBaseModel):
+ criteria_id: str
+ result: EvaluationSuccessResult
+ rationale: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_history_feedback_common_model.py b/src/elevenlabs/types/conversation_history_feedback_common_model.py
new file mode 100644
index 00000000..7b265e3c
--- /dev/null
+++ b/src/elevenlabs/types/conversation_history_feedback_common_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .user_feedback_score import UserFeedbackScore
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationHistoryFeedbackCommonModel(UncheckedBaseModel):
+ overall_score: typing.Optional[UserFeedbackScore] = None
+ likes: typing.Optional[int] = None
+ dislikes: typing.Optional[int] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_history_metadata_common_model.py b/src/elevenlabs/types/conversation_history_metadata_common_model.py
new file mode 100644
index 00000000..7db13c3c
--- /dev/null
+++ b/src/elevenlabs/types/conversation_history_metadata_common_model.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conversation_history_feedback_common_model import ConversationHistoryFeedbackCommonModel
+from .authorization_method import AuthorizationMethod
+from .conversation_charging_common_model import ConversationChargingCommonModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationHistoryMetadataCommonModel(UncheckedBaseModel):
+ start_time_unix_secs: int
+ call_duration_secs: int
+ cost: typing.Optional[int] = None
+ feedback: typing.Optional[ConversationHistoryFeedbackCommonModel] = None
+ authorization_method: typing.Optional[AuthorizationMethod] = None
+ charging: typing.Optional[ConversationChargingCommonModel] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_history_transcript_common_model.py b/src/elevenlabs/types/conversation_history_transcript_common_model.py
new file mode 100644
index 00000000..b391ceb8
--- /dev/null
+++ b/src/elevenlabs/types/conversation_history_transcript_common_model.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .conversation_history_transcript_common_model_role import ConversationHistoryTranscriptCommonModelRole
+import typing
+from .conversation_history_transcript_tool_call_common_model import ConversationHistoryTranscriptToolCallCommonModel
+from .conversation_history_transcript_tool_result_common_model import ConversationHistoryTranscriptToolResultCommonModel
+from .user_feedback import UserFeedback
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationHistoryTranscriptCommonModel(UncheckedBaseModel):
+ role: ConversationHistoryTranscriptCommonModelRole
+ message: typing.Optional[str] = None
+ tool_calls: typing.Optional[typing.List[ConversationHistoryTranscriptToolCallCommonModel]] = None
+ tool_results: typing.Optional[typing.List[ConversationHistoryTranscriptToolResultCommonModel]] = None
+ feedback: typing.Optional[UserFeedback] = None
+ time_in_call_secs: int
+ conversation_turn_metrics: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_history_transcript_common_model_role.py b/src/elevenlabs/types/conversation_history_transcript_common_model_role.py
new file mode 100644
index 00000000..1964c6f2
--- /dev/null
+++ b/src/elevenlabs/types/conversation_history_transcript_common_model_role.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ConversationHistoryTranscriptCommonModelRole = typing.Union[typing.Literal["user", "agent"], typing.Any]
diff --git a/src/elevenlabs/types/conversation_history_transcript_tool_call_common_model.py b/src/elevenlabs/types/conversation_history_transcript_tool_call_common_model.py
new file mode 100644
index 00000000..1afe0502
--- /dev/null
+++ b/src/elevenlabs/types/conversation_history_transcript_tool_call_common_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ConversationHistoryTranscriptToolCallCommonModel(UncheckedBaseModel):
+ request_id: str
+ tool_name: str
+ params_as_json: str
+ tool_has_been_called: bool
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_history_transcript_tool_result_common_model.py b/src/elevenlabs/types/conversation_history_transcript_tool_result_common_model.py
new file mode 100644
index 00000000..15ef7ff3
--- /dev/null
+++ b/src/elevenlabs/types/conversation_history_transcript_tool_result_common_model.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ConversationHistoryTranscriptToolResultCommonModel(UncheckedBaseModel):
+ request_id: str
+ tool_name: str
+ result_value: str
+ is_error: bool
+ tool_has_been_called: bool
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_initiation_client_data.py b/src/elevenlabs/types/conversation_initiation_client_data.py
new file mode 100644
index 00000000..f98379ec
--- /dev/null
+++ b/src/elevenlabs/types/conversation_initiation_client_data.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conversation_config_client_override import ConversationConfigClientOverride
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationInitiationClientData(UncheckedBaseModel):
+ conversation_config_override: typing.Optional[ConversationConfigClientOverride] = None
+ custom_llm_extra_body: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_initiation_client_data_config.py b/src/elevenlabs/types/conversation_initiation_client_data_config.py
new file mode 100644
index 00000000..a92f1e28
--- /dev/null
+++ b/src/elevenlabs/types/conversation_initiation_client_data_config.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conversation_config_client_override_config import ConversationConfigClientOverrideConfig
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationInitiationClientDataConfig(UncheckedBaseModel):
+ conversation_config_override: typing.Optional[ConversationConfigClientOverrideConfig] = None
+ custom_llm_extra_body: typing.Optional[bool] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_signed_url_response_model.py b/src/elevenlabs/types/conversation_signed_url_response_model.py
new file mode 100644
index 00000000..b38e5f87
--- /dev/null
+++ b/src/elevenlabs/types/conversation_signed_url_response_model.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ConversationSignedUrlResponseModel(UncheckedBaseModel):
+ signed_url: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_summary_response_model.py b/src/elevenlabs/types/conversation_summary_response_model.py
new file mode 100644
index 00000000..19106299
--- /dev/null
+++ b/src/elevenlabs/types/conversation_summary_response_model.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conversation_summary_response_model_status import ConversationSummaryResponseModelStatus
+from .evaluation_success_result import EvaluationSuccessResult
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationSummaryResponseModel(UncheckedBaseModel):
+ agent_id: str
+ agent_name: typing.Optional[str] = None
+ conversation_id: str
+ start_time_unix_secs: int
+ call_duration_secs: int
+ message_count: int
+ status: ConversationSummaryResponseModelStatus
+ call_successful: EvaluationSuccessResult
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_summary_response_model_status.py b/src/elevenlabs/types/conversation_summary_response_model_status.py
new file mode 100644
index 00000000..4baceca3
--- /dev/null
+++ b/src/elevenlabs/types/conversation_summary_response_model_status.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ConversationSummaryResponseModelStatus = typing.Union[typing.Literal["processing", "done"], typing.Any]
diff --git a/src/elevenlabs/types/conversation_token_db_model.py b/src/elevenlabs/types/conversation_token_db_model.py
new file mode 100644
index 00000000..9107ab95
--- /dev/null
+++ b/src/elevenlabs/types/conversation_token_db_model.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conversation_token_purpose import ConversationTokenPurpose
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationTokenDbModel(UncheckedBaseModel):
+ agent_id: str
+ conversation_token: str
+ expiration_time_unix_secs: typing.Optional[int] = None
+ purpose: typing.Optional[ConversationTokenPurpose] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_token_purpose.py b/src/elevenlabs/types/conversation_token_purpose.py
new file mode 100644
index 00000000..bfaccef4
--- /dev/null
+++ b/src/elevenlabs/types/conversation_token_purpose.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ConversationTokenPurpose = typing.Union[typing.Literal["signed_url", "shareable_link"], typing.Any]
diff --git a/src/elevenlabs/types/conversational_config.py b/src/elevenlabs/types/conversational_config.py
new file mode 100644
index 00000000..0fa91dc0
--- /dev/null
+++ b/src/elevenlabs/types/conversational_config.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .object_json_schema_property import ObjectJsonSchemaProperty
+import typing
+from .agent_config import AgentConfig
+from .asr_conversational_config import AsrConversationalConfig
+from .turn_config import TurnConfig
+from .tts_conversational_config import TtsConversationalConfig
+from .conversation_config import ConversationConfig
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class ConversationalConfig(UncheckedBaseModel):
+ agent: typing.Optional[AgentConfig] = None
+ asr: typing.Optional[AsrConversationalConfig] = None
+ turn: typing.Optional[TurnConfig] = None
+ tts: typing.Optional[TtsConversationalConfig] = None
+ conversation: typing.Optional[ConversationConfig] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+update_forward_refs(ArrayJsonSchemaProperty, ConversationalConfig=ConversationalConfig)
+update_forward_refs(ObjectJsonSchemaProperty, ConversationalConfig=ConversationalConfig)
diff --git a/src/elevenlabs/types/create_agent_response_model.py b/src/elevenlabs/types/create_agent_response_model.py
new file mode 100644
index 00000000..48aede97
--- /dev/null
+++ b/src/elevenlabs/types/create_agent_response_model.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class CreateAgentResponseModel(UncheckedBaseModel):
+ agent_id: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/create_phone_number_response_model.py b/src/elevenlabs/types/create_phone_number_response_model.py
new file mode 100644
index 00000000..86642902
--- /dev/null
+++ b/src/elevenlabs/types/create_phone_number_response_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+
+
+class CreatePhoneNumberResponseModel(UncheckedBaseModel):
+ phone_number_id: str = pydantic.Field()
+ """
+ Phone entity id
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/custom_llm.py b/src/elevenlabs/types/custom_llm.py
new file mode 100644
index 00000000..5c4a570b
--- /dev/null
+++ b/src/elevenlabs/types/custom_llm.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conv_ai_secret_locator import ConvAiSecretLocator
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class CustomLlm(UncheckedBaseModel):
+ url: str
+ model_id: typing.Optional[str] = None
+ api_key: typing.Optional[ConvAiSecretLocator] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/data_collection_result_common_model.py b/src/elevenlabs/types/data_collection_result_common_model.py
new file mode 100644
index 00000000..1c4856ba
--- /dev/null
+++ b/src/elevenlabs/types/data_collection_result_common_model.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .literal_json_schema_property import LiteralJsonSchemaProperty
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class DataCollectionResultCommonModel(UncheckedBaseModel):
+ data_collection_id: str
+ value: typing.Optional[typing.Optional[typing.Any]] = None
+ json_schema: typing.Optional[LiteralJsonSchemaProperty] = None
+ rationale: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/embed_config.py b/src/elevenlabs/types/embed_config.py
new file mode 100644
index 00000000..32fa8bd5
--- /dev/null
+++ b/src/elevenlabs/types/embed_config.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .embed_variant import EmbedVariant
+from .embed_config_avatar import EmbedConfigAvatar
+from .widget_feedback_mode import WidgetFeedbackMode
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class EmbedConfig(UncheckedBaseModel):
+ variant: typing.Optional[EmbedVariant] = None
+ avatar: typing.Optional[EmbedConfigAvatar] = None
+ feedback_mode: typing.Optional[WidgetFeedbackMode] = None
+ custom_avatar_path: typing.Optional[str] = None
+ bg_color: typing.Optional[str] = None
+ text_color: typing.Optional[str] = None
+ btn_color: typing.Optional[str] = None
+ btn_text_color: typing.Optional[str] = None
+ border_color: typing.Optional[str] = None
+ focus_color: typing.Optional[str] = None
+ border_radius: typing.Optional[int] = None
+ btn_radius: typing.Optional[int] = None
+ action_text: typing.Optional[str] = None
+ start_call_text: typing.Optional[str] = None
+ end_call_text: typing.Optional[str] = None
+ expand_text: typing.Optional[str] = None
+ listening_text: typing.Optional[str] = None
+ speaking_text: typing.Optional[str] = None
+ shareable_page_text: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/embed_config_avatar.py b/src/elevenlabs/types/embed_config_avatar.py
new file mode 100644
index 00000000..13699ead
--- /dev/null
+++ b/src/elevenlabs/types/embed_config_avatar.py
@@ -0,0 +1,58 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+import typing_extensions
+from ..core.unchecked_base_model import UnionMetadata
+
+
+class EmbedConfigAvatar_Orb(UncheckedBaseModel):
+ type: typing.Literal["orb"] = "orb"
+ color_1: typing.Optional[str] = None
+ color_2: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class EmbedConfigAvatar_Url(UncheckedBaseModel):
+ type: typing.Literal["url"] = "url"
+ custom_url: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class EmbedConfigAvatar_Image(UncheckedBaseModel):
+ type: typing.Literal["image"] = "image"
+ url: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+EmbedConfigAvatar = typing_extensions.Annotated[
+ typing.Union[EmbedConfigAvatar_Orb, EmbedConfigAvatar_Url, EmbedConfigAvatar_Image],
+ UnionMetadata(discriminant="type"),
+]
diff --git a/src/elevenlabs/types/embed_variant.py b/src/elevenlabs/types/embed_variant.py
new file mode 100644
index 00000000..3ad72931
--- /dev/null
+++ b/src/elevenlabs/types/embed_variant.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+EmbedVariant = typing.Union[typing.Literal["compact", "full", "expandable"], typing.Any]
diff --git a/src/elevenlabs/types/evaluation_settings.py b/src/elevenlabs/types/evaluation_settings.py
new file mode 100644
index 00000000..ed0dd534
--- /dev/null
+++ b/src/elevenlabs/types/evaluation_settings.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .prompt_evaluation_criteria import PromptEvaluationCriteria
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class EvaluationSettings(UncheckedBaseModel):
+ """
+ Settings to evaluate an agent's performance.
+ Agents are evaluated against a set of criteria, with success being defined as meeting some combination of those criteria.
+ """
+
+ criteria: typing.Optional[typing.List[PromptEvaluationCriteria]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/evaluation_success_result.py b/src/elevenlabs/types/evaluation_success_result.py
new file mode 100644
index 00000000..3d18d896
--- /dev/null
+++ b/src/elevenlabs/types/evaluation_success_result.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+EvaluationSuccessResult = typing.Union[typing.Literal["success", "failure", "unknown"], typing.Any]
diff --git a/src/elevenlabs/types/extended_subscription_response_model_currency.py b/src/elevenlabs/types/extended_subscription_response_model_currency.py
new file mode 100644
index 00000000..3f566794
--- /dev/null
+++ b/src/elevenlabs/types/extended_subscription_response_model_currency.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ExtendedSubscriptionResponseModelCurrency = typing.Union[typing.Literal["usd", "eur"], typing.Any]
diff --git a/src/elevenlabs/types/fine_tuning_response.py b/src/elevenlabs/types/fine_tuning_response.py
index 4bb159b8..398fb73c 100644
--- a/src/elevenlabs/types/fine_tuning_response.py
+++ b/src/elevenlabs/types/fine_tuning_response.py
@@ -22,6 +22,8 @@ class FineTuningResponse(UncheckedBaseModel):
verification_attempts: typing.Optional[typing.List[VerificationAttemptResponse]] = None
slice_ids: typing.Optional[typing.List[str]] = None
manual_verification: typing.Optional[ManualVerificationResponse] = None
+ max_verification_attempts: typing.Optional[int] = None
+ next_max_verification_attempts_reset_unix_ms: typing.Optional[int] = None
finetuning_state: typing.Optional[typing.Optional[typing.Any]] = None
if IS_PYDANTIC_V2:
diff --git a/src/elevenlabs/types/generation_config.py b/src/elevenlabs/types/generation_config.py
deleted file mode 100644
index 8d5491a5..00000000
--- a/src/elevenlabs/types/generation_config.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-import typing
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class GenerationConfig(UncheckedBaseModel):
- chunk_length_schedule: typing.Optional[typing.List[float]] = pydantic.Field(default=None)
- """
- This is an advanced setting that most users shouldn't need to use. It relates to our
- generation schedule explained [here](https://elevenlabs.io/docs/api-reference/websockets#understanding-how-our-websockets-buffer-text).
-
- Determines the minimum amount of text that needs to be sent and present in our
- buffer before audio starts being generated. This is to maximise the amount of context available to
- the model to improve audio quality, whilst balancing latency of the returned audio chunks.
-
- The default value is: [120, 160, 250, 290].
-
- This means that the first chunk of audio will not be generated until you send text that
- totals at least 120 characters long. The next chunk of audio will only be generated once a
- further 160 characters have been sent. The third audio chunk will be generated after the
- next 250 characters. Then the fourth, and beyond, will be generated in sets of at least 290 characters.
-
- Customize this array to suit your needs. If you want to generate audio more frequently
- to optimise latency, you can reduce the values in the array. Note that setting the values
- too low may result in lower quality audio. Please test and adjust as needed.
-
- Each item should be in the range 50-500.
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/get_agent_embed_response_model.py b/src/elevenlabs/types/get_agent_embed_response_model.py
new file mode 100644
index 00000000..760be2fc
--- /dev/null
+++ b/src/elevenlabs/types/get_agent_embed_response_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .embed_config import EmbedConfig
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class GetAgentEmbedResponseModel(UncheckedBaseModel):
+ agent_id: str
+ widget_config: EmbedConfig
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/get_agent_link_response_model.py b/src/elevenlabs/types/get_agent_link_response_model.py
new file mode 100644
index 00000000..95789179
--- /dev/null
+++ b/src/elevenlabs/types/get_agent_link_response_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conversation_token_db_model import ConversationTokenDbModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class GetAgentLinkResponseModel(UncheckedBaseModel):
+ agent_id: str
+ token: typing.Optional[ConversationTokenDbModel] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/get_agent_response_model.py b/src/elevenlabs/types/get_agent_response_model.py
new file mode 100644
index 00000000..b23ea801
--- /dev/null
+++ b/src/elevenlabs/types/get_agent_response_model.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .object_json_schema_property import ObjectJsonSchemaProperty
+from .conversational_config import ConversationalConfig
+from .agent_metadata_response_model import AgentMetadataResponseModel
+import typing
+from .agent_platform_settings import AgentPlatformSettings
+from .conv_ai_stored_secret_config import ConvAiStoredSecretConfig
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class GetAgentResponseModel(UncheckedBaseModel):
+ agent_id: str
+ name: str
+ conversation_config: ConversationalConfig
+ metadata: AgentMetadataResponseModel
+ platform_settings: typing.Optional[AgentPlatformSettings] = None
+ secrets: typing.List[ConvAiStoredSecretConfig]
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+update_forward_refs(ArrayJsonSchemaProperty, GetAgentResponseModel=GetAgentResponseModel)
+update_forward_refs(ObjectJsonSchemaProperty, GetAgentResponseModel=GetAgentResponseModel)
diff --git a/src/elevenlabs/types/get_agents_page_response_model.py b/src/elevenlabs/types/get_agents_page_response_model.py
new file mode 100644
index 00000000..5170a9ec
--- /dev/null
+++ b/src/elevenlabs/types/get_agents_page_response_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .agent_summary_response_model import AgentSummaryResponseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class GetAgentsPageResponseModel(UncheckedBaseModel):
+ agents: typing.List[AgentSummaryResponseModel]
+ next_cursor: typing.Optional[str] = None
+ has_more: bool
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/get_conversation_response_model.py b/src/elevenlabs/types/get_conversation_response_model.py
new file mode 100644
index 00000000..1dc49d50
--- /dev/null
+++ b/src/elevenlabs/types/get_conversation_response_model.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .get_conversation_response_model_status import GetConversationResponseModelStatus
+import typing
+from .conversation_history_transcript_common_model import ConversationHistoryTranscriptCommonModel
+from .conversation_history_metadata_common_model import ConversationHistoryMetadataCommonModel
+from .conversation_history_analysis_common_model import ConversationHistoryAnalysisCommonModel
+from .conversation_initiation_client_data import ConversationInitiationClientData
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class GetConversationResponseModel(UncheckedBaseModel):
+ agent_id: str
+ conversation_id: str
+ status: GetConversationResponseModelStatus
+ transcript: typing.List[ConversationHistoryTranscriptCommonModel]
+ metadata: ConversationHistoryMetadataCommonModel
+ analysis: typing.Optional[ConversationHistoryAnalysisCommonModel] = None
+ conversation_initiation_client_data: typing.Optional[ConversationInitiationClientData] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/get_conversation_response_model_status.py b/src/elevenlabs/types/get_conversation_response_model_status.py
new file mode 100644
index 00000000..e104d5c0
--- /dev/null
+++ b/src/elevenlabs/types/get_conversation_response_model_status.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+GetConversationResponseModelStatus = typing.Union[typing.Literal["processing", "done"], typing.Any]
diff --git a/src/elevenlabs/types/get_conversations_page_response_model.py b/src/elevenlabs/types/get_conversations_page_response_model.py
new file mode 100644
index 00000000..4deefb52
--- /dev/null
+++ b/src/elevenlabs/types/get_conversations_page_response_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conversation_summary_response_model import ConversationSummaryResponseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class GetConversationsPageResponseModel(UncheckedBaseModel):
+ conversations: typing.List[ConversationSummaryResponseModel]
+ next_cursor: typing.Optional[str] = None
+ has_more: bool
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/get_knowledge_base_reponse_model.py b/src/elevenlabs/types/get_knowledge_base_reponse_model.py
new file mode 100644
index 00000000..2390d765
--- /dev/null
+++ b/src/elevenlabs/types/get_knowledge_base_reponse_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .get_knowledge_base_reponse_model_type import GetKnowledgeBaseReponseModelType
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class GetKnowledgeBaseReponseModel(UncheckedBaseModel):
+ id: str
+ type: GetKnowledgeBaseReponseModelType
+ extracted_inner_html: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/get_knowledge_base_reponse_model_type.py b/src/elevenlabs/types/get_knowledge_base_reponse_model_type.py
new file mode 100644
index 00000000..d8904ba3
--- /dev/null
+++ b/src/elevenlabs/types/get_knowledge_base_reponse_model_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+GetKnowledgeBaseReponseModelType = typing.Union[typing.Literal["file", "url"], typing.Any]
diff --git a/src/elevenlabs/types/get_phone_number_response_model.py b/src/elevenlabs/types/get_phone_number_response_model.py
new file mode 100644
index 00000000..3917b8b3
--- /dev/null
+++ b/src/elevenlabs/types/get_phone_number_response_model.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+from .telephony_provider import TelephonyProvider
+import typing
+from .phone_number_agent_info import PhoneNumberAgentInfo
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class GetPhoneNumberResponseModel(UncheckedBaseModel):
+ phone_number: str = pydantic.Field()
+ """
+ Phone number
+ """
+
+ provider: TelephonyProvider = pydantic.Field(default="twilio")
+ """
+ Phone provider
+ """
+
+ label: str = pydantic.Field()
+ """
+ Label for the phone number
+ """
+
+ phone_number_id: str
+ assigned_agent: typing.Optional[PhoneNumberAgentInfo] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/image_avatar.py b/src/elevenlabs/types/image_avatar.py
new file mode 100644
index 00000000..5b5fed9c
--- /dev/null
+++ b/src/elevenlabs/types/image_avatar.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ImageAvatar(UncheckedBaseModel):
+ url: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/initialize_connection.py b/src/elevenlabs/types/initialize_connection.py
deleted file mode 100644
index efc1d88f..00000000
--- a/src/elevenlabs/types/initialize_connection.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-import typing
-import pydantic
-from .realtime_voice_settings import RealtimeVoiceSettings
-from .generation_config import GenerationConfig
-import typing_extensions
-from ..core.serialization import FieldMetadata
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class InitializeConnection(UncheckedBaseModel):
- text: typing.Literal[" "] = pydantic.Field(default=" ")
- """
- The initial text that must be sent is a blank space.
- """
-
- voice_settings: typing.Optional[RealtimeVoiceSettings] = None
- generation_config: typing.Optional[GenerationConfig] = pydantic.Field(default=None)
- """
- This property should only be provided in the first message you send.
- """
-
- xi_api_key: typing_extensions.Annotated[str, FieldMetadata(alias="xi-api-key")] = pydantic.Field()
- """
- Your ElevenLabs API key. This is a required parameter that should be provided in the first message you send.
- You can find your API key in the [API Keys section](https://elevenlabs.io/docs/api-reference/websockets#api-keys).
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/knowledge_base_locator.py b/src/elevenlabs/types/knowledge_base_locator.py
new file mode 100644
index 00000000..95aa389c
--- /dev/null
+++ b/src/elevenlabs/types/knowledge_base_locator.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .knowledge_base_locator_type import KnowledgeBaseLocatorType
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class KnowledgeBaseLocator(UncheckedBaseModel):
+ type: KnowledgeBaseLocatorType
+ name: str
+ id: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/knowledge_base_locator_type.py b/src/elevenlabs/types/knowledge_base_locator_type.py
new file mode 100644
index 00000000..074d02b7
--- /dev/null
+++ b/src/elevenlabs/types/knowledge_base_locator_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+KnowledgeBaseLocatorType = typing.Union[typing.Literal["file", "url"], typing.Any]
diff --git a/src/elevenlabs/types/library_voice_response.py b/src/elevenlabs/types/library_voice_response.py
index bba76f22..84de7efb 100644
--- a/src/elevenlabs/types/library_voice_response.py
+++ b/src/elevenlabs/types/library_voice_response.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .library_voice_response_model_category import LibraryVoiceResponseModelCategory
import typing_extensions
from ..core.serialization import FieldMetadata
import typing
@@ -18,7 +19,7 @@ class LibraryVoiceResponse(UncheckedBaseModel):
age: str
descriptive: str
use_case: str
- category: str
+ category: LibraryVoiceResponseModelCategory
language: str
description: str
preview_url: str
diff --git a/src/elevenlabs/types/library_voice_response_model_category.py b/src/elevenlabs/types/library_voice_response_model_category.py
new file mode 100644
index 00000000..7d3d40fe
--- /dev/null
+++ b/src/elevenlabs/types/library_voice_response_model_category.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LibraryVoiceResponseModelCategory = typing.Union[
+ typing.Literal["generated", "cloned", "premade", "professional", "famous", "high_quality"], typing.Any
+]
diff --git a/src/elevenlabs/types/literal_json_schema_property.py b/src/elevenlabs/types/literal_json_schema_property.py
new file mode 100644
index 00000000..76fa90fa
--- /dev/null
+++ b/src/elevenlabs/types/literal_json_schema_property.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .literal_json_schema_property_type import LiteralJsonSchemaPropertyType
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class LiteralJsonSchemaProperty(UncheckedBaseModel):
+ type: LiteralJsonSchemaPropertyType
+ description: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/literal_json_schema_property_type.py b/src/elevenlabs/types/literal_json_schema_property_type.py
new file mode 100644
index 00000000..f3ddb1f4
--- /dev/null
+++ b/src/elevenlabs/types/literal_json_schema_property_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LiteralJsonSchemaPropertyType = typing.Union[typing.Literal["boolean", "string", "integer", "number"], typing.Any]
diff --git a/src/elevenlabs/types/llm.py b/src/elevenlabs/types/llm.py
new file mode 100644
index 00000000..313f9d0a
--- /dev/null
+++ b/src/elevenlabs/types/llm.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+Llm = typing.Union[
+ typing.Literal[
+ "gpt-4o-mini",
+ "gpt-4o",
+ "gpt-4",
+ "gpt-4-turbo",
+ "gpt-3.5-turbo",
+ "gemini-1.5-pro",
+ "gemini-1.5-flash",
+ "gemini-1.0-pro",
+ "claude-3-5-sonnet",
+ "claude-3-haiku",
+ "grok-beta",
+ "custom-llm",
+ ],
+ typing.Any,
+]
diff --git a/src/elevenlabs/types/model.py b/src/elevenlabs/types/model.py
index 11545b16..74fb3815 100644
--- a/src/elevenlabs/types/model.py
+++ b/src/elevenlabs/types/model.py
@@ -3,6 +3,8 @@
from ..core.unchecked_base_model import UncheckedBaseModel
import typing
from .language_response import LanguageResponse
+from .model_rates_response_model import ModelRatesResponseModel
+from .model_response_model_concurrency_group import ModelResponseModelConcurrencyGroup
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
@@ -23,6 +25,8 @@ class Model(UncheckedBaseModel):
max_characters_request_subscribed_user: typing.Optional[int] = None
maximum_text_length_per_request: typing.Optional[int] = None
languages: typing.Optional[typing.List[LanguageResponse]] = None
+ model_rates: typing.Optional[ModelRatesResponseModel] = None
+ concurrency_group: typing.Optional[ModelResponseModelConcurrencyGroup] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/model_rates_response_model.py b/src/elevenlabs/types/model_rates_response_model.py
new file mode 100644
index 00000000..a695d91b
--- /dev/null
+++ b/src/elevenlabs/types/model_rates_response_model.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ModelRatesResponseModel(UncheckedBaseModel):
+ character_cost_multiplier: float
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/model_response_model_concurrency_group.py b/src/elevenlabs/types/model_response_model_concurrency_group.py
new file mode 100644
index 00000000..1f25a1e0
--- /dev/null
+++ b/src/elevenlabs/types/model_response_model_concurrency_group.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ModelResponseModelConcurrencyGroup = typing.Union[typing.Literal["standard", "turbo"], typing.Any]
diff --git a/src/elevenlabs/types/moderation_status_response_model.py b/src/elevenlabs/types/moderation_status_response_model.py
new file mode 100644
index 00000000..c1055274
--- /dev/null
+++ b/src/elevenlabs/types/moderation_status_response_model.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .moderation_status_response_model_safety_status import ModerationStatusResponseModelSafetyStatus
+from .moderation_status_response_model_warning_status import ModerationStatusResponseModelWarningStatus
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ModerationStatusResponseModel(UncheckedBaseModel):
+ is_in_probation: bool
+ enterprise_check_nogo_voice: bool
+ enterprise_check_block_nogo_voice: bool
+ never_live_moderate: bool
+ nogo_voice_similar_voice_upload_count: int
+ enterprise_background_moderation_enabled: bool
+ safety_status: ModerationStatusResponseModelSafetyStatus
+ warning_status: ModerationStatusResponseModelWarningStatus
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/moderation_status_response_model_safety_status.py b/src/elevenlabs/types/moderation_status_response_model_safety_status.py
new file mode 100644
index 00000000..23c22d37
--- /dev/null
+++ b/src/elevenlabs/types/moderation_status_response_model_safety_status.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ModerationStatusResponseModelSafetyStatus = typing.Union[typing.Literal["appeal_approved", "appeal_denied"], typing.Any]
diff --git a/src/elevenlabs/types/moderation_status_response_model_warning_status.py b/src/elevenlabs/types/moderation_status_response_model_warning_status.py
new file mode 100644
index 00000000..3869962f
--- /dev/null
+++ b/src/elevenlabs/types/moderation_status_response_model_warning_status.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ModerationStatusResponseModelWarningStatus = typing.Union[typing.Literal["warning", "warning_cleared"], typing.Any]
diff --git a/src/elevenlabs/types/normalized_alignment.py b/src/elevenlabs/types/normalized_alignment.py
deleted file mode 100644
index cac3e0c6..00000000
--- a/src/elevenlabs/types/normalized_alignment.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-import typing
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class NormalizedAlignment(UncheckedBaseModel):
- """
- Alignment information for the generated audio given the input normalized text sequence.
- """
-
- char_start_times_ms: typing.Optional[typing.List[int]] = pydantic.Field(default=None)
- """
- A list of starting times (in milliseconds) for each character in the normalized text as it
- corresponds to the audio. For instance, the character 'H' starts at time 0 ms in the audio.
- Note these times are relative to the returned chunk from the model, and not the
- full audio response.
- """
-
- chars_durations_ms: typing.Optional[typing.List[int]] = pydantic.Field(default=None)
- """
- A list of durations (in milliseconds) for each character in the normalized text as it
- corresponds to the audio. For instance, the character 'H' lasts for 3 ms in the audio.
- Note these times are relative to the returned chunk from the model, and not the
- full audio response.
- """
-
- chars: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- A list of characters in the normalized text sequence. For instance, the first character is 'H'.
- Note that this list may contain spaces, punctuation, and other special characters.
- The length of this list should be the same as the lengths of `char_start_times_ms` and `chars_durations_ms`.
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/object_json_schema_property.py b/src/elevenlabs/types/object_json_schema_property.py
new file mode 100644
index 00000000..4ec5fd8b
--- /dev/null
+++ b/src/elevenlabs/types/object_json_schema_property.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class ObjectJsonSchemaProperty(UncheckedBaseModel):
+ type: typing.Optional[typing.Literal["object"]] = None
+ properties: typing.Optional[typing.Dict[str, "ObjectJsonSchemaPropertyPropertiesValue"]] = None
+ required: typing.Optional[typing.List[str]] = None
+ description: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .object_json_schema_property_properties_value import ObjectJsonSchemaPropertyPropertiesValue # noqa: E402
+
+update_forward_refs(ObjectJsonSchemaProperty)
diff --git a/src/elevenlabs/types/object_json_schema_property_properties_value.py b/src/elevenlabs/types/object_json_schema_property_properties_value.py
new file mode 100644
index 00000000..20b89511
--- /dev/null
+++ b/src/elevenlabs/types/object_json_schema_property_properties_value.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .literal_json_schema_property import LiteralJsonSchemaProperty
+import typing
+
+if typing.TYPE_CHECKING:
+ from .object_json_schema_property import ObjectJsonSchemaProperty
+ from .array_json_schema_property import ArrayJsonSchemaProperty
+ObjectJsonSchemaPropertyPropertiesValue = typing.Union[
+ LiteralJsonSchemaProperty, "ObjectJsonSchemaProperty", "ArrayJsonSchemaProperty"
+]
diff --git a/src/elevenlabs/types/optimize_streaming_latency.py b/src/elevenlabs/types/optimize_streaming_latency.py
deleted file mode 100644
index 1b9a4dec..00000000
--- a/src/elevenlabs/types/optimize_streaming_latency.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-OptimizeStreamingLatency = typing.Union[typing.Literal["0", "1", "2", "3", "4"], typing.Any]
diff --git a/src/elevenlabs/types/orb_avatar.py b/src/elevenlabs/types/orb_avatar.py
new file mode 100644
index 00000000..ff39f856
--- /dev/null
+++ b/src/elevenlabs/types/orb_avatar.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class OrbAvatar(UncheckedBaseModel):
+ color_1: typing.Optional[str] = None
+ color_2: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/phone_number_agent_info.py b/src/elevenlabs/types/phone_number_agent_info.py
new file mode 100644
index 00000000..f20c291a
--- /dev/null
+++ b/src/elevenlabs/types/phone_number_agent_info.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class PhoneNumberAgentInfo(UncheckedBaseModel):
+ agent_id: str
+ agent_name: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/post_agent_avatar_response_model.py b/src/elevenlabs/types/post_agent_avatar_response_model.py
new file mode 100644
index 00000000..3b56a774
--- /dev/null
+++ b/src/elevenlabs/types/post_agent_avatar_response_model.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class PostAgentAvatarResponseModel(UncheckedBaseModel):
+ agent_id: str
+ avatar_url: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/privacy_config.py b/src/elevenlabs/types/privacy_config.py
new file mode 100644
index 00000000..928860b0
--- /dev/null
+++ b/src/elevenlabs/types/privacy_config.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class PrivacyConfig(UncheckedBaseModel):
+ record_voice: typing.Optional[bool] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/project_creation_meta_response_model.py b/src/elevenlabs/types/project_creation_meta_response_model.py
new file mode 100644
index 00000000..fb16cd92
--- /dev/null
+++ b/src/elevenlabs/types/project_creation_meta_response_model.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .project_creation_meta_response_model_status import ProjectCreationMetaResponseModelStatus
+from .project_creation_meta_response_model_type import ProjectCreationMetaResponseModelType
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ProjectCreationMetaResponseModel(UncheckedBaseModel):
+ creation_progress: float
+ status: ProjectCreationMetaResponseModelStatus
+ type: ProjectCreationMetaResponseModelType
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/project_creation_meta_response_model_status.py b/src/elevenlabs/types/project_creation_meta_response_model_status.py
new file mode 100644
index 00000000..806a9a0f
--- /dev/null
+++ b/src/elevenlabs/types/project_creation_meta_response_model_status.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectCreationMetaResponseModelStatus = typing.Union[
+ typing.Literal["pending", "creating", "finished", "failed"], typing.Any
+]
diff --git a/src/elevenlabs/types/project_creation_meta_response_model_type.py b/src/elevenlabs/types/project_creation_meta_response_model_type.py
new file mode 100644
index 00000000..e4563898
--- /dev/null
+++ b/src/elevenlabs/types/project_creation_meta_response_model_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectCreationMetaResponseModelType = typing.Union[typing.Literal["blank", "generate_podcast"], typing.Any]
diff --git a/src/elevenlabs/types/project_extended_response_model.py b/src/elevenlabs/types/project_extended_response_model.py
index 847f5d23..b40a4039 100644
--- a/src/elevenlabs/types/project_extended_response_model.py
+++ b/src/elevenlabs/types/project_extended_response_model.py
@@ -1,9 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
-from .project_state import ProjectState
import typing
+from .project_extended_response_model_target_audience import ProjectExtendedResponseModelTargetAudience
+from .project_state import ProjectState
+from .project_extended_response_model_access_level import ProjectExtendedResponseModelAccessLevel
+from .project_extended_response_model_fiction import ProjectExtendedResponseModelFiction
+from .project_creation_meta_response_model import ProjectCreationMetaResponseModel
+from .project_extended_response_model_quality_preset import ProjectExtendedResponseModelQualityPreset
from .chapter_response import ChapterResponse
+from .pronunciation_dictionary_version_response_model import PronunciationDictionaryVersionResponseModel
+from .project_extended_response_model_apply_text_normalization import ProjectExtendedResponseModelApplyTextNormalization
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
@@ -15,10 +22,31 @@ class ProjectExtendedResponseModel(UncheckedBaseModel):
default_title_voice_id: str
default_paragraph_voice_id: str
default_model_id: str
- last_conversion_date_unix: int
+ last_conversion_date_unix: typing.Optional[int] = None
can_be_downloaded: bool
+ title: typing.Optional[str] = None
+ author: typing.Optional[str] = None
+ description: typing.Optional[str] = None
+ genres: typing.Optional[typing.List[str]] = None
+ cover_image_url: typing.Optional[str] = None
+ target_audience: typing.Optional[ProjectExtendedResponseModelTargetAudience] = None
+ language: typing.Optional[str] = None
+ content_type: typing.Optional[str] = None
+ original_publication_date: typing.Optional[str] = None
+ mature_content: typing.Optional[bool] = None
+ isbn_number: typing.Optional[str] = None
+ volume_normalization: bool
state: ProjectState
+ access_level: ProjectExtendedResponseModelAccessLevel
+ fiction: typing.Optional[ProjectExtendedResponseModelFiction] = None
+ quality_check_on: bool
+ quality_check_on_when_bulk_convert: bool
+ creation_meta: typing.Optional[ProjectCreationMetaResponseModel] = None
+ quality_preset: ProjectExtendedResponseModelQualityPreset
chapters: typing.List[ChapterResponse]
+ pronunciation_dictionary_versions: typing.List[PronunciationDictionaryVersionResponseModel]
+ apply_text_normalization: ProjectExtendedResponseModelApplyTextNormalization
+ experimental: typing.Dict[str, typing.Optional[typing.Any]]
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/project_extended_response_model_access_level.py b/src/elevenlabs/types/project_extended_response_model_access_level.py
new file mode 100644
index 00000000..53427425
--- /dev/null
+++ b/src/elevenlabs/types/project_extended_response_model_access_level.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectExtendedResponseModelAccessLevel = typing.Union[typing.Literal["admin", "editor", "viewer"], typing.Any]
diff --git a/src/elevenlabs/types/project_extended_response_model_apply_text_normalization.py b/src/elevenlabs/types/project_extended_response_model_apply_text_normalization.py
new file mode 100644
index 00000000..490a9abf
--- /dev/null
+++ b/src/elevenlabs/types/project_extended_response_model_apply_text_normalization.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectExtendedResponseModelApplyTextNormalization = typing.Union[
+ typing.Literal["auto", "on", "off", "apply_english"], typing.Any
+]
diff --git a/src/elevenlabs/types/project_extended_response_model_fiction.py b/src/elevenlabs/types/project_extended_response_model_fiction.py
new file mode 100644
index 00000000..0c54e149
--- /dev/null
+++ b/src/elevenlabs/types/project_extended_response_model_fiction.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectExtendedResponseModelFiction = typing.Union[typing.Literal["fiction", "non-fiction"], typing.Any]
diff --git a/src/elevenlabs/types/project_extended_response_model_quality_preset.py b/src/elevenlabs/types/project_extended_response_model_quality_preset.py
new file mode 100644
index 00000000..8b10a5f9
--- /dev/null
+++ b/src/elevenlabs/types/project_extended_response_model_quality_preset.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectExtendedResponseModelQualityPreset = typing.Union[
+ typing.Literal["standard", "high", "highest", "ultra", "ultra_lossless"], typing.Any
+]
diff --git a/src/elevenlabs/types/project_extended_response_model_target_audience.py b/src/elevenlabs/types/project_extended_response_model_target_audience.py
new file mode 100644
index 00000000..5fa6dc33
--- /dev/null
+++ b/src/elevenlabs/types/project_extended_response_model_target_audience.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectExtendedResponseModelTargetAudience = typing.Union[
+ typing.Literal["children", "young adult", "adult", "all ages"], typing.Any
+]
diff --git a/src/elevenlabs/types/project_response.py b/src/elevenlabs/types/project_response.py
index 735032d7..23936047 100644
--- a/src/elevenlabs/types/project_response.py
+++ b/src/elevenlabs/types/project_response.py
@@ -1,9 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .project_response_model_target_audience import ProjectResponseModelTargetAudience
from .project_state import ProjectState
+from .project_response_model_access_level import ProjectResponseModelAccessLevel
+from .project_response_model_fiction import ProjectResponseModelFiction
+from .project_creation_meta_response_model import ProjectCreationMetaResponseModel
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
import pydantic
@@ -14,13 +18,26 @@ class ProjectResponse(UncheckedBaseModel):
default_title_voice_id: str
default_paragraph_voice_id: str
default_model_id: str
- last_conversion_date_unix: int
+ last_conversion_date_unix: typing.Optional[int] = None
can_be_downloaded: bool
- title: str
- author: str
- isbn_number: str
+ title: typing.Optional[str] = None
+ author: typing.Optional[str] = None
+ description: typing.Optional[str] = None
+ genres: typing.Optional[typing.List[str]] = None
+ cover_image_url: typing.Optional[str] = None
+ target_audience: typing.Optional[ProjectResponseModelTargetAudience] = None
+ language: typing.Optional[str] = None
+ content_type: typing.Optional[str] = None
+ original_publication_date: typing.Optional[str] = None
+ mature_content: typing.Optional[bool] = None
+ isbn_number: typing.Optional[str] = None
volume_normalization: bool
state: ProjectState
+ access_level: ProjectResponseModelAccessLevel
+ fiction: typing.Optional[ProjectResponseModelFiction] = None
+ quality_check_on: bool
+ quality_check_on_when_bulk_convert: bool
+ creation_meta: typing.Optional[ProjectCreationMetaResponseModel] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/project_response_model_access_level.py b/src/elevenlabs/types/project_response_model_access_level.py
new file mode 100644
index 00000000..b5d62265
--- /dev/null
+++ b/src/elevenlabs/types/project_response_model_access_level.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectResponseModelAccessLevel = typing.Union[typing.Literal["admin", "editor", "viewer"], typing.Any]
diff --git a/src/elevenlabs/types/project_response_model_fiction.py b/src/elevenlabs/types/project_response_model_fiction.py
new file mode 100644
index 00000000..04a90ca4
--- /dev/null
+++ b/src/elevenlabs/types/project_response_model_fiction.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectResponseModelFiction = typing.Union[typing.Literal["fiction", "non-fiction"], typing.Any]
diff --git a/src/elevenlabs/types/project_response_model_target_audience.py b/src/elevenlabs/types/project_response_model_target_audience.py
new file mode 100644
index 00000000..f235fc35
--- /dev/null
+++ b/src/elevenlabs/types/project_response_model_target_audience.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectResponseModelTargetAudience = typing.Union[
+ typing.Literal["children", "young adult", "adult", "all ages"], typing.Any
+]
diff --git a/src/elevenlabs/types/project_snapshot_upload_response_model.py b/src/elevenlabs/types/project_snapshot_upload_response_model.py
index 58493d00..c19ca85c 100644
--- a/src/elevenlabs/types/project_snapshot_upload_response_model.py
+++ b/src/elevenlabs/types/project_snapshot_upload_response_model.py
@@ -1,14 +1,14 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
-from .status import Status
+from .project_snapshot_upload_response_model_status import ProjectSnapshotUploadResponseModelStatus
import typing
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
class ProjectSnapshotUploadResponseModel(UncheckedBaseModel):
- status: Status
+ status: ProjectSnapshotUploadResponseModelStatus
acx_volume_normalization: typing.Optional[bool] = None
if IS_PYDANTIC_V2:
diff --git a/src/elevenlabs/types/project_snapshot_upload_response_model_status.py b/src/elevenlabs/types/project_snapshot_upload_response_model_status.py
new file mode 100644
index 00000000..884059e0
--- /dev/null
+++ b/src/elevenlabs/types/project_snapshot_upload_response_model_status.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectSnapshotUploadResponseModelStatus = typing.Union[
+ typing.Literal["success", "in_queue", "pending", "failed"], typing.Any
+]
diff --git a/src/elevenlabs/types/project_state.py b/src/elevenlabs/types/project_state.py
index 6e1398a5..d96580c0 100644
--- a/src/elevenlabs/types/project_state.py
+++ b/src/elevenlabs/types/project_state.py
@@ -2,4 +2,4 @@
import typing
-ProjectState = typing.Union[typing.Literal["default", "converting", "in_queue"], typing.Any]
+ProjectState = typing.Union[typing.Literal["creating", "default", "converting", "in_queue"], typing.Any]
diff --git a/src/elevenlabs/types/prompt_agent.py b/src/elevenlabs/types/prompt_agent.py
new file mode 100644
index 00000000..b3b60717
--- /dev/null
+++ b/src/elevenlabs/types/prompt_agent.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .object_json_schema_property import ObjectJsonSchemaProperty
+import typing
+from .llm import Llm
+from .prompt_agent_tools_item import PromptAgentToolsItem
+from .knowledge_base_locator import KnowledgeBaseLocator
+from .custom_llm import CustomLlm
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class PromptAgent(UncheckedBaseModel):
+ prompt: typing.Optional[str] = None
+ llm: typing.Optional[Llm] = None
+ temperature: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ tools: typing.Optional[typing.List[PromptAgentToolsItem]] = None
+ knowledge_base: typing.Optional[typing.List[KnowledgeBaseLocator]] = None
+ custom_llm: typing.Optional[CustomLlm] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+update_forward_refs(ArrayJsonSchemaProperty, PromptAgent=PromptAgent)
+update_forward_refs(ObjectJsonSchemaProperty, PromptAgent=PromptAgent)
diff --git a/src/elevenlabs/types/prompt_agent_override.py b/src/elevenlabs/types/prompt_agent_override.py
new file mode 100644
index 00000000..2ca03954
--- /dev/null
+++ b/src/elevenlabs/types/prompt_agent_override.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class PromptAgentOverride(UncheckedBaseModel):
+ prompt: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/prompt_agent_override_config.py b/src/elevenlabs/types/prompt_agent_override_config.py
new file mode 100644
index 00000000..108ee790
--- /dev/null
+++ b/src/elevenlabs/types/prompt_agent_override_config.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class PromptAgentOverrideConfig(UncheckedBaseModel):
+ prompt: typing.Optional[bool] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/prompt_agent_tools_item.py b/src/elevenlabs/types/prompt_agent_tools_item.py
new file mode 100644
index 00000000..410df2c1
--- /dev/null
+++ b/src/elevenlabs/types/prompt_agent_tools_item.py
@@ -0,0 +1,56 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .object_json_schema_property import ObjectJsonSchemaProperty
+import typing
+from .webhook_tool_api_schema_config import WebhookToolApiSchemaConfig
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+import typing_extensions
+from ..core.unchecked_base_model import UnionMetadata
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class PromptAgentToolsItem_Webhook(UncheckedBaseModel):
+ type: typing.Literal["webhook"] = "webhook"
+ name: str
+ description: str
+ api_schema: WebhookToolApiSchemaConfig
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class PromptAgentToolsItem_Client(UncheckedBaseModel):
+ type: typing.Literal["client"] = "client"
+ name: str
+ description: str
+ parameters: typing.Optional[ObjectJsonSchemaProperty] = None
+ expects_response: typing.Optional[bool] = None
+ response_timeout_secs: typing.Optional[int] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+PromptAgentToolsItem = typing_extensions.Annotated[
+ typing.Union[PromptAgentToolsItem_Webhook, PromptAgentToolsItem_Client], UnionMetadata(discriminant="type")
+]
+update_forward_refs(ArrayJsonSchemaProperty, PromptAgentToolsItem_Webhook=PromptAgentToolsItem_Webhook)
+update_forward_refs(ObjectJsonSchemaProperty, PromptAgentToolsItem_Webhook=PromptAgentToolsItem_Webhook)
+update_forward_refs(ArrayJsonSchemaProperty, PromptAgentToolsItem_Client=PromptAgentToolsItem_Client)
+update_forward_refs(ObjectJsonSchemaProperty, PromptAgentToolsItem_Client=PromptAgentToolsItem_Client)
diff --git a/src/elevenlabs/types/prompt_evaluation_criteria.py b/src/elevenlabs/types/prompt_evaluation_criteria.py
new file mode 100644
index 00000000..23a56890
--- /dev/null
+++ b/src/elevenlabs/types/prompt_evaluation_criteria.py
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class PromptEvaluationCriteria(UncheckedBaseModel):
+ """
+ An evaluation using the transcript and a prompt for a yes/no achieved answer
+ """
+
+ id: str
+ name: typing.Optional[str] = None
+ type: typing.Optional[typing.Literal["prompt"]] = None
+ conversation_goal_prompt: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/pronunciation_dictionary_version_response_model.py b/src/elevenlabs/types/pronunciation_dictionary_version_response_model.py
new file mode 100644
index 00000000..21150c87
--- /dev/null
+++ b/src/elevenlabs/types/pronunciation_dictionary_version_response_model.py
@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class PronunciationDictionaryVersionResponseModel(UncheckedBaseModel):
+ version_id: str
+ pronunciation_dictionary_id: str
+ dictionary_name: str
+ version_name: str
+ created_by: str
+ creation_time_unix: int
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/pydantic_pronunciation_dictionary_version_locator.py b/src/elevenlabs/types/pydantic_pronunciation_dictionary_version_locator.py
new file mode 100644
index 00000000..e967a2f7
--- /dev/null
+++ b/src/elevenlabs/types/pydantic_pronunciation_dictionary_version_locator.py
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class PydanticPronunciationDictionaryVersionLocator(UncheckedBaseModel):
+ """
+ A locator for other documents to be able to reference a specific dictionary and it's version.
+ This is a pydantic version of PronunciationDictionaryVersionLocatorDBModel.
+ Required to ensure compat with the rest of the agent data models.
+ """
+
+ pronunciation_dictionary_id: str
+ version_id: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/query_params_json_schema.py b/src/elevenlabs/types/query_params_json_schema.py
new file mode 100644
index 00000000..0de3881a
--- /dev/null
+++ b/src/elevenlabs/types/query_params_json_schema.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .literal_json_schema_property import LiteralJsonSchemaProperty
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class QueryParamsJsonSchema(UncheckedBaseModel):
+ properties: typing.Dict[str, LiteralJsonSchemaProperty]
+ required: typing.Optional[typing.List[str]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/reader_resource_response_model.py b/src/elevenlabs/types/reader_resource_response_model.py
new file mode 100644
index 00000000..e98b7096
--- /dev/null
+++ b/src/elevenlabs/types/reader_resource_response_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .reader_resource_response_model_resource_type import ReaderResourceResponseModelResourceType
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ReaderResourceResponseModel(UncheckedBaseModel):
+ resource_type: ReaderResourceResponseModelResourceType
+ resource_id: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/reader_resource_response_model_resource_type.py b/src/elevenlabs/types/reader_resource_response_model_resource_type.py
new file mode 100644
index 00000000..937d9174
--- /dev/null
+++ b/src/elevenlabs/types/reader_resource_response_model_resource_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ReaderResourceResponseModelResourceType = typing.Union[typing.Literal["read", "collection"], typing.Any]
diff --git a/src/elevenlabs/types/realtime_voice_settings.py b/src/elevenlabs/types/realtime_voice_settings.py
deleted file mode 100644
index 983efc51..00000000
--- a/src/elevenlabs/types/realtime_voice_settings.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
-import typing
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class RealtimeVoiceSettings(UncheckedBaseModel):
- stability: float = pydantic.Field()
- """
- Defines the stability for voice settings.
- """
-
- similarity_boost: float = pydantic.Field()
- """
- Defines the similarity boost for voice settings.
- """
-
- style: typing.Optional[float] = pydantic.Field(default=None)
- """
- Defines the style for voice settings. This parameter is available on V2+ models.
- """
-
- use_speaker_boost: typing.Optional[bool] = pydantic.Field(default=None)
- """
- Defines the use speaker boost for voice settings. This parameter is available on V2+ models.
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/safety.py b/src/elevenlabs/types/safety.py
new file mode 100644
index 00000000..d5980e39
--- /dev/null
+++ b/src/elevenlabs/types/safety.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .safety_evaluation import SafetyEvaluation
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class Safety(UncheckedBaseModel):
+ """
+ Safety object that has the information of safety evaluations based on used voice.
+ """
+
+ ivc: typing.Optional[SafetyEvaluation] = None
+ non_ivc: typing.Optional[SafetyEvaluation] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/safety_evaluation.py b/src/elevenlabs/types/safety_evaluation.py
new file mode 100644
index 00000000..0ac8cfc7
--- /dev/null
+++ b/src/elevenlabs/types/safety_evaluation.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .safety_rule import SafetyRule
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class SafetyEvaluation(UncheckedBaseModel):
+ """
+ Safety evaluation of the agent. Prompt and first message is taken into account.
+ The unsafe reason is provided from the evaluation
+ """
+
+ is_unsafe: typing.Optional[bool] = None
+ llm_reason: typing.Optional[str] = None
+ safety_prompt_version: typing.Optional[int] = None
+ matched_rule_id: typing.Optional[typing.List[SafetyRule]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/safety_rule.py b/src/elevenlabs/types/safety_rule.py
new file mode 100644
index 00000000..982a0945
--- /dev/null
+++ b/src/elevenlabs/types/safety_rule.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SafetyRule = typing.Union[
+ typing.Literal[
+ "sexual_minors",
+ "forget_moderation",
+ "extremism",
+ "scam_fraud",
+ "political",
+ "self_harm",
+ "illegal_distribution_medical",
+ "sexual_adults",
+ "unknown",
+ ],
+ typing.Any,
+]
diff --git a/src/elevenlabs/types/send_text.py b/src/elevenlabs/types/send_text.py
deleted file mode 100644
index e1a391e5..00000000
--- a/src/elevenlabs/types/send_text.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-import typing
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class SendText(UncheckedBaseModel):
- text: str
- try_trigger_generation: typing.Optional[bool] = pydantic.Field(default=None)
- """
- This is an advanced setting that most users shouldn't need to use. It relates to our generation schedule
- explained [here](#understanding-how-our-websockets-buffer-text).
-
- Use this to attempt to immediately trigger the generation of audio, overriding the `chunk_length_schedule`.
- Unlike flush, `try_trigger_generation` will only generate audio if our
- buffer contains more than a minimum
- threshold of characters, this is to ensure a higher quality response from our model.
-
- Note that overriding the chunk schedule to generate small amounts of
- text may result in lower quality audio, therefore, only use this parameter if you
- really need text to be processed immediately. We generally recommend keeping the default value of
- `false` and adjusting the `chunk_length_schedule` in the `generation_config` instead.
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/speech_history_item_response.py b/src/elevenlabs/types/speech_history_item_response.py
index 21addda4..783c55d0 100644
--- a/src/elevenlabs/types/speech_history_item_response.py
+++ b/src/elevenlabs/types/speech_history_item_response.py
@@ -4,7 +4,7 @@
import typing
from .speech_history_item_response_model_voice_category import SpeechHistoryItemResponseModelVoiceCategory
from .feedback_item import FeedbackItem
-from .source import Source
+from .speech_history_item_response_model_source import SpeechHistoryItemResponseModelSource
from .history_alignments_response_model import HistoryAlignmentsResponseModel
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
@@ -26,7 +26,7 @@ class SpeechHistoryItemResponse(UncheckedBaseModel):
settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
feedback: typing.Optional[FeedbackItem] = None
share_link_id: typing.Optional[str] = None
- source: typing.Optional[Source] = None
+ source: typing.Optional[SpeechHistoryItemResponseModelSource] = None
alignments: typing.Optional[HistoryAlignmentsResponseModel] = None
if IS_PYDANTIC_V2:
diff --git a/src/elevenlabs/types/speech_history_item_response_model_source.py b/src/elevenlabs/types/speech_history_item_response_model_source.py
new file mode 100644
index 00000000..7ed84840
--- /dev/null
+++ b/src/elevenlabs/types/speech_history_item_response_model_source.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeechHistoryItemResponseModelSource = typing.Union[typing.Literal["TTS", "STS"], typing.Any]
diff --git a/src/elevenlabs/types/sso_provider_response_model_provider_type.py b/src/elevenlabs/types/sso_provider_response_model_provider_type.py
deleted file mode 100644
index 52c8f957..00000000
--- a/src/elevenlabs/types/sso_provider_response_model_provider_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SsoProviderResponseModelProviderType = typing.Union[typing.Literal["saml", "oidc"], typing.Any]
diff --git a/src/elevenlabs/types/status.py b/src/elevenlabs/types/status.py
deleted file mode 100644
index 7023643d..00000000
--- a/src/elevenlabs/types/status.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-Status = typing.Union[typing.Literal["success", "in_queue", "pending", "failed"], typing.Any]
diff --git a/src/elevenlabs/types/subscription.py b/src/elevenlabs/types/subscription.py
index a89f632b..b480e2c4 100644
--- a/src/elevenlabs/types/subscription.py
+++ b/src/elevenlabs/types/subscription.py
@@ -2,7 +2,7 @@
from ..core.unchecked_base_model import UncheckedBaseModel
import typing
-from .currency import Currency
+from .extended_subscription_response_model_currency import ExtendedSubscriptionResponseModelCurrency
from .subscription_status import SubscriptionStatus
from .extended_subscription_response_model_billing_period import ExtendedSubscriptionResponseModelBillingPeriod
from .extended_subscription_response_model_character_refresh_period import (
@@ -27,7 +27,7 @@ class Subscription(UncheckedBaseModel):
can_extend_voice_limit: bool
can_use_instant_voice_cloning: bool
can_use_professional_voice_cloning: bool
- currency: typing.Optional[Currency] = None
+ currency: typing.Optional[ExtendedSubscriptionResponseModelCurrency] = None
status: typing.Optional[SubscriptionStatus] = None
billing_period: typing.Optional[ExtendedSubscriptionResponseModelBillingPeriod] = None
character_refresh_period: typing.Optional[ExtendedSubscriptionResponseModelCharacterRefreshPeriod] = None
diff --git a/src/elevenlabs/types/subscription_response.py b/src/elevenlabs/types/subscription_response.py
index 0513a818..75bc496f 100644
--- a/src/elevenlabs/types/subscription_response.py
+++ b/src/elevenlabs/types/subscription_response.py
@@ -1,7 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
-from .currency import Currency
+from .subscription_response_model_currency import SubscriptionResponseModelCurrency
from .subscription_status import SubscriptionStatus
from .subscription_response_model_billing_period import SubscriptionResponseModelBillingPeriod
from .subscription_response_model_character_refresh_period import SubscriptionResponseModelCharacterRefreshPeriod
@@ -24,7 +24,7 @@ class SubscriptionResponse(UncheckedBaseModel):
can_extend_voice_limit: bool
can_use_instant_voice_cloning: bool
can_use_professional_voice_cloning: bool
- currency: Currency
+ currency: SubscriptionResponseModelCurrency
status: SubscriptionStatus
billing_period: SubscriptionResponseModelBillingPeriod
character_refresh_period: SubscriptionResponseModelCharacterRefreshPeriod
diff --git a/src/elevenlabs/types/subscription_response_model_currency.py b/src/elevenlabs/types/subscription_response_model_currency.py
new file mode 100644
index 00000000..9cba2c8d
--- /dev/null
+++ b/src/elevenlabs/types/subscription_response_model_currency.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SubscriptionResponseModelCurrency = typing.Union[typing.Literal["usd", "eur"], typing.Any]
diff --git a/src/elevenlabs/types/audio_native_get_embed_code_response_model.py b/src/elevenlabs/types/telephony_provider.py
similarity index 54%
rename from src/elevenlabs/types/audio_native_get_embed_code_response_model.py
rename to src/elevenlabs/types/telephony_provider.py
index 12c70385..a678a2ca 100644
--- a/src/elevenlabs/types/audio_native_get_embed_code_response_model.py
+++ b/src/elevenlabs/types/telephony_provider.py
@@ -2,4 +2,4 @@
import typing
-AudioNativeGetEmbedCodeResponseModel = typing.Optional[typing.Any]
+TelephonyProvider = typing.Literal["twilio"]
diff --git a/src/elevenlabs/types/tts_conversational_config.py b/src/elevenlabs/types/tts_conversational_config.py
new file mode 100644
index 00000000..3c219fb3
--- /dev/null
+++ b/src/elevenlabs/types/tts_conversational_config.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .tts_conversational_model import TtsConversationalModel
+from .tts_output_format import TtsOutputFormat
+from .tts_optimize_streaming_latency import TtsOptimizeStreamingLatency
+from .pydantic_pronunciation_dictionary_version_locator import PydanticPronunciationDictionaryVersionLocator
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class TtsConversationalConfig(UncheckedBaseModel):
+ model_id: typing.Optional[TtsConversationalModel] = None
+ voice_id: typing.Optional[str] = None
+ agent_output_audio_format: typing.Optional[TtsOutputFormat] = None
+ optimize_streaming_latency: typing.Optional[TtsOptimizeStreamingLatency] = None
+ stability: typing.Optional[float] = None
+ similarity_boost: typing.Optional[float] = None
+ pronunciation_dictionary_locators: typing.Optional[typing.List[PydanticPronunciationDictionaryVersionLocator]] = (
+ None
+ )
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/tts_conversational_config_override.py b/src/elevenlabs/types/tts_conversational_config_override.py
new file mode 100644
index 00000000..db600b89
--- /dev/null
+++ b/src/elevenlabs/types/tts_conversational_config_override.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class TtsConversationalConfigOverride(UncheckedBaseModel):
+ voice_id: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/tts_conversational_config_override_config.py b/src/elevenlabs/types/tts_conversational_config_override_config.py
new file mode 100644
index 00000000..13b1d756
--- /dev/null
+++ b/src/elevenlabs/types/tts_conversational_config_override_config.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class TtsConversationalConfigOverrideConfig(UncheckedBaseModel):
+ voice_id: typing.Optional[bool] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/tts_conversational_model.py b/src/elevenlabs/types/tts_conversational_model.py
new file mode 100644
index 00000000..3c9c0bc9
--- /dev/null
+++ b/src/elevenlabs/types/tts_conversational_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TtsConversationalModel = typing.Union[typing.Literal["eleven_turbo_v2", "eleven_turbo_v2_5"], typing.Any]
diff --git a/src/elevenlabs/types/tts_optimize_streaming_latency.py b/src/elevenlabs/types/tts_optimize_streaming_latency.py
new file mode 100644
index 00000000..36429b8d
--- /dev/null
+++ b/src/elevenlabs/types/tts_optimize_streaming_latency.py
@@ -0,0 +1,3 @@
+# This file was auto-generated by Fern from our API Definition.
+
+TtsOptimizeStreamingLatency = int
diff --git a/src/elevenlabs/types/tts_output_format.py b/src/elevenlabs/types/tts_output_format.py
new file mode 100644
index 00000000..aceaba22
--- /dev/null
+++ b/src/elevenlabs/types/tts_output_format.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TtsOutputFormat = typing.Union[
+ typing.Literal["pcm_16000", "pcm_22050", "pcm_24000", "pcm_44100", "ulaw_8000"], typing.Any
+]
diff --git a/src/elevenlabs/types/turn_config.py b/src/elevenlabs/types/turn_config.py
new file mode 100644
index 00000000..50347f60
--- /dev/null
+++ b/src/elevenlabs/types/turn_config.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .turn_mode import TurnMode
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class TurnConfig(UncheckedBaseModel):
+ turn_timeout: typing.Optional[float] = None
+ mode: typing.Optional[TurnMode] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/turn_mode.py b/src/elevenlabs/types/turn_mode.py
new file mode 100644
index 00000000..a82a3a3d
--- /dev/null
+++ b/src/elevenlabs/types/turn_mode.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TurnMode = typing.Union[typing.Literal["silence", "turn"], typing.Any]
diff --git a/src/elevenlabs/types/url_avatar.py b/src/elevenlabs/types/url_avatar.py
new file mode 100644
index 00000000..44069331
--- /dev/null
+++ b/src/elevenlabs/types/url_avatar.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class UrlAvatar(UncheckedBaseModel):
+ custom_url: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/user.py b/src/elevenlabs/types/user.py
index df7839d0..0dc3d1f4 100644
--- a/src/elevenlabs/types/user.py
+++ b/src/elevenlabs/types/user.py
@@ -9,6 +9,7 @@
class User(UncheckedBaseModel):
subscription: SubscriptionResponse
+ subscription_extras: typing.Optional[typing.Any] = None
is_new_user: bool
xi_api_key: str
can_use_delayed_payment_methods: bool
@@ -17,6 +18,8 @@ class User(UncheckedBaseModel):
first_name: typing.Optional[str] = None
is_api_key_hashed: typing.Optional[bool] = None
xi_api_key_preview: typing.Optional[str] = None
+ referral_link_code: typing.Optional[str] = None
+ partnerstack_partner_default_link: typing.Optional[str] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/user_feedback.py b/src/elevenlabs/types/user_feedback.py
new file mode 100644
index 00000000..47abecae
--- /dev/null
+++ b/src/elevenlabs/types/user_feedback.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .user_feedback_score import UserFeedbackScore
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class UserFeedback(UncheckedBaseModel):
+ score: UserFeedbackScore
+ time_in_call_secs: int
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/user_feedback_score.py b/src/elevenlabs/types/user_feedback_score.py
new file mode 100644
index 00000000..5b8c3ec2
--- /dev/null
+++ b/src/elevenlabs/types/user_feedback_score.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+UserFeedbackScore = typing.Union[typing.Literal["like", "dislike"], typing.Any]
diff --git a/src/elevenlabs/types/voice.py b/src/elevenlabs/types/voice.py
index f3d1e570..08ee9932 100644
--- a/src/elevenlabs/types/voice.py
+++ b/src/elevenlabs/types/voice.py
@@ -3,6 +3,7 @@
from ..core.unchecked_base_model import UncheckedBaseModel
import typing
from .voice_sample import VoiceSample
+from .voice_response_model_category import VoiceResponseModelCategory
from .fine_tuning_response import FineTuningResponse
from .voice_settings import VoiceSettings
from .voice_sharing_response import VoiceSharingResponse
@@ -16,7 +17,7 @@ class Voice(UncheckedBaseModel):
voice_id: str
name: typing.Optional[str] = None
samples: typing.Optional[typing.List[VoiceSample]] = None
- category: typing.Optional[str] = None
+ category: typing.Optional[VoiceResponseModelCategory] = None
fine_tuning: typing.Optional[FineTuningResponse] = None
labels: typing.Optional[typing.Dict[str, str]] = None
description: typing.Optional[str] = None
@@ -27,9 +28,11 @@ class Voice(UncheckedBaseModel):
high_quality_base_model_ids: typing.Optional[typing.List[str]] = None
safety_control: typing.Optional[VoiceResponseModelSafetyControl] = None
voice_verification: typing.Optional[VoiceVerificationResponse] = None
- owner_id: typing.Optional[str] = None
permission_on_resource: typing.Optional[str] = None
+ is_owner: typing.Optional[bool] = None
is_legacy: typing.Optional[bool] = None
+ is_mixed: typing.Optional[bool] = None
+ created_at_unix: typing.Optional[int] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/voice_preview_response_model.py b/src/elevenlabs/types/voice_preview_response_model.py
new file mode 100644
index 00000000..a43c34b4
--- /dev/null
+++ b/src/elevenlabs/types/voice_preview_response_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class VoicePreviewResponseModel(UncheckedBaseModel):
+ audio_base_64: str
+ generated_voice_id: str
+ media_type: str
+ duration_secs: float
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/voice_previews_response_model.py b/src/elevenlabs/types/voice_previews_response_model.py
new file mode 100644
index 00000000..d9b8b56d
--- /dev/null
+++ b/src/elevenlabs/types/voice_previews_response_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .voice_preview_response_model import VoicePreviewResponseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class VoicePreviewsResponseModel(UncheckedBaseModel):
+ previews: typing.List[VoicePreviewResponseModel]
+ text: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/voice_response_model_category.py b/src/elevenlabs/types/voice_response_model_category.py
new file mode 100644
index 00000000..2743db78
--- /dev/null
+++ b/src/elevenlabs/types/voice_response_model_category.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VoiceResponseModelCategory = typing.Union[
+ typing.Literal["generated", "cloned", "premade", "professional", "famous", "high_quality"], typing.Any
+]
diff --git a/src/elevenlabs/types/voice_response_model_safety_control.py b/src/elevenlabs/types/voice_response_model_safety_control.py
index 29bd845a..8c887cd5 100644
--- a/src/elevenlabs/types/voice_response_model_safety_control.py
+++ b/src/elevenlabs/types/voice_response_model_safety_control.py
@@ -3,5 +3,6 @@
import typing
VoiceResponseModelSafetyControl = typing.Union[
- typing.Literal["NONE", "BAN", "CAPTCHA", "CAPTCHA_AND_MODERATION"], typing.Any
+ typing.Literal["NONE", "BAN", "CAPTCHA", "CAPTCHA_AND_MODERATION", "ENTERPRISE_BAN", "ENTERPRISE_CAPTCHA"],
+ typing.Any,
]
diff --git a/src/elevenlabs/types/voice_sharing_moderation_check_response_model.py b/src/elevenlabs/types/voice_sharing_moderation_check_response_model.py
new file mode 100644
index 00000000..cdd6875b
--- /dev/null
+++ b/src/elevenlabs/types/voice_sharing_moderation_check_response_model.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class VoiceSharingModerationCheckResponseModel(UncheckedBaseModel):
+ date_checked_unix: typing.Optional[int] = None
+ name_value: typing.Optional[str] = None
+ name_check: typing.Optional[bool] = None
+ description_value: typing.Optional[str] = None
+ description_check: typing.Optional[bool] = None
+ sample_ids: typing.Optional[typing.List[str]] = None
+ sample_checks: typing.Optional[typing.List[float]] = None
+ captcha_ids: typing.Optional[typing.List[str]] = None
+ captcha_checks: typing.Optional[typing.List[float]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/voice_sharing_response.py b/src/elevenlabs/types/voice_sharing_response.py
index beb346c8..9fb5062d 100644
--- a/src/elevenlabs/types/voice_sharing_response.py
+++ b/src/elevenlabs/types/voice_sharing_response.py
@@ -3,8 +3,10 @@
from ..core.unchecked_base_model import UncheckedBaseModel
import typing
from .voice_sharing_state import VoiceSharingState
-from .category import Category
+from .voice_sharing_response_model_category import VoiceSharingResponseModelCategory
from .review_status import ReviewStatus
+from .voice_sharing_moderation_check_response_model import VoiceSharingModerationCheckResponseModel
+from .reader_resource_response_model import ReaderResourceResponseModel
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
@@ -24,7 +26,7 @@ class VoiceSharingResponse(UncheckedBaseModel):
disable_at_unix: typing.Optional[int] = None
voice_mixing_allowed: typing.Optional[bool] = None
featured: typing.Optional[bool] = None
- category: typing.Optional[Category] = None
+ category: typing.Optional[VoiceSharingResponseModelCategory] = None
reader_app_enabled: typing.Optional[bool] = None
image_url: typing.Optional[str] = None
ban_reason: typing.Optional[str] = None
@@ -40,6 +42,8 @@ class VoiceSharingResponse(UncheckedBaseModel):
twitter_username: typing.Optional[str] = None
youtube_username: typing.Optional[str] = None
tiktok_username: typing.Optional[str] = None
+ moderation_check: typing.Optional[VoiceSharingModerationCheckResponseModel] = None
+ reader_restricted_on: typing.Optional[typing.List[ReaderResourceResponseModel]] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/voice_sharing_response_model_category.py b/src/elevenlabs/types/voice_sharing_response_model_category.py
new file mode 100644
index 00000000..8439ad79
--- /dev/null
+++ b/src/elevenlabs/types/voice_sharing_response_model_category.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VoiceSharingResponseModelCategory = typing.Union[
+ typing.Literal["generated", "professional", "high_quality", "famous"], typing.Any
+]
diff --git a/src/elevenlabs/types/webhook_tool_api_schema_config.py b/src/elevenlabs/types/webhook_tool_api_schema_config.py
new file mode 100644
index 00000000..ae3ad49f
--- /dev/null
+++ b/src/elevenlabs/types/webhook_tool_api_schema_config.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .object_json_schema_property import ObjectJsonSchemaProperty
+import typing
+from .webhook_tool_api_schema_config_method import WebhookToolApiSchemaConfigMethod
+from .literal_json_schema_property import LiteralJsonSchemaProperty
+from .query_params_json_schema import QueryParamsJsonSchema
+from .webhook_tool_api_schema_config_request_headers_value import WebhookToolApiSchemaConfigRequestHeadersValue
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class WebhookToolApiSchemaConfig(UncheckedBaseModel):
+ """
+ Configuration for a webhook that will be called by an LLM tool.
+ """
+
+ url: str
+ method: typing.Optional[WebhookToolApiSchemaConfigMethod] = None
+ path_params_schema: typing.Optional[typing.Dict[str, LiteralJsonSchemaProperty]] = None
+ query_params_schema: typing.Optional[QueryParamsJsonSchema] = None
+ request_body_schema: typing.Optional[ObjectJsonSchemaProperty] = None
+ request_headers: typing.Optional[typing.Dict[str, WebhookToolApiSchemaConfigRequestHeadersValue]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+update_forward_refs(ArrayJsonSchemaProperty, WebhookToolApiSchemaConfig=WebhookToolApiSchemaConfig)
+update_forward_refs(ObjectJsonSchemaProperty, WebhookToolApiSchemaConfig=WebhookToolApiSchemaConfig)
diff --git a/src/elevenlabs/types/webhook_tool_api_schema_config_method.py b/src/elevenlabs/types/webhook_tool_api_schema_config_method.py
new file mode 100644
index 00000000..02708dff
--- /dev/null
+++ b/src/elevenlabs/types/webhook_tool_api_schema_config_method.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+WebhookToolApiSchemaConfigMethod = typing.Union[typing.Literal["GET", "POST", "PATCH", "DELETE"], typing.Any]
diff --git a/src/elevenlabs/types/webhook_tool_api_schema_config_request_headers_value.py b/src/elevenlabs/types/webhook_tool_api_schema_config_request_headers_value.py
new file mode 100644
index 00000000..e4aae56c
--- /dev/null
+++ b/src/elevenlabs/types/webhook_tool_api_schema_config_request_headers_value.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .conv_ai_secret_locator import ConvAiSecretLocator
+
+WebhookToolApiSchemaConfigRequestHeadersValue = typing.Union[str, ConvAiSecretLocator]
diff --git a/src/elevenlabs/types/webhook_tool_config.py b/src/elevenlabs/types/webhook_tool_config.py
new file mode 100644
index 00000000..9d76e0dd
--- /dev/null
+++ b/src/elevenlabs/types/webhook_tool_config.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .object_json_schema_property import ObjectJsonSchemaProperty
+from .webhook_tool_api_schema_config import WebhookToolApiSchemaConfig
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class WebhookToolConfig(UncheckedBaseModel):
+ """
+ A webhook tool is a tool that calls an external webhook from our server
+ """
+
+ name: str
+ description: str
+ api_schema: WebhookToolApiSchemaConfig
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+update_forward_refs(ArrayJsonSchemaProperty, WebhookToolConfig=WebhookToolConfig)
+update_forward_refs(ObjectJsonSchemaProperty, WebhookToolConfig=WebhookToolConfig)
diff --git a/src/elevenlabs/types/widget_feedback_mode.py b/src/elevenlabs/types/widget_feedback_mode.py
new file mode 100644
index 00000000..8dc295de
--- /dev/null
+++ b/src/elevenlabs/types/widget_feedback_mode.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+WidgetFeedbackMode = typing.Union[typing.Literal["none", "during", "end"], typing.Any]
diff --git a/src/elevenlabs/usage/__init__.py b/src/elevenlabs/usage/__init__.py
index 27d1e497..f3ea2659 100644
--- a/src/elevenlabs/usage/__init__.py
+++ b/src/elevenlabs/usage/__init__.py
@@ -1,5 +1,2 @@
# This file was auto-generated by Fern from our API Definition.
-from .types import UsageGetCharactersUsageMetricsRequestBreakdownType
-
-__all__ = ["UsageGetCharactersUsageMetricsRequestBreakdownType"]
diff --git a/src/elevenlabs/usage/client.py b/src/elevenlabs/usage/client.py
index a6c75588..0d4c04c1 100644
--- a/src/elevenlabs/usage/client.py
+++ b/src/elevenlabs/usage/client.py
@@ -2,9 +2,7 @@
from ..core.client_wrapper import SyncClientWrapper
import typing
-from .types.usage_get_characters_usage_metrics_request_breakdown_type import (
- UsageGetCharactersUsageMetricsRequestBreakdownType,
-)
+from ..types.breakdown_types import BreakdownTypes
from ..core.request_options import RequestOptions
from ..types.usage_characters_response_model import UsageCharactersResponseModel
from ..core.unchecked_base_model import construct_type
@@ -25,11 +23,11 @@ def get_characters_usage_metrics(
start_unix: int,
end_unix: int,
include_workspace_metrics: typing.Optional[bool] = None,
- breakdown_type: typing.Optional[UsageGetCharactersUsageMetricsRequestBreakdownType] = None,
+ breakdown_type: typing.Optional[BreakdownTypes] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> UsageCharactersResponseModel:
"""
- Returns the characters usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis.
+ Returns the credit usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis.
Parameters
----------
@@ -42,7 +40,7 @@ def get_characters_usage_metrics(
include_workspace_metrics : typing.Optional[bool]
Whether or not to include the statistics of the entire workspace.
- breakdown_type : typing.Optional[UsageGetCharactersUsageMetricsRequestBreakdownType]
+ breakdown_type : typing.Optional[BreakdownTypes]
How to break down the information. Cannot be "user" if include_workspace_metrics is False.
request_options : typing.Optional[RequestOptions]
@@ -111,11 +109,11 @@ async def get_characters_usage_metrics(
start_unix: int,
end_unix: int,
include_workspace_metrics: typing.Optional[bool] = None,
- breakdown_type: typing.Optional[UsageGetCharactersUsageMetricsRequestBreakdownType] = None,
+ breakdown_type: typing.Optional[BreakdownTypes] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> UsageCharactersResponseModel:
"""
- Returns the characters usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis.
+ Returns the credit usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis.
Parameters
----------
@@ -128,7 +126,7 @@ async def get_characters_usage_metrics(
include_workspace_metrics : typing.Optional[bool]
Whether or not to include the statistics of the entire workspace.
- breakdown_type : typing.Optional[UsageGetCharactersUsageMetricsRequestBreakdownType]
+ breakdown_type : typing.Optional[BreakdownTypes]
How to break down the information. Cannot be "user" if include_workspace_metrics is False.
request_options : typing.Optional[RequestOptions]
diff --git a/src/elevenlabs/usage/types/__init__.py b/src/elevenlabs/usage/types/__init__.py
deleted file mode 100644
index 93750b4c..00000000
--- a/src/elevenlabs/usage/types/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from .usage_get_characters_usage_metrics_request_breakdown_type import (
- UsageGetCharactersUsageMetricsRequestBreakdownType,
-)
-
-__all__ = ["UsageGetCharactersUsageMetricsRequestBreakdownType"]
diff --git a/src/elevenlabs/usage/types/usage_get_characters_usage_metrics_request_breakdown_type.py b/src/elevenlabs/usage/types/usage_get_characters_usage_metrics_request_breakdown_type.py
deleted file mode 100644
index 5275386a..00000000
--- a/src/elevenlabs/usage/types/usage_get_characters_usage_metrics_request_breakdown_type.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-UsageGetCharactersUsageMetricsRequestBreakdownType = typing.Union[
- typing.Literal["none", "voice", "user", "api_keys"], typing.Any
-]
diff --git a/src/elevenlabs/voice_generation/client.py b/src/elevenlabs/voice_generation/client.py
index d02804a4..05e5fb94 100644
--- a/src/elevenlabs/voice_generation/client.py
+++ b/src/elevenlabs/voice_generation/client.py
@@ -85,7 +85,7 @@ def generate(
Category code corresponding to the gender of the generated voice. Possible values: female, male.
accent : str
- Category code corresponding to the accent of the generated voice. Possible values: american, british, african, australian, indian.
+ Category code corresponding to the accent of the generated voice. Possible values: british, american, african, australian, indian.
age : Age
Category code corresponding to the age of the generated voice. Possible values: young, middle_aged, old.
@@ -97,7 +97,7 @@ def generate(
Text to generate, text length has to be between 100 and 1000.
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
@@ -129,12 +129,16 @@ def generate(
"accent_strength": accent_strength,
"text": text,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
) as _response:
try:
if 200 <= _response.status_code < 300:
- for _chunk in _response.iter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ for _chunk in _response.iter_bytes(chunk_size=_chunk_size):
yield _chunk
return
_response.read()
@@ -159,6 +163,7 @@ def create_a_previously_generated_voice(
voice_name: str,
voice_description: str,
generated_voice_id: str,
+ played_not_selected_voice_ids: typing.Optional[typing.Sequence[str]] = OMIT,
labels: typing.Optional[typing.Dict[str, str]] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> Voice:
@@ -176,6 +181,9 @@ def create_a_previously_generated_voice(
generated_voice_id : str
The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet.
+ played_not_selected_voice_ids : typing.Optional[typing.Sequence[str]]
+ List of voice ids that the user has played but not selected. Used for RLHF.
+
labels : typing.Optional[typing.Dict[str, str]]
Optional, metadata to add to the created voice. Defaults to None.
@@ -207,8 +215,12 @@ def create_a_previously_generated_voice(
"voice_name": voice_name,
"voice_description": voice_description,
"generated_voice_id": generated_voice_id,
+ "played_not_selected_voice_ids": played_not_selected_voice_ids,
"labels": labels,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -312,7 +324,7 @@ async def generate(
Category code corresponding to the gender of the generated voice. Possible values: female, male.
accent : str
- Category code corresponding to the accent of the generated voice. Possible values: american, british, african, australian, indian.
+ Category code corresponding to the accent of the generated voice. Possible values: british, american, african, australian, indian.
age : Age
Category code corresponding to the age of the generated voice. Possible values: young, middle_aged, old.
@@ -324,7 +336,7 @@ async def generate(
Text to generate, text length has to be between 100 and 1000.
request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Yields
------
@@ -364,12 +376,16 @@ async def main() -> None:
"accent_strength": accent_strength,
"text": text,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
) as _response:
try:
if 200 <= _response.status_code < 300:
- async for _chunk in _response.aiter_bytes():
+ _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+ async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
yield _chunk
return
await _response.aread()
@@ -394,6 +410,7 @@ async def create_a_previously_generated_voice(
voice_name: str,
voice_description: str,
generated_voice_id: str,
+ played_not_selected_voice_ids: typing.Optional[typing.Sequence[str]] = OMIT,
labels: typing.Optional[typing.Dict[str, str]] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> Voice:
@@ -411,6 +428,9 @@ async def create_a_previously_generated_voice(
generated_voice_id : str
The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet.
+ played_not_selected_voice_ids : typing.Optional[typing.Sequence[str]]
+ List of voice ids that the user has played but not selected. Used for RLHF.
+
labels : typing.Optional[typing.Dict[str, str]]
Optional, metadata to add to the created voice. Defaults to None.
@@ -450,8 +470,12 @@ async def main() -> None:
"voice_name": voice_name,
"voice_description": voice_description,
"generated_voice_id": generated_voice_id,
+ "played_not_selected_voice_ids": played_not_selected_voice_ids,
"labels": labels,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
diff --git a/src/elevenlabs/voices/client.py b/src/elevenlabs/voices/client.py
index 76f070e5..8b2b9a0b 100644
--- a/src/elevenlabs/voices/client.py
+++ b/src/elevenlabs/voices/client.py
@@ -14,6 +14,7 @@
from ..types.voice import Voice
from ..core.serialization import convert_and_respect_annotation_metadata
from .. import core
+from ..types.add_voice_ivc_response_model import AddVoiceIvcResponseModel
from ..types.add_voice_response_model import AddVoiceResponseModel
from ..types.get_library_voices_response import GetLibraryVoicesResponse
from ..types.profile_page_response_model import ProfilePageResponseModel
@@ -154,7 +155,7 @@ def get_settings(self, voice_id: str, *, request_options: typing.Optional[Reques
api_key="YOUR_API_KEY",
)
client.voices.get_settings(
- voice_id="2EiwWnXFnvU5JabPnv8n",
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -220,7 +221,7 @@ def get(
api_key="YOUR_API_KEY",
)
client.voices.get(
- voice_id="29vD33N1CtxCmqQRPOHJ",
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -282,7 +283,7 @@ def delete(
api_key="YOUR_API_KEY",
)
client.voices.delete(
- voice_id="29vD33N1CtxCmqQRPOHJ",
+ voice_id="VOICE_ID",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -343,7 +344,7 @@ def edit_settings(
api_key="YOUR_API_KEY",
)
client.voices.edit_settings(
- voice_id="29vD33N1CtxCmqQRPOHJ",
+ voice_id="VOICE_ID",
request=VoiceSettings(
stability=0.1,
similarity_boost=0.3,
@@ -387,11 +388,11 @@ def add(
*,
name: str,
files: typing.List[core.File],
- description: typing.Optional[str] = None,
- labels: typing.Optional[str] = None,
- remove_background_noise: typing.Optional[bool] = False,
+ remove_background_noise: typing.Optional[bool] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ labels: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AddVoiceResponseModel:
+ ) -> AddVoiceIvcResponseModel:
"""
Add a new voice to your collection of voices in VoiceLab.
@@ -403,6 +404,9 @@ def add(
files : typing.List[core.File]
See core.File for more documentation
+ remove_background_noise : typing.Optional[bool]
+ If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse.
+
description : typing.Optional[str]
How would you describe the voice?
@@ -414,7 +418,7 @@ def add(
Returns
-------
- AddVoiceResponseModel
+ AddVoiceIvcResponseModel
Successful Response
Examples
@@ -433,6 +437,6 @@ def add(
            method="POST",
            data={
                "name": name,
+                "remove_background_noise": remove_background_noise,
                "description": description,
                "labels": labels,
-                "remove_background_noise": remove_background_noise
@@ -446,9 +451,9 @@ def add(
try:
if 200 <= _response.status_code < 300:
return typing.cast(
- AddVoiceResponseModel,
+ AddVoiceIvcResponseModel,
construct_type(
- type_=AddVoiceResponseModel, # type: ignore
+ type_=AddVoiceIvcResponseModel, # type: ignore
object_=_response.json(),
),
)
@@ -472,9 +477,10 @@ def edit(
voice_id: str,
*,
name: str,
- files: typing.Optional[typing.List[core.File]] = None,
- description: typing.Optional[str] = None,
- labels: typing.Optional[str] = None,
+ files: typing.Optional[typing.List[core.File]] = OMIT,
+ remove_background_noise: typing.Optional[bool] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ labels: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Optional[typing.Any]:
"""
@@ -491,6 +497,9 @@ def edit(
files : typing.Optional[typing.List[core.File]]
See core.File for more documentation
+ remove_background_noise : typing.Optional[bool]
+ If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse.
+
description : typing.Optional[str]
How would you describe the voice?
@@ -513,7 +522,7 @@ def edit(
api_key="YOUR_API_KEY",
)
client.voices.edit(
- voice_id="JBFqnCBsd6RMkjVDRZzb",
+ voice_id="VOICE_ID",
name="George",
)
"""
@@ -522,6 +531,7 @@ def edit(
method="POST",
data={
"name": name,
+ "remove_background_noise": remove_background_noise,
"description": description,
"labels": labels,
},
@@ -561,7 +571,6 @@ def add_sharing_voice(
voice_id: str,
*,
new_name: str,
- xi_app_check_token: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AddVoiceResponseModel:
"""
@@ -578,9 +587,6 @@ def add_sharing_voice(
new_name : str
The name that identifies this voice. This will be displayed in the dropdown of the website.
- xi_app_check_token : typing.Optional[str]
- Your app check token.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -609,7 +615,7 @@ def add_sharing_voice(
"new_name": new_name,
},
headers={
- "xi-app-check-token": str(xi_app_check_token) if xi_app_check_token is not None else None,
+ "content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
@@ -772,9 +778,9 @@ def get_shared(
def get_similar_library_voices(
self,
*,
- audio_file: typing.Optional[core.File] = None,
- similarity_threshold: typing.Optional[float] = None,
- top_k: typing.Optional[int] = None,
+ audio_file: typing.Optional[core.File] = OMIT,
+ similarity_threshold: typing.Optional[float] = OMIT,
+ top_k: typing.Optional[int] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> GetLibraryVoicesResponse:
"""
@@ -1059,7 +1065,7 @@ async def get_settings(
async def main() -> None:
await client.voices.get_settings(
- voice_id="2EiwWnXFnvU5JabPnv8n",
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
)
@@ -1133,7 +1139,7 @@ async def get(
async def main() -> None:
await client.voices.get(
- voice_id="29vD33N1CtxCmqQRPOHJ",
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
)
@@ -1203,7 +1209,7 @@ async def delete(
async def main() -> None:
await client.voices.delete(
- voice_id="29vD33N1CtxCmqQRPOHJ",
+ voice_id="VOICE_ID",
)
@@ -1272,7 +1278,7 @@ async def edit_settings(
async def main() -> None:
await client.voices.edit_settings(
- voice_id="29vD33N1CtxCmqQRPOHJ",
+ voice_id="VOICE_ID",
request=VoiceSettings(
stability=0.1,
similarity_boost=0.3,
@@ -1319,10 +1325,11 @@ async def add(
*,
name: str,
files: typing.List[core.File],
- description: typing.Optional[str] = None,
- labels: typing.Optional[str] = None,
+ remove_background_noise: typing.Optional[bool] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ labels: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AddVoiceResponseModel:
+ ) -> AddVoiceIvcResponseModel:
"""
Add a new voice to your collection of voices in VoiceLab.
@@ -1334,6 +1341,9 @@ async def add(
files : typing.List[core.File]
See core.File for more documentation
+ remove_background_noise : typing.Optional[bool]
+ If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse.
+
description : typing.Optional[str]
How would you describe the voice?
@@ -1345,7 +1355,7 @@ async def add(
Returns
-------
- AddVoiceResponseModel
+ AddVoiceIvcResponseModel
Successful Response
Examples
@@ -1372,6 +1382,7 @@ async def main() -> None:
method="POST",
data={
"name": name,
+ "remove_background_noise": remove_background_noise,
"description": description,
"labels": labels,
},
@@ -1384,9 +1395,9 @@ async def main() -> None:
try:
if 200 <= _response.status_code < 300:
return typing.cast(
- AddVoiceResponseModel,
+ AddVoiceIvcResponseModel,
construct_type(
- type_=AddVoiceResponseModel, # type: ignore
+ type_=AddVoiceIvcResponseModel, # type: ignore
object_=_response.json(),
),
)
@@ -1410,9 +1421,10 @@ async def edit(
voice_id: str,
*,
name: str,
- files: typing.Optional[typing.List[core.File]] = None,
- description: typing.Optional[str] = None,
- labels: typing.Optional[str] = None,
+ files: typing.Optional[typing.List[core.File]] = OMIT,
+ remove_background_noise: typing.Optional[bool] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ labels: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Optional[typing.Any]:
"""
@@ -1429,6 +1441,9 @@ async def edit(
files : typing.Optional[typing.List[core.File]]
See core.File for more documentation
+ remove_background_noise : typing.Optional[bool]
+ If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse.
+
description : typing.Optional[str]
How would you describe the voice?
@@ -1456,7 +1471,7 @@ async def edit(
async def main() -> None:
await client.voices.edit(
- voice_id="JBFqnCBsd6RMkjVDRZzb",
+ voice_id="VOICE_ID",
name="George",
)
@@ -1468,6 +1483,7 @@ async def main() -> None:
method="POST",
data={
"name": name,
+ "remove_background_noise": remove_background_noise,
"description": description,
"labels": labels,
},
@@ -1507,7 +1523,6 @@ async def add_sharing_voice(
voice_id: str,
*,
new_name: str,
- xi_app_check_token: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AddVoiceResponseModel:
"""
@@ -1524,9 +1539,6 @@ async def add_sharing_voice(
new_name : str
The name that identifies this voice. This will be displayed in the dropdown of the website.
- xi_app_check_token : typing.Optional[str]
- Your app check token.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1563,7 +1575,7 @@ async def main() -> None:
"new_name": new_name,
},
headers={
- "xi-app-check-token": str(xi_app_check_token) if xi_app_check_token is not None else None,
+ "content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
@@ -1734,9 +1746,9 @@ async def main() -> None:
async def get_similar_library_voices(
self,
*,
- audio_file: typing.Optional[core.File] = None,
- similarity_threshold: typing.Optional[float] = None,
- top_k: typing.Optional[int] = None,
+ audio_file: typing.Optional[core.File] = OMIT,
+ similarity_threshold: typing.Optional[float] = OMIT,
+ top_k: typing.Optional[int] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> GetLibraryVoicesResponse:
"""
diff --git a/src/elevenlabs/workspace/client.py b/src/elevenlabs/workspace/client.py
index ff9411cc..dade871f 100644
--- a/src/elevenlabs/workspace/client.py
+++ b/src/elevenlabs/workspace/client.py
@@ -57,6 +57,9 @@ def invite_user(
json={
"email": email,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -120,6 +123,9 @@ def delete_existing_invitation(
json={
"email": email,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -196,6 +202,9 @@ def update_member(
"is_locked": is_locked,
"workspace_role": workspace_role,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -272,6 +281,9 @@ async def main() -> None:
json={
"email": email,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -343,6 +355,9 @@ async def main() -> None:
json={
"email": email,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -427,6 +442,9 @@ async def main() -> None:
"is_locked": is_locked,
"workspace_role": workspace_role,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
diff --git a/tests/e2e_test_convai.py b/tests/e2e_test_convai.py
new file mode 100644
index 00000000..a0e62641
--- /dev/null
+++ b/tests/e2e_test_convai.py
@@ -0,0 +1,79 @@
+import os
+import time
+import asyncio
+
+import pytest
+from elevenlabs import ElevenLabs
+from elevenlabs.conversational_ai.conversation import Conversation, ClientTools
+from elevenlabs.conversational_ai.default_audio_interface import DefaultAudioInterface
+
+
+@pytest.mark.skipif(os.getenv("CI") == "true", reason="Skip live conversation test in CI environment")
+def test_live_conversation():
+ """Test a live conversation with actual audio I/O"""
+
+ api_key = os.getenv("ELEVENLABS_API_KEY")
+ if not api_key:
+ raise ValueError("ELEVENLABS_API_KEY environment variable missing.")
+
+ agent_id = os.getenv("AGENT_ID")
+ if not api_key or not agent_id:
+ raise ValueError("AGENT_ID environment variable missing.")
+
+ client = ElevenLabs(api_key=api_key)
+
+ # Create conversation handlers
+ def on_agent_response(text: str):
+ print(f"Agent: {text}")
+
+ def on_user_transcript(text: str):
+ print(f"You: {text}")
+
+ def on_latency(ms: int):
+ print(f"Latency: {ms}ms")
+
+ # Initialize client tools
+ client_tools = ClientTools()
+
+ def test(parameters):
+ print("Sync tool called with parameters:", parameters)
+ return "Tool called successfully"
+
+ async def test_async(parameters):
+ # Simulate some async work
+ await asyncio.sleep(10)
+ print("Async tool called with parameters:", parameters)
+ return "Tool called successfully"
+
+ client_tools.register("test", test)
+ client_tools.register("test_async", test_async, is_async=True)
+
+ # Initialize conversation
+ conversation = Conversation(
+ client=client,
+ agent_id=agent_id,
+ requires_auth=False,
+ audio_interface=DefaultAudioInterface(),
+ callback_agent_response=on_agent_response,
+ callback_user_transcript=on_user_transcript,
+ callback_latency_measurement=on_latency,
+ client_tools=client_tools,
+ )
+
+ # Start the conversation
+ conversation.start_session()
+
+ # Let it run for 100 seconds
+ time.sleep(100)
+
+ # End the conversation
+ conversation.end_session()
+ conversation.wait_for_session_end()
+
+ # Get the conversation ID for reference
+ conversation_id = conversation._conversation_id
+ print(f"Conversation ID: {conversation_id}")
+
+
+if __name__ == "__main__":
+ test_live_conversation()
diff --git a/tests/fixtures/voice_sample.mp3 b/tests/fixtures/voice_sample.mp3
new file mode 100644
index 00000000..cbecc9ac
Binary files /dev/null and b/tests/fixtures/voice_sample.mp3 differ
diff --git a/tests/test_async_generation.py b/tests/test_async_generation.py
deleted file mode 100644
index 1ed11ab6..00000000
--- a/tests/test_async_generation.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import asyncio
-import pytest
-
-from .utils import IN_GITHUB, async_client
-from elevenlabs import play
-
-def test_async_generation():
- async def main():
- results = await async_client.generate(
- voice='Rachel',
- model='eleven_multilingual_v2',
- text='This is an example sentence',
- )
- out = b''
- async for value in results:
- out += value
- if not IN_GITHUB:
- play(out)
-
- results = await async_client.generate(
- voice='Rachel',
- model='eleven_multilingual_v2',
- text='This is an example sentence with streaming',
- stream=True
- )
- out = b''
- async for value in results:
- out += value
- if not IN_GITHUB:
- play(out)
- asyncio.run(main())
diff --git a/tests/test_audio_isolation.py b/tests/test_audio_isolation.py
new file mode 100644
index 00000000..deb6d46e
--- /dev/null
+++ b/tests/test_audio_isolation.py
@@ -0,0 +1,32 @@
+from elevenlabs import play
+from elevenlabs.client import ElevenLabs
+
+from .utils import IN_GITHUB, DEFAULT_VOICE_FILE
+
+
+def test_audio_isolation() -> None:
+ """Test basic audio isolation."""
+ client = ElevenLabs()
+ audio_file = open(DEFAULT_VOICE_FILE, "rb")
+ try:
+ audio_stream = client.audio_isolation.audio_isolation(audio=audio_file)
+ audio = b"".join(chunk for chunk in audio_stream)
+ assert isinstance(audio, bytes), "Combined audio should be bytes"
+ if not IN_GITHUB:
+ play(audio)
+ finally:
+ audio_file.close()
+
+
+def test_audio_isolation_as_stream():
+ """Test audio isolation with streaming."""
+ client = ElevenLabs()
+ audio_file = open(DEFAULT_VOICE_FILE, "rb")
+ try:
+ audio_stream = client.audio_isolation.audio_isolation_stream(audio=audio_file)
+ audio = b"".join(chunk for chunk in audio_stream)
+ assert isinstance(audio, bytes), "Combined audio should be bytes"
+ if not IN_GITHUB:
+ play(audio)
+ finally:
+ audio_file.close()
diff --git a/tests/test_client.py b/tests/test_client.py
deleted file mode 100644
index 32be1f41..00000000
--- a/tests/test_client.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from elevenlabs import play, \
- Voice, VoiceSettings, stream
-from .utils import IN_GITHUB, client
-
-
-def test_voices() -> None:
- print("Voices are...", client.voices.get_all())
-
-
-def test_generate() -> None:
- audio = client.generate(
- text="Hello! My name is Bella.",
- voice=Voice(
- voice_id='EXAVITQu4vr4xnSDxMaL',
- settings=VoiceSettings(
- stability=0.71,
- similarity_boost=0.5,
- style=0.0,
- use_speaker_boost=True
- )
- ))
- if not IN_GITHUB:
- play(audio) # type: ignore
-
-
-def test_generate_stream() -> None:
- def text_stream():
- yield "Hi there, I'm Eleven "
- yield "I'm a text to speech API "
-
- audio_stream = client.generate(
- text=text_stream(),
- voice="Nicole",
- model="eleven_monolingual_v1",
- stream=True
- )
-
- if not IN_GITHUB:
- stream(audio_stream) # type: ignore
diff --git a/tests/test_convai.py b/tests/test_convai.py
new file mode 100644
index 00000000..076daf9a
--- /dev/null
+++ b/tests/test_convai.py
@@ -0,0 +1,118 @@
+from unittest.mock import MagicMock, patch
+from elevenlabs.conversational_ai.conversation import Conversation, AudioInterface
+import json
+import time
+
+
+class MockAudioInterface(AudioInterface):
+ def start(self, input_callback):
+ print("Audio interface started")
+ self.input_callback = input_callback
+
+ def stop(self):
+ print("Audio interface stopped")
+
+ def output(self, audio):
+ print(f"Would play audio of length: {len(audio)} bytes")
+
+ def interrupt(self):
+ print("Audio interrupted")
+
+
+# Add test constants and helpers at module level
+TEST_CONVERSATION_ID = "test123"
+TEST_AGENT_ID = "test_agent"
+
+
+def create_mock_websocket(messages=None):
+ """Helper to create a mock websocket with predefined responses"""
+ mock_ws = MagicMock()
+
+ if messages is None:
+ messages = [
+ {
+ "type": "conversation_initiation_metadata",
+ "conversation_initiation_metadata_event": {"conversation_id": TEST_CONVERSATION_ID},
+ },
+ {"type": "agent_response", "agent_response_event": {"agent_response": "Hello there!"}},
+ ]
+
+ def response_generator():
+ for msg in messages:
+ yield json.dumps(msg)
+ while True:
+ yield '{"type": "keep_alive"}'
+
+ mock_ws.recv = MagicMock(side_effect=response_generator())
+ return mock_ws
+
+
+def test_conversation_basic_flow():
+ # Mock setup
+ mock_ws = create_mock_websocket()
+ mock_client = MagicMock()
+ agent_response_callback = MagicMock()
+
+ # Setup the conversation
+ conversation = Conversation(
+ client=mock_client,
+ agent_id=TEST_AGENT_ID,
+ requires_auth=False,
+ audio_interface=MockAudioInterface(),
+ callback_agent_response=agent_response_callback,
+ )
+
+ # Run the test
+ with patch("elevenlabs.conversational_ai.conversation.connect") as mock_connect:
+ mock_connect.return_value.__enter__.return_value = mock_ws
+ conversation.start_session()
+
+ # Add a wait for the callback to be called
+ timeout = 5 # 5 seconds timeout
+ start_time = time.time()
+ while not agent_response_callback.called and time.time() - start_time < timeout:
+ time.sleep(0.1)
+
+ conversation.end_session()
+ conversation.wait_for_session_end()
+
+ # Assertions
+ expected_init_message = {
+ "type": "conversation_initiation_client_data",
+ "custom_llm_extra_body": {},
+ "conversation_config_override": {},
+ }
+ mock_ws.send.assert_any_call(json.dumps(expected_init_message))
+ agent_response_callback.assert_called_once_with("Hello there!")
+ assert conversation._conversation_id == TEST_CONVERSATION_ID
+
+
+def test_conversation_with_auth():
+ # Mock setup
+ mock_client = MagicMock()
+ mock_client.conversational_ai.get_signed_url.return_value.signed_url = "wss://signed.url"
+ mock_ws = create_mock_websocket(
+ [
+ {
+ "type": "conversation_initiation_metadata",
+ "conversation_initiation_metadata_event": {"conversation_id": TEST_CONVERSATION_ID},
+ }
+ ]
+ )
+
+ conversation = Conversation(
+ client=mock_client,
+ agent_id=TEST_AGENT_ID,
+ requires_auth=True,
+ audio_interface=MockAudioInterface(),
+ )
+
+ # Run the test
+ with patch("elevenlabs.conversational_ai.conversation.connect") as mock_connect:
+ mock_connect.return_value.__enter__.return_value = mock_ws
+ conversation.start_session()
+ conversation.end_session()
+ conversation.wait_for_session_end()
+
+ # Assertions
+ mock_client.conversational_ai.get_signed_url.assert_called_once_with(agent_id=TEST_AGENT_ID)
diff --git a/tests/test_history.py b/tests/test_history.py
index de1291c1..1dc83e45 100644
--- a/tests/test_history.py
+++ b/tests/test_history.py
@@ -1,36 +1,8 @@
-import time
-from random import randint
-
-from elevenlabs import GetSpeechHistoryResponse, \
- play
-
-from .utils import IN_GITHUB, client
+from elevenlabs import GetSpeechHistoryResponse, ElevenLabs
def test_history():
+ client = ElevenLabs()
page_size = 5
history = client.history.get_all(page_size=page_size)
assert isinstance(history, GetSpeechHistoryResponse)
-
-
-def test_history_item_delete():
- text = f"Test {randint(0, 1000)}"
- audio = client.generate(text=text)
- if not IN_GITHUB:
- play(audio) # type: ignore
-
- time.sleep(1)
-
- history = client.history.get_all().history
- print(history)
- history_item = history[0]
-
- assert history_item.text != None
-
- # Check that item matches
- # assert history_item.text == text
- # client.history.delete(history_item.history_item_id)
-
- # Test that the history item was deleted
- # history = client.history.get_all(page_size=1).history
- # assert len(history) == 0 or history[0].text != text
diff --git a/tests/test_model.py b/tests/test_model.py
deleted file mode 100644
index 9ea570ff..00000000
--- a/tests/test_model.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from elevenlabs import Model
-from .utils import client
-
-
-def test_model():
- # Test that we can get all models
- models = client.models.get_all()
- print(models)
- assert len(models) > 0
- assert isinstance(models[0], Model)
\ No newline at end of file
diff --git a/tests/test_models.py b/tests/test_models.py
new file mode 100644
index 00000000..84f85d1c
--- /dev/null
+++ b/tests/test_models.py
@@ -0,0 +1,9 @@
+from elevenlabs import Model
+from elevenlabs.client import ElevenLabs
+
+
+def test_models_get_all():
+ client = ElevenLabs()
+ models = client.models.get_all()
+ assert len(models) > 0
+ assert isinstance(models[0], Model)
diff --git a/tests/test_sts.py b/tests/test_sts.py
new file mode 100644
index 00000000..530e6fce
--- /dev/null
+++ b/tests/test_sts.py
@@ -0,0 +1,31 @@
+from elevenlabs import play
+from elevenlabs.client import ElevenLabs
+
+from .utils import IN_GITHUB, DEFAULT_VOICE, DEFAULT_VOICE_FILE
+
+
+def test_sts() -> None:
+ """Test basic speech-to-speech generation."""
+ client = ElevenLabs()
+ audio_file = open(DEFAULT_VOICE_FILE, "rb")
+ try:
+ audio_stream = client.speech_to_speech.convert(voice_id=DEFAULT_VOICE, audio=audio_file)
+ audio = b"".join(chunk for chunk in audio_stream)
+ assert isinstance(audio, bytes), "Combined audio should be bytes"
+ if not IN_GITHUB:
+ play(audio)
+ finally:
+ audio_file.close()
+
+
+def test_sts_as_stream():
+ client = ElevenLabs()
+ audio_file = open(DEFAULT_VOICE_FILE, "rb")
+ try:
+ audio_stream = client.speech_to_speech.convert_as_stream(voice_id=DEFAULT_VOICE, audio=audio_file)
+ audio = b"".join(chunk for chunk in audio_stream)
+ assert isinstance(audio, bytes), "Combined audio should be bytes"
+ if not IN_GITHUB:
+ play(audio)
+ finally:
+ audio_file.close()
diff --git a/tests/test_tts.py b/tests/test_tts.py
new file mode 100644
index 00000000..0baf8312
--- /dev/null
+++ b/tests/test_tts.py
@@ -0,0 +1,124 @@
+import asyncio
+
+from elevenlabs import VoiceSettings, play, Voice
+from elevenlabs.client import AsyncElevenLabs, ElevenLabs
+
+from .utils import IN_GITHUB, DEFAULT_TEXT, DEFAULT_VOICE, DEFAULT_MODEL
+import base64
+
+
+def test_tts_generate() -> None:
+ """Test basic text-to-speech generation w/ custom generate."""
+ client = ElevenLabs()
+ audio_generator = client.generate(text=DEFAULT_TEXT, voice="Brian", model=DEFAULT_MODEL)
+ audio = b"".join(audio_generator)
+ assert isinstance(audio, bytes), "TTS should return bytes"
+ if not IN_GITHUB:
+ play(audio)
+
+
+def test_tts_generate_with_voice_settings() -> None:
+ """Test basic text-to-speech generation."""
+ client = ElevenLabs()
+ audio_generator = client.generate(
+ text=DEFAULT_TEXT,
+ model=DEFAULT_MODEL,
+ voice=Voice(
+ voice_id="nPczCjzI2devNBz1zQrb",
+ settings=VoiceSettings(stability=0.71, similarity_boost=0.5, style=0.0, use_speaker_boost=True),
+ ),
+ )
+ audio = b"".join(audio_generator)
+ assert isinstance(audio, bytes), "TTS should return bytes"
+ if not IN_GITHUB:
+ play(audio)
+
+
+def test_tts_generate_stream() -> None:
+ """Test streaming text-to-speech generation."""
+ client = ElevenLabs()
+ audio_generator = client.generate(
+ stream=True,
+ text=DEFAULT_TEXT,
+ model=DEFAULT_MODEL,
+ )
+ audio = b"".join(audio_generator)
+ assert isinstance(audio, bytes), "TTS should return bytes"
+ if not IN_GITHUB:
+ play(audio)
+
+
+def test_tts_convert() -> None:
+ """Test basic text-to-speech generation."""
+ client = ElevenLabs()
+ audio_generator = client.text_to_speech.convert(text=DEFAULT_TEXT, voice_id=DEFAULT_VOICE, model_id=DEFAULT_MODEL)
+ audio = b"".join(audio_generator)
+ assert isinstance(audio, bytes), "TTS should return bytes"
+ if not IN_GITHUB:
+ play(audio)
+
+
+def test_tts_convert_with_voice_settings() -> None:
+ """Test TTS with custom voice settings."""
+ client = ElevenLabs()
+ audio_generator = client.text_to_speech.convert(
+ text=DEFAULT_TEXT,
+ voice_id=DEFAULT_VOICE,
+ model_id=DEFAULT_MODEL,
+ voice_settings=VoiceSettings(stability=0.71, similarity_boost=0.5, style=0.0, use_speaker_boost=True),
+ )
+ audio = b"".join(audio_generator)
+ assert isinstance(audio, bytes), "TTS with voice settings should return bytes"
+ if not IN_GITHUB:
+ play(audio)
+
+
+def test_tts_convert_as_stream():
+ async def main():
+ async_client = AsyncElevenLabs()
+ results = async_client.text_to_speech.convert_as_stream(
+ text=DEFAULT_TEXT, voice_id=DEFAULT_VOICE, model_id=DEFAULT_MODEL
+ )
+ out = b""
+ async for value in results:
+ assert isinstance(value, bytes), "Stream chunks should be bytes"
+ out += value
+ if not IN_GITHUB:
+ play(out)
+
+ asyncio.run(main())
+
+
+def test_tts_convert_with_timestamps() -> None:
+ """Test TTS generation with timestamps."""
+ client = ElevenLabs()
+ result = client.text_to_speech.convert_with_timestamps(
+ text=DEFAULT_TEXT, voice_id=DEFAULT_VOICE, model_id=DEFAULT_MODEL
+ )
+
+ assert "alignment" in result # type: ignore
+ assert "characters" in result["alignment"] # type: ignore
+
+ if not IN_GITHUB:
+ audio_bytes = base64.b64decode(result["audio_base64"]) # type: ignore
+ play(audio_bytes)
+
+
+def test_tts_stream_with_timestamps():
+ async def main():
+ async_client = AsyncElevenLabs()
+ audio_data = b""
+ async_stream = async_client.text_to_speech.stream_with_timestamps(
+ voice_id=DEFAULT_VOICE,
+ text=DEFAULT_TEXT,
+ model_id=DEFAULT_MODEL,
+ )
+ async for chunk in async_stream:
+ if hasattr(chunk, "audio_base_64"):
+ audio_bytes = base64.b64decode(chunk.audio_base_64)
+ audio_data += audio_bytes
+
+ if not IN_GITHUB:
+ play(audio_data)
+
+ asyncio.run(main())
diff --git a/tests/test_ttsfx.py b/tests/test_ttsfx.py
new file mode 100644
index 00000000..30de6fc1
--- /dev/null
+++ b/tests/test_ttsfx.py
@@ -0,0 +1,17 @@
+from elevenlabs import play
+from elevenlabs.client import ElevenLabs
+
+from .utils import IN_GITHUB
+
+
def test_text_to_sound_effects_convert() -> None:
    """Test basic sound-effect generation.

    Calls the text_to_sound_effects endpoint with a short prompt and a
    2-second duration cap, then checks that joined chunks are raw bytes.
    """
    client = ElevenLabs()
    audio_generator = client.text_to_sound_effects.convert(
        # Fixed typo in the prompt: "imtensity" -> "intensity".
        text="Hypnotic throbbing sound effect. Increases in intensity.",
        duration_seconds=2,
    )
    audio = b"".join(audio_generator)
    # Message fixed: this exercises sound-effect generation, not TTS.
    assert isinstance(audio, bytes), "Sound-effect generation should return bytes"
    # Skip audible playback on CI runners.
    if not IN_GITHUB:
        play(audio)
diff --git a/tests/test_ttv.py b/tests/test_ttv.py
new file mode 100644
index 00000000..5d08682b
--- /dev/null
+++ b/tests/test_ttv.py
@@ -0,0 +1,17 @@
+from elevenlabs.client import ElevenLabs
+
+
def test_voice_preview_generation():
    """Test generating voice previews from description."""
    description = "A warm and friendly female voice with a slight British accent, speaking clearly and professionally"
    sample_text = "This is a test message that needs to be at least one hundred characters long to meet the API requirements. Here it is."

    response = ElevenLabs().text_to_voice.create_previews(voice_description=description, text=sample_text)

    assert hasattr(response, "previews"), "Response should have 'previews' attribute"
    assert len(response.previews) > 0, "Should receive at least one preview"
    first = response.previews[0]
    assert hasattr(first, "generated_voice_id"), "Preview should contain generated_voice_id"
    assert hasattr(first, "audio_base_64"), "Preview should contain audio_base_64"
diff --git a/tests/test_voice.py b/tests/test_voice.py
deleted file mode 100644
index 2e9f6e1d..00000000
--- a/tests/test_voice.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import pytest
-
-from elevenlabs import Voice, \
- VoiceSettings, play
-from .utils import IN_GITHUB, as_local_files, client
-
-
-def test_voice_from_id():
-
- # Test that we can get a voice from id
- voice_id = "21m00Tcm4TlvDq8ikWAM"
-
- voice = client.voices.get(voice_id)
- assert isinstance(voice, Voice)
-
- assert voice.voice_id == voice_id
- assert voice.name == "Rachel"
- assert voice.category == "premade"
- if voice.settings is not None:
- assert isinstance(voice.settings, VoiceSettings)
-
-@pytest.mark.skip(reason="subscription limit reached")
-def test_voice_clone():
- voice_file_urls = [
- "https://user-images.githubusercontent.com/12028621/235474694-584f7103-dab2-4c39-bb9a-8e5f00be85da.webm",
- ]
-
- for file in as_local_files(voice_file_urls):
- voice = client.clone(
- name="Alex",
- description=(
- "An old American male voice with a slight hoarseness in his throat."
- " Perfect for news"
- ),
- files=[file],
- )
-
- assert isinstance(voice, Voice) # type: ignore
- assert voice.voice_id is not None
- assert voice.name == "Alex"
- assert voice.category == "cloned"
- assert len(voice.samples or []) == len(voice_file_urls)
-
- audio = client.generate(
- text="Voice clone test successful.",
- voice=voice,
- )
-
- if not IN_GITHUB:
- play(audio)
-
- client.voices.delete(voice.voice_id)
-
-
-def test_voice_design():
- audio = client.voice_generation.generate(
- text=(
- "Hi! My name is Lexa, I'm a voice design test. I should have a middle aged"
- " female voice with a british accent. "
- ),
- gender="female",
- age="middle_aged",
- accent="british",
- accent_strength=1.5,
- )
-
- if not IN_GITHUB:
- play(audio)
-
-
-def test_voices():
- # Test that we can get voices from api
- response = client.voices.get_all()
-
- assert len(response.voices) > 0
-
- for voice in response.voices:
- assert isinstance(voice, Voice)
diff --git a/tests/test_voices.py b/tests/test_voices.py
new file mode 100644
index 00000000..6690792e
--- /dev/null
+++ b/tests/test_voices.py
@@ -0,0 +1,24 @@
+from elevenlabs import Voice, VoiceSettings, ElevenLabs
+from .utils import DEFAULT_VOICE
+
+
def test_get_voice():
    """Fetch a single voice by id and validate its type and identifier."""
    voice = ElevenLabs().voices.get(DEFAULT_VOICE)

    assert isinstance(voice, Voice)
    assert voice.voice_id == DEFAULT_VOICE
    # Settings may be absent; only check the type when they are present.
    if voice.settings is not None:
        assert isinstance(voice.settings, VoiceSettings)
+
+
def test_get_voices():
    """List all voices and check that each entry is a Voice instance."""
    response = ElevenLabs().voices.get_all()

    assert len(response.voices) > 0
    assert all(isinstance(entry, Voice) for entry in response.voices)
diff --git a/tests/utils.py b/tests/utils.py
index b51b44b6..f2061ef0 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -3,14 +3,13 @@
import httpx
from typing import Sequence, Generator
-from elevenlabs.client import ElevenLabs, \
- AsyncElevenLabs
IN_GITHUB = "GITHUB_ACTIONS" in os.environ
-client = ElevenLabs()
-
-async_client = AsyncElevenLabs()
+DEFAULT_VOICE = "21m00Tcm4TlvDq8ikWAM"
+DEFAULT_TEXT = "Hello"
+DEFAULT_MODEL = "eleven_multilingual_v2"
+DEFAULT_VOICE_FILE = "tests/fixtures/voice_sample.mp3"
def as_local_files(urls: Sequence[str]) -> Generator[str, None, None]:
@@ -25,4 +24,4 @@ def as_local_files(urls: Sequence[str]) -> Generator[str, None, None]:
yield temp_file.name
# Remove the files
for temp_file in temp_files:
- temp_file.close()
\ No newline at end of file
+ temp_file.close()