From 01b74c894d6ed4190900cf45f14e9e6b55fd3eb6 Mon Sep 17 00:00:00 2001 From: "fern-api[bot]" <115122769+fern-api[bot]@users.noreply.github.com> Date: Tue, 15 Oct 2024 15:35:21 +0200 Subject: [PATCH 01/45] SDK regeneration (#375) Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com> --- poetry.lock | 405 +++++++++--------- pyproject.toml | 2 +- reference.md | 6 +- src/elevenlabs/audio_native/client.py | 44 +- src/elevenlabs/core/client_wrapper.py | 14 +- src/elevenlabs/core/http_client.py | 46 +- src/elevenlabs/core/pydantic_utilities.py | 29 +- src/elevenlabs/core/serialization.py | 18 + src/elevenlabs/dubbing/client.py | 36 +- src/elevenlabs/environment.py | 1 + src/elevenlabs/projects/client.py | 36 +- .../pronunciation_dictionary/client.py | 12 +- src/elevenlabs/speech_to_speech/client.py | 24 +- src/elevenlabs/text_to_speech/client.py | 12 +- src/elevenlabs/voices/client.py | 32 +- 15 files changed, 392 insertions(+), 325 deletions(-) diff --git a/poetry.lock b/poetry.lock index f51b9619..9df66569 100644 --- a/poetry.lock +++ b/poetry.lock @@ -16,13 +16,13 @@ typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} [[package]] name = "anyio" -version = "4.4.0" +version = "4.5.2" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" files = [ - {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, - {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, + {file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"}, + {file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"}, ] [package.dependencies] @@ -32,9 +32,9 @@ sniffio = ">=1.1" typing-extensions = {version = ">=4.1", markers = 
"python_version < \"3.11\""} [package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] -trio = ["trio (>=0.23)"] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] [[package]] name = "certifi" @@ -49,101 +49,116 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.3.2" +version = "3.4.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7.0" files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = 
"charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = 
"sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = 
"charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - 
{file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = 
"charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = 
"charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = 
"charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = 
"sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, + {file = 
"charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, + {file = 
"charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, + {file = 
"charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = 
"sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = 
"sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, + {file = 
"charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, + {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, + {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, ] [[package]] @@ -184,13 +199,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.5" +version = "1.0.6" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, - {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, + {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"}, + {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"}, ] [package.dependencies] @@ -201,7 +216,7 @@ h11 = ">=0.13,<0.15" asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.26.0)"] +trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httpx" @@ -586,24 +601,24 @@ files = [ [[package]] name = "tomli" -version = "2.0.1" +version = "2.0.2" description = "A lil' TOML parser" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, + {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, + {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, ] [[package]] name = "types-python-dateutil" -version = "2.9.0.20240906" +version = "2.9.0.20241003" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" files = [ - {file = "types-python-dateutil-2.9.0.20240906.tar.gz", hash = "sha256:9706c3b68284c25adffc47319ecc7947e5bb86b3773f843c73906fd598bc176e"}, - {file = "types_python_dateutil-2.9.0.20240906-py3-none-any.whl", hash = "sha256:27c8cc2d058ccb14946eebcaaa503088f4f6dbc4fb6093d3d456a49aef2753f6"}, + {file = 
"types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"}, + {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, ] [[package]] @@ -636,97 +651,97 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "websockets" -version = "13.0.1" +version = "13.1" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" optional = false python-versions = ">=3.8" files = [ - {file = "websockets-13.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1841c9082a3ba4a05ea824cf6d99570a6a2d8849ef0db16e9c826acb28089e8f"}, - {file = "websockets-13.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c5870b4a11b77e4caa3937142b650fbbc0914a3e07a0cf3131f35c0587489c1c"}, - {file = "websockets-13.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f1d3d1f2eb79fe7b0fb02e599b2bf76a7619c79300fc55f0b5e2d382881d4f7f"}, - {file = "websockets-13.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15c7d62ee071fa94a2fc52c2b472fed4af258d43f9030479d9c4a2de885fd543"}, - {file = "websockets-13.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6724b554b70d6195ba19650fef5759ef11346f946c07dbbe390e039bcaa7cc3d"}, - {file = "websockets-13.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56a952fa2ae57a42ba7951e6b2605e08a24801a4931b5644dfc68939e041bc7f"}, - {file = "websockets-13.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:17118647c0ea14796364299e942c330d72acc4b248e07e639d34b75067b3cdd8"}, - {file = "websockets-13.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64a11aae1de4c178fa653b07d90f2fb1a2ed31919a5ea2361a38760192e1858b"}, - {file = "websockets-13.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:0617fd0b1d14309c7eab6ba5deae8a7179959861846cbc5cb528a7531c249448"}, - {file = "websockets-13.0.1-cp310-cp310-win32.whl", hash = "sha256:11f9976ecbc530248cf162e359a92f37b7b282de88d1d194f2167b5e7ad80ce3"}, - {file = "websockets-13.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:c3c493d0e5141ec055a7d6809a28ac2b88d5b878bb22df8c621ebe79a61123d0"}, - {file = "websockets-13.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:699ba9dd6a926f82a277063603fc8d586b89f4cb128efc353b749b641fcddda7"}, - {file = "websockets-13.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cf2fae6d85e5dc384bf846f8243ddaa9197f3a1a70044f59399af001fd1f51d4"}, - {file = "websockets-13.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:52aed6ef21a0f1a2a5e310fb5c42d7555e9c5855476bbd7173c3aa3d8a0302f2"}, - {file = "websockets-13.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8eb2b9a318542153674c6e377eb8cb9ca0fc011c04475110d3477862f15d29f0"}, - {file = "websockets-13.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5df891c86fe68b2c38da55b7aea7095beca105933c697d719f3f45f4220a5e0e"}, - {file = "websockets-13.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fac2d146ff30d9dd2fcf917e5d147db037a5c573f0446c564f16f1f94cf87462"}, - {file = "websockets-13.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b8ac5b46fd798bbbf2ac6620e0437c36a202b08e1f827832c4bf050da081b501"}, - {file = "websockets-13.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:46af561eba6f9b0848b2c9d2427086cabadf14e0abdd9fde9d72d447df268418"}, - {file = "websockets-13.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b5a06d7f60bc2fc378a333978470dfc4e1415ee52f5f0fce4f7853eb10c1e9df"}, - {file = "websockets-13.0.1-cp311-cp311-win32.whl", hash = "sha256:556e70e4f69be1082e6ef26dcb70efcd08d1850f5d6c5f4f2bcb4e397e68f01f"}, - {file = 
"websockets-13.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:67494e95d6565bf395476e9d040037ff69c8b3fa356a886b21d8422ad86ae075"}, - {file = "websockets-13.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f9c9e258e3d5efe199ec23903f5da0eeaad58cf6fccb3547b74fd4750e5ac47a"}, - {file = "websockets-13.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6b41a1b3b561f1cba8321fb32987552a024a8f67f0d05f06fcf29f0090a1b956"}, - {file = "websockets-13.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f73e676a46b0fe9426612ce8caeca54c9073191a77c3e9d5c94697aef99296af"}, - {file = "websockets-13.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f613289f4a94142f914aafad6c6c87903de78eae1e140fa769a7385fb232fdf"}, - {file = "websockets-13.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f52504023b1480d458adf496dc1c9e9811df4ba4752f0bc1f89ae92f4f07d0c"}, - {file = "websockets-13.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:139add0f98206cb74109faf3611b7783ceafc928529c62b389917a037d4cfdf4"}, - {file = "websockets-13.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:47236c13be337ef36546004ce8c5580f4b1150d9538b27bf8a5ad8edf23ccfab"}, - {file = "websockets-13.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c44ca9ade59b2e376612df34e837013e2b273e6c92d7ed6636d0556b6f4db93d"}, - {file = "websockets-13.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9bbc525f4be3e51b89b2a700f5746c2a6907d2e2ef4513a8daafc98198b92237"}, - {file = "websockets-13.0.1-cp312-cp312-win32.whl", hash = "sha256:3624fd8664f2577cf8de996db3250662e259bfbc870dd8ebdcf5d7c6ac0b5185"}, - {file = "websockets-13.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0513c727fb8adffa6d9bf4a4463b2bade0186cbd8c3604ae5540fae18a90cb99"}, - {file = "websockets-13.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:1ee4cc030a4bdab482a37462dbf3ffb7e09334d01dd37d1063be1136a0d825fa"}, - {file = "websockets-13.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dbb0b697cc0655719522406c059eae233abaa3243821cfdfab1215d02ac10231"}, - {file = "websockets-13.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:acbebec8cb3d4df6e2488fbf34702cbc37fc39ac7abf9449392cefb3305562e9"}, - {file = "websockets-13.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63848cdb6fcc0bf09d4a155464c46c64ffdb5807ede4fb251da2c2692559ce75"}, - {file = "websockets-13.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:872afa52a9f4c414d6955c365b6588bc4401272c629ff8321a55f44e3f62b553"}, - {file = "websockets-13.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05e70fec7c54aad4d71eae8e8cab50525e899791fc389ec6f77b95312e4e9920"}, - {file = "websockets-13.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e82db3756ccb66266504f5a3de05ac6b32f287faacff72462612120074103329"}, - {file = "websockets-13.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4e85f46ce287f5c52438bb3703d86162263afccf034a5ef13dbe4318e98d86e7"}, - {file = "websockets-13.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f3fea72e4e6edb983908f0db373ae0732b275628901d909c382aae3b592589f2"}, - {file = "websockets-13.0.1-cp313-cp313-win32.whl", hash = "sha256:254ecf35572fca01a9f789a1d0f543898e222f7b69ecd7d5381d8d8047627bdb"}, - {file = "websockets-13.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:ca48914cdd9f2ccd94deab5bcb5ac98025a5ddce98881e5cce762854a5de330b"}, - {file = "websockets-13.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b74593e9acf18ea5469c3edaa6b27fa7ecf97b30e9dabd5a94c4c940637ab96e"}, - {file = "websockets-13.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:132511bfd42e77d152c919147078460c88a795af16b50e42a0bd14f0ad71ddd2"}, - {file 
= "websockets-13.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:165bedf13556f985a2aa064309baa01462aa79bf6112fbd068ae38993a0e1f1b"}, - {file = "websockets-13.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e801ca2f448850685417d723ec70298feff3ce4ff687c6f20922c7474b4746ae"}, - {file = "websockets-13.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30d3a1f041360f029765d8704eae606781e673e8918e6b2c792e0775de51352f"}, - {file = "websockets-13.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67648f5e50231b5a7f6d83b32f9c525e319f0ddc841be0de64f24928cd75a603"}, - {file = "websockets-13.0.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:4f0426d51c8f0926a4879390f53c7f5a855e42d68df95fff6032c82c888b5f36"}, - {file = "websockets-13.0.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ef48e4137e8799998a343706531e656fdec6797b80efd029117edacb74b0a10a"}, - {file = "websockets-13.0.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:249aab278810bee585cd0d4de2f08cfd67eed4fc75bde623be163798ed4db2eb"}, - {file = "websockets-13.0.1-cp38-cp38-win32.whl", hash = "sha256:06c0a667e466fcb56a0886d924b5f29a7f0886199102f0a0e1c60a02a3751cb4"}, - {file = "websockets-13.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1f3cf6d6ec1142412d4535adabc6bd72a63f5f148c43fe559f06298bc21953c9"}, - {file = "websockets-13.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1fa082ea38d5de51dd409434edc27c0dcbd5fed2b09b9be982deb6f0508d25bc"}, - {file = "websockets-13.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4a365bcb7be554e6e1f9f3ed64016e67e2fa03d7b027a33e436aecf194febb63"}, - {file = "websockets-13.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:10a0dc7242215d794fb1918f69c6bb235f1f627aaf19e77f05336d147fce7c37"}, - {file = "websockets-13.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:59197afd478545b1f73367620407b0083303569c5f2d043afe5363676f2697c9"}, - {file = "websockets-13.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d20516990d8ad557b5abeb48127b8b779b0b7e6771a265fa3e91767596d7d97"}, - {file = "websockets-13.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1a2e272d067030048e1fe41aa1ec8cfbbaabce733b3d634304fa2b19e5c897f"}, - {file = "websockets-13.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ad327ac80ba7ee61da85383ca8822ff808ab5ada0e4a030d66703cc025b021c4"}, - {file = "websockets-13.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:518f90e6dd089d34eaade01101fd8a990921c3ba18ebbe9b0165b46ebff947f0"}, - {file = "websockets-13.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:68264802399aed6fe9652e89761031acc734fc4c653137a5911c2bfa995d6d6d"}, - {file = "websockets-13.0.1-cp39-cp39-win32.whl", hash = "sha256:a5dc0c42ded1557cc7c3f0240b24129aefbad88af4f09346164349391dea8e58"}, - {file = "websockets-13.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:b448a0690ef43db5ef31b3a0d9aea79043882b4632cfc3eaab20105edecf6097"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:faef9ec6354fe4f9a2c0bbb52fb1ff852effc897e2a4501e25eb3a47cb0a4f89"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:03d3f9ba172e0a53e37fa4e636b86cc60c3ab2cfee4935e66ed1d7acaa4625ad"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d450f5a7a35662a9b91a64aefa852f0c0308ee256122f5218a42f1d13577d71e"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f55b36d17ac50aa8a171b771e15fbe1561217510c8768af3d546f56c7576cdc"}, - {file = 
"websockets-13.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14b9c006cac63772b31abbcd3e3abb6228233eec966bf062e89e7fa7ae0b7333"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b79915a1179a91f6c5f04ece1e592e2e8a6bd245a0e45d12fd56b2b59e559a32"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f40de079779acbcdbb6ed4c65af9f018f8b77c5ec4e17a4b737c05c2db554491"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80e4ba642fc87fa532bac07e5ed7e19d56940b6af6a8c61d4429be48718a380f"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a02b0161c43cc9e0232711eff846569fad6ec836a7acab16b3cf97b2344c060"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6aa74a45d4cdc028561a7d6ab3272c8b3018e23723100b12e58be9dfa5a24491"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00fd961943b6c10ee6f0b1130753e50ac5dcd906130dcd77b0003c3ab797d026"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d93572720d781331fb10d3da9ca1067817d84ad1e7c31466e9f5e59965618096"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:71e6e5a3a3728886caee9ab8752e8113670936a193284be9d6ad2176a137f376"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c4a6343e3b0714e80da0b0893543bf9a5b5fa71b846ae640e56e9abc6fbc4c83"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a678532018e435396e37422a95e3ab87f75028ac79570ad11f5bf23cd2a7d8c"}, - {file = 
"websockets-13.0.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6716c087e4aa0b9260c4e579bb82e068f84faddb9bfba9906cb87726fa2e870"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e33505534f3f673270dd67f81e73550b11de5b538c56fe04435d63c02c3f26b5"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:acab3539a027a85d568c2573291e864333ec9d912675107d6efceb7e2be5d980"}, - {file = "websockets-13.0.1-py3-none-any.whl", hash = "sha256:b80f0c51681c517604152eb6a572f5a9378f877763231fddb883ba2f968e8817"}, - {file = "websockets-13.0.1.tar.gz", hash = "sha256:4d6ece65099411cfd9a48d13701d7438d9c34f479046b34c50ff60bb8834e43e"}, + {file = "websockets-13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f48c749857f8fb598fb890a75f540e3221d0976ed0bf879cf3c7eef34151acee"}, + {file = "websockets-13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c7e72ce6bda6fb9409cc1e8164dd41d7c91466fb599eb047cfda72fe758a34a7"}, + {file = "websockets-13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f779498eeec470295a2b1a5d97aa1bc9814ecd25e1eb637bd9d1c73a327387f6"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676df3fe46956fbb0437d8800cd5f2b6d41143b6e7e842e60554398432cf29b"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7affedeb43a70351bb811dadf49493c9cfd1ed94c9c70095fd177e9cc1541fa"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1971e62d2caa443e57588e1d82d15f663b29ff9dfe7446d9964a4b6f12c1e700"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5f2e75431f8dc4a47f31565a6e1355fb4f2ecaa99d6b89737527ea917066e26c"}, + {file 
= "websockets-13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58cf7e75dbf7e566088b07e36ea2e3e2bd5676e22216e4cad108d4df4a7402a0"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c90d6dec6be2c7d03378a574de87af9b1efea77d0c52a8301dd831ece938452f"}, + {file = "websockets-13.1-cp310-cp310-win32.whl", hash = "sha256:730f42125ccb14602f455155084f978bd9e8e57e89b569b4d7f0f0c17a448ffe"}, + {file = "websockets-13.1-cp310-cp310-win_amd64.whl", hash = "sha256:5993260f483d05a9737073be197371940c01b257cc45ae3f1d5d7adb371b266a"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:61fc0dfcda609cda0fc9fe7977694c0c59cf9d749fbb17f4e9483929e3c48a19"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ceec59f59d092c5007e815def4ebb80c2de330e9588e101cf8bd94c143ec78a5"}, + {file = "websockets-13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1dca61c6db1166c48b95198c0b7d9c990b30c756fc2923cc66f68d17dc558fd"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308e20f22c2c77f3f39caca508e765f8725020b84aa963474e18c59accbf4c02"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d516c325e6540e8a57b94abefc3459d7dab8ce52ac75c96cad5549e187e3a7"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c6e35319b46b99e168eb98472d6c7d8634ee37750d7693656dc766395df096"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5f9fee94ebafbc3117c30be1844ed01a3b177bb6e39088bc6b2fa1dc15572084"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7c1e90228c2f5cdde263253fa5db63e6653f1c00e7ec64108065a0b9713fa1b3"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:6548f29b0e401eea2b967b2fdc1c7c7b5ebb3eeb470ed23a54cd45ef078a0db9"}, + {file = "websockets-13.1-cp311-cp311-win32.whl", hash = "sha256:c11d4d16e133f6df8916cc5b7e3e96ee4c44c936717d684a94f48f82edb7c92f"}, + {file = "websockets-13.1-cp311-cp311-win_amd64.whl", hash = "sha256:d04f13a1d75cb2b8382bdc16ae6fa58c97337253826dfe136195b7f89f661557"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9d75baf00138f80b48f1eac72ad1535aac0b6461265a0bcad391fc5aba875cfc"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9b6f347deb3dcfbfde1c20baa21c2ac0751afaa73e64e5b693bb2b848efeaa49"}, + {file = "websockets-13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de58647e3f9c42f13f90ac7e5f58900c80a39019848c5547bc691693098ae1bd"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1b54689e38d1279a51d11e3467dd2f3a50f5f2e879012ce8f2d6943f00e83f0"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf1781ef73c073e6b0f90af841aaf98501f975d306bbf6221683dd594ccc52b6"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d23b88b9388ed85c6faf0e74d8dec4f4d3baf3ecf20a65a47b836d56260d4b9"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3c78383585f47ccb0fcf186dcb8a43f5438bd7d8f47d69e0b56f71bf431a0a68"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d6d300f8ec35c24025ceb9b9019ae9040c1ab2f01cddc2bcc0b518af31c75c14"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9dcaf8b0cc72a392760bb8755922c03e17a5a54e08cca58e8b74f6902b433cf"}, + {file = "websockets-13.1-cp312-cp312-win32.whl", hash = "sha256:2f85cf4f2a1ba8f602298a853cec8526c2ca42a9a4b947ec236eaedb8f2dc80c"}, + {file = 
"websockets-13.1-cp312-cp312-win_amd64.whl", hash = "sha256:38377f8b0cdeee97c552d20cf1865695fcd56aba155ad1b4ca8779a5b6ef4ac3"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a9ab1e71d3d2e54a0aa646ab6d4eebfaa5f416fe78dfe4da2839525dc5d765c6"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b9d7439d7fab4dce00570bb906875734df13d9faa4b48e261c440a5fec6d9708"}, + {file = "websockets-13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327b74e915cf13c5931334c61e1a41040e365d380f812513a255aa804b183418"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:325b1ccdbf5e5725fdcb1b0e9ad4d2545056479d0eee392c291c1bf76206435a"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:346bee67a65f189e0e33f520f253d5147ab76ae42493804319b5716e46dddf0f"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a0fa841646320ec0d3accdff5b757b06e2e5c86ba32af2e0815c96c7a603c5"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:18503d2c5f3943e93819238bf20df71982d193f73dcecd26c94514f417f6b135"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9cd1af7e18e5221d2878378fbc287a14cd527fdd5939ed56a18df8a31136bb2"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:70c5be9f416aa72aab7a2a76c90ae0a4fe2755c1816c153c1a2bcc3333ce4ce6"}, + {file = "websockets-13.1-cp313-cp313-win32.whl", hash = "sha256:624459daabeb310d3815b276c1adef475b3e6804abaf2d9d2c061c319f7f187d"}, + {file = "websockets-13.1-cp313-cp313-win_amd64.whl", hash = "sha256:c518e84bb59c2baae725accd355c8dc517b4a3ed8db88b4bc93c78dae2974bf2"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:c7934fd0e920e70468e676fe7f1b7261c1efa0d6c037c6722278ca0228ad9d0d"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:149e622dc48c10ccc3d2760e5f36753db9cacf3ad7bc7bbbfd7d9c819e286f23"}, + {file = "websockets-13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a569eb1b05d72f9bce2ebd28a1ce2054311b66677fcd46cf36204ad23acead8c"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95df24ca1e1bd93bbca51d94dd049a984609687cb2fb08a7f2c56ac84e9816ea"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8dbb1bf0c0a4ae8b40bdc9be7f644e2f3fb4e8a9aca7145bfa510d4a374eeb7"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:035233b7531fb92a76beefcbf479504db8c72eb3bff41da55aecce3a0f729e54"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e4450fc83a3df53dec45922b576e91e94f5578d06436871dce3a6be38e40f5db"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:463e1c6ec853202dd3657f156123d6b4dad0c546ea2e2e38be2b3f7c5b8e7295"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6d6855bbe70119872c05107e38fbc7f96b1d8cb047d95c2c50869a46c65a8e96"}, + {file = "websockets-13.1-cp38-cp38-win32.whl", hash = "sha256:204e5107f43095012b00f1451374693267adbb832d29966a01ecc4ce1db26faf"}, + {file = "websockets-13.1-cp38-cp38-win_amd64.whl", hash = "sha256:485307243237328c022bc908b90e4457d0daa8b5cf4b3723fd3c4a8012fce4c6"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9b37c184f8b976f0c0a231a5f3d6efe10807d41ccbe4488df8c74174805eea7d"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:163e7277e1a0bd9fb3c8842a71661ad19c6aa7bb3d6678dc7f89b17fbcc4aeb7"}, + {file = 
"websockets-13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4b889dbd1342820cc210ba44307cf75ae5f2f96226c0038094455a96e64fb07a"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:586a356928692c1fed0eca68b4d1c2cbbd1ca2acf2ac7e7ebd3b9052582deefa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7bd6abf1e070a6b72bfeb71049d6ad286852e285f146682bf30d0296f5fbadfa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2aad13a200e5934f5a6767492fb07151e1de1d6079c003ab31e1823733ae79"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:df01aea34b6e9e33572c35cd16bae5a47785e7d5c8cb2b54b2acdb9678315a17"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e54affdeb21026329fb0744ad187cf812f7d3c2aa702a5edb562b325191fcab6"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ef8aa8bdbac47f4968a5d66462a2a0935d044bf35c0e5a8af152d58516dbeb5"}, + {file = "websockets-13.1-cp39-cp39-win32.whl", hash = "sha256:deeb929efe52bed518f6eb2ddc00cc496366a14c726005726ad62c2dd9017a3c"}, + {file = "websockets-13.1-cp39-cp39-win_amd64.whl", hash = "sha256:7c65ffa900e7cc958cd088b9a9157a8141c991f8c53d11087e6fb7277a03f81d"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5dd6da9bec02735931fccec99d97c29f47cc61f644264eb995ad6c0c27667238"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2510c09d8e8df777177ee3d40cd35450dc169a81e747455cc4197e63f7e7bfe5"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1c3cf67185543730888b20682fb186fc8d0fa6f07ccc3ef4390831ab4b388d9"}, + {file = 
"websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcc03c8b72267e97b49149e4863d57c2d77f13fae12066622dc78fe322490fe6"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:004280a140f220c812e65f36944a9ca92d766b6cc4560be652a0a3883a79ed8a"}, + {file = "websockets-13.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e2620453c075abeb0daa949a292e19f56de518988e079c36478bacf9546ced23"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9156c45750b37337f7b0b00e6248991a047be4aa44554c9886fe6bdd605aab3b"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80c421e07973a89fbdd93e6f2003c17d20b69010458d3a8e37fb47874bd67d51"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82d0ba76371769d6a4e56f7e83bb8e81846d17a6190971e38b5de108bde9b0d7"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9875a0143f07d74dc5e1ded1c4581f0d9f7ab86c78994e2ed9e95050073c94d"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11e38ad8922c7961447f35c7b17bffa15de4d17c70abd07bfbe12d6faa3e027"}, + {file = "websockets-13.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4059f790b6ae8768471cddb65d3c4fe4792b0ab48e154c9f0a04cefaabcd5978"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:25c35bf84bf7c7369d247f0b8cfa157f989862c49104c5cf85cb5436a641d93e"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:83f91d8a9bb404b8c2c41a707ac7f7f75b9442a0a876df295de27251a856ad09"}, + {file = 
"websockets-13.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a43cfdcddd07f4ca2b1afb459824dd3c6d53a51410636a2c7fc97b9a8cf4842"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a2ef1381632a2f0cb4efeff34efa97901c9fbc118e01951ad7cfc10601a9bb"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459bf774c754c35dbb487360b12c5727adab887f1622b8aed5755880a21c4a20"}, + {file = "websockets-13.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:95858ca14a9f6fa8413d29e0a585b31b278388aa775b8a81fa24830123874678"}, + {file = "websockets-13.1-py3-none-any.whl", hash = "sha256:a9a396a6ad26130cdae92ae10c36af09d9bfe6cafe69670fd3b6da9b07b4044f"}, + {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"}, ] [metadata] diff --git a/pyproject.toml b/pyproject.toml index c9d0bb26..416dde7a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.9.0" +version = "1.10.0" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index d5a0de04..682c52cc 100644 --- a/reference.md +++ b/reference.md @@ -803,9 +803,9 @@ client.text_to_speech.convert( output_format="mp3_22050_32", text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”", voice_settings=VoiceSettings( - stability=0.1, - similarity_boost=0.3, - style=0.2, + stability=0.5, + similarity_boost=0.75, + style=0.0, ), ) diff --git a/src/elevenlabs/audio_native/client.py b/src/elevenlabs/audio_native/client.py index e0f7768e..fa998d8d 100644 --- a/src/elevenlabs/audio_native/client.py +++ b/src/elevenlabs/audio_native/client.py @@ -24,17 +24,17 @@ def create( self, *, name: str, - image: typing.Optional[str] = None, - 
author: typing.Optional[str] = None, - title: typing.Optional[str] = None, - small: typing.Optional[bool] = None, - text_color: typing.Optional[str] = None, - background_color: typing.Optional[str] = None, - sessionization: typing.Optional[int] = None, - voice_id: typing.Optional[str] = None, - model_id: typing.Optional[str] = None, - file: typing.Optional[core.File] = None, - auto_convert: typing.Optional[bool] = None, + image: typing.Optional[str] = OMIT, + author: typing.Optional[str] = OMIT, + title: typing.Optional[str] = OMIT, + small: typing.Optional[bool] = OMIT, + text_color: typing.Optional[str] = OMIT, + background_color: typing.Optional[str] = OMIT, + sessionization: typing.Optional[int] = OMIT, + voice_id: typing.Optional[str] = OMIT, + model_id: typing.Optional[str] = OMIT, + file: typing.Optional[core.File] = OMIT, + auto_convert: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> AudioNativeCreateProjectResponseModel: """ @@ -152,17 +152,17 @@ async def create( self, *, name: str, - image: typing.Optional[str] = None, - author: typing.Optional[str] = None, - title: typing.Optional[str] = None, - small: typing.Optional[bool] = None, - text_color: typing.Optional[str] = None, - background_color: typing.Optional[str] = None, - sessionization: typing.Optional[int] = None, - voice_id: typing.Optional[str] = None, - model_id: typing.Optional[str] = None, - file: typing.Optional[core.File] = None, - auto_convert: typing.Optional[bool] = None, + image: typing.Optional[str] = OMIT, + author: typing.Optional[str] = OMIT, + title: typing.Optional[str] = OMIT, + small: typing.Optional[bool] = OMIT, + text_color: typing.Optional[str] = OMIT, + background_color: typing.Optional[str] = OMIT, + sessionization: typing.Optional[int] = OMIT, + voice_id: typing.Optional[str] = OMIT, + model_id: typing.Optional[str] = OMIT, + file: typing.Optional[core.File] = OMIT, + auto_convert: typing.Optional[bool] = OMIT, request_options: 
typing.Optional[RequestOptions] = None, ) -> AudioNativeCreateProjectResponseModel: """ diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index 6b0813a8..633805d0 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.9.0", + "X-Fern-SDK-Version": "1.10.0", } if self._api_key is not None: headers["xi-api-key"] = self._api_key @@ -41,9 +41,9 @@ def __init__( super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) self.httpx_client = HttpClient( httpx_client=httpx_client, - base_headers=self.get_headers(), - base_timeout=self.get_timeout(), - base_url=self.get_base_url(), + base_headers=self.get_headers, + base_timeout=self.get_timeout, + base_url=self.get_base_url, ) @@ -59,7 +59,7 @@ def __init__( super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) self.httpx_client = AsyncHttpClient( httpx_client=httpx_client, - base_headers=self.get_headers(), - base_timeout=self.get_timeout(), - base_url=self.get_base_url(), + base_headers=self.get_headers, + base_timeout=self.get_timeout, + base_url=self.get_base_url, ) diff --git a/src/elevenlabs/core/http_client.py b/src/elevenlabs/core/http_client.py index b07401b5..eb4e8943 100644 --- a/src/elevenlabs/core/http_client.py +++ b/src/elevenlabs/core/http_client.py @@ -152,9 +152,9 @@ def __init__( self, *, httpx_client: httpx.Client, - base_timeout: typing.Optional[float], - base_headers: typing.Dict[str, str], - base_url: typing.Optional[str] = None, + base_timeout: typing.Callable[[], typing.Optional[float]], + base_headers: typing.Callable[[], typing.Dict[str, str]], + base_url: typing.Optional[typing.Callable[[], str]] = None, ): self.base_url = base_url self.base_timeout = base_timeout @@ -162,7 +162,10 @@ def __init__( 
self.httpx_client = httpx_client def get_base_url(https://github.com/self,%20maybe_base_url:%20typing.Optional[str]) -> str: - base_url = self.base_url if maybe_base_url is None else maybe_base_url + base_url = maybe_base_url + if self.base_url is not None and base_url is None: + base_url = self.base_url() + if base_url is None: raise ValueError("A base_url is required to make this request, please provide one and try again.") return base_url @@ -187,7 +190,7 @@ def request( timeout = ( request_options.get("timeout_in_seconds") if request_options is not None and request_options.get("timeout_in_seconds") is not None - else self.base_timeout + else self.base_timeout() ) json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit) @@ -198,7 +201,7 @@ def request( headers=jsonable_encoder( remove_none_from_dict( { - **self.base_headers, + **self.base_headers(), **(headers if headers is not None else {}), **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}), } @@ -224,7 +227,9 @@ def request( json=json_body, data=data_body, content=content, - files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None, + files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) + if (files is not None and files is not omit) + else None, timeout=timeout, ) @@ -269,7 +274,7 @@ def stream( timeout = ( request_options.get("timeout_in_seconds") if request_options is not None and request_options.get("timeout_in_seconds") is not None - else self.base_timeout + else self.base_timeout() ) json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit) @@ -280,7 +285,7 @@ def stream( headers=jsonable_encoder( remove_none_from_dict( { - **self.base_headers, + **self.base_headers(), **(headers if headers is not None else {}), **(request_options.get("additional_headers", {}) if request_options is not None else 
{}), } @@ -306,7 +311,9 @@ def stream( json=json_body, data=data_body, content=content, - files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None, + files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) + if (files is not None and files is not omit) + else None, timeout=timeout, ) as stream: yield stream @@ -317,9 +324,9 @@ def __init__( self, *, httpx_client: httpx.AsyncClient, - base_timeout: typing.Optional[float], - base_headers: typing.Dict[str, str], - base_url: typing.Optional[str] = None, + base_timeout: typing.Callable[[], typing.Optional[float]], + base_headers: typing.Callable[[], typing.Dict[str, str]], + base_url: typing.Optional[typing.Callable[[], str]] = None, ): self.base_url = base_url self.base_timeout = base_timeout @@ -327,7 +334,10 @@ def __init__( self.httpx_client = httpx_client def get_base_url(https://github.com/self,%20maybe_base_url:%20typing.Optional[str]) -> str: - base_url = self.base_url if maybe_base_url is None else maybe_base_url + base_url = maybe_base_url + if self.base_url is not None and base_url is None: + base_url = self.base_url() + if base_url is None: raise ValueError("A base_url is required to make this request, please provide one and try again.") return base_url @@ -352,7 +362,7 @@ async def request( timeout = ( request_options.get("timeout_in_seconds") if request_options is not None and request_options.get("timeout_in_seconds") is not None - else self.base_timeout + else self.base_timeout() ) json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit) @@ -364,7 +374,7 @@ async def request( headers=jsonable_encoder( remove_none_from_dict( { - **self.base_headers, + **self.base_headers(), **(headers if headers is not None else {}), **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}), } @@ -434,7 +444,7 @@ async def stream( timeout = ( 
request_options.get("timeout_in_seconds") if request_options is not None and request_options.get("timeout_in_seconds") is not None - else self.base_timeout + else self.base_timeout() ) json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit) @@ -445,7 +455,7 @@ async def stream( headers=jsonable_encoder( remove_none_from_dict( { - **self.base_headers, + **self.base_headers(), **(headers if headers is not None else {}), **(request_options.get("additional_headers", {}) if request_options is not None else {}), } diff --git a/src/elevenlabs/core/pydantic_utilities.py b/src/elevenlabs/core/pydantic_utilities.py index c14b4828..ee8f0e41 100644 --- a/src/elevenlabs/core/pydantic_utilities.py +++ b/src/elevenlabs/core/pydantic_utilities.py @@ -152,7 +152,7 @@ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: ) else: - _fields_set = self.__fields_set__ + _fields_set = self.__fields_set__.copy() fields = _get_model_fields(self.__class__) for name, field in fields.items(): @@ -162,9 +162,12 @@ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: # If the default values are non-null act like they've been set # This effectively allows exclude_unset to work like exclude_none where # the latter passes through intentionally set none values. 
- if default != None: + if default is not None or ("exclude_unset" in kwargs and not kwargs["exclude_unset"]): _fields_set.add(name) + if default is not None: + self.__fields_set__.add(name) + kwargs_with_defaults_exclude_unset_include_fields: typing.Any = { "by_alias": True, "exclude_unset": True, @@ -177,13 +180,33 @@ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: return convert_and_respect_annotation_metadata(object_=dict_dump, annotation=self.__class__, direction="write") +def _union_list_of_pydantic_dicts( + source: typing.List[typing.Any], destination: typing.List[typing.Any] +) -> typing.List[typing.Any]: + converted_list: typing.List[typing.Any] = [] + for i, item in enumerate(source): + destination_value = destination[i] # type: ignore + if isinstance(item, dict): + converted_list.append(deep_union_pydantic_dicts(item, destination_value)) + elif isinstance(item, list): + converted_list.append(_union_list_of_pydantic_dicts(item, destination_value)) + else: + converted_list.append(item) + return converted_list + + def deep_union_pydantic_dicts( source: typing.Dict[str, typing.Any], destination: typing.Dict[str, typing.Any] ) -> typing.Dict[str, typing.Any]: for key, value in source.items(): + node = destination.setdefault(key, {}) if isinstance(value, dict): - node = destination.setdefault(key, {}) deep_union_pydantic_dicts(value, node) + # Note: we do not do this same processing for sets given we do not have sets of models + # and given the sets are unordered, the processing of the set and matching objects would + # be non-trivial. 
+ elif isinstance(value, list): + destination[key] = _union_list_of_pydantic_dicts(value, node) else: destination[key] = value diff --git a/src/elevenlabs/core/serialization.py b/src/elevenlabs/core/serialization.py index 5605f1b6..cb5dcbf9 100644 --- a/src/elevenlabs/core/serialization.py +++ b/src/elevenlabs/core/serialization.py @@ -71,6 +71,24 @@ def convert_and_respect_annotation_metadata( if typing_extensions.is_typeddict(clean_type) and isinstance(object_, typing.Mapping): return _convert_mapping(object_, clean_type, direction) + if ( + typing_extensions.get_origin(clean_type) == typing.Dict + or typing_extensions.get_origin(clean_type) == dict + or clean_type == typing.Dict + ) and isinstance(object_, typing.Dict): + key_type = typing_extensions.get_args(clean_type)[0] + value_type = typing_extensions.get_args(clean_type)[1] + + return { + key: convert_and_respect_annotation_metadata( + object_=value, + annotation=annotation, + inner_type=value_type, + direction=direction, + ) + for key, value in object_.items() + } + # If you're iterating on a string, do not bother to coerce it to a sequence. 
if not isinstance(object_, str): if ( diff --git a/src/elevenlabs/dubbing/client.py b/src/elevenlabs/dubbing/client.py index 3226750a..3fdeae18 100644 --- a/src/elevenlabs/dubbing/client.py +++ b/src/elevenlabs/dubbing/client.py @@ -29,15 +29,15 @@ def dub_a_video_or_an_audio_file( self, *, target_lang: str, - file: typing.Optional[core.File] = None, - name: typing.Optional[str] = None, - source_url: typing.Optional[str] = None, - source_lang: typing.Optional[str] = None, - num_speakers: typing.Optional[int] = None, - watermark: typing.Optional[bool] = None, - start_time: typing.Optional[int] = None, - end_time: typing.Optional[int] = None, - highest_resolution: typing.Optional[bool] = None, + file: typing.Optional[core.File] = OMIT, + name: typing.Optional[str] = OMIT, + source_url: typing.Optional[str] = OMIT, + source_lang: typing.Optional[str] = OMIT, + num_speakers: typing.Optional[int] = OMIT, + watermark: typing.Optional[bool] = OMIT, + start_time: typing.Optional[int] = OMIT, + end_time: typing.Optional[int] = OMIT, + highest_resolution: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> DoDubbingResponse: """ @@ -401,15 +401,15 @@ async def dub_a_video_or_an_audio_file( self, *, target_lang: str, - file: typing.Optional[core.File] = None, - name: typing.Optional[str] = None, - source_url: typing.Optional[str] = None, - source_lang: typing.Optional[str] = None, - num_speakers: typing.Optional[int] = None, - watermark: typing.Optional[bool] = None, - start_time: typing.Optional[int] = None, - end_time: typing.Optional[int] = None, - highest_resolution: typing.Optional[bool] = None, + file: typing.Optional[core.File] = OMIT, + name: typing.Optional[str] = OMIT, + source_url: typing.Optional[str] = OMIT, + source_lang: typing.Optional[str] = OMIT, + num_speakers: typing.Optional[int] = OMIT, + watermark: typing.Optional[bool] = OMIT, + start_time: typing.Optional[int] = OMIT, + end_time: typing.Optional[int] = OMIT, + 
highest_resolution: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> DoDubbingResponse: """ diff --git a/src/elevenlabs/environment.py b/src/elevenlabs/environment.py index dc68f9bc..37557cbf 100644 --- a/src/elevenlabs/environment.py +++ b/src/elevenlabs/environment.py @@ -5,3 +5,4 @@ class ElevenLabsEnvironment(enum.Enum): PRODUCTION = "https://api.elevenlabs.io/" + PRODUCTION_US = "https://api.us.elevenlabs.io/" diff --git a/src/elevenlabs/projects/client.py b/src/elevenlabs/projects/client.py index 6f8ca56f..604abcb4 100644 --- a/src/elevenlabs/projects/client.py +++ b/src/elevenlabs/projects/client.py @@ -86,15 +86,15 @@ def add( default_title_voice_id: str, default_paragraph_voice_id: str, default_model_id: str, - from_url: typing.Optional[str] = None, - from_document: typing.Optional[core.File] = None, - quality_preset: typing.Optional[str] = None, - title: typing.Optional[str] = None, - author: typing.Optional[str] = None, - isbn_number: typing.Optional[str] = None, - acx_volume_normalization: typing.Optional[bool] = None, - volume_normalization: typing.Optional[bool] = None, - pronunciation_dictionary_locators: typing.Optional[typing.List[str]] = None, + from_url: typing.Optional[str] = OMIT, + from_document: typing.Optional[core.File] = OMIT, + quality_preset: typing.Optional[str] = OMIT, + title: typing.Optional[str] = OMIT, + author: typing.Optional[str] = OMIT, + isbn_number: typing.Optional[str] = OMIT, + acx_volume_normalization: typing.Optional[bool] = OMIT, + volume_normalization: typing.Optional[bool] = OMIT, + pronunciation_dictionary_locators: typing.Optional[typing.List[str]] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> AddProjectResponseModel: """ @@ -835,15 +835,15 @@ async def add( default_title_voice_id: str, default_paragraph_voice_id: str, default_model_id: str, - from_url: typing.Optional[str] = None, - from_document: typing.Optional[core.File] = None, 
- quality_preset: typing.Optional[str] = None, - title: typing.Optional[str] = None, - author: typing.Optional[str] = None, - isbn_number: typing.Optional[str] = None, - acx_volume_normalization: typing.Optional[bool] = None, - volume_normalization: typing.Optional[bool] = None, - pronunciation_dictionary_locators: typing.Optional[typing.List[str]] = None, + from_url: typing.Optional[str] = OMIT, + from_document: typing.Optional[core.File] = OMIT, + quality_preset: typing.Optional[str] = OMIT, + title: typing.Optional[str] = OMIT, + author: typing.Optional[str] = OMIT, + isbn_number: typing.Optional[str] = OMIT, + acx_volume_normalization: typing.Optional[bool] = OMIT, + volume_normalization: typing.Optional[bool] = OMIT, + pronunciation_dictionary_locators: typing.Optional[typing.List[str]] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> AddProjectResponseModel: """ diff --git a/src/elevenlabs/pronunciation_dictionary/client.py b/src/elevenlabs/pronunciation_dictionary/client.py index 84a4c4c9..536cb817 100644 --- a/src/elevenlabs/pronunciation_dictionary/client.py +++ b/src/elevenlabs/pronunciation_dictionary/client.py @@ -36,9 +36,9 @@ def add_from_file( self, *, name: str, - file: typing.Optional[core.File] = None, - description: typing.Optional[str] = None, - workspace_access: typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess] = None, + file: typing.Optional[core.File] = OMIT, + description: typing.Optional[str] = OMIT, + workspace_access: typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> AddPronunciationDictionaryResponseModel: """ @@ -465,9 +465,9 @@ async def add_from_file( self, *, name: str, - file: typing.Optional[core.File] = None, - description: typing.Optional[str] = None, - workspace_access: typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess] = None, + file: typing.Optional[core.File] = OMIT, 
+ description: typing.Optional[str] = OMIT, + workspace_access: typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> AddPronunciationDictionaryResponseModel: """ diff --git a/src/elevenlabs/speech_to_speech/client.py b/src/elevenlabs/speech_to_speech/client.py index ecd7cd4e..5aa8427f 100644 --- a/src/elevenlabs/speech_to_speech/client.py +++ b/src/elevenlabs/speech_to_speech/client.py @@ -30,9 +30,9 @@ def convert( enable_logging: typing.Optional[bool] = None, optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None, output_format: typing.Optional[OutputFormat] = None, - model_id: typing.Optional[str] = None, - voice_settings: typing.Optional[str] = None, - seed: typing.Optional[int] = None, + model_id: typing.Optional[str] = OMIT, + voice_settings: typing.Optional[str] = OMIT, + seed: typing.Optional[int] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.Iterator[bytes]: """ @@ -134,9 +134,9 @@ def convert_as_stream( enable_logging: typing.Optional[OptimizeStreamingLatency] = None, optimize_streaming_latency: typing.Optional[OutputFormat] = None, output_format: typing.Optional[str] = None, - model_id: typing.Optional[str] = None, - voice_settings: typing.Optional[str] = None, - seed: typing.Optional[int] = None, + model_id: typing.Optional[str] = OMIT, + voice_settings: typing.Optional[str] = OMIT, + seed: typing.Optional[int] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.Iterator[bytes]: """ @@ -254,9 +254,9 @@ async def convert( enable_logging: typing.Optional[bool] = None, optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None, output_format: typing.Optional[OutputFormat] = None, - model_id: typing.Optional[str] = None, - voice_settings: typing.Optional[str] = None, - seed: typing.Optional[int] = None, + model_id: typing.Optional[str] = OMIT, + voice_settings: 
typing.Optional[str] = OMIT, + seed: typing.Optional[int] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.AsyncIterator[bytes]: """ @@ -366,9 +366,9 @@ async def convert_as_stream( enable_logging: typing.Optional[OptimizeStreamingLatency] = None, optimize_streaming_latency: typing.Optional[OutputFormat] = None, output_format: typing.Optional[str] = None, - model_id: typing.Optional[str] = None, - voice_settings: typing.Optional[str] = None, - seed: typing.Optional[int] = None, + model_id: typing.Optional[str] = OMIT, + voice_settings: typing.Optional[str] = OMIT, + seed: typing.Optional[int] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.AsyncIterator[bytes]: """ diff --git a/src/elevenlabs/text_to_speech/client.py b/src/elevenlabs/text_to_speech/client.py index 95d17fa3..87e8193e 100644 --- a/src/elevenlabs/text_to_speech/client.py +++ b/src/elevenlabs/text_to_speech/client.py @@ -113,9 +113,9 @@ def convert( output_format="mp3_22050_32", text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”", voice_settings=VoiceSettings( - stability=0.1, - similarity_boost=0.3, - style=0.2, + stability=0.5, + similarity_boost=0.75, + style=0.0, ), ) """ @@ -688,9 +688,9 @@ async def main() -> None: output_format="mp3_22050_32", text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”", voice_settings=VoiceSettings( - stability=0.1, - similarity_boost=0.3, - style=0.2, + stability=0.5, + similarity_boost=0.75, + style=0.0, ), ) diff --git a/src/elevenlabs/voices/client.py b/src/elevenlabs/voices/client.py index 747515c7..40f3f645 100644 --- a/src/elevenlabs/voices/client.py +++ b/src/elevenlabs/voices/client.py @@ -387,8 +387,8 @@ def add( *, name: str, files: typing.List[core.File], - description: typing.Optional[str] = None, - labels: typing.Optional[str] = None, + description: typing.Optional[str] = OMIT, + labels: 
typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> AddVoiceResponseModel: """ @@ -470,9 +470,9 @@ def edit( voice_id: str, *, name: str, - files: typing.Optional[typing.List[core.File]] = None, - description: typing.Optional[str] = None, - labels: typing.Optional[str] = None, + files: typing.Optional[typing.List[core.File]] = OMIT, + description: typing.Optional[str] = OMIT, + labels: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.Optional[typing.Any]: """ @@ -770,9 +770,9 @@ def get_shared( def get_similar_library_voices( self, *, - audio_file: typing.Optional[core.File] = None, - similarity_threshold: typing.Optional[float] = None, - top_k: typing.Optional[int] = None, + audio_file: typing.Optional[core.File] = OMIT, + similarity_threshold: typing.Optional[float] = OMIT, + top_k: typing.Optional[int] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> GetLibraryVoicesResponse: """ @@ -1317,8 +1317,8 @@ async def add( *, name: str, files: typing.List[core.File], - description: typing.Optional[str] = None, - labels: typing.Optional[str] = None, + description: typing.Optional[str] = OMIT, + labels: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> AddVoiceResponseModel: """ @@ -1408,9 +1408,9 @@ async def edit( voice_id: str, *, name: str, - files: typing.Optional[typing.List[core.File]] = None, - description: typing.Optional[str] = None, - labels: typing.Optional[str] = None, + files: typing.Optional[typing.List[core.File]] = OMIT, + description: typing.Optional[str] = OMIT, + labels: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.Optional[typing.Any]: """ @@ -1732,9 +1732,9 @@ async def main() -> None: async def get_similar_library_voices( self, *, - audio_file: typing.Optional[core.File] = None, - similarity_threshold: typing.Optional[float] = None, - top_k: 
typing.Optional[int] = None, + audio_file: typing.Optional[core.File] = OMIT, + similarity_threshold: typing.Optional[float] = OMIT, + top_k: typing.Optional[int] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> GetLibraryVoicesResponse: """ From 5f8e82729f3d21e66b7333c559de2e6a25b2e51a Mon Sep 17 00:00:00 2001 From: louisjoecodes Date: Tue, 15 Oct 2024 12:07:18 +0100 Subject: [PATCH 02/45] feat: add issue templates --- .github/ISSUE_TEMPLATE/1-bug_report.yml | 51 +++++++++++++++++++ .github/ISSUE_TEMPLATE/2-feature_request.yml | 52 ++++++++++++++++++++ .github/ISSUE_TEMPLATE/config.yml | 5 ++ 3 files changed, 108 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/1-bug_report.yml create mode 100644 .github/ISSUE_TEMPLATE/2-feature_request.yml create mode 100644 .github/ISSUE_TEMPLATE/config.yml diff --git a/.github/ISSUE_TEMPLATE/1-bug_report.yml b/.github/ISSUE_TEMPLATE/1-bug_report.yml new file mode 100644 index 00000000..03214f09 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/1-bug_report.yml @@ -0,0 +1,51 @@ +name: Bug report +description: Create a bug report for the ElevenLabs Python SDK +labels: [bug] +body: + - type: markdown + attributes: + value: | + Thank you for reporting a bug with the **ElevenLabs Python SDK**. Please fill out the sections below to help us understand and resolve the issue. + + **Note:** The ElevenLabs Python SDK is **auto-generated from [Fern](https://www.buildwithfern.com/)** and is a wrapper around our OpenAPI specification. Direct modifications to the SDK code may be overwritten in future releases. + + - type: textarea + attributes: + label: Description + description: | + **Describe the bug in detail and provide clear steps to reproduce it.** + + Include information such as: + - What you were trying to achieve. + - What happened instead. + - Any relevant parameters or configurations. + placeholder: | + **Steps to reproduce:** + 1. ... + 2. ... 
+ + **Expected behavior:** + + **Actual behavior:** + validations: + required: true + - type: textarea + attributes: + label: Code example + description: Provide an example code snippet that has the problem + placeholder: | + ```python + from elevenlabs import generate, play + + # Your code here + ``` + - type: textarea + attributes: + label: Additional context + description: | + Add any other context or screenshots about the problem here. + placeholder: | + - Related issues: + - Possible workaround: + - Logs + - ... diff --git a/.github/ISSUE_TEMPLATE/2-feature_request.yml b/.github/ISSUE_TEMPLATE/2-feature_request.yml new file mode 100644 index 00000000..1f6384f4 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/2-feature_request.yml @@ -0,0 +1,52 @@ +name: Feature Request +description: Propose a new feature for the ElevenLabs Python SDK +labels: [enhancement] +body: + - type: markdown + attributes: + value: | + Thank you for taking the time to propose a new feature for the **ElevenLabs Python SDK**. Please provide detailed information below to help us understand your proposal. + + **Note:** The ElevenLabs Python SDK is **auto-generated from [Fern](https://fern.dev)** and is a wrapper around our OpenAPI specification. Additions made directly to the SDK code may be overwritten in future releases. We recommend opening an issue to discuss your feature request so we can explore how it fits into our API. + + - type: textarea + attributes: + label: Feature Description + description: A detailed description of the feature you are proposing for the SDK. Include information such as the problem it solves, how it would work, and any relevant APIs or modules it would involve. + placeholder: | + **Feature description:** + + - What is the feature? + - What problem does it solve? + - How do you envision it working? + validations: + required: false + - type: textarea + attributes: + label: Use Case + description: Provide one or more use cases where this feature would be beneficial. 
+ placeholder: | + **Use case:** + + - Describe a scenario where this feature would be useful. + - type: textarea + attributes: + label: Alternatives Considered + description: Describe any alternative solutions or features you've considered. + placeholder: | + **Alternatives considered:** + + - Other ways to solve the problem? + - Why do these alternatives fall short? + validations: + required: false + - type: textarea + attributes: + label: Additional Context + description: Any extra information, references, or screenshots that might help us understand your feature request. + placeholder: | + - Related issues or discussions: + - Relevant links: + - Screenshots or mockups: + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..86bca335 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: true +contact_links: + - name: Ask a question (Discord) + url: https://discord.com/invite/elevenlabs + about: Please ask questions in our discussions forum. From 0ed3f59b419a8e80f00fa987fff46ee07638534f Mon Sep 17 00:00:00 2001 From: louisjoecodes Date: Wed, 16 Oct 2024 11:07:06 +0100 Subject: [PATCH 03/45] fix: add api key warning --- .github/ISSUE_TEMPLATE/1-bug_report.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/1-bug_report.yml b/.github/ISSUE_TEMPLATE/1-bug_report.yml index 03214f09..2f879fc0 100644 --- a/.github/ISSUE_TEMPLATE/1-bug_report.yml +++ b/.github/ISSUE_TEMPLATE/1-bug_report.yml @@ -32,7 +32,7 @@ body: - type: textarea attributes: label: Code example - description: Provide an example code snippet that has the problem + description: Provide an example code snippet that has the problem (Make sure to **NOT** upload or expose your API key). 
placeholder: | ```python from elevenlabs import generate, play From 599addddc7d73539c7936fbbb892bd3d04379500 Mon Sep 17 00:00:00 2001 From: louisjoecodes Date: Wed, 16 Oct 2024 21:15:43 +0100 Subject: [PATCH 04/45] chore: update available api docs --- README.md | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 884c8df7..edba12ad 100644 --- a/README.md +++ b/README.md @@ -105,7 +105,19 @@ save(audio, "my-file.mp3") [![Open in Spaces](https://img.shields.io/badge/🤗-Open%20in%20Spaces-blue.svg)](https://huggingface.co/spaces/elevenlabs/tts) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/gist/flavioschneider/49468d728a816c6538fd2f56b3b50b96/elevenlabs-python.ipynb) -We support two main models: the newest `eleven_multilingual_v2`, a single foundational model supporting 29 languages including English, Chinese, Spanish, Hindi, Portuguese, French, German, Japanese, Arabic, Korean, Indonesian, Italian, Dutch, Turkish, Polish, Swedish, Filipino, Malay, Russian, Romanian, Ukrainian, Greek, Czech, Danish, Finnish, Bulgarian, Croatian, Slovak, and Tamil; and `eleven_monolingual_v1`, a low-latency model specifically trained for English speech. +### Main Models + +1. **Eleven Multilingual v2** (`eleven_multilingual_v2`) + - Excels in stability, language diversity, and accent accuracy + - Supports 29 languages + - Recommended for most use cases + +2. **Eleven Turbo v2.5** (`eleven_turbo_v2_5`) + - High quality, lowest latency + - Ideal for developer use cases where speed is crucial + - Supports 32 languages + +For more detailed information about these models and others, visit the [ElevenLabs Models documentation](https://elevenlabs.io/docs/speech-synthesis/models). 
```py from elevenlabs import play From 3342e41a315233f283a1755af8d58e7e5d3136b7 Mon Sep 17 00:00:00 2001 From: louisjoecodes Date: Wed, 16 Oct 2024 17:06:00 +0100 Subject: [PATCH 05/45] feat: add security note --- .github/SECURITY.md | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .github/SECURITY.md diff --git a/.github/SECURITY.md b/.github/SECURITY.md new file mode 100644 index 00000000..281d74f9 --- /dev/null +++ b/.github/SECURITY.md @@ -0,0 +1,11 @@ +# Reporting Security Issues + +If you believe you have found a security vulnerability, we encourage you to let us know right away. + +We will investigate all legitimate reports and do our best to quickly fix the problem. + +Email `security@elevenlabs.io` to disclose any security vulnerabilities. + +## Bug Bounty Program + +Please note that ElevenLabs does not offer bug bounties or participate in any bug bounty program. This policy is in place to comply with anti-money laundering laws and regulations. From 126828936033dcf8075101d321fe7036c9698b83 Mon Sep 17 00:00:00 2001 From: marcinn <01171345@pw.edu.pl> Date: Mon, 7 Oct 2024 00:42:19 +0200 Subject: [PATCH 06/45] rename 'Rachel' to 'Sarah' --- README.md | 2 +- src/elevenlabs/client.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index edba12ad..0299c67c 100644 --- a/README.md +++ b/README.md @@ -129,7 +129,7 @@ client = ElevenLabs( audio = client.generate( text="Hello! 你好! Hola! नमस्ते! Bonjour! こんにちは! مرحبا! 안녕하세요! Ciao! Cześć! Привіт! 
வணக்கம்!", - voice="Rachel", + voice="Sarah", model="eleven_multilingual_v2" ) play(audio) diff --git a/src/elevenlabs/client.py b/src/elevenlabs/client.py index a0dac65d..348e2884 100644 --- a/src/elevenlabs/client.py +++ b/src/elevenlabs/client.py @@ -19,7 +19,7 @@ DEFAULT_VOICE = Voice( voice_id="EXAVITQu4vr4xnSDxMaL", - name="Rachel", + name="Sarah", settings=VoiceSettings( stability=0.71, similarity_boost=0.5, style=0.0, use_speaker_boost=True ), @@ -134,7 +134,7 @@ def generate( """ - text: Union[str, Iterator[str]]. The string or stream of strings that will get converted into speech. - - voice: str. A voice id, name, or voice response. Defaults to the Rachel voice. + - voice: str. A voice id, name, or voice response. Defaults to the Sarah voice. - model: typing.Optional[str]. Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for text to speech, you can check this using the From f6bb2ed7e7b331dcbcf858664a586e6ae9a3ffaf Mon Sep 17 00:00:00 2001 From: Louis J <132601011+louisjoecodes@users.noreply.github.com> Date: Thu, 17 Oct 2024 12:39:14 +0200 Subject: [PATCH 07/45] chore: update security policy --- .github/SECURITY.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/SECURITY.md b/.github/SECURITY.md index 281d74f9..129451f6 100644 --- a/.github/SECURITY.md +++ b/.github/SECURITY.md @@ -4,8 +4,6 @@ If you believe you have found a security vulnerability, we encourage you to let We will investigate all legitimate reports and do our best to quickly fix the problem. -Email `security@elevenlabs.io` to disclose any security vulnerabilities. - ## Bug Bounty Program -Please note that ElevenLabs does not offer bug bounties or participate in any bug bounty program. This policy is in place to comply with anti-money laundering laws and regulations. +Please note that ElevenLabs does not offer cash-based rewards for vulnerability reports at this time. 
From 24c75d2712217ff799cc98350d253e90a023c053 Mon Sep 17 00:00:00 2001 From: louisjoecodes Date: Thu, 17 Oct 2024 13:49:54 +0100 Subject: [PATCH 08/45] chore: minor readme updates --- README.md | 142 ++++++++-------------------------------------- assets/module.png | Bin 138552 -> 0 bytes 2 files changed, 23 insertions(+), 119 deletions(-) delete mode 100644 assets/module.png diff --git a/README.md b/README.md index 0299c67c..3e0e5805 100644 --- a/README.md +++ b/README.md @@ -20,94 +20,15 @@ Check out the [HTTP API documentation](https://elevenlabs.io/docs/api-reference) pip install elevenlabs ``` -## v0.x to v1.x Migration Guide -> The SDK was rewritten in v1 and is now programmatically generated from our OpenAPI spec. As part of this release -> there are some breaking changes. - - -### Client Instantiation -The SDK now exports a client class that you must instantiate to call various -endpoints in our API. - -```python -from elevenlabs.client import ElevenLabs - -client = ElevenLabs( - api_key="..." # Defaults to ELEVEN_API_KEY -) -``` -As part of this change, there is no longer a `set_api_key` and `get_api_key` method exported. - -### HTTPX -The SDK now uses httpx under the hood. This allows us to export an async client in addition to -a synchronous client. Note that you can pass in your own httpx client as well. - -```python -from elevenlabs.client import AsyncElevenLabs - -client = AsyncElevenLabs( - api_key="...", # Defaults to ELEVEN_API_KEY - httpx_client=httpx.AsyncClient(...) -) -``` - -### Removing Static Methods -There are no longer static methods exposed directly on objects. For example, -instead of `Models.from_api()` you can now do `client.models.get_all()`. 
- -The renames are specified below: - - `User.from_api()` -> `client.users.get()` - - `Models.from_api()` -> `client.models.get_all()` - - `Voices.from_api()` -> `client.voices.get_all()` - - `History.from_api()` -> `client.history.get_all()` - - -### Exported functions -The SDK no longer exports top level functions `generate`, `clone`, and `voices`. Instead, -everything is now directly attached to the client instance. - -#### `generate` -> `client.generate` - -The generate method is a helper function that makes it easier to consume the -text-to-speech APIs. If you'd rather access the raw APIs, simply use `client.text_to_speech`. - -#### `clone` -> `client.clone` - -The clone method is a helper function that wraps the voices add and -get APIs. If you'd rather access the raw APIs, simply use `client.voices.add()`. - -#### `voice` -> `client.voices.get_all()` - -To get all your voices, use `client.voices.get_all()`. - -#### `play`, `stream` and `save` - -The SDK continues to export the `play`, `stream` and `save` methods. Under the hood, these methods -use ffmpeg and mpv to play audio streams. - -```python -from elevenlabs import play, stream, save - -# plays audio using ffmpeg -play(audio) -# streams audio using mpv -stream(audio) -# saves audio to file -save(audio, "my-file.mp3") -``` - - ## 🗣️ Usage + [![Open in Spaces](https://img.shields.io/badge/🤗-Open%20in%20Spaces-blue.svg)](https://huggingface.co/spaces/elevenlabs/tts) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/gist/flavioschneider/49468d728a816c6538fd2f56b3b50b96/elevenlabs-python.ipynb) ### Main Models 1. **Eleven Multilingual v2** (`eleven_multilingual_v2`) + - Excels in stability, language diversity, and accent accuracy - Supports 29 languages - Recommended for most use cases @@ -129,7 +50,7 @@ client = ElevenLabs( audio = client.generate( text="Hello! 你好! Hola! नमस्ते! Bonjour! こんにちは! مرحبا! 안녕하세요! Ciao! Cześć! Привіт! 
வணக்கம்!", - voice="Sarah", + voice="Brian", model="eleven_multilingual_v2" ) play(audio) @@ -137,15 +58,14 @@ play(audio)
Play - Don't forget to unmute the player! - -[audio (3).webm](https://github.com/elevenlabs/elevenlabs-python/assets/12028621/778fd3ed-0a3a-4d66-8f73-faee099dfdd6) + 🎧 **Try it out!** Want to hear our voices in action? Visit the [ElevenLabs Voice Lab](https://elevenlabs.io/voice-lab) to experiment with different voices, languages, and settings.
## 🗣️ Voices List all your available voices with `voices()`. + ```py from elevenlabs.client import ElevenLabs @@ -158,28 +78,9 @@ audio = client.generate(text="Hello there!", voice=response.voices[0]) print(response.voices) ``` -
Show output - -```py -[ - Voice( - voice_id='21m00Tcm4TlvDq8ikWAM', - name='Rachel', - category='premade', - settings=None, - ), - Voice( - voice_id='AZnzlk1XvdvUeBnXmlld', - name='Domi', - category='premade', - settings=None, - ), -] -``` - -
+For information about the structure of the voices output, please refer to the [official ElevenLabs API documentation for Get Voices](https://elevenlabs.io/docs/api-reference/get-voices). -Build a voice object with custom settings to personalize the voice style, or call +Build a voice object with custom settings to personalize the voice style, or call `client.voices.get_settings("your-voice-id")` to get the default settings for the voice. ```py @@ -191,9 +92,9 @@ client = ElevenLabs( ) audio = client.generate( - text="Hello! My name is Bella.", + text="Hello! My name is Brian.", voice=Voice( - voice_id='EXAVITQu4vr4xnSDxMaL', + voice_id='nPczCjzI2devNBz1zQrb', settings=VoiceSettings(stability=0.71, similarity_boost=0.5, style=0.0, use_speaker_boost=True) ) ) @@ -247,10 +148,12 @@ stream(audio_stream) ``` Note that `generate` is a helper function. If you'd like to access -the raw method, simply use `client.text_to_speech.convert_as_stream`. +the raw method, simply use `client.text_to_speech.convert_as_stream`. ### Input streaming + Stream text chunks into audio as it's being generated, with <1s latency. Note: if chunks don't end with space or punctuation (" ", ".", "?", "!"), the stream will wait for more text. + ```py from elevenlabs.client import ElevenLabs from elevenlabs import stream @@ -265,8 +168,8 @@ def text_stream(): audio_stream = client.generate( text=text_stream(), - voice="Nicole", - model="eleven_monolingual_v1", + voice="Brian", + model="eleven_multilingual_v2", stream=True ) @@ -274,11 +177,11 @@ stream(audio_stream) ``` Note that `generate` is a helper function. If you'd like to access -the raw method, simply use `client.text_to_speech.convert_realtime`. +the raw method, simply use `client.text_to_speech.convert_realtime`. +## Async Client -## Async Client -Use `AsyncElevenLabs` if you want to make API calls asynchronously. +Use `AsyncElevenLabs` if you want to make API calls asynchronously. 
```python import asyncio @@ -296,19 +199,20 @@ async def print_models() -> None: asyncio.run(print_models()) ``` -## Elevenlabs module -All of the ElevenLabs models are nested within the elevenlabs module. +## Elevenlabs Namespace + +All of the ElevenLabs models are nested within the elevenlabs module. -![Alt text](assets/module.png) + ## Languages Supported -We support 29 languages and 100+ accents. Explore [all languages](https://elevenlabs.io/languages). +We support 32 languages and 100+ accents. Explore [all languages](https://elevenlabs.io/languages). ## Contributing -While we value open-source contributions to this SDK, this library is generated programmatically. Additions made directly to this library would have to be moved over to our generation code, otherwise they would be overwritten upon the next generated release. Feel free to open a PR as a proof of concept, but know that we will not be able to merge it as-is. We suggest opening an issue first to discuss with us! +While we value open-source contributions to this SDK, this library is generated programmatically. Additions made directly to this library would have to be moved over to our generation code, otherwise they would be overwritten upon the next generated release. Feel free to open a PR as a proof of concept, but know that we will not be able to merge it as-is. We suggest opening an issue first to discuss with us! On the other hand, contributions to the README are always very welcome! 
diff --git a/assets/module.png b/assets/module.png deleted file mode 100644 index f5f151d962c13f947dcde034cf87d180292d9fdf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 138552 zcmeFYWmsIvwlE9?cY-y+A-FY~;Fe&)LvU-{T^e`yKyV2j!QGwUbZ~cfcY8ZCbMBov zH~0Mdp6}P~XYbxswX14ZtyQ(W!W88tF;HKi!oa{_NK3s@hJitlfPsN)Lq>#_=wW?f zgn>a9HWwFHlol7KP;{_0F}E^?fsqRPq=uyazK5;bxQ>;|_;%aa4_$gevhI6Lx3`E~*5Tdzk$f0gGlzQ5>*dE?rWpO_7 zIvx%-yTW`X-;#b=|AiLDZ57U3+l<1|Nro~094{7@)C`VAzjtlx-84IU9899$<6|(kKNBhfeSmxn z?(W+{KXc(UH3%g6EjVzuZP(Oq65wQz-cL9iOSw_@;a($CWkv09 zuZ5W|u6WcZgJ%8PIeS!2QJ_)iS8OeU@Q*nZlfKgo!gNAn#+()O5xCs?)StTh)@lP^ z%hO|GsO%aB5sk*a{^Vt@``G4Lj-f$ilYn}}(kp}feiO-Z;|%YlvtB8ua3lwrCqpPa2{wGP2%;rlGIE9Am*&E9q(ue~F&HcC2z+nQ6-PJy$>-Xa z#{iBTAMjRtf5^#Z2jkJx)jiBz21uM%xw{upfMe%bVwb)FHhUyMtf~{kUU(iUCU6Qy zUB`~#kQ*3tbQwq&h}j|D!7WC&G6}C#eqKl~=_mh%Jg5KdZA^VzadrVeS?HaZ^OwlI zmEblU*8uP7I^pwuOpRzp8#mQtVe=s6MaP&38G2>-ZcH}B?Tem&; z^oDZegRe}>woISW$t&1=t-BzJ({NVgE{Obo8J(DhQNCfMovCF!`=KOQB_%!K6Wq%z z@;C~Z<2VnUOKtI=OgWb7hwJBVA}UHnUcY^eMobU;_*GfSL=`q3qZL_U&0QM4pZ2UV27gp%yoHp2aZjI4U8m2jVvZ+hWw0OL>7ms3)Ws# zJ(T=;Fa@}AFxj#GKW2K1t=lZ^V!i;+<*^jfathG*}7 z!L*3f%LbiicPow=JhlI`_Ov#dNLr)kw74?J8-Xl=&jR1uu{q-`;)J~s%)uDJJ?zFa z;IV&Uk7LhbPk8dWJm5NXT9PUbT?+H1o-$XMBqPPA*vVLz*yuRc_y#HRUJIl56==MH z=`sZI5q%r$2kW`(1M7wB5InhX{O+{payW_w%880R?^78F8FUgLDlFeHCX!M`h6;wW zRTcNX7JhO4YCAqYJ~+`dZZPr2qH=<9+~s@52anH}N;+B#x*54bsyYQN>R#V(?Ju-; zZOyn+xmkP@Wtcv*6ikfDj~JgQxF=^; zlc}+(*@i9ZHt)G-1p}}^SU8v(uk*ZnHzYEsqrW#|iJy6g40xpc6IuuN%R>6$*7_*~kB0`&L7@8fo4#taJ}DGjnboJkt(R>qg9L@7ZjeS;lRT4slV`=m z#aT1kb9UC@R=ZY*v!hlI)^szI)4J8vv*r5^`;)Ue6_N)F2bKHZ_u2S9W0GM~g;Zmb zb|QB+1&4)9pf|33IqY>|b}Dl8T+Z9V+fmsc-#&M!+c+Ly83@iIdK{Wr;GJ@-Jj=kD zrS6c#i!mcRneAQ-I#S+nnE15$VwG>8y6@ssT0q)Q>x?-$*Wf$)tD8fXLp?zO!921H zvW3jVO!Lf%%vb7sMSw}FNewoCwa-(`rJcIu&4Nv6O;_$u8*AI~Yn2OIGg4`B9dT)r zLZ{qU8#gxR2j_;j`8U?bd*`ptwr-}bh0)89vEi*y!2LCbrj@Q`dWd{TGS9H!TeiTC zCU((CAkl2me9?pU+4ivZPwiLYgW??GyJGI*3*v=AWLVUJZv$D-8r!)yPwT7a7wsU& 
zL@UKOyErE}pK!3K52*9whH`%@t>mgkHV_-*(?nZDB96>& zEfhwK^GfhgR)E@D?EdoF6~zzb-Sfkjv$XxswLWf38^&A2FVZ2=uqhmp2zs7Jju0xN zzkkCZ_cd=kZ=B_;m!W@8(Tg$eNiVCu>Af|pA96&lHcczn^CyzDKk1HX_uo!{4FVR+hdZ6-9+*VWna1C-8UH5ulRS;TOC0Cu*V7EJ#6PvSCurhW% zrdg0Cm&N(ej(!;##Tl2f!>MJWF_`-5yQRzazO~~QM~BUO-F@w*rt5?G_4=W+$gQEw ztL>zpdb=`QGI=zwHtz=YT4D4kG=H}IbY4jfT9QG?-x^3C=pSfdUS;EZ+6&cV@;=Z@ zFR$|o>-p&XF6>?RyHd6@y&CKK$#~F(~fAEv==biZx62L%8WI^l7_$?Y>8^VN)<$kio~|w(2rvpt5s;av>=* z#%=LJ>~z&pp)X6*N42%etNy9fw{_NC^2(+8W6Q2j^LA8wTCwfIV$FH?`GQZ<)#2IQ zC>A1ljhB&^z!}5M*cc?fHMT{?v-)oKo@I4z;j!t_8%3f2{XMFFSq>2qOw%GN3=0Ab zqCl2X4?V;u+@Bs&gbM$NtD1>}-~G(j{rZV)?%9)#4NRE_r+~Mp*R48?ff>w8Fzon* za~Wp(1m2d68)b)MTX1sej@!=i65dA2SxB9aji2@e#{K*wnzOK{wsJ^WGgPApH&&N6 zk&}agsueKEFz~Q1U=W}wSm-STOZ;DH30QiVXMdE#!N7!=!@&PlM;`k8^^1kxzuNry zeD*OI1__#b4!yyj;r>+{LE`hXf2HBtpzmNr--}C2L!a*r9gK}_9L;QI2Z1gXrNob346*j!y*SzS3S6*m-$**?=5u92_jr8Z3@(HclWgi;W}opPl@x zpEt&ih7RU-PUf~Y6ui>^7|NX_kwN(G_mh7C|z`r;B+pB+X z`oYoILEP3F`b{UH|Hju}jsO1guZ99_zoz~-Q2Yt!KT4s17D5$Z`!A#kp&HN=s6+8c zV*W-!1^R@pvR^+~edr(kpHFBS7Jp3|md+doMg&Isji?G3c0U85>BHNpAA}3eJJ_r& z$rJ`m;X2c|yOxG@jMEDXq*S2XVH8ciUX zH;{c^)67bf0`2dr#gp$CG>1~TH#*SRxD@u6T3hdLwz9f2z3)$Y-hn{bMe5~ZhmA)q z22=Jqpw3{2A4I0fmgJtte)ktsdM2&5OD)P@m1%AcYM0h4Awry6=UIj^f~vDM)=X%G ztO=6jUMhP1vn_I+$19y1%~y->7#53b7d0;zJ#4>fR$B~u?M;;?`8?i8F1qirXf(PM z*Q*xFlky%lof&j=^IxBjDHtCww~sb^xSpX0Ha|Vy8Q+|4oY0qU=U6$Z%e1Kb<-WO12DfFRz}iFfCDrETLujX|yX;g_de$r#MeWw)n?BcCCAl}DEM355Ma z)lc_Z!lMn&mKtUHegc=%hMNww8xt*F9=x~PxpD$ng6FZ!8Wmk$dAudc`Qaqa<4XPl z2~29Nw1XAK1M6!s!qI(}vM>Rh^|kpulMC#t^0<~2Z4MDU>B8l(T@_>2H{{2Zb;shrw_kWEXXAIqad{WAH*aEE0=gPPQIlKEal~H9OuPM*77e(m<@~I*% zU<<1PM`Zh%H2ivfksp2M618j?4rZ%4BPk-aJT4~Gj5`8RE`ckPlY--<p;^oasq#g-*50tA$`w<{e-tuSpp{l$qRWg%_3V_(e!J?y?-r zR?Guv8#)yrS>dqpQMNT_E);j_M#?pdU5IpJUas*Bgm<}`GkI!NZkRM8* zgab5M?`Z1C_a|1c7#ToSYNfgr-2_^1+^-tlYJ2*Qt3eCa92aC+l7xDm<@Fa2S4&B) zPmgm^!96cU;Qn#Na`>g=NqqnCo!0C2oINGwwS7+0X+IM?kvI}c=O=cj@r(QYg~nPg zi*YH6Q<1t+M9|5+BZPZ3z#|JYX{QCBPMJss>jXAax(rJ6@z0)*!95eW1$sV5R6<*w 
zY;fL-{IY?$Vzt;*zhAc&O}kH*cy;%F<~3!7n4N^_0%`R3AO+-?*tsj*L8k$}4$FQ>vt|atqiZjB zxc~;pyt@TVFtZI2Wv=t~Aq?;L={T4+^B5yLm~Ogo6}WB!uQ5b>P!Gd0cN#J6aW@b= zpi9O0WVIUvEbD4C?e=1c%!$tQJnf_1pR;Xc$(4vgJ?b2elO$sx<#*xQT1ClK@~^x& zje6m2DHyGk=|V3^mT2QfLyetXB69pzN2FoF9IZ$lMZp9NptHP`gafB^%yAJ_Lc*U* z;7B$wb1=+(sVclXq@t1co=Z+ev-_I)ukBUd@ud`_^TH`))KK&J zoyIyY#mY|DCCN>9FZwvQI)^8e8m3v-q?nf;7pZu^76ZRPiG}&c^#-R`8iW_t-Q<_- zffals4rQ07)FSwZj!K76>?_yW=r|5#6ykM^*UZ^imOPveu$@AAGqE2EsT25x=(+z* zpb{9NogWuY`kP}ws?^g%d^9jziyfUPLq0S;xpv6qbsR0Sx>O^-WFbNAPthMgCBZRN zm|&nE(i{HTv&gTgzy#djdrSbZO#my0qv^H?`(jK%xbM}Hr(ws6Lc^~uih8nqe+I$Y zDhkwxT>;z#sNr_KW|GRIikt-%_2TXloQB;gZ7C`MOBn|7CG6!ridF#P@+(1T0c}BG zyH=*0woanVVIVqZk3cN;V>`)W0ycp@HX`!E$7yJv^9rU9h!R~S_g}30DwPulYY@88!jd!Ymx7SW{@8>NMy!NS^ z#e>nqZa~yJAiOiJwGc^9f^5M%D01X7M-Z3K5RV+71pH(gfb*KDC1w0h$7QCw3JH7| zfF+cyk``FT^_&9sA6KX(g=vtI)0Y*TXG*|+?GelsAUO?zu|L2JBZHqQQxq)P1EGsF zXpt9i-5vjW3Ki$3=iVO#f3*%nlz{!)fo*wuS zGH-UmN5Nx^V)O@{E{3xQU!1$gbrd}>AYX-stSw9>vgm9E~#4|-w-P0s#Kd)3L%KWf`o^Xln`qa$qG0R1X|JU5V=5C55r|8NppGrR;$nk;!Sz*& zL;2B1rbw}uf$Cod-W6gvYHO_sUfKnm)c|1r7ncBtJ7A%cH%E!e)z8xu++?Kyou`#NqL%LJT8n~ObJ%c~>t_XhPZ&L@G?oXF*x?!8@ zyerf$YFl@2nytI=S8Q>+Uv76lZbu?!qxHZFd@DfjvMCl$ugzVH32q6oBxA?)GwS zHcT8b_mk^(9zR|-``Fx`m)q!hjXmEkb6Il#Mz{7goe}LW(rI9F@yKS(?HA{41b;f> zgb^Mk^*SG6k66jJF}!dG?{wbb2J;0rBoyV3WzN>E2%`Cow{TvMpdS#TzyG3-!+RA1 z=fO5BC`$y_iDjw|(j{I5&0}F1J*<(;G9I@}VOsYcLwD|8Gyd`Wn{0`~n7e;4Qz&m; zrdHhT_`-(Mmmbgq2z-g`)fbMi?K|_Sd!hc-g6S@T?)(kTLRr2&dNzE9kCO#u11PPC z5`v7S&~KzJ+6JfPl+cbsx@5$X;ULXpqxU-04e$d6L`+sZ19ZAwIY7YSiu(*32Lp!^ z=)xSi_M#s{j3f{hnf6#9f+J8=0~9?fyLl-q35b$;Uo)I5uYtukzGSqU?ZwkHv78dp zkc>*$bcCHkVI$53sWJZELTsudSx!tUu#qtdj* zfFVC6(*Hr1dJ+9Ns3T3ND^FI-9JfJV1cPrzrP?IhCULbL)gFU;QLtEK=FmhqwtZAb1jpo}d>z>aM8auncDKWhx2^z7&Ww|>6dH23o5S&21N`3*4#ycYQ znfZBH1S`k}G{`PX{ZArJ;b-;yGw_y8Zje3P?WRosc}S*7W%!1zcJdLPV>FN1&$huv zBsQfY#;ktN)S1uk)z!_Kz81))d>lpHoB{K!_3FT3OR}pmZ5j@4HpSMb85S^;qkN9) zo^My5XZ^$*au$bh_U7LtK`%HwiAqQU`^SHR^zTB(`fDh7d38l($^IdD{Jl!7$R&y* 
zU`Xiw52=6l|98PNb`73BauW>8?)Gm`{d;zXP6WhtCrd>AAHEfh`x%(JhS0Sh_rDL? zC<`WNWS`SG^6#DhTbftix5F%&5IbJ-KMeZ+UjXr>0yP`mt-oqDm$Y*lwxerQn|z4Q zH8_ZMtSPEvUUjj#2*X6knn)!6c&R&Z*|&>< zmZv%Ths;rvK50Hjg50*K)THYHe^TsDK z+F2i=C(%dnq0fvr{Mx@S@;}$SUmo&$An;cI3CU=?D_fc-+3y~nkNfDQhizp{{q^3o zD2Ls-h))}dkXE*g@y^A)U>rjp@I@HyO3>y)?I+UP^i2#&(0D`;-M?#|e<-4-=}_zo zFf2c7AfcB$a7ySKiailukAuZZ>P!FdP4z?nhcDO#51;95Fro+23{i6_6RTVpr;5nt z)rRM_3O_i`9HgEN2?dQ**`3nor*GQyHO8O%7SY-)#pGa~yhZP?hJ+sdBmJTiX`ZQVo{903V_6+T|7FUz9tw zH7g9lxV|ii)YwkxBqER%1o+o;5wd6*l~kM0OEgp+QlTZXCM@+}CEmQ-B#bCgt=0s+ zAYjpZX*pH=$(?(*ucqDW!VZU^ASe`jEQDkcFw0*QQVdOg`%psr zf$2ka$f)C9CY(JUkNd2d+wK`%r=^SO%s1l-hYJ}G^_;m~4Ai{GP3t_}y& zYt@<*7im{@5f5h;saJ=qeA89I{7%3YU;RCoFr-7#B`^207pN?B{F`r|Fo(~6SKZb2 zwHliqrDY=FC%pp#xqS25VmRB`a1PsZ;b2=vwD-nTkp>pe)^k-WC6t>%UIB17$*)}e zi`2`qqctVm_q04w90zz73*x!9rzHsF(-}EKqGbUw(YPBQer7wzX8uqv*3I}lNH9>r zdlumE*fTCjn)pA&i9d;^K1xC=(0%WuRyw{xZr{D+S8! zNcK2#e`4kRvL{;6ZK4&pHHEY~M|!kCX;>J80s(E6;2vSZ|D>YadW<FjG>12(#dw36*g}5rArTJFo!so4)7Y5{MNx>HW#Ir!bmpfdA6^mtyucAVD#8pq!wd&9Zx&*nQt4amk!Ev3QyPLc}!p1%n>bSo!{+v zuQ0%s4{#$#waz~k$v!=>T`8~IH+0Zvwi;%QMJ;becutlnot`x-49tEirvf+guoy$M z*f?u_Z;U|_y&ce`r&J3@rco$EF zLu5k4uXyfoTZvzec|W{&soSoybBFc7g&(uE^=w0qxYTa4=IbyMZG=iI(vt9Ej@eLa zD3{vTyGU*()qcp8088Z)*?}%cPYF;{2wMZ~YPMc|`fYq+U+t7I1$IC#`q_xfj#oO@ z(${xJ?i5y7=>2Yo+b2pR#{$3GlWnvzpT`47_#ZtAxr!EkM({3vJJ$qUERxte46pJ! 
z+`M}*D1q9Gh8noLyCg}$P}=A3bYoR3-`LkDC?AGrxG(6V@ThfocdYBNyZLo~ggl)UP!rQNW_aXO&0F6EDn+?JIo{k|VBrRf z`#M2I^t47DN6}?UO`1baRW==3z4Jz$M2Hpu_b}G%gm9z@i$6T3)8Q%sM22cYq?rbd%>o()OJ%>sEi-fAgSFl^^1l&TWViU++2e zEK>QZ@=}|IDY8VXR-TOPIM&H*QuK1^(pQnf-*gE#OOWziK=vge`6wNp0L0@~Tcg}P zpeKbu!dk55c8}@CdfryK(gI`6Eu8XXk_@>?CPeOq3DyvS_#FA+gVZ%Ta?#P_X>?>7 zJ1byw>1S7Vvm*J^`3c3Np5f|GBu&8EN8kp`D#;7 z%Y!t~)BD>vdhlyo?`v;3_Tky2q$MS;s>RIE=>o<`F(Ci-*Eul|uCw8O&r2Rq1>4F^A4*{>V_t76~?hb5*MYjW&cPb|ZUDXmv^FntZ zEcH&D7)X4#lF2V!c~D!Ew0`tT=%dn+7Xg{%ff1A*Huy3KW3k_;<0TxWe)^t&X(9*T z!4O=jb$36iWqeYVa{1G$8fd}RmgYDZqW9wH=a9?H@Oqza8sBzI+rTxi2f!xHbxGA_ z+ffrNz;ImfuyuA6)vMDiD)@EmTTs=a2jTlI`pDqqI`Q9$X`hjyWb#|4xr?pPp$TUQ zE#^mj2})(LqCg$D>rbPs$#b|o?zd+3XAAs;k3mSfFX!-V3@?w|o@|J4Lj5MSLFXGs zx*toQW|yT^<@@_4Bz17nO>M1hQh02?Y2V!lQ=YD?2^{rt9 zWMNrX&2ovdS@>swP*;y7#}D+;-aZ4=?BcCDqGthPx517cyP=m2%~|6tRv`H((&H#} zuM<))jmw7O(s7JU^&plsK?t}|jAUZ6flfdshO;nsF=n`F6giV8k||RD{*sXBzKiLy zV&>5EW@glAZrH?UB!TGkZS2;Y#AqO@g7&&;$17nZs!AVB9pv_9q_c@OQpZ+~C6ZOIT6@@M zQvg@~shy-?ip*IDyjwxY7NyZEjh3<$bFG{CborAefN0)F2HM{swXK z#P(x#%T}oSoWSNeULVB}=_0hb+(zxB7taNjzr+e;&aw;6J_r%;GJWmICu2A6Ci?*( zc)x+vaqZ)Cd(l>TN=K?wSK2IAh)zNkKo-f;^Ng%s>Dxr9X@~f6;PKaD?^tQ*B+@JnNrd zGxog~f@qr{ju%T`40v{sbJ=m;khBF<@1;Dks}U-s;fF5Ob_F^GjVQ3SNGA@b$rM{u4^}_j?egA$*0C1tZZ!gaV1A?vV1ONE&p4u73+bhPMkY^} zT6NgoAr@7$Yj&u^W$ZpECU)Gxt=eLS1}d^xnB5$YaTTKEd;;h?39i~g?TNvPM^DF5 zAlU#icil;Z&Mgp#&kj2;Uw&XSJGs$mQkzU*=^w_c>n1B$ILh1+smvXU(o-J+Mctq$j z%;MKWO!x5`boTi9kn=vjJ6dOOWn9mHNK=gSx2d>0d4?DLF~ksXQ*Dvf5xKJG!pKxm=Z^5>!$8{{0 zIneZ*t^vBl3pa9|z4gB(`yz0#ejGHB7LWm`jz=MR zpi(srVOnZtR)2$1(?cM*o+l{Jad!kCL3KfXA2-uO&Jrnf+n0NKCmhge>ArQ^Spq~8 zSeZ}iY>8EC^_kHA0jHdFv1@r*t~g4haL#krB1}jI1_7tcM@T{gdN2}ugrYJeM;3E~ zMj96z-%H358?)tCEhKk*Az*ti%@r(ZnCv7lX5Z>_nqT2EvD>?Rtg`Q<5=UrnL;*p_ z0*33WUo^Rtp$b@CKM8$|J0f7QP|264NEIp<<$BnTWb*G$N=&i4RESAq`{eIZybl_k zb2Fste>HU*G!oVhCmulVGefLqj+~3ZUz?;8K)Ju+pMBT|2<*r5(8Ix1LCbAnK)2Fo z_72*?)E9!hPhVQC6VEd&^(x!rMy_lXzDbq6v;K0$r|?@NkJT(Z)YpqAMg#vY%N%t6 
z9i-RKwA5RvKeRL%bZY1FL<`#NQ(y2`@q}=~9d!$5K4dlHxkfl`BcxXKZ2!QM z7*H#s2zgu*^k+dgIPJ1ULd;n!)3=$ZZ*MAO8g!FY^6mbtpMsp5`f%!oX58jh9`QK^ z3El^oWZBbs$^9NjrAV_oGkQi$7LosdIMV*ypk{DsMZnwJn9{y2A6m`?@6On}^N`Ui ztl-hs;`)%g8;Z!oCq-aF3&^i;Os}+s4Js>f4C)o|L;V?YE4e1Yf zAU_r=eJOwFuu(q>-W!q|hpG}9-afn4MS=xWXYIS|ej+FVQxr`|P%TQ2?09=JcnRjvzd0-rn{E-J6%J6>}6RWYA%ynX@X;x zP@($QTFY785`p{v<9lN^UB>esPSSoVx`J_UbRH%boV$j zZLvv-b~qD2wwGarlFRQrui<{hoO|cko>Ymd$~LXV{Gw@ zFv9m~Lv?q7F|vYV?I0ve+DQR*BCinrASXazQ=gyI)Z=Htp_BdAp5Cik0jtXeVP`b0 zUftR&GzA}z&S&Pusk+J?^r~^f_n)Xv=8>EHT&oa{BKKq)*6y;?bNS2H)3r< zk{y#x&o^sro+HOiGJg2-5-OdE*L{1K?{>^zdN;BNo_Q{TMvUKIEs39xFE&L3xr%!` zKsaOcX}d-&U&i5htTx0S&^6$9C1f3}-H0VIJ$bj!c&vWER^N4IbLb8w0rzol7r$WF zv^1@#s#8`H?c@m}s#S+HgVbf*Tl&gc=ypGKIJNQJx}_&{$MtR8aO2~1iIZ6ItVS(HRi@M=N7<)R+yZ^1zqLEcrHVi) z4ZriWw1q0u3soC)^6?A(uOEsNdAop)EUc^RY=+)YxDoWV`yso5DI_{7>KU8EfY+S# z;Nj9WZ_dues|AxK#h+al9>R5!!3tp)ih;*Bc1M!%T7a$*g~#hIaNuOIk{Y(;gXYB$ z_r04@TPKIneeCh{LSG^<{9M4{NA53U<5?*8-ehmAPjK)4+e(DHG7xS^pD<@n%7fLD zeF&>K9`RkD>n{s}1|hm>DO8Koh(tc~6;sz1urBcQ-YG_jSNhqa=sxcx4Zw_G93tgq zC20}bD;?oDN%LE|(9aCe@4nW`lR3y~2tpe}!ipyn4^A^xwN+GD9-868rn2N>WGpBV ztj4w?uwJcmgoy0E1-uOM>=9Y3=%BbK4Ffcc>N!M{%Q%=B6@K+%FCuWuKFiZ`^QxjRa z?-svVxue>S%W&9AnVlZMqogWG3EgYysGS(zvGR5wvbFTlKFga>cN9$~h%O^Tx_$S^ zg?02z2YrE7*;xv^)(%ZzR$=Nj|PWfjbl*Azd3CF z%%<`AL4{WG*B9+S`METfRRIgOa}OKI88Pg&qF(~_QRY1Mnm2?S$!ssPpsFBz1Vg*a zVO>P};+a$B(~p?a0aG7ulgpDoY@^M*M^H-}uSH>QzHva}iu1lmy1VVq#?z7I7=z(P zq^LcfxL>Bx<`)<9?|wv_ciZYAkEt;G@?{5&4Co?na_aEu>BxE2zCV+97-a2{$>od6 zlj&w;Up+u-^`bb!a%a1cdS92To0eFyMpO8%`nJ2C;sx@D<6)A!lPNeQPK{3B879&d z`kSVPY&wd=pjX>abEFRQ3v|B_#fZCp1l>HLfMcGUo^}pCbrpsYpk5l*c>Z%vNU3jkDSxo}F3X z6l;e$)-=?kLwS6yy%L3WR{IFoE&nrYtLqv zAmwU#7V4^Y4{WiQoBhmk0cGmnH4NL&VelE%I5J~-SWXCp7>BrM&x&LwzDq-`!Y0$V z-}I#(ibR%!d(%rq)2++g8uh;gb?EN~F>rWvP3I9F`S+NMD%KK(&}N|cAEtg!0rhnY_s7-$TQJw(vr>N0G2`j8)BlI5d;dZO`9`ny?`;f! 
z04PrcI%d=|K*E2Rx-b+KLop6UtbZ=9-~KEA8Z&}Ay3Bu=I{7a@za%dQ;eUYP|1F>Y zTRy*E9AnU8K({+58-xh;o;wac`(JpB-~2CP z{i@&ZYcK9EPMk-Pg7LemeoeoEu8ty-iJ0Y}_1?wvWWD`8dEfaJc_qZZC=J&Q4}j6oLr*~!zatBbHsFRC#ch<_UEtI zp*@0{SjMLU!^P>a3U`f}w4}rivl>f;U(rMK-{Bcmi|9XmSLN>0D>n!#(x@ybG6~wD z^^*%1fgbqEhaTdhg@A2u4hh&Ep=s%;7uW||z3mGBnEZgjyt+sT0 zX)KHQ7m(Ng2u?iwxy5C^=%vM0&G0Ow_cPKH#3PCh7x(pc-<+7}D8|EI9ver6q}~&u zD_xfARBj&oC-9vihvRndb2oWu2^mn;%N-NCL-&ha6h5x9?g&COKCUp?r2u&Z!uWS@ zm@oOjQ$-q_>zHv?6W>0R`+*Z&;h}2}9~Eh=P$B8&Y->x0EG7FI9vYcVjnAk8^h;#c zcz&}e9w<%>D?aC2Ruv`r*?#TEuft36z6fXpz0mVYJh}!B=Y7l1*p>P4RV3I-5<~Uo{=?ce8V3<2`+@@ zpSykRFngJrUI*h>V%QT6XA1zpH$cx4(Ams7Z6U$Z$*0r5OJa@kz+xl7f!RXDc#(`* zgP&_}yx8Q%VfX{xpx^P=3TyI>?_JzXvn8vk1#g`k-W9o>ZNlN;BSIRXOB|%(I z{ah0y(69m?%SqYY$zsc#LSQQ6#(oIo4SCN#1|5S85-YJS>l6(&p zEKvBQSV!`gVT(cm-ywMy3^t(G3&AAq0OFmA;i0%z0a0zUQaDXhDKAtbX$Jz4�h6 zXcPU9NDfgD7qzGHsR=YHP0b2V!m(5~nd%Rc;x|tPje#}`lKGUU+nA`_Do*@Wz4+T8 zF{kKe^b5N0iOlc9WO)Q6n5K$sMt|9UCK2NwAVfDYgIKEKdc@@bG*wwPy>c@gzhBOR z2JRCz=OWG8ngk|2R^(cFXpkXEMyr0EO~z2F%tN$wzDYb{g?>i@1NFc?pg4V(1?eZVx=(o0^+@Zo`Qq@g0BslRsv0zC`D(a<#6t z2A^Xz!&(q-VSfYscl}!ey>H5eiClX%pP;@!sm(|LU1dm2BKJVN-c99a9VSi|n8Oej zsNWCjji-Ue48;QlQAFL~6ag0^eIxH#G^=8v^d)V)7;7;Gdc+N}v@iI|2zme&P0(br zSj#l{bZ4){O4P1<-U1tj-G?D~k*=~gRwo_)WKBb;)6Sd!l|@M@JsJovLQ@o5r^+m@ zFZyzH0nkBz-HwK89PqRp$(56?tRp*+=$OBJHkjzh5dp5>Ks{;J1inRo>{L|ySj>rR zI~FJ7^$ba8$x25|bD1)@uI<^`_N*mXRp1#Cb9#V>J^5oUYf=p`+{u?_)bHMx?$n<{ z=KOH?e#Lto>15}gA~HjjI_26{kM+=>?9ma?|sBKm$b8;Vb#|%qJp576? 
zRbgYH+&){NoE~GfjbJ0N&Wf~VC8q|Qr1+zb+!@YXgh;Y_EwpD70ThemiAhOT^$E;_ z?u<0nLgA(U65CmwxMQvXnapgi4Gtm!&O?~8$K2_Bcumfx!z1)S(nwy~GIu{+2HmM# z&SX86rjSe4hTxjFJz=JEE+|(kq|E5S>z%Qi<%T_}UJpoDbYl`TF3)Ah%uC}~kKYol zdCN8jY}pKEpDzcTQ`jyW?pfi*(1Y-tpE<){%`|)`k~~Lvy$iOOquH}dESVr0}*i7QI4J`zhS@V1`Qx+<)Iy*JmwB}d0ctgpOE~) zLqv9-LnAPqCK{?MenqwEZw;&#fpXWkJhBq2nc}s+zQzSrnZyTI?0wjhz6d4`?%m~z z^4!8Ddfe=}O_*8QyYyDBEeo1?+NwO-h(xVNdgvb^Qes~b!yO5YuR47Z$xh4-BmW$LU87v(RXqf~b;l+pZ*4<}M@a z!})rN-cI(G<6LTxHI#?>uLLRu*Pj3-Ga<*ufV1RPv>M3q+a~9|$<602B8GJ>kgBre zJv5OS%<$n5c^`AMTnu6!F0$_}E(GRpJbb$a6_HknR@;5DMAMV82Zhrjv7_aA0^P<% zvnVma?2{JfZcNZEdfPq(qLX zK71)0HR{D|x~fYgCikWPU=%*}8sgd7PUL1MD&^@Izkjp}yA;0-WZE;bSd&$q0juG;_DaHs&r$s8wmbP(_emUXV>&yuw8>Ic@${c!j=crXmY}@5a_$ zZ{E%opD|dUDhaTJoHdjPjMpBi#DtZGrwy(pKa9v5de^x&`JRlg`rtfSUc_t2_-oI< zy8u?d;`Tl{OuN+$FmlI5l1eGNQ(pIyu0SU*_JSC#R-3vjPT*T2m?l15B5Pf1iycOP zhw)K5NJl<732(|?M(#@MH(wm?BZyqI!7?;E#)R_=W4mv;KhQ7gJ| zn$>?}R4brWS?E7fm)V21SSr0LTlTpv*dNFjfyVGkgX*5PK>L;f#{!kx7;&g_vU-_* zlskc)4sUKPUlB26s#J$@-3PTM1MBkk?CQEHF41vD!uh}@_WYYj-N!E3%jNOrE7k66 zLI@8;y#P8u7@7NXHMppG z+9VK<%O&-8vqqd2MMeS9eE~d(RR3tAkJ;dFX z?s<<1=0|&s*~oY-@I#s=^sbu#->KH|Oweg3Mpb~CKa5Ir5q4BN^p-*NLa6#OtXD~B zL{pTfz}+d{YTpR&Wg{Bzeh{B*ZQmr1wUFjZq=JTFhq!7`&BYgD2$YKEoeU|8R1gm| zfR`kL0VPUmBT?;HDmIAWm6D!No-OEL4<5joXL=>Wx8wocq(;|De~+07UGdn=lu|5u zKvku#J1)%YsndO?ol!hjVyprlfWX7brC-;zJE=CWV%S2@vp$J@6}nvHVMEkQ)F*~B zwUM4uT6=N_oMET>S2xeXPtGZ>b+eF{(ejsDX95N8U#u}BKQ#8ia&;uHj&E5P&N{;) z{-GO_1&Re@W`buIkZrqJ@4_@g-5unh5sU&c z%0G9j`CN~z8`}WDNnD{xUjD&D|5|)D4b|M6f7N(5RY>(?3%3P5g3Ic5c=HgW1xs?z zZ)>|kGNno7#e0rk7S92;WxM~~NKw1HNc1<^J+Oq+IS@_yji3V#^|;Jn$taqHYqj`0k+j2E!{ zmpm=a8x9ZBhe_XeYl5rHfwxuSejQZ$j&olkP!cXHevQ3)xZI-`8@z;FF5a>KFk+Dv z0Kdc=$#We|SyKI&*SB~C_S=GJ*|HbA?SaNyoCqP__9-K>LdcKHo!d%iE#*pS=9MR#6)g&Ck~boL7*kD zKBqdsdA9mnYuvzQ z1SA%j>07G>f!tKQE{VXOj(VXVMcTT%Q$sjI;H|_?#8$`6%M|1DG_;Te8zkP7L!RDe zr)a0dhY7_zA_l6V@<2|3>O?gp12@byYkLJtZg&{}#)8Q3T)y?Svz`O_bzS?tY%>~h zGv2)di2d^smPZVglmkJJ4RJb|D_JJV2n;+zv1xTQtsqThP9nhkyBeBHkqSS%?P=1_ 
z#5O_Om_16_-lQ7>{&MA&B%G=j^N&s+;2J_(&Y!{ zu#>dA%w$Q@%t6DY^OQO#;_cSkA0)ksaik^UD2_80285|fKX_&}$C+bDQc6V`Jwyj- zlJgp7?Ys(7MQtvrg<-hI2_ZwuQv^ZvcXCn zXD((527LZu2#`hGd(=MelG-fA=-4li#Q3I%EsulH4(pV~r21L&X2Fr%J^&BE?|-Cv3<;VNeMGl8h}lUs4$6 z4Lw6M{Kc-8LZ<68ygSIU`!w?n%V?4H`7ZmHC&?iJ{2djrR#!4V8$Vw#Hgr}KK%kus z9zL}lhiEy+BzQqwQmn}Ht+u(oJuRL`59Ro+iQXk>DL8%cDHO*vW_$376gMn()zflo zuT5pt`PqUZcl&nuBcSwQ7u4EsLW_FflRZ@Q#sXEA0uqNic%AcJ>{6l@BE-3*`Vc)+ zn#Yle5#(^DMRW7 zfq3kl`UwAv`Y&f99KenZA~1FTGDT&8OW(OjgHMQ-%l@rd16L!l)5CS+fm{1_HMM!u zEvn8+&JY(W2dxg}Kzk}Ir=F(`S32YSA;gd>P);9N1`EoEc;e6V=QAjFcU$#IZ@^AA zU)z^^Bv?K%d<*Yrnxf3yZ#89P)t$5Mn6u`cwQS7ItC?>8+SWI9dt7;Rm6bM)XBwD2I@_dnx(=UKa7 zcI3Hd=1rWEA-Z&{dJowIP_pH7uQn4USr<{FloE*{T)5k&9yXJ410oxDRxV<=6{7o7 z>D))nrXl|JN?CM>di#rTTpX#{-dCX;iOAq3O>meYX2ej_VKX=4u8`%BJM{ew0YC-% z6KL~2aP-ff7+43qn*h>f^`-zAtq-aQGsctUZ~4{4My7`CCkrR91)iZF4mxZ&%R0$; zx92e=T{fpguzek@@+?1l?spOR>5NoB7@UHM6z<#i>nhZ0zL3jkUU?0b{Bt^N62w=} zlS{%LT2N5}-X_5JxMaKme*l+%qW{Se0+gIM3HK%2S1RW@KCEAK~xR< z02Ex>hdE0m$zHdYX=8<=6(@O5*hbCl)oYC0-ZXPW@aOtx=hc%t9RlDyCy_Dh(iLlT z4lzrc>p&z316osj>Bk3W8!H}EQB9UR!Me2(yFn`8W=Rjg_Ev>4MR78UtB4P;aoT~D zVl6h}u!8WA+VlDM4Hl{e;)_`acJqgG)BLBIAT%2M_U}74r4fx_$YD;l6DO{D zW^stfG}lXyA3UYD=w$AsJE!eUEAHRGXA$Wuvu>yz`zyy79XDw_0XM}Qed&K(YV`92 zu|oL=-Yu3HXg*^2PQflPVRgK@@JH7wz-x{`6HGDohlTX)H~j2f?F8j_r0>LbOY>FA zk>-}%Xh1D4fJo!{rJE}bQO?DE&B={FW|2PeLOm24eV=SLj){0-+nag~DN$iBreCoUf(ufBn(8+X zer@8g(852(9{MC$a(1ctX1pDgbsB0ihi?x>zw7W|4qSn0TPTjbIZ}Xy)%{rPxz!NrV_JxoQ~ew zw=w2zUASLs!Vu@A>0Qoj+~x+VN^$9;Xw)%t+boWx7WOijnX~W@QzklSRqFY|>K?A0 zb>%M*J6Ewb@M5pd(bcLx**I@@GCu8`9swnrxvy690Hey)mzHurnVe(kEaAe&_bT|0 znM<)bbmo?P{FHoU?DA%0J#&fZ+Pz`IX_1ZUTqYAn=X+!noBW~eoI;@n=dy-1gIDb2YUoQuccgq$Z`)e?jg@`Bli7&TE-4G zI~-n}MOGzELNu+-UZ0k;3j!t$(a%>nfLz>=R?+TrSM>LwFR$1T5{>>ryP9MB)nAg? 
z1v`=yogd}^VEzH;eFiN`mmt+by&(linNiV;h>7(oVEGTyC;U#o8zIGa*&bx zW+38p9UP&p{n@Mvwj<9Y+_BdJNnAlPC4S)q;){5d&Ni6+iPEF^!@lxn zB}|5r!1x2E&d@+0!hD+-5wHQJJLY2VKNRrTViJ!JVsxz6W&9~z#T3KBu=86^CXFrl zY6EFiF>T%Kl{4u*H9xvWn!XDmujUA$qUhTC=YP8u%|8Z#4z-`U4h zX{XxFRJ9PUDqOj2@^H4@&Cgs?5{T~BO-ovbrT4cUj0;0lStEnT&r01L;mffRgO(XW zTf5CvZKn-39jguGi)xL17X~P<*?+?>e(`$a6G7%E>T4=Rtx{t!V*EHmUJG;8m=^Hb z?se)DovIsDdiL;^_h6GG=4l^0^vJo%Rp^(|Ovj{pk-&ah6gFw_%v_gjP3$?TyNIT6 zYFc@Yc-c<2Z>^g1tRv-Yoif>+lgc@txkGpC$LN`lPZbK&{Gl#SA1A-*e;@K%&Om(& zV=;FYU>R_ZF{tM2sHEbYA9p=*2sNw^u!4Vi2gc@#tD{)JLoh1OBDPoF*RNTY?iLg4 zYC|C|gsZ}8q4EdH`nN^e3sbs8^!wR4q-I=FsWsPTxXa>eWQs*yPsv1Pitl&4{LEYN zJzgEmzRD5YIo`=2evm1!cKkbO?-kn?&Ac2%kxSm#Vxls<~t#k(SZ`H3huU+Zxd*q^0FXWghjWP`> zvjLySud{i|gtAYci>k7(P$>M?>Dy`Zoe(RF-zl={F5s6Ex72icf=vhPxK64;bXrM$Cksb-@(5%2(@FSkO({Q&-NfAiv1Itx1Xj%&;2|0 zX!0b~CpHG7E@$X#e$-EpF`P{qoZekIS)ztwn~zaulBs|fR;PWl*9@e`sDTdWrRUyf z+s7OOq~hO5eq4Exkj$+2B7NSkgbMB9V`Mg?=36bg)UA@6H!6(Gi-RE%D<=TizvfmP zj_T0l9Ie^bm^EwEAD~mF5B&}sb27NvyOgg6IvD9S_J^h2By*%HI4*?Q~?M5prppYV1#!_N*j2(H{ z1-FtGh4rTk8I*8-v+}I0UG9zw;Vh7Tq1VI{+&@a?ekLB&g^Mz)EK2hz&QV5K!$1|j zbm?g}>SG+k9}~j)O`fdOb;ctZhb?Dj*ICY>vR-`U4rF%ueAXge9kcKFpzi~`Sh^$( zumzkSaMgu~9@HtvxvwRh1*mU3G0N~VUeAJVU2`{eIWP=f&io!(+SWEu{qx4>%HQQR zsc-J<_G_#qj^($Z>F3QNXa{en)~3#dQvP|s?DsxP_C(iXft~#uox|T)dop6t-j`&+ zaj!FU6G3=T7wyWrfzmafmY^QD*n_cg=qFC-&p%tK1@P}iGYobMfo+IvVyJfg{nugp*?%~$a_5RWhmtnA7 zBhuqD#zni8pJ_(wEgtEW)2@Qd^OlVHX#tuz9>>XIowp2FJ98t*@tIwy9s+n!5+=5V zEmDkQ=7W>NADh5(jhhywV=zP2k}pf@l$q?fj_TC-23>A~_7@*sy57b zM(JFs_+uEVWQ)P>R7v!?VzZQU-A$6+NDhEAWdsn4v3yrxTa}?z`Nc#bMmbsV!fkzM zmUwRUM(~fvzwu2+=@I`))JHYjlR(Ot3ygROmM=f{s)6_p@Tg~dI zzg8!-TuPhY6Uw^>AEzF=&SDc&x@?b1m(*>1JDNU7a#?isG4L*df}Mh6lGjxIq5Cw% zrP2E(qXmJ&KH&#UJ&EUvE*F5a15;>@4Vot$^-SU|FJ<3i`W`ph$~FBz)oEh!s9|3ll2rxwj<-U;BV~W zHLvR_hEu!+{s;`1*fTXjCZF5v8NJ9L=16(M76;8FQK zOj07~Vc%gAubSb6#Khn1E>!T_^Ulr?=W8jRY74(d4v9v$+421yX1LjgyN%+CH@PVP z&d^WLWAZpdp6{QJTdxfN9-;DN_G-HFbYpfpxZeyglYf}7VZ~iI-O&L`37BkdT6vft 
z_@XYI;Ed7*zHG!7SXgAV#YYVDd4L|eiBYh5)cGCBaka1lNglUi=OeZnmBXj(%^X{^ z4G-;~<3mn4z5D{Zkb5joq2*pqjPz=L&|m5B{!q; zYI|OaDPhXjrG*>iENYCALL?OJNg?7qLc?PrPJ#7%#h(s7zqsaxZJGogLo3)G5dRP* z@h!^D^*c6gLeV0Y0IR|r;C}t?u#UK5PjmoXQD3xu*H^s^A?m;TU57Mjy_`z)SW2xg z=Hp?>owGa+a8S!vVS#mfeq2;^nYaXq7;^_LfmRN`qZnM(wRfVaIpXPfd)jFO3NN4dY8B~_pQtd6O7eQU6-_eP!ek=K|PX+2R)z2W~+Vjw4Q&ME#gHfNb z6{JTa(&0xoWdR(UTSu9hSfRfmW67RVg-@gV8I z&R32$Ttc(45lZuu$t#}Jt_N*bI1rMi5B08sL?wP4%ttB31&#fY@16V8>Ko`?~hZ*DzgP-Mtz0Rjm&2IwdyZz$Kj16@cTQ3X)!x~R5@ zGEvsBuV|0uD4;qZG{;tlbEpwJ=hV>xC4KW#yjsm}iN$A{aPun^N4C4wOOX|YT$X4L zJ|(DVeVT{(g591e7nHC(?duW`%kqGKwmoj zN75K+gb!OFFA9m;x!hg5BUie|O5J?jJ~scz@djyRLOmo`ueB(nX1qww#q7DZ>ljA1 zlld4floN&duy}vX@EHa;Ny;xArtfwPV*6*3kt@r z*#onKUmZl-t`Ctc;&d40X88e&X;^-?4b9^Xc462%4jz@Q_^7L+$inY-($A~OIj6P? z#N@3B`*|?EHC&jY&W!z_eu-CoRNS{U2Jb}Ps5Cr2<23O@a$L)owd$J>BuHl1k|S#sT>^OFJB;$Iag@@vr1W2G>Gg3mMyq zHpi@m)|6b^Ft&C3`x|ho&OSp?b&rxUMTx#_K+a!vqQW_<$+W=!dCGYKV>AjMu-ZjB zjT}Km;?MoSU#!O{932U{T1ceij{MX>LiRKv8c5gO)o*hc5OO9uM?VmWfC7`~IGTEl zj8(FL6Ut)jq&%E7sJfab;5v%^`|kni*s!adwIH=20w;NB^CSW2QoILr`2cquKK{^J z>*s&R*em#d6->2>7?KUENk1O>?GvuHJc`CvV1!<+hGTca*vEV z6&qdU_t^l$48B>)p+oqDDEXa?{C4_a#qVwpVdU&Rq9;qXxi(okU)NqL3>26{W;3^o zX@8cHDGvze5m27tWkWE8%$>T8uZ$dbfHZ4(iaNG#0T745mtqbPgO^{gGk3JA1#-5S zGAginp#ek6<67}bZxHnyUy&g4l~b}I<(4BH4P071%}B+VGU6)7q%Awipt59bJ#zF2 z#g|eDsSTuroTHLP(}5x?J_nTu4gN~;#phdjf9SYxyV!KiKJzMqTMvK@G)AsXcyywr zqtJ5fq*iGYLQYQ}@FoXWgCLA3l+MtNyXnnUpbD3oxsrcZi% zw^{Dyx~Ggv*1aGRGk13r{YYtOAnV=fGzS(i@HE+4&pYgRx+8e=Q0GgEFM@S`=#T7YFQNIXpQxQXDkK&ajRfi->xr@GM|4v)1Kw)a19pA&X%~o82V^ z$2NuA?M^6wTZy$TZP9aXJV6XUCUz(X7nC{w~bD_P5 z>wMNXQR_U*`eSlxDfHQE2bC}tH_MX4ldSWj3 z*onk1iMsCH`HUi(PvSxH#z#NezQ5Lcy{2my(1X#4GGZRV?MPn6AG-VAyrW|cC3%i% zg*y5wNBRPGsy#)cX9_a+|Bm8~kt{dWO1NDqVKg3Ta^CD19>sW6c^DY}*tPTe@o`Vc z(_<;Splv2oEnX&-#)cobC`wv!tJwR}w`e3}fr_|ia$o$fi?m-uD8$LkFYC5_>>je4 zVY=ruO#tUBp6H@MCALU}p>~f8Htdz50A^}r)EjpPVQD-@`A`o{a+!rLK{QD+AF6VF zSkLmrlQGfQG|w3-i<1l^&B*bLm@|IIE?tiSA*Nu_U_YqTLvixM=+gcLbA}8`40bbz 
zq^R5K~FtOR$3@B6!p!PSz|seKy4R9<_bv zkQ$nbeS{_Ldf`d<{q#diVZF^NX&&a8%yOP7wKjLED0<)!c$}-UZc>?p^B2XXKkTV5 zBjfBXliZ0fJ&YE|y?TvDoNOm(NbY@LTSRs|!_OwQz!j0dpjEBi^vV{%eg#xc_h#J& ziTr2%slj!xX_CZ){L~E0pKTQR)tHK3i%DSySV(eKm}@k=IFplpn_Kv4VCP+_H49Ha znYp+Cu~IUPx_SXCX%1c96z|q&;_;}uOhy#s&u|`-Bpb?QjX7B2AjGbVyd2r1@SFYM zS16T0|Ce6jDaRPgi5JeN^5*umiTEPh^9qSlg8bRRTi{`AgzSjZ~vsz5h zjbV+N^EHtm)O^nimjM*#5|5MH)2>^#ytqLk;Z4$ ztH3_J(YhYQxGMH7qmzcuI$@HJuh}so${9Uv8S@z8wU|q0DsV3D_(%!;%bUlGqN#ab z<>#5me#yn53aJ!NZzmcJg^@TtKIG|qFpizYI3cjwtuYy~-|(2hY)*OUSaiafJ(2u~ za>mWqaamZt8fVT0e4bg7j)XPK&#ExMy=J#rKLA9wkw>fUoO&V zZ64zoQ3-uYAIx-jx-O8mb~!l9`!sCreU}p1f6njD`x!$74Jz^mvlsP`T&8x$?@r|3 zJdU4u(@gDVpWYfV;WNPj0DNo{cV7)*hpzt*?wL>J%ChoC2{d8`mHmY87a3IHZ>gh?1F$#Q zP(C71-aUVyM1w`&1u{!@!42416TqSrYp%mqaewan_lDT4w`qW4!%Bv(4PpGJ2beo; z%a_4afgbzyaH@#EiyB@X0JhAxP;NRX_Zj#&{Lj5iDzs@$&t)wR24nC$Tc0aGLD&BQ zH{i6T!;+Nbdj4#TO<(=_#ze=YW;v2>_GtcZ+M03PXPpytQB{Lk8mpC=h)a?E`AL{z zdB;{oqCofGv0Hia9x-o8-+i+262SVu))7jSTK)dxkpLPeE4dFU-aVV}7{koqrS2Qn zX76y`zgJA_uiBmR=fkPUu7JlHxc zho03;6t^NA@;=wWAS&veM0?LYGfZp{Ukr|YsN6uEqR0g4TMjfRdam?$Q;pi&7C`F4HSy?W?%!@g-M^J2q8q0aGnNW?Rj&*eZazIGzf+1N`e$V zO0RM!KpDQ>!?k~ZelTl^o&}OR?LBdgeD%@$FAR5D%>6H?A9j~NAehc^X1jENo_t@E zOnez5H(SX~s!`^gtXizBJBboquXlC6btD3Si}eQOY5qGE0(eZ&$;bE?ajhhR){IjK za+b%NBvft=Ru$c!6jwen{ObQ&_-mGWUS-aax1C9G_L>QChZ#-wmzn)9s^Sm(N7)RB z4SqC+2$WwP)`D_f`snNX1UWxkWXXkTjtFmU#~=a2Swt-B8^_Tmvfme)bSbvy3hG|l z9?rGswSd2<>9{)ebK(96Y5)Jc_;k-WoM%GIp+!SiHz0iLsz0iF>3HzyweS(4aDn~f z==u@4I5Mc?;Le1BMZ;7H^HjCDIm-Ja@=yK8E3fvH!C`G}{~vb84^qH#9delBBf}K* zkb*V?F^4Ye?%Ec*iBO?y^bI}%1Q`J0k$7Yd$^&>D_YxB?KCu24 zQJ{OeDDmG&C&~~EfbaqNIPi^LE|!%!^Y!%0FA4^ZV@#$S<%Aewg!lR#uB(8wjd7a~ zYPtL6r#23m|Fwqto>A$2^ZavE20*}w1{5k}Pj{qMkbojg*s2*037`K6IXgK+@gANS zojR~pSx6cv%Q*lY8)kTde#n1ilycJo5=K3 zM2d-vd#4@y=5CGWtMG|G>gWEi){Oqrc5xmS-fnII@F@Jg_}=q|^wT-tn#76q#j*_; z%GrI@t9LY?89-N2{BJiW63-hTFvRDdeFu+ay^ZD$3FV8!0v;rN$Ja8O3={y5&Inc% zwVCIy|7n}y^m&Hie|Z4)L(pfD;c9EGOk4zoV65_|NWfyrSqT~P_wcud(|zCG#B&+- 
z+Azd_`fncOT?8^)YHkWPJ=q%32#$y{zUL$F{Wi8ryx$p}2t(CB`!9D-3@?qdK>yb= z`+q(ujZ@RPFPxe2e}6r1t9#0kb;e9sM7x=@D|fZR{c2x@#z&n0ZEb2A5NY0XezJ>e zH^U)*8m;P#VGJzuQr7)dpZVp1$!W6T{PlHG z#nSAxhu6?%rf_l7ndVZ}nQQWr>m>4TBD7TC=es}KOP<$k0%3#FY}uUOLZ2k+40t{Q zdb~fSpbLiRnV>fY9*dfcDkL^jaX8m!{X9c4zz?bwak)O*F9$lZ8r4<_fn?yGW6U<@^~?sFuwf@U z^Si7d%Niy`z2VB+e_~eBh)L_kr)Sb~YHuq2M>tw;xXs;#{gf^M<(mHOMClMoZh zbtnWwD`;|Golf)%JP2Gz=L5TFX8caVphzhJ z+xEe?zRmM}QKs8PozxG&%0ij-sDP@ca*OIkKi+adootIr&+bKoK0kb1t~vwg zT-6=5V`o26-WXh=i7bgU!_4HHxAUd`_$#W~@!BD4E_nx;`2w-pirOT?J z6}GH1qy5r??-(9JE0vJtpWg0g>?~@8y1Yr{+T|vexkb-}Q8#)y<}9?_UCG(htrJg) z#JqUl`_`_^e2igT+Ru;VUZhL4L|Y>=9lqZC6+lVH_;D>N
  • ?>!nNhtK#QTd0x=0 z2-k}<=~ZsgC|)HkN`RygfHF)lgppNo>P1`>8{^;d6O z$oK=fx&>>Ef8r-_83mLA^AHXMlGHRQed9-pF#JCA+d;MxM>)ARVY@Y_>vHDnHq}2f zC8tXViNrMn5yJ0~snlaHDDwP7tBcgn!eUZ4`nWHy$iBV18L|Jv)pDZsp?Y99VQm zDxCTgP64NIj-LW7W}Bn?iW2?B59OP7OpYxTi%^d03!h^DxZ2P%U%-D6(ci@jF0$v=}17`=hrJA6VA|XYp)L))?-u@ z?YnzDaZm&h6rTKL#Y(9n?*MI3r)P&xc6{^9=f1tN98XgG^%h&4@NIwNmkIID9ZscG z*goHE&ESC2TTE^VvB&=1GLpzABbn(Zd)&^&bUV{(=qkC0+A+?G?ceUvy>6QK52ep4)-cyzaF8$T$U%5?dzBLMsT zq#Js3bviq&XD5>9%&Eo*Y0K&l<&zNlSA%1O?%p_zApg@i@}|q3VV`(K_9FvV_vi() zHxn(_FN~0aGn-B|V|Wm2=t*HJP8YqkV{dv<4M+ywl~NVm*H z>Jyj|B7bg>pL!@R`L<)p1@BACoz`5fK!Z;M;ko@u-CQi=-BVB|aHYt`l`boJPcpcPjCJB6x24)%p z)biM1&Ib*>dqDAwfm=u;8Nh@qPTrxlo1dG~UQ(WR<=g86rc%lCiKA#_ZNj1z*{HAv zeQFgPN%A*CX`|Zsn#T}yx9l|x5XrNIF7}*vjQZ8sp3E7UnP#8kLx9uk)An>(X34wT z2ZnGZv=J&!UvyO45#uu;yQqC?n!_0~gv~ZwHxWoBa5VGKR(0+O$Kb9Z*yN+e_(S_a zg&R!6<`s>L5wv~zk03Le5A_=LYetxvu%)Wu`)Aq<2oh#UIRtHfXVB#|kVDv8mdj1^+PtS@*UVoc$n%0}3ncuPkRRK1)63V*@Zr{bUS3Hd~MF-`W zW|TahD|la6a5dmC+c8keX%Pwo&Zk0m;CL(;07Yt6nytW;B|f*ted1FpyHBrz3G@Tq zTAyhzGdnH1Zsuq20Mud29CbBd69It2);o&#C;4g(q(sTT3tkP)wKH}9C@$U*R@b!( z72KjjK?n0obasK;s0H*dSy4#92e+G|e@Hh?NL9U*y+CV5Pv7SetR2sorZOuD+Vy}f zX{?r-Jw~go=W6ekDNl|wLfHXR((}BO7P&o9d8L&2(mW&9a@TU&4I@+h1g)Lz%X4#I z7>~n-{QAmtqbrhi)f&*3Xavzjnmb%aJrBCm{`8Z+mrh+2jWYcWy8m~yquI_sk-P}k z^ESzm2+o3Caq!b|-$(+m;FvF=-0+Hy85xQuPZEmMAB~ES%(zl8+f|@DE#!xWL!O75 zYz;zUe+>h3wZ9EQ!&Ms(HCeo{DuQMP?BEI=RFUM4JuqTiI4<*tcCt*$jkggkHi7;u zCxi~80@}2Z09>d?1>6+@2!9e69pkiDkS^AM*MJkS8)NMP(ROX}EVjQRQ)kM1s2b?< zp)X1!FdM5*;YEiI1dy^Xt=b9?I6TKIfXW~rQ1mT8b5J0Y8VC6PJCZ}iet_cm8NlV(V3!usTFMz>LnCu7O%l@hZG;+)^SEJf3`*g3uIhukZ z&{haOdWl2!#9%E60qa}pIQawna^=k*ZF|Bg3KcK$AR{H$hb>F_kM!ngvf=~IngO-j zf(3q2pg>w@GmjN-ly%3O)SF(R!Q>iYBc?%7?bP>2i;WXU?|$b5ZjM|Ft)r^&hl-wy zp}Qb`{Lz?ZF?{(w0F`DI@e!P=&we^VF7ZD`_m!0Itq2r#cUtO&(fx+36Y3yD&0wI> z#LL=8A?#Mk(_9}JTZIo$?5{6c&>LvVe+dK`R*PC@Puyc z7GHZBbvWA|AT$;SD5%mCLI$Whs6%L3eP4%fp$REET|BCx>O3NKoH2>ZH_gBasz47* z!#?Di%@Pf?Z&t}wv|ls&b1QB&hDHe!Q(LjZdnWbA>!IgO+x**)8%ydyb!&Dk@K3`@ 
z>b?bO7eR1;!PO$+dUS;eYL>s|3d+RQ(}E7>YrKg}xNEZ~TMxa|%9g1pC&d5~61S0j z>0iyn>oIXJq(;2Lv@?aEx0OT1h|6qjpIqGloiwqB>odX6>D3|D&kpWuvH_LBW@dyN z28yDAQP0U_n}>P{O)}tgUIce~#)Q3vY&SnQ5l|29H;Ow59ghjtCf@Z{= zZv4kf%==T%9L#j@Ja^7x5BALM&Bj zM%8%j;kv!R>J6rK@to2`TAFT>MSGBq{9}$Fn%S`#g?P*iQT2`DC6)br)9T09XSn1) z{h<9G=n7_nD6jp#avF(A2z8d*C)dh~as=I_vGYJxX$@3Gr>re8GlP(rN={?c@nNJq z-FSnG9_Ko3UbFItvd?XNL8gg1Xp0!=BRM7lOUiLSL_DBU_Y1Py7xDi0V?h{=)P4Idir%A$NBi;l z%ZIFZmb!jn)jnT?eTcLyZQAhb7JGf2Tyx0fssgUsK8pXRRh4nSQmD2k?EhBpfbJ)U z-lEXEGGot~io>*`KbECCWyFv;n`t_NkBmb>x;g41bt^Ytb{2nPR{wl##K&|vhN;L- z`&bi|F=X}L%THUM|IvTVf1_X}RKghevul6zZD4x*Y+; zcMErwV#lP}S#Y2_p7uSr!Fc1)d=IQ6lMFe>_FNKl+lE~BfB`i~zD!3n!J}-&UnbRz zBY+`FR5q-~YH5c`%8&i=wdBa{!F~ASJI?ia%b#LDt{PeDX_`8seuW&;y z^)m(qpkppiag$QPj&}SMLEU9Blpj40)}{#~2VR@@fG1M>Y8j#U9I%g;(8W9NM?wJU z7DnXTWDV9*;j99}eo??C7gkF1$A%Nr#6?;JLeXvg^zoNV_rFrVXsmw~vZ?D)PX6%G zYLm0M-xvP2^3xgU1OWHhX}5$JVtP|A=@zcB`{DV4b{+nuvHaAFFQN?PgL-u;^%1&c zp^TbuDC?_zG|Q>{a`QaXF0&O<9p)q+00QynYq5*^$Am7|g^csejQCV&1(*p7dT{vg7L zqGyyC;=2>UOuqe`zaDpkxE-re)BtE)f+mh_=`y(W=kFclNXLMYS zb17h*Oi&K}7A5hN* z!7e1erwBL|P{)=fnh1y-5xNjStRwpW+-!>a42j0aaaw#Vm3i$R3-@r2_uwU@ z&REQ|U%Nc%#~OBu)&>bQ$ns|__v)pm{|Hc`Fq{M_jFV`AU!gwi{OPM76`W1?eARV^ z29cqQjt^p}q=Dp;&MBpy&*qG!Qup)_7R=+Lr!VAiTJ%J{TB5lAHe>3LP{M|IGC?(F zi6b|B++3fKkj9+AIa{hHO3w|H>GFf3f84)57$CQ0CF=QNKXzZ9jh*yPZ1%G?c5~tG zJ8yC4aDMwSw;<2@Bx>XYg>N%BP6j}gGidTOF*|AXyNxE;)~Bq63o5$qTHwlh=-Sf6 z&>BQ0GMeYCtl7d+rZSlZb^IH^@xF)rT{f zc3!qrqL$6i5kks@76~*75TL2!RGG76p4zG*`Um3se`DDk(6F0v`$vONnM4mq|z79lLWl+c6sZ8NOwEV4t$g+~SKgo!kmj1J}uG3|JazlUe>piJ+S+qObyuEti)-d2I5U%saY zE7Ksf<)Ww)-vQQVm}1VenZt~jEL2mX&y=h)ZqFkfZNCvTdD&);y$hxkFff3-kG|I-2u2ev(FD=2cjsL^pR+MD)JK7gC*K!RiEj}rA} z`#F+qOm35S{GqH$ry(llQzi`jZ-34Bqwu_W`B?7EM3d<1P}I{ODj1;mVzuN_Xs;vs z4I?e7sAprZzD?ZQb$$vOyi+uQ{uboDNz+L>w|C)n=7i)=zC(E}lJNAs`a7(2^w_Nh z?>HK6?+z=2mw9&x^4Fum@c1|GTutsOM350rW3)i(B7c4d4W1IT399k+k4Ipcv-1FK zGJ%9%D9-Ah)09r!I`Np%_~ARZhNR1cjSc!jDhszKj9Rj(`J4r*!EEtHp5B<95MX-7P3eN=c(M2+~MNOG-$nAR!<~2+~plf*=Ue-3`*| 
zyDpA$yzg_*yziH1=Kb)@oH@fd*Icpxd#}CL`o)rB#P>t_8auy>q;hjFy^Z6z+;Dq8 zEnMVoBVSL)=6)x-PkBr2Hu1Ik6tQ~&U&%phZiHWzC@vhnb?U5YEDGz_-*G*f0niyg z@>RMsA!5XSDn;^WKc*vd7Kq|B=T!FA4r+`H-_&S6H6`1w-b-hV{9rUX>u4E_0p9J8 zMmT@m=Eagl~Q;9f8@=b%{;ve{D zK=acwXaVasvHv?m*NfCcC|u%_G2XQni%_5w1}e6&3?HLjgEdVvy&>;>{?W=%|NGV% z!5Lm_$a&qLXv;k0xnLAvc7L3O%z$7kp7YN7WP=jlFu|c$uGV`LdLUy|Qql%u3YUXjL>LeIqwy3by?87(G&T1OiyLEy1l zj&*z0Pa>OHG=GHdQB47_kzF@|>eJU857;E7(TAn5llbCL81QbXd@lxYBf14jE!32I z^Tb^D`0jT#lx?*Ot`e|R-RO;oBsZjP`$D6nYrDrFXFxz+Je%b3)D=sLGjKGJ8LOc9 zMgrG@P%Q4Fr9MvUp!bSJv?t4)!5IA%eYEkCJ_|y#_*IA3&!bWiRqcjZ_vQD!Ne{vNa7mRs0Fz}dscFIfXYq90|Yq;lZ`CF*my z0s`am@hFg6TcGg z%Z(0%d=;0Jg^X)rFkD7=Z^WpOw|>TbfhbTduQ;FM_k2tw|(?SzDV9RE!zM;M;vPVn)s_hDC5~FHX1eVuJG}(nL4F9KxgPUWbzuD4p`!;B#lZ`2FqvEh{TH3685H!fXfpQBSDK z2rd|kj+i;8S#Zx@ZLgi|^6VX_3o}{bs7yo*Aac5kZ_JV|z{f@bWUuonl!T_{=W&Hr zRgulH9H-Z@d+ik-*64Lkt=+A@ox2L*yb087eUh6EW&ynJPbRokn%+0=8YU@8VxS$q zJ10pd7heLs>`aV_Y>A=ob$o1nby-JRVLJp?oRhN)SSm!KuPJj;@sjDdi#(h)*7z=X zo&U#dyc1?nL+|jkv5X8q+5}`jhlyG)X)Ah}BpU|PH*^Z>YttNTQ``u#*7Q^1842wS zFmm>j*d3~JE5$>D-yibr^H4SAqJ{M!9FQTL4mu2sfiZpouyUlcq^gwz3r*BkZb-D8 z)f$-hp3V~n`N(NfYZw)Am?6{iLs=8Fq73LK?dM}EHnuT_bHSV+ zgg*yGu@YcoDQ+wL9>mSN=Bu`WcjCVRDwz0~{aBVD)f~!qvVzvuqCp~=PBCX~BY7bq z6iyk}J;A*3;f5}=!L|;0owFK#5g)_J5o(=8R>1mDkw_BxK1ex3Y8X^^fP2p7mb09p z^7xT#?4!V*jNOakh~)y-;T;BZgYM%CA7}Aqx+WS!dd#RRuYWoIa76s1gVz1~9L39z zDC8B0T28i0x(_2a%F_9vzuZ3fie!ys_Y2=>mP-Jt`)*s)Py8{_%25O#)e?P?58C-` zisnr-hJgT=&?!xh!w)9af-MV^j=z}~-ZVEK*Ud#z_)4)N^C#^5NU>m4xW6BSq~aUd zNuzret85uIhY_YLZ%MW;Bnp?P9uGun&iz;@-}4{+GWlguB!6SqCz!p0qtqlvZ)aR+ z#rMNmT(-#Z5W4C$NrE*Cz{L@f?`ZMK?|QZ=!a*9dg_bYO+ZKGOto)LECME!yVb z>;#v^vp(aLCMfHkdz7}06Q>M48g|qm%D{ zpZ@^eI82z+sm)fg#6{>T8DRR6pU)v=FuT>@+8IyHjqG}IH-H8m_l|O9v})=YKM%Yz zv{g>jnBee8{+%mPdaoh7Y1N#Z+ENDHuWLQg-zH z<_y1SoZp0F=y{R(taa6v`m^TND}pswjzj{cPk9lACnMv{l3MT?Lro>ZccLCkQ!*M9 z+bySdl}eZO#n~;M5y7cRs!<|$_wrp5wY_yYNmYv6{76(~GY{9oxaEnkh()k6XP^?| z0VS`&yg+QIq;VJd{WqJaKIfz;!pp*)Y^xSycdlg8nIi;Eq$Q@8 
z+d2)gI@;@fk`RjhHKX%VYk&@BxYtx3qU=*DE{GGn_ydl>fVAt*(sN)ndF4q zaOLW6ZUcq_v2^KTFAK}Fr@}F>=}4Y*NpHv}?$fCj?KpB>Srz?YV8->Z=*Xxk(- zN=Yp+dBJ(J)0(FD^Td``95+LbXsK>MCN?S_F=~X<^1w7N>JmHVlmhaa$V07F{srit z%sW7pffD{6V^KOar0YJG7(JgOBqOMQ_pal3@3o5b2mL7a5052N8HJ^*WQhtUA6^h+ zacR75$kN#JWJ>u>ga5Z3JAjT6!hj;B@tIn3{=*maOFrq~B2GKFvbTxRvPnr}Vzk)= zzQ6i3y@8F@k*ty?(}?-XRGd~*eER_mw-Z}IntwkE<#q$M)o*@g0Fnt-FE6-drb4Wg zOtMOO<|c|PM9F&V`>G0gc^W!SD8nuoxbcT9p ziRnLH_xG#)p2N4gg%gf!0-P`~;ENlc{WSAGMuLd(BKgi?wr8`R*O~u?&_csBhXuQa z`I}Dj+bI6;LyaWWVE^BD{Xe_{2tSVJJB*Q@y)khP7DTaXi94T#ZE_<;OP0flF?r*D zS9AaIXrQw;d$OxfAPFd25z^?z*_4-(bo%D^K9BAwltx+@e;YkWayX%!|BDxjfh@zv z$$?<`i^&DTKP#1o{hx$yzqLFgeeVl}+cYvL*AoS&;U?l)vi)n@iLsp!G<~N8cGQ)!YuQds4G4-y46ue5IobHx2L0%y;PKJmsGu?yvExX-neg zex7_U2e@1l7%{Bc>YG2~kAJkX{{YHegD%d(SofWq&lY-eD%49M-QR;OKqr5l_K&Y3 z)=u;C=g*DOQO{PF>f3F;PY_XX z^gX16Wjl~}0-%q~o8VR2TIYY>q`%O_e(d9!UM>}Wey1hPo^Ia)J7n^ELR8EzUtr?j zpGzz1MvHU?%X}ZvIqg#GFZRg{_Bh`7H-%J~H1d`n60aTtTQSMV0iD{wAB!;Q4zos{ z>I?k#p!yHo&TIGHsK0&gIw81PNwZqDqgNv20ad5{j~0fC?-x}betdi;uh%*1kB3b; zdjdsrrk~c7Y6d(BjM)`91_UTq!FYAic%g22zsq1$Y`F360T$5edieFJx(c97HX7w5qqaR*!==al2c34Rv^m4hN+} z^k-!ZvqpPaxk)Qg+1^ZIvXR5=+tQJ!!@Ui`Ov>Dy`EO19B1wWbh1UI4-`-Ud##_G3 zXRT8rbXJ3n0zhusuG30t35w2 z5-sNu87CKXafUxaFmCa~#NRY9F5wp^wgQT@j?473V16MNs_(m;qIFq$=stZ`rNbE# z*mDj;GwvvGN0UR-MvK?757F!59KW+NEV!a$jgSEj7jt7Up~HUIqxB(&4z(r%88VhX z&V;uPav0HvfP~+@>N7QU<^rj60P?C0b#}~<_0zRwaFn}jm?j9h6kbku-}}*G*F?sk zX{h~L4%cjIOsVg^g@#s*XO#mRmLPN&?r_SClOQ2I*;g%E1H^tDSmKVrugYG*qf?Me zS9qox`U&W+*Eq(u~lu}c6|OPbr7zkh@unb-7feXoigu~ZW0i>+*@)@sJu6b%V3C=rl-F)&hIqt ze@VdV2y)7F{Tk19&PUid-jsfw;MLsR5QIN@SqDh+g!HfO)%vvWk=j(MI4$*!UvXeC z!v_!v_fZM#DLXQ+gXK~*(dY3p1~Hs6U-mwF{4T4`;5E;mBr1NVi3*S~x68|Obv!@4 zqKg$cs8lX~U17oYVzl(<%T2dO9(V1eH^`sKUjEL+;$k%75p4+MKTX|)=M)A69mFE; z9|+j>bc9#(YmTF|t;-s>I$_!DCvdC>isc(~q=f21*sC?^9*N^o(pn^NnJCm<=wd|g z;5pRDx<&PD2rVVlvrswBy)#uBy1y2^Y_<05su$MzIsacsfA?HgAkrY`F)(39>5}X} zt~Rfjd2VsV`|eh3L~9{_@x?@bohy}u`&xJ4q#)%_OxK{XlGQ73^;L*++GU=9DKyhXu@ZKRsyr; 
z!o?1=Vhw9L=ef#%wP|GniI5Zh+j99YPjINMmFZxExUakpQWR2G=0?JqJ5|DpNm zpvYlXxu!l%D3Md+E5lF-amBsl2caqsN#PKb#pw_D9cEr1CWwWrj7m27oF+cB5tAnv z1I&f^XRrMamS@LE-oIF$Z`>xt36{TA=IY!&h#2FuSjV2{Hr4$!k)>}??6$MwGbPGl z%x&{6BW%&U8X}JJxrc#_2wt1=d3rjO=X{ichoH=u^1r0Vag$l)jcho%f@*NZ+URd~ zUXI`O(#7im>Bko@^Vn8hu*(LTbzbj~@`&fbvO@Q@xaW)Zz$M3LVbz1tCnD2jXcZ8F zzu|T+`ONh;N79&p;d*7y7&`*o=lWQN8Aph79=;XTf4U zMmbr)Wc^A*RXnMt=UYjxx&0+pzb!?!GTPS4uhX}1qQnhE`oCwX(3C-`+I>IpC56Xk z*T&uX5>$=L`GKSM7lk6f{h9DEGC<>?U1Ib{*SKv3u2v zc@I8h8H@O!=@z;>&+{Y{akCzzs@T6`TKSej<(}ud_htJK1p33mLEP3BQ#2*nA zCr{S-&L2;cjKv1Mb1ITG*Zv0XaEz!9;hm&A=uU?iY4^e#?hZuUYZdxFd$c{B zlVAOr+I3N2rTW17jebqF$mUSpB)(6jRR#yvvEIh{9RnCSyjK6Y^=lqO;2wOpwPXLd z%(&Vd_4DOUB(&Qk&~%x!veM@ZS7KM*5ZeAm1`F-4!5QUluwpY>tsB&ih@eWU=YG@@ zMYm7`hdQUJ0y8a8=>k=ko|8@KB~doGPJ}VY9{muF4 z7}m=0P9nkam|?an08gdWIs9>!1W#7{`kyN<%v6#~mXZ{9Q*f$<->pS?esAV3|8d`; z^0v$HGAx8P!O=pdRd%^d<`soNRJ&r0C6|LqGPNz5XzL@a6HeRRPr+Cqlwj@E_o}i0 zj1#Ma3xf%fNtq&c%lqB!Dp$w%JY<+jHw?Ooc*YyIS{Pn1+GW6wen3)2R7;svY%@CY z{^A1q+l-~r}kmRc+|14RWBKE~{B7OZ1eIj8fLhOH?n}^eQ#q9Uf%oK8hI^`FUui4hgxK_pF%m4cr|pv z6}p+)?_(%h$HrZ>3XSDGD>X-t31<~gD)$=nbb!1{~ zTX-_uBotuoaVw`vhVvYf#iX^+DHhX+M8C#`L-^lbR2+o~Jn0Sc7=z9M#Z)m3AHC0R zy(+pk=ogI`#HBvddYH&(lQNA?0DZC-qBfQ5PbS}l?NEpsQnJ)mdmN}TqWkTAeZh>O za%Nm40qRMaz}|nvUX-&?s9jzuIL-dVLM%dvSQYy0|36bAwO0Q(C6c%&@01d`k1JEa zLPj0Mi;<*Bv}6jz(8WMmEE`q~mkjZt#|YGM{ByPFDGw0J27xpfHk{wyT^T0$tmiUK zxVQQ3<`?Re1DJKB)(eNz{E{D&m3dt9JB($Iz3Di$3Kh7n?{gV9BRoO|s6`m1#D*CH zA0tGDV;3dF-s+Uu#|pGzQK8mHHqcoxO$?nNZ^_M zk7se_THA?6685WVxgPmZbG!v|VYYRWZvc79S*owR7!C+|+@GDiKrZY3xbfoWVJ04~ zqZY<|{G$QNhl%8$oV>b{ZAu_WBKn8k_tzzr@#x0YZUb)U=qKX5AYtpoU3u1eoqzfG zwSY(lM$jI%iu(D!_`$%pAtrP2_=tU6tXf54e~#gbM>d&z)~i>A%s8KNR7MkvHpKOu z3%|os)ca)HV6U3DP3;|TRqLqBESU>r_YZO3Hy%nhpCg_j5h-2s4Y@yeX(j+<4Gq2N z91rlO{#NLp)caFxC$!g*Q`)P!Hv_bb|DhZPU#}YS8hbhPv)z?{_9vF&H*Op{b9=Uo z4{6dW-I%MY&Ts0pw>qo>3=Tm`M3t-di`+Z14od5Qo5`uE~-N!z*=WS&CE`B|?jzJwGTy3iVRin6(KzZv(I+!35 zq%TS?Sd&*i*Dk7%3Wbd?qdOR7w9*X>)MX`RQm*hdifFgtJg82nV-zx{itpdf_c~HU 
z5-l7rw|L&@)Qyv-DCS6(Bkq}Aw#t|^D(n(PovRlA_-XcxlwOr3Ie8yYiRZ1Br{9K9 zoChRn^3eP*P7;YsUY7da7lLw(-(N|hL%KE~+6P(nOgM_(1}fq6ak8;5VY3V%T?(38 z`=i+ceHi4@SD&u`YwFNc3xq61bxCT{ouUXz?jzqx-p`jdN)^IQO}O!7lvFlT?}Rja zuzqe#ag-xC>TSOE8ULDYaR$a*CbsbtSv(?kfcx3=ua&yt4z4CSw5!A+a&YC%hc1vq zSh)8F!va9;3_1Vwisq6$CAVkuZ}n2c7vIF?LuAlOpwtR2^%U;LlXWARK#t&IT)n(p0dW)_?9O2W@UezM&4p$ zLm)`)al6ndc@Aw3-ZD>6%`qKkyX21q5N8V)0{$nu zJGP-TxHev&?2{e^~5 zaVAdq0Q>WP4Yr9kn7?*ibTh77VwJxdb2av+_4S{(bn1Q)?-~pP$zx8bcd1lRD>igU zl{|2JlRZpwhgfOqmB}bjwKPS-==!an^!W*pQ#J5#&!lU+cqpHDL3H8K>H9RJc+-&; z89k5)>>P=|hmKQrgQWuA#`_AiD|6(6OnR&nYNNN%Utv?m+%JlApA>2^#(08gs{VO= z@PTf1U&O%zD=j|G^nyl_PL=CclCio|*-N-)#$3MaoM*56x^B6x@X^@SeL>{o9lX}+ zf`^`vU}9EHe+!3VRU)oGoc;hs*ICoyb$1U>D%$(rT%42I50KbigWf;fSkIl++bZ>BOOnL+p4kh{dm>xgLED$2SXjO)JlfxLUEKm{YmT4Pbp{CTyMM-XiG1;Q@9^f^;3J z)moCz4t;yAL)Ye$Zj+_AKm&A_;Gz_L5;}ww7%@F>mb{Q+p7Hfu!B?H^pp}QkyBZ`j$oMC_l z-{7q6i^PPK;@zdP@h7B7hBJ7(Qk}OC0cs*@pmXHT-!q3#Xsm7g;)9Th{!;qV zPi6R6k-bWl4MZQ$mgLxRdwbvcH*rrGVONV!VON0obkD_>K32CbE17>-?|Ue3 zqQM!F!!=2l$uK^yd4-`WsMLxnTP_JPM~3ORk)}qFdI7sPt~rq~v87fClE8DE7Q?o${Z+;u*t@Ef zEl(JkG_CmBj|whwDHKmV5d|Vh0^tC&_{9OQtsV_QMeF71QtP8{vxlyssg)!&>j`El zDvBMB!fz}ofYjXS#RHPPEFsQ2Wut5mX&~i`8mV#{l-3FVV$zbq_fm^E$c$3;zOQYd z$<{zye3D-4vSa^^5;>o<)(8s&M1RD~79NmFxUkfI(s_5WKz95YMiKr3>b23rJJ3w> zr8xUA)~VFx#r!Vn)lj6S;C8m^)gE`jPr3b>z^}W7Ier~I|F(1718&i{rO>VN3o{ra zYRg){>c+tJ_Pj57`yFILi}j%;YpOP29Fc7V)lqeo3b!kx&7a-~7-(mX$X1~(dtTbD zFVGpT*h!bQKvXVj1gKz_#Jt^Irn1W$>4;oaMGlWw<27ABRwjL~zAEO-s;iuC_)VLI zFkHn!i!~*{?v5bkKK0V20={_qTP-+r-inJV*WWgbROhTCZ!pLbDaj zvjv&WbSQWGE#k@r5MLK4prmU7QKNk_iAb5@W(o_kWMxa~ECFc(lQJ7qf%Hx&)6AED zwvQOfD;p-7h|L*A*8mmziqne2Y*B=Dm z0^F+6{sBwJcj`~j8)HJq-Y7!j47{yJlKvv1*iXNnxCKQiq~H_rkUO z`!@a}kI^lE7!-0oD18qx^IkkR5tQ8`sz;cH)j(8oS{>1)2lezCi&GI;kYU9>sUPyI zyU_&9vF@ZybM{JskrIoLMf29Mja#eX zJVz|T!k2OnA~Hs71f@7({8)0W*Ba`>){ma>D;W*&sR>ukqB}dl)Z5z#e1ZvrN=ySr z-|3E$-3RB>7|*Zvbch_>P|wr3nwjkMJPBsfD(Bt~Y(k7!g31q`$^vIy)>Vh^}x_7A5rJMCJou2o(dol{cR; zso5*joWWnH)e(PwSAWkhkeI 
zh02`L_!D!<`EMgV4sbH0dkHw6zHVZ?j5Cg7h#|xPeJ3%Ds}8g6N~$a?ceBBRBu?D- zTq7p89pPPcy*kF=S_fS6&=#a*`GAUm7mQ?znD{z@@b#f(yRwLxYtnw=`SShwQuYsg zF!5@!rHy(1DWpU~J*@@93-vnU?#dX=B|w?+A(C0<9cV%*Q~ek)EMSXU*^Q^xFXZlNA*BOAEYhmyA&GEe2Ek0 zu>m9ukVEXMob~AL;LE;M0or_`z`;K)dAyUPPcGf)e=c1Z#g;nPpsCYYOJSu-5HO^v zvY;wjoLk**q&S^VHs46a67!MD&gxvb8=M&JU^?X_i=~q6A zim={fY3WkgWiWm5Wta)a>z&H*=v)l7y;rM!)J~RAp-GeHePLt&rJ+eywq19ftYGox z$Gs%6xJL5=LdP>5FIJy>2x-+*eHG*J7Qb~L@~1-WLY*cqZI;=lE^Q3T9F2VS)?|&h zcb}zOrs8X6Jf+)5gi!jtBFD&)b0WBQ7JW1Qfu$A?vC+J+pQIN8Awnz<-6XlZki`|@sPko}|?R>gs0@@f>bLcZ9Ggs

    `F+Wwy5!+Zb045iESy3O@)Zz82I3%GC!EA({|^o zFo%BoC_PSfjrcajb;gUzOw>5V$1fONdjqc(8rP?6GPTn^qI#04w{YI>4fwv4y`JSS zO4O?}Z(m|{`>)yS2zq3N&vRL85Jo;TO}dY$+_h^9%LzxH79fm45b?l z0P*v=Fj_o^3{}GgMy>x8Xz)8J7f!AkNVR^MlVHGD?exWGinDQ@y4a1=iA9!~o30MY z-Fdffk>A%A|IviUZm=gLEIk|q2wZ2-{Xn`(O-PKbr0f|4SJK^-ghn1E&O@`%%%4<@G0dpQ9qhd0Cbl=E70xjtkvapOYCNvHnxW82mvk zdjh|P{u#B}VyM!R>u=j#h*84sx!+vyz?LOzog8nC5~w<^0HrH{NP9Zo=&8!h`JS9O7QM>8 z$)Esbh{kFu&J&o9?|?hu0-gXSaJn#8#~T|^p$C-q=^pSdxqu$s;P3M6KhD74=RpTP z&e0n$^$r+C@(^1pY+@QSh4NYtX z^gZ89gqf;pu~n79rNZ=S4odIotzP|bS~RS*$Sb@M(f*8Ga+K&N6rI}B07;1t7pfFo z&TL@r7F{~x%AA%`KUoaP#1BaNT4}47%IPzLOfB57jb?LMO(E|22X6CI~&EVG6(8_*G+9o|Qh7v{gUCkuKwz5=p9 z6xg4s5f!LKKE!G=7d1>L_t@+#w4YW0(nPRpibgu)qlGt_**~GCqSX{QuE!Cjt(5Kc z{{cXAPk^0vgZXfwiQ-V1F}f_7W6Te7c-keDoZq1vWdak+<)h8Zdd`bvWrEE|6rYWr zR+a1f$LpXzC?b%h_>3bhpecE ze*3BZL0Gee9IHE7QL`N$VyoOcGJYR1;Pn*b>VyO$xlSzn0h$j;{cg}kE0}_dQU;!= z3U*K$rS-_eZ%PWNJgd@9yYkWWEzq02mqHX6f$YKoMdDL(FFI60nLz*2fHRhL)1)6_ zh1z8v^N497Ww9));qv#3*Gi+Ig;9a$xG~idJ^cfWHB{sMKon|RX8J;G;x+IH?qCtI znCste`iDxe^#_$eIn^#3IA%j2vAg|B0<9zaEB4w3v!9!K!}rGu(n7rZ$7CpaHKGm!v|U4~t|*gSG`Wq5G0PyoPIByCGUXquiWbq#*p zYsEM=We;TiB?Qc{+o@8Rvq^*41N^{2n{HWf5{?8otVAtG-MX|fnOU#$!;fYbsM?2I z4*uE_=QsvTc1f|FGm19ZbAP_Qpch+61vgP$8^(wuSD|b+<(t~|LC|@&faTd08@*Yd zR&3N|c(*d0@vDDSZzoT%c^Gy&xU-$!&v4v5=F{U)QwIRQXL{lOLtaL>W~F@bwMlF2 zglzncE%6MF?A2s`hm5-m*mmy#>$4+&0*);YC2+qI$c!gr;@rYW06aI5MXOK>Ge58j zt7Ne^KOD5n;ts&z3m)EH9!&p*YHN+Z4T?{+R)7CMjlnLq+H=sV^o?!}l#mL?K+pOC z^0O>n8eDZ~CS2*z^rQH&{)tqL80AQ)s_}!tVe#@vm3GetFxsxNAq#xu$RZ|7Z`RO> zyTC@;e0v)#T=>C7shd;Cv?-ioRp*-^(#PfF;C;t;m#t=w~Tngst9r~I%RHE%N2zM5i&*BJC0%@otYZj3^riH1p zj08CkA)Ju(m_ZGnRuaJMrxf_gb?@4Q3iZsJI6&1tA41UT%Dnl4MR8Px!cTzpvSb7) zr=j0uK`?O&#Pndd3$bay6&U8<0bjmm@atN*!Z1Jrw?_GrEU|P2CG7*r3h2~i;q(xl zz`4&N0`Z~o=I}}9l9cYZ3mkS%X}T7+Ha}*|TAJ^+OUE)nEqJbG1RopCMb{ zS1e!HPB!c^9X|aH+bY7)+L5t(NX?z&4jh>hoA-a$H1hz zWcYwfa_A=60sf97WYX7g5wQankxt+(6>L*jZYNA=(CvOsNE-KB_m>;5h$N^dik?5I z)(1w-0>M72xC^ibO(SErxp8GcXv}K%yjQmOw-*432ekoD`ndv*|46`rh&{!1@=M14 
z`6bk$)Y|;9QheaiOZn7r&$-6)2(|7m0<7hPpT%G(w)Ey|(FD>3(XFmYv^Cr?cnF#= zCZ|kokBqSdbN(tV?3BW__${NIA%}2)5&5$jx!%*rM!dr9WIGp&%1!Tce(0(N_r|1<_{7Yrpta0)JM1pe)* z{b$lM2I>u!pSe$^>tE1sm@2WN%d=u>ECT-Sgpe7}*L{c#}H5;oX1cI|;i!gT6O_Tto^-Mf?O z!Jg}cQL6hVqm-1_l2Owz@8U>QU?k=`8%FC3(Hgw-IchmGssX4sjVEM0z-zVGkctOoR7fiQSF>#>45^+BnPr)Uo@l=qKr-ek{VobKfrR0 zE~kB~9JJW?de)^u1)1F-+e_+^(k4ax-l(x$7_apZ?pedDzdaECIoy=Y@NKmc{N^wC zcXEWC8^m)#0SI_5w|!!y`2L+|VZxDSCfT-QVOaU9eHE+K%s-EJYCJG~S^!&0LoiS& zAU(<2<2fZ$AX=6h0;N-vyvDE*tJi*89)wqw>MH7_g|(}vU=u@3O z9C(uJ+($O9PYn6Qvg@)|^nUrYDi}*#_zboMck~r3w1I2%%odZ+Yj-^0$0t&o*J|N| zO5l=UURHWlhaS&e$)FgXaWxvRGm}J&A60EkT<^@)%%q8ig|?^Zn`^m#Yq_k1b{eB; zrKUO^>yu5bprY7HFD?N&8}8HLs#=!P>svkb7)&0wqym|cOrhI&Q zQ_2BI#}{ycgc>@mw?~rKezY|dQ$m+gh++K}{Z_)x^SYDl6pUQC*>jEr92ewXhzc#08I9b;@2ux7>8kgl5I z6R8iEVzsR!!B6*kDgDF@fx1P^PGbomj&Dyn8ceh+P=8V3(JX)gU0(!vC1ESTx!0)O8L-G#^I;cKty)5A z3n2f`X$?ffS{)ZX-mTAZ9@+!H$eaTI+3Z9|NC)4&CVyhAe)q8>bL$`Uy}zSog!bRI zr>7*o2vUEQ3!T7|H-W`rN>GJ0L&FR&rkcl#Vt3Jk?YH1-HY0sWcS`kQ_$`bVU?EEs z=mTU`$2>?j)dU{Sja^|F1aly1-LvS}1Q@ReC^6DTHS3-_CtZ8c9TBZ8ApaC8Cq}It zqq9t$pANE+>*Dz(DE`P@1ThLFT!%xi{NWosa9shRf}zMxPNoG~ox43Rx&&%49E&q-GGw2-_U4qG4pp5kGl{ z^=J3{@6y2gZ&1;1Ik3*WPsVRKh{yYepGvaH5!g}OZhgaj=9T|@o0_BVEy^$CJ!YzA zZo39XSq+vvv!DK)AdT8uBDyz)PXflgLoTIf?+`B!l@)zt=2WZqK=OQ{nl>&Y+@cEV zeBpJRFnu=_AB4T-d9cJ+>Gga#E483&%_%Q>eL6pgT|QY1@QiLY*z)*#BZDB z)AmyH^hr=aITav*4SM#Zj|30oy!r#e0dxK)U*yjDSaVVvi1+7K>bbSA<;2pc|c* zi0VuYvZXOT&wuaZUr7C@!Ut}HI@AQvO${_8I?)eMelM4Dm7MIjKU5Jth^fJ-u(o+GYY&^Ln|Ab zs#EUJIIh+x57VJZJRX@%n#BI^$_ttFiS3?T$Hb=|SP&`!)kmro9+;eGM6o#TV}2B- zNkdw4>q=N(%sq$y!XY4Cx92>`UEEJ|mo88?sBC^K)1HpR;Z-43+6jqacJW#^!n+U00oqRGc`pdDMpg>M3DVbK~r{K`wLP1MJl(yMyxybpf2ErPKSpON(y3 zlCbYl(~yTIbGn2-D8Riu>1K}LOLDnwTPA?H+|sZjyJ^jszVkSmtR=D|J`MH8rklmw zXQZ{-bDsMX0}#%gT0k{vygt#GM3Lub-z4v@r(;`-X5Xz+@Ylf4_YidIwpP!21`2>J zvm@vGWS;w7xQU`RRa>$9m6DQ>_|opSwPKTkTPXpH=GXTN&~DKcIo>tuJ(-j7cd~{c zJK13Bf7a&vgwnWXoAcOB@<9?VXnL=c^>t~^tFHAH8^(n5Ok@GcSAgiR9IFEaw)`8D= 
zu0>CF`sIBnf|`M`vtJ|Qwv_DtqlF7-B;h$f1*?!i0g%j)&F!CpEmb<=CyH0dN}IL_ zy(FulPxZm!`iEcrF}qnvXq@^hT^xqW>K*T4rIijlB?25k(f?x(lzeiL)Y3YO889cs zpW)Z$rnEoFADU0k)f#hX22Jn^crP@P9a)Zir%`Ck=g21I2OrP`4g(*N&^BK9jUb7Q zW6rO<%^AS3ifpbYdcm!m4&;gI&yuLCpnA(MEUUVu>ED@|;;I13*W4%QASbTSqi64PY6yylH+7-EnUXsID2NFE%7;l* zP6DOXODt~!noFQ;qZ!#&z5F##S8&UOu(e@*nkP6mk0aO?GsCt35FgOYg|{XiYCGCnCMA2Oc`#_?1X^(w`FG;C;k_sqvOE4E)xZEmNg%_R#lLu}CXN!c- zdT4xJwc$lnw7<$>{9ofkJb_EVwYqJ1wVeig9o)=}iN-y#>EUOkRyeuqZWMiP5dSZ& zXCTu_U;{$r?*SkfY5AinS`T8FDYYN1#PLdMG?(bE%Lm6W4u}xlDYn(4Gu{rP(|%rN zF8i65GI;yGo;uqbUDaX1e^h{=v%~S*32!exfRo$8S~@f_5CCWsKPDR>P_HWkBi>C; zgs`hCx&gjru2p8A4wOAQgHXPh`Z(Le06+0l=z+!)Ce3mucRMumMGJ>E71)Eins>QIk#R%E8iA+SLq6% zm3)r$=pw*WgeAHNn9S##=j z&_@n~;179(;EXnFiyvQfKs)kJ=N9?E-n@gpRm))p;8kAbfn6wP<7^jnM-~NJRG7dd zI?Z|KUEyCpsl@dmYyXOBl3)2Fcue^3!4JK5VbPdrn(4N{G0O3KynEo?arXDv2Q_S{ zCGf%EgvIn|CH*ombI*%J13g@JZ@F~Ty;Qo0n6us2H3B@q2>Tg2v8PU2q zU5%JXqOIVZlrOpkcU(j7GE{7nxuA%w57U9~gfFo07#0r){!#mdgdT=XC{pyuk`4J(frK8Be6h!8C?AWq9@z!4Az(Exht=gVL43{}zz`xLdsF8-d}?(YY(;4kG)ClhF;t{T;r4`Bh8A zmwq9VQR#4*O^!=v3lu^SP{p{Cpq2ua_bV>XvOU7tmdxFfdsOh?c`v6Sg+%lB84Y49 zpsfB7_`dT>Ur)ZD%fpL{3_3^qjAzKL8Y{f_DFp^gKo)XMgxG=|ya;cZAy&h!Nk`7O z=RN4uc+sovUM+E}7@ux_(KYeUnGXq%Yc3)lZ_Vy&=zF9W_APdJv4cD$cg$g!HBoTi zhqx?$GU$Tg92ES~(>50e{5oh3FV=@My_0H*yUc5uRrpCw7ZX?IevT8@oueCQ6wLyJBL^@tIXPP0l0i9pwGp$6qn;uRqioXUv zybAeXy*^FQT2J+{EMZu^=t8w|>4TonLeu3qxTxd_zlS{_X_~ksOVQ=P5n``Fq?r<7Yq!X% z(;*Tog8V3HI_heuPfhP(U06Ib;;(VL`=nCNfmhaKZkSTIuX*Qiz+qfF7Czpc6wdMK zO(1%>EU{)!%5m>8xAL~5#|2LENi(x;-cdna-u`Xy7YN380|f#=+husAjz_y^8n#og zCxDQPoaN#me*R(k@(hvXdjB}Ro|%A%voc~8XF(kAS}}KmxLAPZyy^VYaMn<460U|I z{lnAI53+%?9JF?%pqQy2RwAz2U&xsTDCXuac^X^bDq?&XxU8o3z}PR>y**1gWGHff z15f72Q(bbcXCgoEn}x-3SURX#f_|%6DDR%6DV6`EDKw#}iU{^q8!hEx&Ap?pg9z^~a)hc)Ys*#iV$g>^ z)@%Yf`N6RN+CRHA%XD#wiOuw-zns_8N!OcSCLB#%p)nb3vy3!%|9=varnL@=u`LqK;%{ z^!LA_x@#!!LA`o?!?Vf;OYnNYwq&KB+2sLD`hnI5za~1c=%w97Ix|u|Mqom3MBPf< z{>99f#3syV)+2!cnGQbL`)eJqjy<*jY0#O#jFD^$X8((+?b}g`Y`7TL2F7rM>1H)0 
z!cFFODaf6}YtObLCpgq{nX*g=@KMCylP}D5aEiDcy*aw6vtsOOWo8M!LI0x;vynV$mQV@Se;2-g-aJdw*lR|9@lb zvA1sO;yQEAdCcFzf-GUj;#{596l*`zYzYJsfcIF!J$Fp`QLX&i8(Ek-HaV>t`4g18{ymcL~7BdEY8|-Xr-_WJH!Rf?LlZI*7#Yq2@L5cLwvgz4DPhv4Am=i1!JB zfrIVJ%0~q>Smr|5XyVr!b=KR}@J6@wK7KB+%fBud^nbe12fEugs?Pch8p}>Kzlh#~ z+4XNuqs>X?`d_MmA*495VJATSVt14G`hstw*_5{8Z5^XNPAp267vm5F2LdVB^WcPP ze!ns*uz;eM`Y6csIa>ld}!J5%-90J(-3fzQDd$o165# z13njmXnzlZ*9TaiI)czrPXSU!oT64pWFMd-f2bXZ4191QnFZ%i0 znA~0b?FoI%O}jphuEzsXw;t#5AT@r}ZSW#4WKLxxpm{@>^4YQ-0`y96%M2et#0P;w zW~7TN(#PvvXOen8YqL0VhzQaAMk$qy7prTBVYGRuQV+@ypej-dxwgsRnLbkaXq0vv zsRq;#!*sOYYOk2}xEPiD9?>Sh4%Y$l6AytB_vQ9Lr2tBJ0y@m&YFqEDSmqSZKxjL- z)+1;$g#gBRqGciH=Di}K4zul-xqUT~nsd##e1ZJfPT@{4vQVY%AlYLZ{j#C^r3%T{ zf!<07rLVOKG1kD5apqqOj^lY^HSI7QiIZL^9J3y4c#>?xyDPW7_frGGN@f=jF8$)j zMSUz|ipBw#DVKt`g$&`UiLMcx}Sws6t)n3#-ygqDudV@C!)I5d80mg$~hXEaOih=)Q3NJ;CAv2 zU4n;b{sdQNp7HYdST|2(*r;>xG&x6OhLky-Y6dBp6R!@GUlz*lDsV5;nH!~m=T|6V z@|@7;#%&IxsHnPqdB>kHG=1ou+riE~e!JJ|G#3rz(7aG@Cn{YJj?q+-$C9Pwmd(rp zBhZ0B{{tJGayGpm_67Qpt`4xNDck+m=!VNWJx=lFsft~=f-hF=@CBd&==b6s)NX&rZmBw0Jt8Ny}I+1zE zW+tOFsc`4&u8-34ujSlA*0EE+dsa5T?lR|j2332s#^66#jNr{?Jvvsn1>16{$GWRv ztS`jgJ|ZA-vWI>~Rel?9(B&!|MoQ}?!vW_y^f3o*#&B?)N6D?N1=1&PRsze&!!t!b z_oDUdt_I%IWxsYEaPe_)(G*(qsxZqs;f2O$MOt#9%Oa~Z6G73*C6Q6nPv+GyB)023 z^BwTAL@M+nN4KX8_$S>y5zj4$#?;pAd8`hqH*d-uk<=wneNDX6aG6Ij@8oD6@9}gu z_uOh{)n(!G-G*FkE!QdY?VypzS3~Rfw3hIbecS0|Ij>R^Rvqrnk~aJJ-<8Pk1g)f9 zK`xlBQC>jHf#Q>*S$63pbubnCc40q-EC%ZnmgGsiI*CX^K@i{~- zW_L#Pv~t$!Xwmm14f!MQ{5zGVCm8Au=MS563Kcvh>;-DczH`j~UZY?lcsqhRMzv7X zp^1L{FHF8fdA*l6M7CFp-uY>)?7P*v*tnm^50CM6JHm&sp=|8XWA_xt&23gZSdV+0 zyP*M>dg#&#i=xo`B_B#cVWJ}I!cRk+T6nUeQ0YE%fi@0-5{J6gMj9D-8eydm0hif9 zX0F0bCpkp?4%hF|1A~})K>YcJkC1I3|C$I2)%0{XWHr}EH%Cq{86}2Z43*)|VMnaD z2XSjn+Mk42jpQ_1`4YZP;hHqKQR*B{{=e+W1 z!2(*TGw}m+l?E8SeT^Z1h?0%;mSc3(-CN@copyeGegvjYs=VeQZGD&X=c10*So`(^ znr>`AN9SK_CH49*L-3qw_;EgesxEO2Vl>{|R_}X>=V&PPaw^8GM^>;ioDjOy(S7aw z5O-+*sn~V^)32dQ2ROwkoaLTfW=ko 
z(dx!~#x3ymU>{u_c*@Opd50vKEL=ZWx4Z@Oql`l={ZJ#>d*RJF1G)4Y}uFYeF&9s}SdD0Dm}H?B@(wZlT3s$>)v=E7fS(1gaTjLBqA`5@Re09s+84%I1fm#OM-j zyhsEG@1q!t--gPKPw3M}+xTB`Lq2urtf@CM9yA)JaF6PmDa(IQlG=YbdrXKA6=-QU zw45k;nwhAJ@&=jxiB--*VT&n5?`4k}lss4_$3gDUIh}3Sg?v;zq^lGQ>N;bV?eeXB zEAQ)tB2v30x>#VxK-ITx|CQBUuo6b==UwiIys>Aw^rZG2DeUpttrx1UloIES2~yoWE~R2DiP z7>APeiE_W$DO9?ZE*Oc6Ds6#zEiQXDp&$98mstVa<#tSUdoUZUB8RGSn4gi&!Z5SNZvrLV4zjyMo^Y5%x`Xs8mXMh;&&zQc z2W5VvFHQwxP9z!>cW3yRF_3s5pJd>i&rEEkCNr+N{HKjF8dvm z2Qp4MAJv?5dpONAn|qTL2ev$)a-u6&KO@b4>%=!!+138RSFv_f3Lze!r)X|RN#R)L( zT>6@S3@r#vAcpyI{L1sLyKsKr8S$bX+TD7asL!YXPr?BupTKnpUayZCDu6_UAxr(> z<=giJl2P^@hPy-h+`Qrod+HbIf+Hh6QZCWXG%7cX$^xq0ANwydu7Z)$FX97T4D8;^ z(g&CY=N>gWTIJ>~E_`GK-pXJiY zTCAiiP@I(U`XdTK@2s#H*rV>SMKqh3Gw~Fdj@*`CDZI$}0v%eEpYvRVrO;pb(;J zpE;CnwFNtqDdwgOmD^MKQRZQ4p#XIS9=|mp>R#aETQ6YPNaKp`N{x%ejnQafxcm|8 zBSIa%HD>rE4YwQC>Kr{jk=w!`r55X`m0?CrhU)FoG5}adJC$>GGT&FrOR~zUyDro} zC^5B=*+7$^~l#R6SZKk4=`} zQA6`A?_yXX6H)CxE`QzA+03x*Abe=&v#LH*R-Uu%Z>AIDS*T&d7NGUYTQphSUIWdj0KuGZU(hX$RlxsUqZ zQu!!RtdO%i>W&c`Uudp6Z30ygbDsIvE^zhu_YGu53QWCSn)jV;A&+C(OyURSzL!UHa1ex;9a=s6{qUI4;ctlzLz4hgTx0 z7SsInG9e|%0Jxw=Z4xNCYsWc-jOTvi&O$@ojWi)`sjiv@d2K3wsuV&UyO10O2-{|@ zQfbIT=!1_1u8Yeb*T0=4+Yv32uk2!BhIClmRc9Eja{e6(AO&rDMYW0j-~Q!Ok>j+EMFOrq6n~&Py=S_xGKW|6j|w z!E2ZxCc(y z5zd~w-$Q>awJYN_96rtlg149lRDn+4!b3JRO@TpOs*!mvhO7Y@9&JE2Cei~XS=vW* z%q2HSTqUy0fTF!9A~}Ur0(6w$5fbskarPr03IPHTEpKh3Jh|^UWzfH@f^y|W`B8?T z7i^cJvda2}5VB4(Ps8Q{&~L2WctN{w1eB+NMyA^ZRgNNf9Lqf6J5pMA^k1GaJ9G&M zknzbzbZ)wdgb3+1J-Zqng`07)lfTQCdR6|5(y4m6twme@jsQ;LmuY0gYpcOmDPbxRj7sUN5fwvv2lnX``_^t)9-j{;7>f2g9#;n zJ}i8_D>zxg9pf=rpn77CA{koiT6yW*JrX}wKnj&!;ptm?N}1+02(mmb3T>8^(|+R% zhWOt~Su67~V5P#zGDv6~0+Lj&n-98Bxjx&r;qkqb9Ymnf+{4r{9C=OI6&fRo*8!vN z9iq+oO!lD*)^wxGz9-mF1s-Q=_c>Ef?#KG?0P0{T{nI6`({GPLABXpujs_MC9fIo8 z5p@hLlth7vLg8*T5)Iyp=Q{dRgJsfYoP*cHh2V;112!`hPPP-=F*J|VX~3)7K(M>IJl}bJ0qs!CZILvC2Du-eSe!tN_1q~A zDfVhWZOl1mpk|1K1BpGAzW+J<(_`evY3NtM$cH@IxZWD%5aarxH$X3#RnyB>C)-G3 
zcNI|SazT%l&P`x#@th>J8;HDmmawNfE4s%vhCqm2KZ$Q?_;%$gU`W zigk;_@GGO5zISc`s`7 z)xm2m&VBD~g!t-5IvAK@zy<56mm5V;gy84DQoKAxR2S*=tHKM)N2IUO~glQolOv`(j+R$Q(C2q~wL3}lxvaoMrJM83E zSHNo;ui7-Jpl7$omczP`k{uHh2%03z!eb)$_oH9RV< zb$`*5*(;MUxSSAzo?)s`TrA&-v+xONbqKBAce`zGtcCl%zJzIQh=4xPCQAM?`3tW4 zPh<*!4C(q z2^lW^dHP@7roB<&EUbem;qxc$nDE4eh5gID!}CJ;nDxm!ifFxohAREQ*_$tF^Q|2f z-bz_AMP&m|t|8(dhQl670`TtodF|KpHjfXO_#r~8%2ZNMMzZbz=o~8~>dF2?P&-hc zz9%)&ONcK^IVF{omTleRsIlDLL8R*?*0&#GJR?}b$4r)NAbUnT@km#h9HZS(VOj7S zkt5+5fdWVOelbcUeaEA56+??ajvia@SR|PG4GKFVE5G-Q$@b^|R-3)jJ%W!vav(N7 zxihlF_{fa99bQuz950%dZPXb9TW9{_N{Az4aQ9sRDO@7qcPI1O`_7EKl7VQ-(>lv#9c;{#jvhZDD8UyQaTWV!>?6?ALT6sXccioa;wu#hkQ|po$6sH+i)vdawF5=6Y!cnSs;F+gr9^Be|Xb)l=2OV+Gau(#Vy@=HUAtZZ2sp z`CjIGCH0#tYj+3|Nr}hd?7LzroiCb3-o5t_tuC_B%MsVh9gY6y*z#iBpH@V~UT5il zVlPD^$Xa<7)lf%4)tl0)uJuo-8uBNr$e znhS`BQV4f7K%K%C#)Tw_npH4Gd^!XO9@S0dJyE5`hE3Q;6qxO1h}?ozs;KM~|NZHR z1)g#T{T6J(Ns=67s;^5Xcw$k+eTL2D8wNF*s{^-YKT|p)lB0al9ufih zB6BNl4#oQoh2H(cL$XZg*i0`Nth@TSVQ4c^0* zFK0uH|89d>&82C-zop6FZ`shoU`Np?!*|wisb-H$%IaX`aZ}qtQ~>n@4OW{kY-l71 z3BxbvK%d_lnrvx*^ru{;K>KgJp<(6b?E!Z_xjdZMQdRRRCIf9-hg}bt1-oPV{(YtY z=Re*8AQ#e~uDiWwfmnr=k61);#!w+Teyf!#Jpb`d5sAouf6bOAH~}5z1^?cJpwJn8 ziZ}iLAD^$O1U|oj!3z1+JqW9a*w*t*|2)jUvjyPOPXsX#Q|3l}n_L$B z6>q{|Li?X|+J8I>_`+0TFxWFe%Or{w-oq$jWjFm&EV1 z`2Wuy9%Q{KqMhL7?rt>{81HOvt_LOIl9L0D)`NNv{fjgIbBh1xD&`JAK_nj1|BQ-v zJtF?=^suN);`^a2bfjVJz8XRyJlk-7-L>X$+VXe4{Lj(dLfoHD<|Y58F(?t&=$P;7 zI}>}u-OSL--O#yBi#K&Hdt~=5iys*L_i3f>_j`>eS^rt%V>l9`46Yk@sjBU*UE3%) zoj;8&r`SmCnfdIT>IB7@itcZ{aAZ{AB$C=8#~CO^j)s- zrNPrexWrCn^vy4cZliar*1BAS!@LkuEz621EnqD$53m4pplNLmL$X|y1sCUS`LKi) zfDayLKLMUWXF$`)j+g`f~nPDm7$jx2$kF(uXH zIWKoJ>1nKzz~1z;gS{4yZw_QL5Dj5)y_<^|H&D@>r%!oL$U4fv3`k=;3AN>DIcLE4 zQNqk|X?F*7F(8WapgSN%%Wbz}9)~%9(O?1i2dsfuodeuR-kl9gu6HE9`lu_Ep)jE%T($<$10HgQ!AC2Z?Lhvw{1bl_p(8P@D^PbNTJ(JmR3bg}eb#+I zC!=>=D2#+%qZSANki9o--Dj2B*E=S{_5h*WD7*@qPxr3({e{bWmJpKCs&2Xjv2Bc? 
zWZO=%Vav*qztd_)0*shtx_yBp2UPQ`U1bU*fl?|tG@dw!(BwE=bkg>#6MS+1A%)BM z+p{qXDv6K<-mf0lM=5-6o3c-~zz>5XZd(3T(vN0LkYOG_Wc2L5|Lo+e|AjB~Gqnyx zm|wSo0EEqjVJ1z~zx@iS%Xv%}NJ|5a-?nArj4?wpi}*n0;rkG<3!e}Kt=Lfj zP%}nOAAxvHfroadT7Et;eOOi`f+fMf^6%?5&r_E-+%PiWJy>gu3_CnD2laE-6uz@- z3dgAyPd=z-bcfL26kQfL?;In9M!oT1Z^FBA;!viEzM)81eE2f(DM?^b&duDT(y!c* z5l`XP$2$gtwP#kqn@3svG3g`Uupwk@{SQ~Irnt-`*r1= z__;5iDtv|s*r1_LJS|g^0 zO7P2W9jzVRk-WcDEo_7#(QxbZPyWnQ4IFosUw;wfTj`g75mRj@`c^1H0(D*HbE_`0 zoL-%NFug%L9DZL)A?3!oTiIy_&)y=4dB)&;ZEjJnuKG4@j*T^Tp2y2P<{~p_{Bg4{ z{a{SfueHOpWRD9oCy?=vYMS-0R7zC^?LEt{4u{&9bJdD;3wXuLEhEkA(jgGb+zO0z z1XBUw%N|dxX;Dz|UOgmARWrB#y6*c`p;oc}v-3LlpKqB7P}A#+`HzQ06Ow*{M8Jl# zRCa%JP6uRsr9~*=pH#>$;+k@*D0s;MhHbJ(Iv$0X7z~@|Wt)&yTXNDICT>hMDx1f6 z|4p$=_(Obge8vu<-I10LeGxL<(UcyIu1H8kf@XV^R#p9~2fH4tzUc4_siFP z%wPT19xDyl=f8l$r55a|iJ`3dpy6}P*#^O92&H=AlT!44joHJTT}tCc^1)=&H4jfZ z``SLyG8OiReSx+z78w%#o}yCe2zdikVF3Tcyr1705c5ra>TOxS0QY;4H<<&TJ4*;e zv6{*!_F$p04>5G3w0ds4JZGyB3KtpN2F8h~8xJe@8ZV=fdC0GivxPzBme%G0nkfx~Ny+32LY`ODcwydQ&VN==Inxu09mwyyUUDT5-HEu)Xat#{&LxWA4K7<{Jx1D#Zu8gC-`^kgySq)O8{wsV4zkZ5 z8{~lYIHysR`?H*kc~8EcP>*-swW?W*)*bB#CcSjeMcrGV8J(ms#ww%6w%FVwkXQEm zs6`_t&BguTDeuAxJOp)NLNZ)4B6H&59UKw}#L`bO zc@2k5xaL4MPp4MF8K1lLBmRY7=W=DYe-54_s^@zR^^5TwMt)7aBgc`J?w6-02d0xc zPCA|`si9<-_=3lZucS)zejsY@iZLawpyMEV@9Uh}&yXLSQO%~TlcA;$WCRjG=&2

    4nh1t7_i0W07`H&>*_ zg-~3bz^zg&)s8BJ54N5={rWgL?@z_kqJ$^TxPN#rc>atn<&}fXa|R}2B4o)jpJA$- zpIsrV!Vu`17wYZLZ@9It;Z9A=CK>B#B*o_qd+vjxB08UGgn{a=2$jVQm5Bm?7%7>4 zoE)A5v5+k|EA5yBWP-D?#ioJFaAs{#FV(L_Ov)`Bg0ZJaw6R-p=xAT&l_V+F%3~4> zgO5=3!-;iw=!MS%w*eaSeSbOHV?Ea&nf*QC{2DVi+j=)M^TGF`j**avJ*C8+y##WX z{z{XpFWcv*RZY8hxlcg%Xb1dtl`t?!|3Xawb(k{n=Rb|$^(984W~*5Z7gMYRNwaDy za5#GKQE#SPAJYqWwq76wBDI!ZBMVUh{0$szlkW}PyozYjQTqHze3Z7PL8eG+tV71< zuKeSc46`^2_hiRz3No^7Vsh^Sf|gPw+W;K&TMZfx|tqw7uj zVy^7GU<&Lw?r}8G_>GGL;+X6GLw{2F5>gA85@Gzw_T(U`f=nYC8m+wmdDjlmW}TL? zjl3JbB!i5bPnrBV5w8#@;w>j=c%D_A(mZ@urb}yNPU+Qc4cja~#Ehm|st4%}_m`RxWohL`^@9rDMO;X=>}bR4IR*Pi@L%}z(b_B>9f;+ai83C2iw##??B z_1f#!g)sM(7nK!m(|7Pv;DEDnOYJFM-jJ}_`;tdS=tu#RYNjGgspJb^qIR+ptn6UB zL)?&#OVXk`nzmjgFDrFF^+vBqn;9GR+Yp5tb@ zpd5&{ERw{m3~Antem8cXu-IVtJeLWMVNsS`IAS!fC${w*)hC)7xMDQ>#dV6URBSr@ zFzib`P01jTEh(MC*{uWK59uZ@t!rLNeo$&<{rob-z6F z<@pnYm~K~!9Qcn-k{ndGcyjjmr1T?WyupP>S=`cPskMS-;ViB4qOQ@J&bNMeZMW=i z7*bvc0uL><@AVb;ApKuccdc<4z=$dZp|UGnsVdMH+jYv|&igeysTqIjWYbQRbdnM} zhavc+MopSB-T#`iu8RS`-bk85*7!m|@i?FHNIMX{CiwP`-O8P+QtMq^VfXHax>R1y zcw*Rc3;n2>%aG7!>W4_>6pt}43=A3#)pkj?h8@GP7A$&7k|Gld5q~Ogy)^3i!zt%egoF|liKyvQ%0Aifr7m7Gp&fc!Zid3p+9W-6-S}*hl za~d{Xj2Fct>!#AGY`H*-I=Lan!ZY~w?e?D?mrrQ*_W*$#!6&g;;$g!7xg&#x{G6tK zzZJKU6>+Ja1^IylZ|Qm#GQJt^obVGE<|6H4jM*s(cT76Yg zJ`y44+I_{AG2QDnuIkl-08$MSDANhFKhbQ-S?de7%2CI@;~N%Vj8x-W0UQo7tLo0G zc8xH8j(1N1%3$rQ9_OyQo6|BeD?7y5yZKnha&z!;fSf4iOQ}|#POPhea)&}efyLN$ zg;=E10%C+bI71_4F)a8jop<1wo6|}ImWXjZ^vVtNPBmJ;rlO`;VJxrTP#JS(UcSz1 z$w{O)N^Td|MunNd?-_zaC7}+XAhvLE0XV9QVwC(;s+y3!x;-I1%rlcM>N#X6A2w|L z2hi%Fn0Hu2PGisEK30R3Y6qL?bMKuz>Iy5Se2-aaPhro#*hn*ZceWS;Sv!M+Mt|19 zTZ?CYF>hd&@Icu?`h3{0kUNCvp(oOGXL3Dckb;b8OTK?HBASSpwxGy1`4J+QlDuny zgNB2p?FJkVKCrSjg(rFJTsO?6eiH6qY+Q}VgpX}+)Ox3EFE<`^WTDBip7m8qq%t@x zCk4eYE#&R>SqE7%jh>Z_pPJ>e=yM-eqr61UHv}T7*+9vlGg$~M(kFYg+Lw0fgQ;Au(`6c)@X%h`4_2`GS#Qo15 zkGOf=3B6_j>W9Q(69>7)S=9iET11fqcZI(c|H-0lrzkJ}F2`U?u@z8JF(Qyqp;z>4 zUg?^si*}pi!FSp9iY%23Me0Gc<=y 
zdDadZ3rCBP9|v`?j8mpZq7NbEwm(<=l8qxnCIXM;Fk1Z@{K-N1WK9u-N zSJb?>r%0{!YZO+*d&})C#S<0otgs5CXzp1*fS^04dsl)o*5n7Vi4Jaqt5<3|uQ1%5 zw@?y|It61Y{m%0D-{9ze*FtMn*$#9OCES+-P`E6da-C4{j(NBqQd8N`k#2!)DsZ|cv59l zfXq|;lgfUg9A#ve#Q_wFT=K`R1Kthu&B#d0e$fW#mr^E zRe-3}!?fRO%)3QjinV|KPcf#A&3+3eki*o6S34)(wIyt09O`$>+0RskN>@CQc)J5_ zW~8QD$b5hnc1R9emg+!tH;fj9Ais{}e&7P{rDsW`$`AqQ>Cr7GO6YP>z zWMyVZypHcdjpD(>%cCzyi*m6z9E*3Zh!zx%E83i2?^xI{U9LZ^n1V;I_a9_iOcZ*yDUY+9)33UpU|jNxn&KF*a**r5E3 zCJ|iifo1fd$^IAozUyY%x3Ruf(vKX_ye^Ls_dw|bLSqc1=nmAe=A4+G>pxMd10oGv z`yncjs$Z1~o^U5#dj7TEXMXPc13t%{hJ;NFrHxf&*hhS$N7HJ&5w-YiEdBy{AFx zu?CTn`Iq=I6d}GOu$I**ia}Bqa4=E{=Yhyc&!r3gL>Qe>btz6#d1SH526ui?=SF`M zI2qM`f9%ps5#8oZj|U43x#Tsil=`j5;j0#PyOl?;P~(tTmLl!l=cY`&`QHY+d??lR zSHW7@D%ElOMknWTrSTno{ERjAk4`#6l&NV64-;83*dl6Cog5X4%1{yVt6tlSeJBWI^`e&)?l z=cJW=Z>i&KKB|p-XlXC|N^*DZ{>Gf=7dJ&6g0`(=OtIJ|RM=Qi%021tU ze`e_*rt8p0iJy1z&K@)MJzJx-XL^;RlO?gaK&)lVy?)gbX_HlHD1{vxu+nwyY#NxI za{sAzCs1F$D(4~D2zL$GpzqE%TtlRUNS3hnRUefQQQu>r?Bv6n&Gwcaai-@jbmr|M zgqjg3xVQ7d;*zuOeU;o(0V%7H{p4nz4;yR?qq1LoS}P9)!!s*+mM_Tzjeo_+Y=ZMzF}0zYL2lMIoeiFk>Uz$2#NMZ~df~XY`phg$ zF>=M!6p83zm_H<$#o4;__KS>$2br?fVB3L+aHg_kW$S0V>?|4t_oQ+{r{|-jAPT4d#L`do~3K>GZ8mSH@@n?@hIUT}Agpb=SN ze(1R3u9eK??ID!6X1#7g;;^bOaBFf{mdl=6?e~M{DAuy|_ak5>(%(OFa5AP;c^U#4 z{mEX~X^=e7!D$P-_GNYa8r!|y!eFZ?b)i}SE-b1X%+~)@o64IIZ@LM8L{nd%TaIG( z4bID45jP*DcNAL`IV&_PmNlk)ljub%??y1P5z{jb*_qn zK2ivJ&G}GyMCw64qIbQWmdroUB~Pvm_1*0ihOJT!p(+wcl#vDgDj$$EB zIfbMrJleZK80j>$uAog+>%KoRIdH-?JCO%!%ab9=_CDINW{}n*MxXx3PfhZO(tU>e zH42k|VDE9c-6^gfL$SQxQ)exc2FKWFDmU*45U~ERIs5cC$B>{|Z9?U9n2tMMS5k@oM&BsOjT5#>R5qI5_2e zkW3f1w8oMeU*G-+H+^CupZjnn0I7-{zjPSMS^RkpL3OcQSJCw@QKifV&-k3;~E;?D{nrsI-W4J@h ziW+J;=(YGMCghqT4U{5C{mWtGPf789Adf_{_v8ttJnYm`I;;B9k~2{bJc5Pbv{HUF zZ9qVmwI`9Uh0kkH-k-Ac!S}g)bWSvQ{0-$sfs2Xv`V8=RYHk;uuf1O9N;&b{Q5K zp9mXIJiBQ>Nyv#QGYSlm$tpx(>1Nbnviu7`G<|=e9FqLbxr;XL6e1Vo9$#)fa{+Xo z({A0;zZ8~3Mv7*byp?*woh9gRqugH>WXUqJqm!$ODRqg~7|0GdP#wD}-~lgF$;2#F z=Yz{-_6x**n(m2dWFR(#kvdR8!;bMBDjk3|AOp+LrzvA@jU{Wj!g>U^Rka>_7-H%0 
z=k}5%B7?q478<+v=Hz$5di$=05-Rim6mJr{UsT}F+fEL2<4K;N8j$Dr%zT=0%O($1 z5|oQI>Yygt<_VBS&50aW#eIUz?&WwTB z!dBW;AVGVqKdZbK3VQDXZdh<3oXo{6-Cs@cYr1fl?NU)0j_R!UmE zebu%q^5>QAxl2t_V{bpUE+K%GCOe?WXUiq5v(@czGA3K_SFg4V>|v_*$|P%JRwezd z9H#mrauP3;)tEs!^p%sk5xEYsH?i}^T8Vkth<_JOTFzT`ln`^Qsbu|k9mHzKqsD7# zoa=!kC^-5G${5zeFQUi2l`G$WAq;q z00wywqIZ~0lk;2&BOxK(+-xV2zWE2sx|4)iKh}P6tyjx^=Lfa;zULi9MIvHiu{-s` z;snGmccwagh{ent#nG7^pAm?6FzYs`l{hJiAuA~b9T&dKxEbVnT(h@MQ`c>En#i)@ zBAj9$E=Ly;w?|W!Ko2<2`2Z^N)E7`w39!9Af2~l_r2w-u*7RRiTiQDELa!ChUDqXm ztF>)Ty>tu>&X}0gdG_wy@LH4DJ0jlYJyEKDlHZECOkonE)$@1 z=!yUzQ3(T&{Po1>6@tkr1@PQ`OCUR+gn&QTr~=WvFT$Nys_At%;rV8I=xacywOjPY zlm;x!M0a2y9)XCo&lAlNsB%6j1l0t94rHRVtpF~@Y^FRn!U}LH4#5X;3w;5hTLx4L z!BEqc0$-q5ZiMYT@KqU;vQ|T|bV#*ryssDl)g}T|K&0RF#Xgv=vW@}v$1(sR3CPDx zoF)VJYXDL*sT@vh;|`cUnPP#2Z%6?bVi}#Cf*7w{V`pq4tonH!WWJ@Q9D(e=8|=_=#R%pF zCxZ1R?Ae&Bkicu1Sc+gSAX*scGP?W$j_tYP4Pf4pzd<-0{{q$h!<*~xHxZ)vf-eCA zoElt)_`1@rY~5@7*S&Ya%^(I}_}5U9Scu@MI)eB=-Dp0}R09l*4>O0)9yrP;@XCQ3 z$WsKvBbALI7}3-CZQkj-rLZd-{*z7Wn%&CTMEAub(Aj;*UhYy?|u z_7E)c+f)tVc}-=OA3csiqB5$+exnC4LHinJ!2o@{UMw3Fijbsq5i_x#;4(`=NW{&@ zatsco96nLsfqw@ZfBdoO4Ok9w4wvjfmzY;hV6s_89|8_S>?MKw2sV8pH8Immn^Oa0 zV1Vc&q;#*a4OEt;L05cIKbOe>_Zij&-B=AM%dZQ4eVH3Bbp$MazX9S-(y<3~JXvEG z39II7xJkk_U>AggqQPvacKAO?yw_n!$)x%}b)E-Pc^d*xxk&~VB{(6&TZ1vypvJ3t zO|nDKF}*o-WG>U|SJyr=aMm9-e$Oe8!oX}Bv!|6r;=!+_B_hLDoF87GKEwDTrzk^9 zRDIMUx(_nJVf(2&^6A2L;vxD$!hTzsK`8OyP~$+^cqxBb(|ZaZdD>EQ z?A_jIN|+?jQNT>|s*H3t!T>Tejl!_rJ$F$k^r{|UH(E~09d|YBx}97AX7S#gPIm8@ zoLpbArj;GXfGUQv+5Anv-CQw*LRJQW7^lZ|it|}``i(jYx|P^&(orO&3F%*T4gTT@ zj6hGGExq-#!BHY)$FFCZKOKIkmuG>tAx8_7u)Pr>>IhDFeMWtOa%tEDSezOMsoeU_ zUTC&NvsWxR(4ZL8>hC+}^XE%5Ue$7c2z}p#0EJ`PB(a+e*rPlT?F#n_78z?bO^)#2 zX>RgTWf|lzdVIY${V{iY74FH(2g;}-=s!F)@felrWAmr0b!zdG=o2b4=wZOpu}wK! 
zp29vP2Y72xjoE0|Iqu#M6;`Qkx>;($Pqsm@`4mVbIFHOZS~t;A~L}Q-RKzNRwUu{ytdY zxE~cQ_bGP(y=tfF<{Dvp*uZQ+K`BSjvn04~On|Z@>hgp#{oCD#bOIaoGo)YI0N&q7 z-~}QNt%BzK1a!IAV0R2ZCvGq1TW}U-tKIza9)(ei0;|Ns5P61}vS-4cd5Ol;(@P!i z4kl%#Zk8U)t$Vz(hziR4ZXajZaw;H?l5q&_p_?=c6eCk+EE)<53Er*Y1+D_B13)E{ zt65<#B(#6a0k_kc5xK}XK&-MrM}xe`$k`j=(+JmDFKd*yVO@?t=F-Lo?e4a4cI^*< znB@N4Ztm&6{zP)#6Xe2s&KHe9_Od=ReEB6u;pJoks~X)^0twaFy9EYx9)q*Hn^P?Yd{We zwViVi5#nb+LT)2-$mSp=61Dkph*2$*ieay!8jK5O@V`Swr9^7M-2m9R$~%p`Z>RLd zgt5GyQ4OFbGnsJ)Xq#%4bM$znWkATu+vlD~QN)J~VIX>sr;0Hlyp-+0z#$UAO|lR~ zu*E2B@7o4Zd!2^#Siy#`69oeQ~DN_v6 zTkhVP$gDBL0H?yr*pwO4TlPhRx4bKI35cyu0mq{Cic7G^?e}esb26$X@INb+0bzE8 z8|^ez27rh%upd&?*xorGl@gb~b*^Pv5qnw->cH}dWqQaY;l{k~7o)CZ$1@`WN-r}` zgnQOH?)Bd-KWwFJRXMH=3pSn@>Az4cbR!VCrEo7|pci#`g}^F^16eqYsZ;#Tuw5)t zQPq7b0g|M@-YBIafU!xEDiKTTJB($($xIjkd@V%FbjOuBFSc~r%(DCDY25WRANFtW zJvckgq0Kx5TwR9RI@ch-B_bMDfih@28D%)sxk1OipXb?a#@R7K)ZziGMPj%`am zOLf)YzdvjQ0auf=L4ODF9Y#UvA0@&=@uh|Nun9=GB`J=3CD)jiap&l2_C(Im292nU z-CS%>mt~VNg%ClM1Jgb~38AEOY88_zpQ9B?V{)2hBb6E0W+m17Wc-&f>%9Ze=kbD- zN~^L4+&DpfAJDGp6aj)R$VPXz$IkZhMh>NA=_r?7PWE-0ucA8|on-6Ia`_w`_=Z)p z?32=i+pLqCu(iKfRPP<&w-v9`7`HBOz^&pVkc>v;4UQ+J$l%afEqVU@@V7JdDpoEo z*hWElrw5u8fNdDJ_zm9nkt}v@15qcW(=WZO?vq3Dr*zGUv!u-I%6J6`KQ30PA2SEA zQR)}$zQn1&_-n?&07cA(TMnA^U!d_Myp&=n3uw#KFxMEE*8h#c1SCuxV0Sg_Lq`dJ z4-k9@hUSDXq``nH3(WmDqa^Vl@X5}6z%SXk3STD12(WzC)jyO}|5^Qi|L|EHJVzN8 zpHDs`OaduPdx&S`5zXZd`_P;zYbujpm zZ|+#3X{bOxAw4XkMwS6gTiO=d|HcVLJgXjX>spx?V$lgfjH?k|=klK$#=rUT2%snl zV$*s!hgAwZz>vk>JVS{_@>P=Yk@@>t|3Gx95s%r30d}`KclFx)kHCcNB8PSS9q_+o zTNcE~i&x)yEoZ=u642*^#sa=J8ZGAUOZpE}2Jo2fc)+I2>9Y78zXUaoLAjW17;^Sx zWIB0F4Eoo~9z1roaN!B-K@LI|w*QCU_Fq`l94%JT_sBH+2F ze+WX*u!Ip6#)&or{0o8+VuFF4Lk7k`eoIq{3j}C4#tm z8eg97a@sDpqdb13@a+HkMT_eY-UfRlnc!ci20!dzBI*X*Q4Ho^@ePPj81ujP%B%+K z@lNTWHwvtN7R>;5PRSed#Reb1xy;V#1lOJ}G4eNw*Pi|fC|C(!;C**<06Z?5xg(@L zajhpRf=#FP1yE*YfM^eK@6f1oEXdccQ3m`<7QOcezHjHH+1J0daEboUndLq2^Lt@E 
z`m?ZFvC^N3E47ok|7?9ys#ZgY!akxq$QDk9d&9)diJ;Kk`KC;)518bfz#oxBZw1xTAi108&B=#7XiD6Tlt<`o%PmkoJA(2b)`=GYF|KAD$z15x|_s zaU)RAeTn>BW*2YaI{>=>GGBez85N&WV2Y*8MQB~_f4)M+cVMl2a+(F}@84532O1y3 zPzw(8yH4*~%N=~BX>*|FxCqWD8SwbNiskB^e+Fq-^uf&lJQ_cK0T*jfuWJ5PRI^TQ z27=GjV}ceC^=xIBA=)Z|7BX21VKV3fm{t*4gX;r8m=r|p=61ScCZPZS^gIF3_+$nq z&YvflhuyFfUwJXybPzp&E8;TxTOkHGw(|TO8I%uafzefxWlgaifDOe% z(jj!$t+?!~&%=(QICKn%mI#52kA%JnFaoPuqGiwu__mi{Y9Zy}J4u==iQ3^F# zUdog0Z;g~M2SqdJ%FggGv(p9(cW?HWC*h|)4h4NAA<07`cWQlhAY z#DJ8PD8i6RBdwq`NC>EubjN@qf*_zYNOvhE>AAp!GbLjus2uu=@M|F266ca=p5^=5n?hi$VE3|ev88FWH z6Idf_Hwg`lzvDEVCv$Ey?V|8kh`-F6}XK zx(wBO*Njc)Wv)WJ_Jw|Bm~rZGZ8;Af!PR`G^&{$YCJIJSGxDQ1z~E|<^kuL$A7@NY zQ01$(AEmYvEZ(0L2ol;pL?1z^84W1M#W^mi@z*N4?C~{TVadOK9(<-B&B*=4OzeCi7v%x= zXvCc0i@7X06)s{gbmlqElvn;sfHBM~s}4wm$1=mjN6sMqEm&CXpt41aC2#~3COKBC zLx1wDXw>H?$p2^hjGVN1TsXpV9$z^IvZso{m$ytn4X65r|M}d@rE+N7+X2_Kggm-c z<6T+*QP62{;G|0R-UE+36dGId*86aI9D`tJyxc0ULD-2PF00!fvNLc01!MWR(6byj z0>l25v-6K?l|klRf%AN2afhfbd}n#P{(_a*^DGKu1mL3z!+f9EudI55lWUjTr^?<+ zxhX?{#F9KqHgFk!_h4dz1beBHtbY&O8d4CHtA_{wapnE@k$a^S9>)kb&p(@22+B}e z2j$OY-AYpO>0cPJ-qzSV9x=(y;W zA-JD*Ln%*xcQje6C)dh$ z@XakE1Wx*H~w*1agFA9<0- zGPGMSQS4#i@zLfAnu^g2d507Lr;=pv?Ln*2Lmd*E_09kS?f^|v^;D1xe5ZF9C`zYG zj3i3lT%L1OxDPlN4siJh=H?wmb}bTRsZr~F${kvQcAsDPwwbOe-O|#(0yxlR^Y|S& z%FH-Zu*xdnPyKp|j)c5*qf@F2Be{6r{1D7`V*ovAA0&1CK=A!m(@OwC?*SB>G%?|b zrrKZZ%U`1R1pU)TfZ)tzEpjXRHP->^&Hpa@;Pgi$cj%TW@l7y`=4vunV*HCOjAhN= zok?pQ^BM*o+>qfccM@{#&QPEQp=Bi;<>OAAwc7`c<|;zZ|f~F*ArOb zc}&GXWYOu$PBVaGg()v5kU#vT^x>2s1;c~SW6kSB3~)}K*MTYhmyqU%$I^FJSuZy3 ze`34->D&#ygLd6(8nwGT%=s&+$$LLCrRU-DRola;ANPK+)NtBdruC&(dBRnDZQ>=d z(^t9vwNQsU!E3>v#dvjCJlu?o)U>uVB60{KW%wJBvwTP79RCx4W zX#5ef2w?*_?UGk&%Bvq*Za+9$BHnqQ)TN!wjY~J?y;)%I$}&4Y-sE5RRATsjB*PNw zJ<8iHT$d}|(Am_iT6SUVqGtH%;*B6aV{cV#j?`6y*WWZ<}MFggx0h{gwr zbQG3e+K{&j!EyZRFuv#umb;|_=BpG@L^?dZW1ixt@n|{E+)ENzgMA?jrae~KQ`4FW zGxzA%sZ=zt3HB=Tw{(KkO^k(Zajf3kvxMw{BE3e4-GU)R33cXJw0NX2rRJ@_4sd%F 
zcXvsa(-=ZBqx$ErX-lYd0_j#lGM3sm+|SEj{*ob-2oe8#7%dFQ48hf(=T?8`S5iD!+_E}g~v!anyF>AK&n7+meN zEorw{jiFnzTwiMcVIHy?Yms;(T46MUQbkvsA1@J`{WCaWfBzhB+;WVU!L0ZY;-5uJFC8bKcD_6r@B0K%k1-R`kR&Xmv{ognof1YVPe#n6fekuOzh^{b%pOAztRhb%u$ zZr)BcaRTC%%t8ZE#PPMf32;GJRr!Nfo0&(3mrkTPNGzY}uty#bh*hJ}p=s^?-wo09 z_l8*h*M_JDjPdIXYSo6+D&4(sgJtl$Gm5CY{4w5cQR9{LlLvbTTKA^r-?@KRZf`TT z?h{tp&RnM|!&Rm&$X53{kkpXjmRsl)IR_&-iZ zzQsI#gATnU0ZMeKYQ$M9HA(3w4)#})3O(tR55N*xCB#3J+D$8dR@S<>3ren3vs}6V z$*X%|gNp)RbItjy6+#nbHo9yc&CEoK+(~5FD0%MqeDCY!bLkf@>r%~iW4Sq}n(}7) z0RCcIA8QfCXy0*COL||2TkhJLr>I(RfqT|HDUkMMx<1%Gj?N7{9}=h#BuYT73dZQk zU)cKIXu2jDA;0=TN7XzH^GFEdK&J$r3cqt680ME`hqq zOE}4Vt#Asd_l>(+L+CA8^`5hr3y0qenij5vJO*^W%VT9y^nVIkRvauPwyJ8i--+L1q$%SLl`cb_eRtQ1KinKsOJnFPD@Ot$4)5>rCz^}?j5sH@ zXSrlg&J(^0E_Qx&3;7By!^M_ihx0WfAv#&(?>wQa1Z{6_LEZ(vw%G^#KYNn=C zG4|g2>Uy)@QWDFJr3`138A{+LQk0rZRCF<|#k^&^3oafFzR?jE4qTfA;lTg-^!(v?{c_~5x#Jj3RYV=f&R0YjKkZi5F!@<>KHLQA zxni>>yu#ku;Zs6qGT8YH=&zABl)sfM5sGD0sxkH6x}M{OZZV#F(QtFzz@`o*L?Wf! zqzP`Yxv9r^pD`yyqK2yOTl+?n`E!R7rPmgax9F-EqhEdWy?) 
zWDi%Qs@u;qyf@%8L;3;7>-INPibH!NfStF^NqNrPVhp~=e#jh0&&kJJSwMPP-4{Oo z)%z9_ttVpe);#c*^E>L*Wjt>jKQMy}bdA{t+r2ZS@3i|%?^b#3-ourRCoBU8`++V` z63(X)fNfg`pmhzeC^~6OQ9>;dk?h_)n?g-iG2rzdeF+$Cp6)(=f^?LF9veqQs?#bX z1G!dV@E6^ad8;=^N9Q^f(y2ynMiCsROD9TB`GZ4VBdt7u00G>N^#7m6vO?w zXAA8bNp6c zH(2jDD(M>?@%dESGE7)M1GFWc9l;{B%#ix35AhyYt>b=oiTa>b0>9`=vuw`+Aaj{< z&XO`CZVQg*FyoUW)zaH$Su%&4y*Nhx2N042pZ>6p|H1^R6n>ur^M9QKA^yK%tOM7G z*p71=-(Jn62647@zezP^A1+_h40F5{pi5;E=J1mgm-d$=<|LXLly6vuwX#4=3DmAD z?hDNbhoeF}a3sC2{2j8?OsJpAKi%!v@u`vj8C$ zdB)%3^ZWS|>jd32*lo)=CATV*5f7T`G2jUH3qxggSm(VHlj+rU+GVKM zy?glM5QW2agXi4z9`lC->YWi&#D1}w%7VL!O_W5bN3iN>edG7 zB-DeHk;B57*`HIVbD0ly-mq|l(#BjoSD~?!zHz(v`6m3nxB>`>lDPCmX#tmx2T}|X z_u$>&DflW}?~V#;WNCKMXV_XANiEQjFD+I}?72BVK^j~!ymE#yUhZ%+rWUpnTq;wI z#J4le&9K$u-xTzB;|m&P+qUA7nhaslAJWjvqoW29o@$W+c8p7f;Z5j_?$td6`2QRrFXn$&A?1In5Iw9- z>n!z5-!;2wBK?KqKmT9(Ld0iOY!$dism^1goi$&dAjcNnD7Ym-*+A9$@=V*oj{<<_ z=sKhHpRa4Spj-$;+JcfiX9CszP|DEp>|HIlH;5q*xGpn2vQ(K{-gJWIH)Y72y7apN z^^n<=etn;#oq&~nh{hpYSQGL~LAMaQ9G$nORFmO=6OmL}LEg8yP*C6S`Y{7Re4W4H z`oEENJ6IVOqtc3hJpsVa>Fxi;&qZY4dH3|*`zM|WT4_h6Ca3DkFX&KFLmzj~U8Hx8 z$k08~eVwY#aoRkg%%wy&c8L)m(Od$;O)H%%#J}nv zUE)miYm>q|=jps3c;EjJTa}cxrlFDLeke1Y%2<}AH`bzoP;tAd(~U~@%L<%ZfC4Q% zn7v03{96mEW)qo%g~|SFwf}3UWJkc+|A@_%31I&Mb^RB&IIrkqx#OEGO8beflhr4L ziE8*SzI>_q$~c2tM&+>ZH8lK*>62={l`%-kNS38OR2AivvfD{ERDHK<%b(3mZeORz(#x*BSU0A-ct_6<@tV^7UKS*q%xm5HKhunv zeH($6IHh`>o9KKv4Y6ygiyOz40W5c=iPy;3y^rW;w|i$(?zOjTAg^6`_f8bR=O+A1 zeJI=0oIAS!M5YkMFUt!2Pq~0QP?<^L5JlTidl?dEp6p7t9*H1oQ9zDHd@sK94-`-I zwcp%G)&IxL5!~^}i^wpI|3>Y9FGvF%dm46^?L#8g1|N}dk_bl}^Ar8kS9^r}tovti zYv?yg0_ZTk9qJ#6nqoEhl{6Eo(HDULW)>znQv^S2c;+AODBM&)-*ztIPJ`bE0bD+b zKC^$Mvq1h6*v*HHV77xeNcr0|>}K$@W$cygf6;4y`v>l0+WlGTRI2y$n)Gk|cSrF1 zAja2Bw7{T5HHe%^0FX&-%@!T@Xck}4 zt*C?EPE!D&7OI1p;uXiOHdQ0NLKEXA3eN)wFR{t??-J2y}>Ut&1~WDZmqZQ5NGlvqrxHF^I*bs?8n^t!J*w11xa9 z#lY0HDGi@~bZH`vrhv>dwX!>3v-IDZbFRR((Qv+S{^BaB#(1J#!4o)=5}TQ8r_Q!p z4v#LzM3f`{&3JGw0a5f3++AqqF96j8Y@zJ-z;DG1zNA3GW7mqkmS-aEDt{h}8a7x9 
z-yN_2MKIoG4ZrbPPaW5QoHwnn&C`FY2w`)l4S-_c#sl)_*-}AGCuR!@yUPW2+lIT) zD=5x1wXlz0dQlYE!WJp6s4csD`&uoKSsw!u)_LusR|1XGm@WC1FRUvEUy{U)!9NEM zCSgnOS7~&J&kEFh8V}U6_Td0$0dpM@cU*@c3}Y7IKLnTeZtGzf{|;YY$%W^NhUX}j zD+iT$hJrF${w`huVJ6On<`$%16npQx;u?vQSS!gJHLecs^Xb7^z;+3Pym%M5%igx2 z)@XTIyg+_-oG(rf*b81O0IW{M5!g5{OVX0`q9w}gyufFoHv@GyeMOg>A&Pwl3il2| zmcZ`t_3vhXo+s{}-&@)4ADS0Zeu1xRM+Y--idc0-Lz`lyw*iVn<3v#h9wwik>5s({ zb*-f1K~3>!X#>hM;WXKw<0leesL8kZu_2Owd5?;__JR&X_u)*@C9u;#JqU()--@Ow zLm3WiomX>}rv2ObBVP)iK($?DaRmO-iruBQ#a)XKjv>g#6w#wrx*;NjNoE9(g&&~J zY{Ml{so#(Wt+5jG9rOiJ>M&;o2>c|exeN$-pfOWw+TRXPX19M4oVtGzoWy8c9Zr2< zX^x9lq6gOr0}55pGnxl2WO~J?djJPeDz$jF zg-0)!8Cb1YPzT4`;d=pzz=g{a_cmZLMX3ztr@(*Q12!Tts*)1`dfA$G1HUEE3#W8> zaJ=4Q?!wlNxuj$mUh*=>e9nltTgMi}J0zZqGR#mWnv->nvxR4Ux8Q->*}^Vhjp9z6 z7MDMJ@y_ibs*>TUy&YnajD3pfKoki)`@(}@gZ1kfU@ubN&O<4;;7NALfN4_*(Eg&T zUyXQ~c#Y!MndlxoECQ>@c2EslSH|)i-GIDmx2Y`|x786Z`vXy%PKABD%{lSY<@e{b z4eGtBOt-CSw4*%fb}sy_1ihE=dy8lMYo8DGL;XgrS;ui06?B=@Auk=9Z}Y~xa6GGn zZA0%@;1wfy*g~Ss{9UZODr9J!zD*eTMROA7( zd?W0&jsfvFTYC!bU!CtYL%!zdFtNoBIk>t5Cn)h<;5#Bp|E0&rFlEaD{W!tP1Cof20Ur;wO#Lvvpok zGAqZ{YyZNP^$Ic{rZsfr4olg57Nm<9U2p8cR%LW|9P~bLExh{nJCH zN-mdb?d-%IoBU30Thz~hKLrZ}k%|q+Y~}2po<8+wrwKU^o3xNad_$PZe~Jd&;Rahh zF=L3xTbwO1bgI$RQM&x$Sc}j6W{dZldA~n5a_mm}k-tQ+0+QwtO=-(_3T~4S(Eb4) z;}!d${$98VJb(|ym3*>0IYSR82*t+8ei^D7sA7-+dyl9mcdff9#^Y6-q*lFHqle`V zI6+A4bLt7Q_JCA~0%vq~$!1G~K=%}DUff`1g~>j1XZ9J@ZqYuwE?)8)vXv9xmtp^^ zNa_$~KySwA$O^f0+}_*?)?A{DTl)0IalyWsycF8k=9?&`RK%y{!+84}dobao&5U(7 z_-wG?EIm{U6s*Z0yFCs!osVX#W>Qy+pKb=`*0T0v-P7BKOc?9aXD@J{H%g`*>lGWh z>z_lLN2A)iLVo3RNTC8LIL{Uy1{hAKO9U?zrn%43P31>Q!-jBs&mzy0j2Q|ZKUB0(WSOPt=mBUNUYurQ>KxO|=G3!shH>D|$4+yg-P zHIa_KFS6rZlz${2e!0RH{bkjki)&F2!b>G!kRXM?V>#Gusv|+f;c7`o5Gpjh={WXS zP>K?}ia}m~KU4u(;BcI3l8N*qb-HGUs9=_4?KmDZuf(U2dyEefD%XXTM)R7&+CBP% zAQwJ?IJ?B^(^Cjrj%Vp#CzV~uilKKx7w(zOH_!x6)P<&hMTLAZ5s3hRu>wyJE!bWv z>s&QfyZF3Ql+cLKAH4z#Mz~+5Wf`=jvz`;tnn)R3AC2-#qQx=}Am|hVbz79m<<34K z54tmT@>s#c+JnY`dk(|p7c&x!kSdi#BZYm{ZfmQtO#RR++cnn()*jXth~$&kc9k`T 
zea1)NH{pLz)1|em6cj?I4ws@ry%#nKZXi{d9(j^0{d|jWgvm^USs$Vf91d$relM0~ z^yuujOWP+ki)9H8{ZhS(z$Z7=WRIKV?^|2oqr-?W>sy8gIM&-VsYqEu6GCJ4$S=}M zWA45$@n#$2!f*MMBt{a%!K!VgHw&-~V*x^xCCE@mq2;?$$2GZL@*%YnkF7fpI%`^= zzV@LI7k_6%x>g4v#Nv!^Fyp~HNunX=Kqj48LS&+W;~6&Sc!6E@dk^}C`78x*)Rz$t zlsnni&L9+V(=0f3P-EH*d;5vXn`%yHL=BgY2ihGo7pQF-3A(40Cm-^NIXke48EccH zof58w3w86bielNuu$a|FUaO;eR1_hN4q*usf102lzt^(h(Vo@0t8H8=uO43Y zJ|gkkKx%FqWT4d1gs+S&PqD|`j+S;f%O9@jq9iR}QB~(tP`$|0g?uWmEoZY&gui<5 zosJRx(|m4Vdc0iyIE4;9!g zZtUdag|+X;i#4Ucgt5-}ly3=He><;C-R&j(_(wfW%}XtkOdpdw z9>`p&&htM$2B8+kghKT&WmnoKngZ>rP})VK>adi5{JvB;0w+%sF4FvEkDNpP(} z&pm4>Dp*t6-caMB-e9&yIqbnwooTY#o}!!@IZUN7?@(XdEf_>>jTFefpJ1WK(3$TJ z{`5vh-l@}cFyu$YSXDI7ub=D7kLc6izK-uoY6c<0VG z+bmDZ_gp>(G!+OM;x$BHJ>xfhJ@FmB{`&=wl3HPdMIy(iTO@lvUC&X(-3M?GKLDK0 zp|+!~g&39+tKc4Oi{qBClse-wMWA~6(*Tm>Tv}j!(6PSer zLKnk0p7(qo9$GmN+TmRjb&#(rT+Y@H?EnQ0^STaPaEO&z4C?`9esC_(}BIm}umN@r#a1nO?Wt<5Q484)Y z*C3s28LWG}&CtY|&edZhw|aI-S$f`kDt|bIiSbN`yqUz0rV;nT3E9xX;5V5d((Bde1S+C)$|4G zwP%84qVsJbSIJ)L+(dqTyHQFTTO;rBvp5b(9fd%J%cl{(|F0?%Lk98Lqw^MZAyA#-$!T$%3I0Q%>M9sHG-xyJI)H4^;z#)xTS&Q zU!;{9C?+f>6^Ugu&q^^{#vw|CGwEFq=2F}ei5?e`nY>&V_?o>R<`U2i_?C%4!@<26 z%i{VMKwS1fhc@?RkVdskOv2>(_)JK|iHI(Y?)=u7cjuXJ{vlPFaWe}MZvKBT1JDhB zRnGeWK~ah5pahTh0sU|Q2JSP8XDYrrDJVx#1h`yJp{d+lY}@2JTLt!T)a&5%N#*!) 
zYdGyXC00UdWb5$t>g3>kzVBZlM_L)Z_pkH3Vck*h17^3w=5$deJL@x=fr#4$bvM6D zTwZ&FQq0)Cpd8)|ycg*^b(EXIHKNQE$p>d{4Wk6frApXZ8h&3YnvBqTb&BF+c+{jU zqI@^Kk4-jX{YMSUrB$EE6+8iwDe0PfU1O0YVfB~P4}j6{OH-17h^D;cP+;96EY56R zWO@BTp?F)uHUZ8b(Rxqi%Q63;Yv{O^~Z*Sx)&^vb_7y?nD2V1vA9XT=c zt?oQG@@&GMUPM!IeO7FR)*)Pcm3j_Q(%;;y=NTq(a#Np(jU6f>|Emo?-Z5cuZqSLX z+M1kV?ku=iC|bH-)Iz#*g5dOpBxd>MbB3+zUu^yJf?w&36jU&8X*@sW;4fMnkohZ}*SOMk(zl+dwf18QCNUT6H!HRl|lr}!pB)@hELlWY*T6++=HN>cj z{Mvv1$B!TSbueZ<0~P=xIoPsgYDJD~a3~BdPuq(VbV>l85wQ4Rb!4zZzfg=O02O-w zcLaT@6@$t6g4e^@_K+=trfCfyK-)qON`LxxtrG^W1A%76mVpk*z1T32rc0C=KTW8#n9tI2tOZulA|d_| zZVGu-xZR%Y<)v91M#G44^`i`71=n7GTQ0({Kh0H_uM>XvF=;?J0J_u=AZ#%V4$Kzi z04Q~oR%st2pMmN3ZhI{IO@whCi=#ln3Bpnw?1`087I1hKpj#pSc%nT1`?;}9*Li+? zkuP5z68Z>s-z|9D?-xwz>eCoyBq6sdWw9dkOepgHj~76&jip7{&>`@@((WuSc2eJ4 zC{yehcYKYalTc_s+~@_9x%5eA2mNN5up-umgCAiFMrXww`4Ah<4D4Y>;Zy$^+P1&( zo|j@*UabP}{ExeF#|e~mnHXLlbNAyl(R;>caCNBEmW}M}g+?ojYuTCj z0JzR3>$l4Id6Igw58?Rdd*HxGC8!r`+K3n^ZPlF`0F{K zg2g!&g895t!raU%LZQkMEkE+gq^{bOqQjLC55Wks_C!HrJ#4P0e&xRMIZ=C5v(-v; zw>@#D_>SDKU7ad0k7S54baMw|cC?;Ow0Uo~VP~40DI$zAhy@FHML0$2L8QM~7~QKC z@<6~den^0B>LsYFeGC#=_L0w5$~Bk#4<=pes+Qt9tua)rs;I>o>q@u0o8@V!*K6no zuaV(8&Om?tQ-!*`^)*r5X|TJf+10IYIeRd8<0-3G00vact}xPZ+uqrP)cF$O6M!tM z!&r0)GaU)~R^EpCBGXz1#1({gJ4E9OeKG0B1P$6mor7(7f z363-6p5C8Xl?zSk@KZz3WUcT)1e*3g%=#XTOPq>Q5$sJh_MtR=ndsFv8|JdkQ|f$T z#WjQpS%4dxwYDjaLlIB3g;FkPmYa83$E}a3jIum(r<-J&^zi>MEgB-Jl_dJWNqf(L zBvXyBOXe#8Kyigz?Q zpFsa`LC@`Nrqt9*fwDNUdjU5&3R?T}jikmRe7@fiGI%=7CVBl;ZGf&PsdPklA*Rkn z;Tzs@hUS9_meh|R+~tM%ux!12!;9a%2OAzfoN`|7WCQq>{&;}NxvN-jMocuecf_Np zdgsztrg07;&Z+yJHvSww$^QPSw@C!`#_JX=bvOG(O=ZHFEEixva?O23=x=nLyufcl z{zm41$Uk{bY;FX};zcQEBq`$YxKQxK7n8Ru*y)*PH%KxO_l0iLrGziV9XpwDs^QKH?NiOeGsnfIw|^7 zIOsExGrl4}KE=+u)sC~S_xlIFm0??pUq!}jDY>*e+>f&h^c(3p;QMZZI8cmL62Asp z89e$x!RGWWvHA9OtXXHWgsOC__cM(k&(mA-@y7yoj~Rv3oE*HAK}wkBdlz5-7wkz| zA%56t4A^ZX{CGhMA1wx?^y#Ru=KC@KaUF=3?}L;4)7c?4VHgD@QngaVt04qpNHMlC 
zixt(tl3LUG^gyn5ZQT9D5@Qewie7YNy$Li{hCkRUTJQ8b%eapVDrVY9&@s*Jt$!j9sl|e#K2THoG~VU@Y4@C7{cIE zayJpokpx5$#K^PyY6j>|2k583k)|21$sN*f?O#h5$%Gj{o`VY63h_FaY>^=9)lWwy zp1Q!Oh)03IK*6!`u8lNl1sh26(rs-FO>W_F=0k{lATf0Z`?Ywl(=0mne3rM*-e%1Y z*Y5QH!<4^>hT58c`jncp2XW^4W6okg>H`57dY?s=gOj8wizQ^ARu8n0bew1RKmpa^ z&uJCY8KjH+bn5!7I^HYzVXaRg=O@))fT%7t^-3%e0tc&W-%4#`#*&>FO2noLnK{p%mG!EG(VS1QP1^Obh4Le~ zp+|}1m{AscRnGDUOE(Ol0N^xO*FQt3B~OwW@9P6A&}yRqNAAjjZ5EtQo=~70B z$`NOF|G4d_k_^9y)#rH8-aowf1$_1EDD%q8=nFv+Sk@T<_}QvqS3Ze9Fx0s308w)b#Pxy?KoG-Kr5dDKr;|1YwLD2g@ zDywRo+|%1>QL<{zw0*|U_1VVJwkXx}{4vh{>BpHGMH`?TWE8-mxe!*RO1YOfWR^vKkF-Sm&sHH^{E z=iI}$Jsi>L9+8Y-FVq87!u6$J`HH_$<=Zf0R19MJUn}E7jtYM%3#v6!r44X;jO2ow z1${Z7VIvx;aQY?ybBSU*CfVe1l?_fX3mWqagbLGud|np6S>QbYIt04|-7%PAQN`Kk zgn|yqWf!!oF*i;K@Q{j$y*3640ir#wp;aa3*K298^;~`pOmidHHtFHKK^mjCzdd|7$H9LZ>(mLuN}o1< zf|%n9NHS)YOTInp48ps$06t{N9*eJL!?_C&3nHosQ>5IUysDuaT~`r!E9jvPXE8p8ud4;KDkh~x8EO=lCcexalM1y zgk+dDI&wk68#v1GS|%t`i_|c9lGLY*&RG7U8FzWY^>}fgixRyFysVo^`3t``lgT=G z3*106NWaUn@tZql0rK1#C+R3*0oJM*~Tvn5XW7}n$i z=}w$*2a0no!`a(XQ&-Y%&+roO;U z+v=}CquOYZ1tZj_`AA;{9}C1>1{aLGFvZaZ!N z3-APz{A>$HDY?=ICUOM;jC{sF(;$C@Pxzn+-~8I`df4Hia1?e`JVeQx_^B~{S0I5e z`0=!!AA+#iY2b5RWNU7kcr0)z@W~>s*A=uG`IdC9MHIU89G@njA9_cX?kwt<-+`2!p!Q$bZo_{3oP(Cs(F zux$@c7-yiwQB!38?^$y7P7_oOkaNLC($m#sPODMi5oWysL^(T{)ZUf7ABf+>f+#~J zYST^L4fjFeax7<(oa&uJPYO~I-PV{$G~ z_dP%eC;HW2FyjthwV{bF;7+7P7!~m9aU;XtI^i72@j)dTrg1+t{KiCrk(R}sHT&EFAE z4F+|=ji{w?8R-C_SoXxjItI(m8AtW(WT|h|k4Je5drK*hwGw#&KHR zeMdThAVO(4Bb69`i=^}lZW+)Bn%Hylz&tS1Q!9TD7y}p*Arn8DqIZI^1viw<`0nfL z>mpvv&z1CdECOTv&VO`V+3eME!*{zyxijjPHaf%C^wnt|qqo)6M!eynv2AuoK^kI| zBE_p6hDAjN@rF9uuh4cVu8EcweiK?P-4o$7Q82|c9w#7_Hn>@E%a*hC(N3C_Tq#ij zkweHU8!o4&X{y!qO{opVP?!O~#q_j@(b9YTPzcI{LT%#+X+n1Twt^q7Et3OFfroQ*I>`$}i zbFGs#Bs22+93|ZKBqc22*+xu-)!F;TA+=X30Ep{q(Qhpa@;rHXob%|ri1)X_3klm! 
z9|(3cq8!F*tIm^ud?b2$Z&*H#SM%Yc`WfzdkB zw>$))vi~4ofFW`1%rfVvA@0NXd~N#lWH7gvO2OIg%i-DhwVCrf8x58-=*SS=(nnza z0ako+S1+O5x>NN6C!6_DsRtbK{}TA{W;Na?@;gb|T*N$!b4T6{U=3k~OyKWSrSjepGDWxJJ(-3$JMuO$Az(qQh%Z|UrDe|J&zNlEM$&mi0|2L2F4Q^cV&>ir8>MS z-0OETz*H*0MCX*B$JBv1(El&U-x7G{sMZ31D8(S1eKoO-Wb&)h8gn;Zhbk8#iD zPu(=6hkSeYYI8SAz`0=vlp6c~otMdlPEj%$Jo%79i+^>wBBR3dA=S4)!$%5737Bvz zLnh+}mj)8uAlQBfZ&^EbDJ97jSO?I%PXy8~b6USzld`r5rJkPaZ-2k`wGD&;I$=bLQF2Jz2BK{9ibE(`|pI zDDJ3~?*oGqhD|5LC%?GM@XZG+bi|XAi^=)No&7i`T7j5k?6mBiYHIT^eR)Q8!V+Il zK(3kGd;NzaC=E{6abJY9cwNU5`dx~)DetQHp2!dZ`L9)jFTDUz&N2)B zTP5Qs9iMT87`s-ieY8o+V|$7eqgOoGfa> zRKQd#{+16|I8L0APUnz8@D2yQtQ{g?f{w&rY#Qcdlf8mYGw|T%&$Sl$wop^unU~HT z9~#`eMj%)I~OE%Wl>x8ajDS4dDFM2yiFLiffGiZts4H$ehxSeXSBKq%>7;Td-z{k5 z>;ZVT35ozS2xuL>-ksaU4N@J5zyIt29u4H1J|*Xtf)$KpTkMlT8-`;jF|0OGmoQJz zSI;^a5~|M%omEAu9E~4~(k2}ZRD%vBN0PiA6YncV8()sfi=s5q)t)04Vk+jev?XD z33X(u;a_%3uOaKDR?W;MUyNRGOV1}S5E)@x$F!bdQ$rn@vEs0HdI;b zw>puvM7_TlY0x^Mey5NwcBi;qV1tY!R{bI3>rNDnYSZZ;p9wG)zt~WBg~?lITaV*dUIddh$Gy986i2V>fa50( zWJLADlDXo3A=zulGQCQ6Uq3FhhZ^Zj8SkQ?c>h^Oq!HVUdlGUPTiTCuz9M*U_=s7@ zTzG9@xXp7@5pm7s=yiTik%#TBOqazbcA`l}_4|qC=e5Xe*PqhL-BR!Q+dT6~R;0IY-l&WA@WoMO9vAgSj1Kr?*dS~6j7AO`i6~xSVfuflV z%hkbeHtu`UdAYPI_^Zcb*QaHZA%eno`DXIGSv%Fny0`Xkn`*! 
zM$dytc`yBV#)Xz!gl8RmXa`&($~bIz&657b$a=_~-r4w~+IOV2G}pgl{&xT|w)~Atrt4cc`QjM}vZ$ zG0)7!or*C{Lw3LAf)l(W?)%=SjhMW(z$3OUeT{)I8rr>_h#6IRGEPj3TJ|3&#d^zPh{z%F^7W@BKE#$KT*DtET}^uG3?t{3f{-LNv4S)xr+_iX#b+CirfjIuR+3%YY~9(B(4+8x2~Tf5F9@E zU7MnUgKNCgXSenV$6CbWE()SAEFOJI)%X_o|F9_&XMQ+U(o4Q-a&1U-aHj3C!?4?O z?2$#><)w$QaWUkB7ZoVWb1ZI9sidxJ2gdQ#UjKEgfw%k>Lp_rySQ99&mo`5aQYV*| zqF5jE*cz;LCiSWGNbo^zd75oV763lwCx?t|RuW?@AI)wjaZ;DowCN@HU%yZb)8m_t zC8vGQD7m0b$Qum_zk7^}5zHQvObLY&xJ!So(UQ{ywPkCFXi;nQWjjpc5kOSSt>MoN zFZ|e>NYkE0w5?|{oD$@cCfqeZ|CTQK$;Pu<39ep3G9vO3bFH+)TT13vevd{xkd~gf zNbr`QQsbuhx}^2M-u%&rwX?xDU-}UFBTf=tH`Cf77?{0iPcL}uiVpACOEzYfwHG^4 zU;)lAW;iht$f7g&pgZ~k7t4X4?X`cieQ3i;Wpn};~Y?Ns(!aElNbeg#sfKKMY<;7Q|0rbwqhq3Vf0%&Pj` zIQJxSI;ryk!*%b?i;s(yca1|1@|ujkL;Wts4s1AE;XvTei%|1ew;)b?}Ezs)X11u+U8HaPR3aF+fg5+#Ue; zx15lNes&DA{l64$w>rrW&v_DM>h3r_PpTI@A8yEV>#2Mbz_5nv9nHz$?SM4Y)WADW z2r9KF)GggLU3vWZ=i(?kG#}1Y&irWFd~omD+#(%OrXiKXL$i}rD__pO*PzarFJq&n z)A>Sm{rhNspz1f}JC}IUmF431-YMt}O1v4;Xm;Y}In%sE?02?fi6H$@DB{p$?8QJN z(o&Q$tv~uUe*jFJ?Y(1YarI2W;S+wrBn~0Gcc!X5zTV6st4Vzf=mwQ^xPqEIpQ2s7 zZ=d#yr>_q|>>1I!MYGwYC&%tzQ76FXS!k)x@)qiKqPLFU&6@;cefxK7!F`4vGvn`o zUdkNAo}&s4KeG?-z*5g@f4j?`7$$2{itza|2d&~`{Vw&at*JEi zhcEWk3iTJn1^IrTXEBG`Uj!}38Hg#qXdu6u{H(cL#G6HlCFJeVP_QPQD5&D zJ3P+c3t}=IYv)HQL*L&1hFIwtE+g2VH9>yj9naTtoCVpLI>;hS){krJd~yfl_1A8o zIAHhn^)rRlxhGSOQALUwr;{i)=~wSp%jK}qS8f!gHGgU4r+HB*pEhAx&2#{12H_P? 
zwQ}}PUpYz2E6r`HckmHHnH<*qWhjWScQlUToKfAoM*05OvHGgHMPB+)g4GD}O; zg#j&ty1*1<&;z)gP2Yn7%7!IErKUR01`#^;-8fCP%RBo7IlU|PI#Jv2Tin;L2KG0k z6*>0q^Ml2bnNzUN)IMo{W}I#M$0bj`&^1N=#uubM*yNkL%jLO?lwaLgOdd7X&^4Uk zvo^X>X!&}bX<^gJqeZd!8COr_kA7D zyXQFm$MHOGboJuOdH%-t_Z^=>vC14N_YQ@{>r)yw-(741(gEE67-)R*zDtNh*G8b!g+BFSXPS18viY=f?O%&6DrhxfzTGLb zm#v%nQQYST%LkX;>1t%g+-#wM1I62ZHVK6_ti5Pm3G~m&BHio!#k-R=j~tO~%>;eA zIkal@QuK$uh(77OwdmV#XC_t7MW1m%4DKQ{PI(ujcx!FbBDa-Vha>~P)z57Au>b#r!l_FeCc(iuK5Zq^oQ|o!fM*Pm6 zdoiZ9hoLntjVc6MmUSB~=YY^8_;lDT-j^b{BE16NtUbtVbuiWW_^h#)si%j)w0<*I zGZTyXCM`R{FUvR>iCj-HRi8#}JI6%2eo9Y6_ac`+s~6w@3{epUY~EpSs+f}&dr z{&M5BwgO_N45UkZuRnM8;&^)mH~SJPbD|Ohc4kevg63ruLzO-&_@3YL<39Pq|AMdR?B4HQEXvi+Gv%m@fMct}g(t z_N5t2VK0+^UP=uH)cY)w8ViYF<1!~v5cB-GeXor|s4M2kKvhKb)IR&MCS^8T_|77& zU%N#)y(0ot*FwVW(uIb%j+NZ_%}EG^l^wb+4RVKvHsl5&PR~gKS3~2*q|i?3T+C+> z5Xob`)9_)F4qt;MOc~@6W1-cVz)+w3S^ZWq2)~|psX6b%`;{QN;_a$trqe29#PKcM5BzF!W|$&J(ihmO-m4u4Lf3oOS+wq6Mki>l-IT0eajROi z+`89ME8)M8Uqc3AZ^^_N(GBAgM+7Ue;$8$vId9VAA6@T1)biA<`y@vJUj83NxJeYX za69is53j4vc6ky+7(xzqOh0rP)9j%>^i`0Ht-Q^2Ip^kCBQ&yE%{GayWcZS+&GY3+ zij~a4&$qld+W-J2N4I$?2#pThsU{{NUL;Xt$3RD*K7njR>93peG4Qol$;G-yL8ExE zAI-%|v_9{4p#xy`v~Y5s`>Hv-?p!j78RXZ}@}-sMWock5R^n8yEd8LI2I8tu{Nxko zVD$&d%iWvz-Wrt`4x?|OHQCZrH%@ki8`Sf?7_|7YuiB2+R(u;^EU7W7KI8d^GiXlTCSTBVHq~huF}sK@9phnRyyH6TxA@Acv!o zN!faW?gjE0E}spBgL>|V_Excml-};lk`DXT!DzRbu_xSxk9Dz`WdJxz_A3h}EWaK+ z1Q9xqvRS(?SyN^eprBY%jL5qbe_E?QJJ9%lN{+FC8nyMqLsQ|y?jc8s%Z~a7$JN+K z7Q^j4l%S5}%H5Lm+pk{S3n!l8F>1mfTp=;RE3zV}A7K|H>x4MICjv{T;-zQe6a_1`XjnNmGp^HoPyAj1cvgb~c#id3mi-GHaZt22)0~N?GBvg5P|3F# z4*;voL(xooRnp9tMkB%JbotF&VY^Kp*s6WpdXLczy!Y7-=0WmEm-?p{=2<^|=t#Oo zz6O4{6qS!9o?@{L(Ka{sv#Jjom9`Dg|L_4VlqJ?!cGRPSQlGf7dD z#{BLfv9#Mh0Na-KZNuCcCrz~6>o9vft+36u|3HI9E^K~fKO>OWZidG~^aOB&rYWPY z#%(?!3#`pHS2BJ0yZNkg-j?Y$?MrH`k0h)gL~~M2M0+w&H)|-;{?fA&&p$V9!&n{s zjklTBt*oi0rmd=NM2!r!Na;-Vt|Gq(s7?bQ#{0^?==_$LHVadZ+u174g)eWNGf(hrV z#QM(z-5^Kj)qk2SNw)5_tH67r2rUxV>qy3`G*lD|07;a0W}>gWNFnIO&bTm5%F?Mf 
zvdKg3kjEUlcJuSI-B-a6+xK|5KkHLJj(}9&MPkwHBX6gwu50)9BMj+-$QmsP-073! zvqO?i_MnyiSOnkN{e2uj?T=P4ge2~c0+(7n|FV}a7oV48wf(#Di17^*0!F6gM~2lj zXEtN}=*xr)=|0bW4XMN>&$G5EZjJUr+F4CsC(n@rcFPHk8olqNIZf*Q{n|5iyiY#` zT_k!^m+FU=Hg#Rh_XL^GLM~y!t0P#bRF@DfDkHr+l5h`;BKQ#TUTS0qgJ20ESvJT9 zxZu;x&HxXrGysokO-LtV(mRbFU#VYLoo})3Tg=`MJ8#1|G&M(PVs=Ck#6e|4(njBW zo+V(LKa3;H9GCuDdAfQsxb2+1_v7T~s8;9Bk400L8Y3J=BA|l0)rGm5$E^X#Vz@7f z*FMvEJq`kT#}Jl^|AyzS za^5Zk&y&q$&crFvmErYNZ}#vNyxR(<7M8ePVt9*1vTSb#H!^a+i*%u%23)39jGpw0 z1nj&fN=}88Yoo{p09JA2eNSbG&|C;XKP&h0J(B6%A1>>AGu^42i~LV9bS??tEi|1I z3erIGO|@}{R2b|K@fPufhsw@ovSQZTG~btB?E^eYLW1jn7r}S4VG>2cZ}`%lYq$r< z+`#%E3gesk`t*kuA)zeaKHJH~Iqn&79ZI*GL+?uu1t`xblU>J^5xXaaA{WO`*^(6~doR zTh3dlE=1IKKO3hx;}J3I2p3UYqW90C4Sd||3O}(M<_@YWJ};WmhYQ<&sVDUA*_f`} z=SMcqC_H%&0suW*hZl^>=`aYfp58F%o9Gw{sU_bs?O}Zfl+0%>IeWyNcYI>F$9jl? zAUjc25vomr?u4l@kdaHU{b{!fK^#2ndvSuVVqlZ%TqgaO2P2`kjGVXtXuY)W>~xmk z0H^TW$@(y)1?La4edy;|eZSqa?xmH72`l)Cyr&Sb#$PV(WoPk>XyJ>B{`#4mHU|!0-)2ELR7AtS!4S5^gk&4qM`1VduFxt*v zyxDZi;iCwO=JKR-^Yq-?lZhR95b&MOAM}0e=v=cGCt!YYpYgMhMDgi5MKXp-7G2yj zwBgrSY$#eDnCETCE?5;_-0eo(D*(WJ@=cbB;B_rlVU3#R0~(VbLmMNYeO7r&_1U8d zPed7XMfR2?2l)b8@Kg4$#r(UOzExv!-uCVG^m#TG?h=qkp={X;arXha7ixl=MmK8u z_jZXCp9p{YaXpP{uF097YFc@tNqpqusW{NO=)BNZckqLIR{1_N*+BmNWu};Ek^#i9 zP*}h#JcWxf4>yu9Z!db3W#4WxZd$%FA)<7{S#7OlDlhdYEd4qX(@)`YB!d*G1Yr|p zlta7-PEmY}{|WD-aIc}4lA)xYe5>myS=Xu;xAPK_j&fN|Mjm05Bb&V87&8FX5goTS zY0Qn{aRU864JN+lWW*HI^k{}dhWwl6TWo5!i6xhQpWShgo zEt&$@u)~L*k9$c^%4A*R`RQ|k_49m**YMb?@&3BUDN+TjCEFjwO2W+@KL{qHE{$68 z9GZ?6tv+|t*zNgjM?6oiB=RM_fAm;&H7(MN{!U;Y?UO~?4kAFx*wY4x3%8_$bn-Qr z`BSeXcC@ER5mb9WRHJ9=kEf3c#MKh@KD_%{D>Fo#>Jyc8bM}qmBacwIAOgReAE=sa zxKf3@NfS2eJTAAM-w+6LudjWYm|>Mw_Ygr$XyQ3ld*c5}K%N?@8Okp(Z-#f7 zPU1$zB#y@SI=y-%C|WM7O1YlQmxlB7Fc@4Tc&mFUw(U+|!~$NL>Y>QZwUM%}oUc+h zob!#!)ngDOq{|UPFLsu9@Ej9EY;pjRZr_5q!Izkpa>#|dcctr2;CFy0_n-i;l2z|2 zVjFD@HLYnAL;MCCZ6_y!?qN&cGBkUhXVntz=To}q7nxA0V?2>{!p3}QuNeCDnkqm>%?Rnm)eeCfp(t<`dGqCh{70a6<^%+7 
zUq{g`N}60{-19vm?CdI+J@EH#Ck)O6#S7L>Hb^{Y(~|S4op!~iG1o6l8VKn|ZpwTl z3Tmp~kVVe0j#Rz+;NcK(XH`mkG`J+^$VPhVp(4W zXy1DE(PM9_&-fyU7z)yicv3~)`STQQ;Ws{|`8nTEMgBPZV8KYxa$X45`ZMGx;;vh& z?7>=fo#<^(=rNJZktPOJez>;A9Gi|#_w}tWi?KyNjOAk0NQny(+V1%B{^z3%$qBVs zxAwLShlHDdQ6;SG(Zb?Gn{V?X?>Bf+O^f_NL{+W_p&l(8ZeR9n-sSkjNl||Evu}g6 z>MOE5rPximLOHoxaIE6p<4JC~J=FKr4DqgDqzexhJTx7bu|6(2tD&Wpno%=8!s75i z{-B`jjeil*FQm0h2=kd}ZY#CH_SwZKGy&m6fc6VYr0q(nnhVz0 z_?_CYp)G;<_3Rp6gmQdN3s=r2Nj97P)Fl323lM~^X1)tfiY1_u1bSSR-az*teBZqM zdl_QH0Bx{$P^|+$PVU~vQlzU1)|y=I&B}e3XR3@(D|z{J;@jth46J=zmw&JVqX2ZM z*28XQ)DHrscr)}9x=0pse*AxOed0%1Wc^45LB)Tq1yDeWux9T5=9K)+!k~+SL)S`J zh;bc0*BORny0g$&&t+-2{Cj2pWtn`S z1W7r(%V*_g7bu80Hrf8B`H2OcRP|TxW2Y2@O!8J7R=MMj@e_tl`MWv3o5C_p=U2aB9 z^+C`sW?1I818NOkj=!$gA4dmuo~+bSyCS*q_X!G9B*$Y8PZjqu9P7$COhd)ot-^{~X&vUe$g4I)WZ|>U7K~xBmIl-Ki zZ2gW4*o%qT+4jTcSSOHyB;nBgxTW~T`QE*f88Om7k16u1%+J&J0LpcywBk&KQ?<;l zE*v+ECC?F=!xCdVg^quQmWo!JK|* za4|7CcnzY^v_gUaH$DPlov~23r$LF8mKzGGjy#M$uU~6VNPCN)mBSwVwcHNi2#l@m|mw3uLkt!FMH#GPr0Pfd5s?bcDfLQqF}r z!a6s_p^GwC+(v_Au?fOB9Uuyh&XdztU$>d!Q3l9E$NGDOMWU14;)qL;=>x&{01X)xT z<#NwyCP6TR8)Rq(Lq|PPh6J`@-s5tm?YTBDf_i}8qWts6sm>^fGuCmnM5JM2_~ zey`z%`?npjGs8j;K%FRW%S_A`+C14npwoUSbJihJUwa}`i2!9{r`MV?gelJ%>Ao>; zd~d#!9foT$U>PU$UN)=etM_;>=6$pW(1)LPedY6gU$$e3&~m42&_vbcRiZ3;*@U2k zJxCd5hO+OzTkpEDsbY7q$|8nj64iP_ zq;TcDMfiM2PEhAre=F6x0x=Rk^RSHQaeeKiL8+OBqwgbEw-;yE_#S0C1##e#q#B6h z@J}C1Ro)G0W;eSDN}TMVub6LIX9A+pOh)hTU_D5j`H-Oyk>@^HlXTfgc(~H-4Hu{^ z1D9h_e}YS}?QC=&9+hyei#!qM#nY7_4zNqsHUY6bkPgz#)szL5fovk(u0#RWJ8H64 zPf$JGXP_eVcnYqfmWstFcIG1S!G@JmR8X{5#L8~$3=nAb`N*HhQnfqqvRJ)OgPY^_ z`891R?g7f4_Ia=usPJtAx;gjzSyyC9Be;H(&dx0?EEIqME6OWR=#@e^$rAwSyjdN8 zEm>Z{4PJc9zJMKYZh5KuTARZk5hpTZ03@Tr$q0<{kOz1wJwSs>C+9YLuO=z*y3GHm zAM3+?>unH>{RFZ&c_WSAMIQb*39~pKr~*z&{Q?17jOmDxo}IP%)XoFmvhH&_-D?+F-cut`w~m`pAOtcf!z& zQXLj;+%duuVk$pQI!|-Kw&pao^dyV`2Y%+zN^Z`3dsn(G8xMoeqsL298N*)P$BED zgB3{3aQwmQFe9pMZ~_AuYq9}f$oJ>oe{;h5YSylj6;(njy?a3x#keYu zQ~0(b=ht+kX)xFobW2$+h>wveWQ?xg0>26CF7W@fx;<*Wk5=Q~SW0jrNY)<3u0Cfx 
zl_;>8k8;+NR9dzR zgg;u^Tlue?6=}2`s4sCx-ZL>+h-6(DbqmejsQNaWZy}byx|3un*qCcfa_Y2{Sdn$z zKBfH{uFE^qmvv7YqVg&`ij2y^CY&IQm={USFrGF>w=8p~FZI#>&M`RyI(i0l;IzoH zhvX^(6XKH^i1z@)ucFnpwud;wdOhjcv}b8Oi6Wqx6+Qp}QV!b|+LQ612wPuCUB?>= zYM~w^hOCj7Hoq|3tH|Fq!O9$|g6xIzDiM>bzfJnsHNPsZJ8bF0XRa=ow9wpu6}cVOK+Nu{uciHs{cq#a?x;K)0glI5utvX1*@|R=9eU!I=UO94tPI<)! zzdIr2Iyh_20tcXMT;SDK#vPEM?JajUZR;4F<)WX&9BpM zodo*h8g1G5xqj=k`yWEGNJ(30Qg+R}pELGOC^{PYC}!rF^IP{cleIHiK6$JVsDAAg z%f35)Cm-y%w$>#T=@a${nt8u=EYOGJ{}dIQoMPgCuVe7~=8~HA zbR6Sg$N1GJoS9C+c_I_uecOnjSJs1t#yS$TAn^1I#Rlgv1-C;mK-FQk&jY{jci>mG zT^;li?MXxXP64Pr$Xav&l3ec!^rZF1A%Um_lAtcBHLLk9+@A6v65I_faOI1-hgYHyGH$4Xj zY+kk#0I)r8<6b{^a85(2Z}niNe4zlEH?rpRlZ4`qBZ6f?H0flU1T@Hlsajai<8i*W zO+>J0*ur(z)`lZcx%s@MdKUAYc&X^J`J;XCXDCe7Lpo1i>J6DYf&oBZ=m5y1YlSWB z0*OK^H3gxvg>TQJ;N8#z9cHDsT01_WF;VWtom6Q`h_uO)0C zbHldmoSR4zfd9(+alEjGsfR2GBTUDT8jqQO$tAIo?bH7H7$?f#myYG79m1ggER7>C z@{GlBsc=}(CuA^;E zLCs(DIX2^(B$K97Pkm4Jt7UKX2QbQW?@+vV;wsYlaPM~6=m>jZ0qN3xpYM`^IH@9+ zqZkxfhmx(yc2b(fkQ`U(g3Wz_3~h38qwh|eHw)dgG>9YxJW!@;hQ0SkKDxb^L(g?) 
zT_yQxeW)u^iO_O@{h0z+8iqQId`p^r7(cU3Z#RN;lTjkCxtzZFJ5X#zBhV+8*Fpv* z?Sy^zYkQqa>uvspJ!Yu=9oSf^g3kn{UXU=tlb)TuJrAvr{OOO;MyyW0dBbGmmw3;c z@yADw52BKhpKa3`b8|ZXDD<=NV59At#70a&EI>7C@QL5Kv!@swarKpaVVR60@y{=O z-NEMXlm4-#p7>$bZ`aO%pT)eNL?Jmf+4RDD?rfB~pVZ-v*ll??0V%IbI=KNc88uHg zF{IG1a7Bpx94hCzIJlPhFri6@r1<0ouFF-zP=X7^r2q$z{YtlwashrWi|$`xW=cwR z-+%2)#;Xq}yTN28cO$>r)H(M>bPQw}N2V@N=&nkV!F8?mTR%Via zA6(6;u}J1`?gA}RFRrfMLXtTM)Ns-pUJWay8gFWU%4)R1$p7xDzDSoh%Vb&JdHGWO zAo)w;XSS@SifxmOCCYHTt;aOK7kkqr(y!`iv-seuBZSSBHTMCKzN|Cw9-K&IP3eX|B&InYt_B`r zKl$o(w6hQoE?;AYanYU#ho;1Ks%}OJ3%lP}QwHrH3K=?WbCUSm-Ej(^NZaY#)L+D1 z@``>F_l~zx>L|fm_`yV7LFo81wgh9Rs_L*jUaw(Uies&kZK;|JYhQ|cr6L(@lp?`- zUoyprZ<3&4$GsCzG+7RJMOYQB-bf}L*(Ig19f@gfC&55ulBl|w>x&ccaT5h%P)Xb} zw-wQiWvjaR!WdofyI}BhO%oGqpBe)H8$1zTQyq_Ad=OH&R25@8P`Z)Kpw9##I^!yl zT&M3T55|k*R+_wsEYgK{e2XNg#_QDgf&6OJ5x#~B#(3kQWXAWORN{H_o$2Xhf1lUq zv&;{HV!b3-t4+m=r2=i1c5GwVz zciK#FlLul{)UR#?JK_8k6+W*6-Tei$)HYsZY|>0`+G9@~7h~3%5^2s#X;)SA{;UR{ z-$i+tG~B2y8){XQadmo|gG#+39i@5yK#+KU<{22d^~KqXzn3A z&EF_>G)p{3Po`33ja#Q_l2j?Fv+B25YTLu$!(r=y&5!U#T&sVaC>NnlIdeaSJ%2b@ z{?&11CrJYXq2l#omGbQJd40r%dXkEo3fekJXXVXcn=gW!veOKhJBXvJSa!#hPZM=e z1jOC`cY^}K@|a)36jrQQq>oB5$00!hWDo@uc^);0yIj3J8F((|yySsm{MF8RaA?(A_% z=}!Bmd-m7Zqp~Ye*IJ0go@*{YXA757OFthFL(247e1+@7bU3H(i9$;L`2aK`9A;M5 z8SRdkFPMp+GWn`_?XjuDzAuo}@!paK6OUMM|JK#RDqA%*b;kBl&JV!dQ-_RX)Akfd z@+o=QYy!bIx$Htd&Rbaros?n=2O&|x>iR9(ht2#TbO(a2GUoIVlSRqHv+|eTp9`ewWdHl)=KOJ$H$-MuUL>NcJbwf82^yzwD2w_`pgy?lG)_?JFHAf z!m%9}*eg-M&NS(@GD=L90qG&XXs&~g?$P-NygUk8?`*L3)cK?#n=2q{TRimH%nCfT zrR48PcKKc;aWqtHX??NH$ExLhM1Bs^PkR61;S@%j!_|w5oRM;eC9Dy>J_(I%aZAo4 z?^M#TtAsXMxcQUFl2rR$@R~1+%qybzttF&B>A}vNKMf-G9K+7^MUD&7HWvlihS(Ue z&h;+r3O_?S!8TyGY~N=U2=0NrS`XZs?cOAb#Up0vI>i#%BgMA_zCBVPSC~L95S}AX zzkqPi=h+c%+XF4--2BZPGHW=K))HD(hY{VRTzsMBT;o)Z^QBO;T6ccy57H87_M>QR zBJr#N<`>?^ekoqDZP#7K|@=zgGD=xHuolF;)Grg_VJmdbyZRFUt+ zn-$WSs^p7LuPSMD)U$ufRDWkxCFGU>wb(V3Jp`!xja81WU$%`_A%BFl6*}I)m&m+t z@f?&8bSJ-3JW{B7HlBK?Wwnn)k-l1v+T9XW;a+6r9o@bWR-#{svr`IZ)z6F{Rixm~ 
z`+P&4ujD>F5_WC*+Yg>BjOa*DVs?GbdnfFIz?sH!`)=-R%L3?>qoKICDuG? z+f{dYD6F{3+a9VF(7P^iRp--LC63=Y_OzM{(ai30+|E`G($dMl`-v{9mY%^sg*0hZ zE`uhmZ{YERm$jd9qP2pAWx~#r6OlADeRe`Tp|f__ko$gmxaLtBJ>h%jr}mlQ=_k8w zf;(fF&-bYJx*jyAYGORf4=25xmxM>AL*!ywoJDQTeQ@KvKdn&0tb;yU<`7?BdYN*iH#_1MxTp?f zgNtmP8T)IardZQvkN%J>W<=8JK#kV}yU~hb4t2*z1xsCIfbuHN1#P0dXy^`IgMs;K zQw-rhWcl14=Vd$_qeJ$e#E z#q{cH-B&gwJfLAx(6QQG_9600e<@nDyhH?|&s?2iZcKu>7#eHeUbKOzze+TXq~;w1 z$aGC$ak!>hBFK!kz?#OA$w(s~&-vhhP z1zp{2>}=Kz^T#EO`$HL(3GoYD(>4$h2Q5k2Cyp`ob?7Gyks}IPlCs6&nK|`SB&C`M zfdxxix5=ct(3x|ooK*OlRk^y9^3}aU9w4bL*x#!kzVkY80$)`nZRApVG}IV40DNWa z64ZI-|1k*2*l%7JfEZr@dE$twz_WwQLfQge!(xk;m!h%zqB*FsKa)vs>Vw5@Zl+~E zQOB_`vtx0c!$-ocY^{&x>J1YDWia`ay=DVez!|9E!_|rX?_1w?F1XmkUecG=n}Vwl zS`QQhHpT2o=` zm-T<%-BHV8iL{_`uHI5il<+92zcSk1DOv(3F+!wP5^2YRe|sa1W&di^Xq#)!gpGvd z;pi7@DOcnTde25GV=5`vO$s!gdkKBt&vv^uKL}38$VaoXZPDYKdn9;qNbI<$wVSRF|@MtM=M*IP8bfeKiyB>yDD-Ij9AzCo=dfh-;-+TT`R1PQrW&!ttQtxZB007(Nxb$fzOqUPG&&CZ ztMk^F3?p=B=-YA8&oHT>I+kW1UYkT+;$Ufea1NpJjxBS={9C1)!wN#~ogKo6F5j-F7ss4rEjiUrG-NY!br#{l zS7-R5Q7qwCdAQVkbFvH!P#Uxo!2USzE1N zUap*?j^&=JDN1HvoS{yA3TKBs9py50)3RqUh-jwlWgYa*4WFNI8oXkG-# zSZt!5wX}1dkv+aN2^(+88rjBaxgHO8vy49i$*(sHq&sLl>N_1+62uce2Pw69Wi4fg z)L%@8{LV|b)91}l@JZ|J2D^jj{Qei*iaJzpP4a=Ve&_?tI@e8U`5R%$NE_`^NHN~< zJlmDD@J2>Gp~l&VFv95~RpyRN{XL{muD=9n!n$bo&C39!RY*OxRz_R8jM`Z{|B8&v zx}n$l(KR*sFK*6tmK^`9gVaX#SU$Irb{T6acY>T<3{eQjHni7+!2b(+C^u zOEnv_#;?0&Ndgnd*9~Nin?+l`7eD;z>+`y7&yc)~62F+(-cCGE>AS}yK&FasxpoQN z1G#I%VrQ*+D|HtmH%X>cxiG{f>6`H@1xGZQohsxCAHH=9_q%aMW)z-Aa6Yrcxi~GG zwh;HnqS5igbzq!%K2XLUnIq(~AD@neyc~JqM*S!Dle=~XCFn%ElS=iq2KQwY)y7^I zI%{**;o9q5E4kgp@Et$p*?UWo_9@FJpoD;fvQPW!n>!S;JXaI*M!G}0bOY1F?+XE>z=5H>BV2lUx|(`O7BF5K{=1_T_Us3cr?bY7_oMd+-~2wOh$>31R~;A}p}X8<0oWxF)0IcvofYlF=*ghEwq*W9zQZEK+`04l{Ir98L;tSfW2K+g$;A@R$iku^>TYZc|3*&Yj;!J! 
zi8B4iplSa!R(7)zJh=3{a7ekrkcABwH$b}O1tWrfE1xdM!BCoK(<1mKCO0y)_#Mp# zuN=O-c%1%x+lvML13r<>m#sEeYG-9~{gkDjZW;s>p@II|vozmh_UV;9Nk4X}Z<{Tc z@-9IR^{!-ireiFB3=E-LfdbY1h_qbtZ?xswMP9}lLP4A|hE?+xo^H3?8}hs>#brWQ z^{y(bNc^a2JCJ|2>C#y(-K>V6+ZXuz;_;B-7vlOaNGN#KlwBWK7hN93R8^sSj78_p z5qG*W*ZNVL_O)MJ>(!_aS6Iyj=M{(l*1`PiKm43gNQSlkTz|+||H6ep&L$XZ^j1?M zw0fsLaqiuY2z8wAhEx2=Mc0m}mp&;+$xJhcW%%xqJf4*@5Gls2cP)ZZ`8G_2u^S18 z@H*Z<+)0%%wtZXImI_} znl~N3k(ZUJ{n|>s{rOBJ+ck|I+|!|>{dw>*VFfKTM04jT`A5&k+4K4)cnu`xXp?+- zj+6ENC4hVINiZ)bQ50D1-){Y_S`Mh5dYHXwDRI@ZXYnp4zn+xgNAuq$QEb@Q4^hZc z?Xw`g#+_EKd6)GPrmeYPSs_5YnP=LfH3|$vL^aF>uc|RR2mBGM{LpU!{BN03uTnRQ zH;H>_*iMl-W*jVE-R-n>4Zrd*g+`~w{n&7{QR|63E@H0tp|=z(vAdb8+3=~hAoQ_* zhO+%0SiilV)vuJubf!PUXilugYWivMpz}N9?_vQc8J*yom0M>64avHN&`eccG+%@w zF)l@DvI-_$rf^WNMN9bW{L$C9ox7Rzk;%Uc)n9NQ3Q%{|dZ@t*KmI~Su|={B1^p9- ziV2|vmv;tpFfa%5!!u8=$M~^;s_fIrKLhmN&?wYLN`n{f|Hn}M--MweQiPO`0|pmy z;G**hruFPs+*iLCi#kP+urr`=G1QeVADExNu}E2{w8gGXJSi1!=GDi8 zF*2BFCYYI!uEky1sMY~IPNpX~b*1GSR|(5U+k`{@#$?$VsfF=hkOj`#eM@=S8 zPyg!}{`J=_W{7aL-jg~FsA+|{fOH>XNKgl{2A?pYj|nC}?kDHZ8}I-UpgB$)ur z!0*1QkJ4*ptVq)P-5mLSo6xI4!5IdVTXvz1w%!#bU)8SBk^Yh)eR90F(ou4|V-~PQ z5sy%Yky&7~i$b7WbQa8MMe_ZAegEFX7#UquU=6<9#ht;MWSu*#Wle_wu9kiVBoa?R zBvv2A5KOq+F!N63DGR^zxP-@cJFBIA1lfOt=YKyC@=Aa!xuxJRB2`(R6E9G?NB&&eH zDS@oAESlgHhr2=sFzAJjVU##SyN+tyl zC@ix8YR0jp`-DNE9~t}fDWiQGHLB{eg5l!wF%&b|i``uHZs)N{G!?=Ky*4Lhcixqy zao*%UD>4KQva?`#Iw0nX;W*coeDfl>*mPfe=>|qKs*fp!vhGebq8x9Tz26#?wpRDM z&^o37=3&{eNLHKYRymY0ZO8j^PjTjjYUbOdxM}`->HH>sAp>!Lzot0m_dh*j-v&v^ z*82+p7mF5zl`o{pgnIwt2|?i9AuwxszKa!*F$Pk$3;;hUlF5BX)r!3_z-3S}q`~e_ zxOJlm>nVbe)^m~mwceXJODbEEQ)wX9mY6jJkoYVHfDr2^$MP{2#Mw3XlS3D`Q@D+5 zulikJes>$*M!@>B#@dPv2VoJTqjBS=69Wg7;FbM0+$j~Po%%n3a;h52krgydE12_= z$Q;Ki^*|`rj@00LLIlX@sy-rO&gXZJPn8I2s|M8XHsX)4C`kzZdp-H_f0&ZfVci-{KBZ2R)geRs?iU zfzR;~;Ozx{o5Ghs2BFgC%@^|p7-aA}jf$dNV|88xhoMM+s&~}18rTxV-SVm-kSMl4 ztU?IUJLOoXJ^b+@Gi>n&0Iwd4aCK{!H!cGHj2WVtJBb3H2{Ogre}r0kz7F7L^eq;~ zWEniEWK~CYzN?lq|8uXHuzx*w?0>&`0Bl2rkJhM5Z`R!xzoeId6>`G*NjPU=3?M9I 
z=x(4c*#Q4R0%e&o*k~Yl%Gwsz$Et3FaH#zUcPPidZXcc;?L&!=XI-J8`ko|NjD0PJKzmF_nrb|%X=`0}5?GpOGi2$dB$ zD{#!SVH4uq|MF6H;0QnzI#@|JWX&)vAA;~;Kj|FEbC)vHta!q$)?wg?0=O$-e@JaNMaN9#X57rCR=^J#Q)e@NZ7$ByTVpuq=k$Kqo(sZ7+P>KvBl9?c7 zT}rTjjQPj2RqL3ms}9awj;sY0>j36GC*0y2Qz>uo=qV>3Q9U}k?aM$Ru<|FK7xgUo z(V~LUO#X=o3wGwu;z=5;W;Hq@@X%ZYDPWtuBS6x;VIIBVk8wpA?f#hCpuuGi!a7|M zYcfT88ViaVDmcjpTm))X`68v6E6hipUZ8Pnbp5@38whGJIIC2Cz0`e7#VpfA8(()W${FOGcEQ>^jHS%hTcG_xrBrr9Mq~ zVSmma+c%L`#Par~-(#%5aPXS~aPxdqFA@H}dJK^a`*WuU6V7Aw65PdEG-zpLAr%RX zijkTfAKkoqkOa#PYep~!y|-rMT=UhWWlI!bktoUIHrLk|8B}^dJ?AX2&!KybQ;dtk z!*TQw%inCC#l1Z*8PvfNy#gqNbDWlv^L9NM3O7pUMH|bAuWx4qoEE)Brc4~KxDO)r zhsgk({$YSovb~)`=v|696_k!TXGQeC^RrJ6#2itz`xMsw@MNy^IoWFa zWYit;EkuN)TH744m!px!jX^-On_>4ggq@>Nr(Z3Aq>z|N&~y3`Yoi{P$7g7iWq72i zlQlXJ@u@+T@_hO}IMK}Zs`BW~Ve7}ot=EnR@8lbWglWt-p5v;{@T`p3md>OJy;qzNVay}S=m zO@jgg|0RVXOX)6+A^&Zr|5@KNzt)%O_xf^1S{(695YaC;%xr*|YxvUVDgW?_XHSs# z6i3q8BZ27G9{x4a@Cs0h^=R@d$3c&ht74p!Q#&hc`*#eh4@WnmYyjnw^P&`Iwt>xv{;*7vk~PXAbVm^Zry4O zV5-dL)t}b(e3NL)Ev+U0=Tvi|PIXG6z|3z3!@EW{Cs%j_w85izZp@CmFY1CUB*j^v zcu|7fvOvk=3trFie9X?F$Pk%fF7Tln6w6TI^YuR2_47gO74TXVsBToQa1WO|r@7s+ zu2I^~1`K)&U#kNoOID3RSOaeRv#Ov|Q6?dfg0Ala$%rqT_-kF@=%RFNuQALiY-jJj zpzV76zd>L3SI~F;9rP8*gEd2#H#gOqe-`VPJB?+*lK!1d+HuU5L1z+W&N|oi0SuBp z9E^Ki-6vkb>g#ibxljpkf!}~LT?7u1wp}vFd3Gl7U8Obr22Z3aWb0q~VlAh}uXjA> zj7Pocd6-iO1m#(@*LF8=xhQDj_93w{a}GF;A7Im)MRnLs@a)V83djA)Q9^}183y3t zEN=||y~0ooKOnvVM$L2@0~%6C@+=yut4J7fAblD+FSfr{3$)>yPWFZ1W?ftsy}J6b zx3Ft{>CrtMboqN6OlFPukdB+vm%xUWZuIOO!0qz`gx+UlBnfR5_}99PtUml2sS?|8 z5mo%r&fk9$ewHdadv~#3tv+Zdf0NP##?4>L$IsFtQQ)kbWUmX-=2>mcB>rIg{5mB5 zD4tLM1TmNd{6$NFgR;(wYjFu()Nzvv^l4eJvm%u}CxZweslhkY)61@k5u{kA(l-`R}>g zUK}%vX*nSLVEq=#XcC3{ZjnG`>r&b8x3nKwO$rCRZ%!XmKoCv^a~JC{yv zzf@;B&x%R;zlmVOuS78D&qPp{b@XUDf@c=2>*0F3y>~-0$@xWuwv;@hnQ2dxuEV93 zy49lRCQy49bj$`Bec7rR=##`Lh-8F-6ThUZX;8t7vCq6JOLhVO$I${Ub8n*;38@AQ z9!z1bhelwqwiM3KiUTTicD^rKyZHb6>M5iDI?1Ykp5(7CvcF7%HZ^1ddmB2P3=_q#mdXWMei`ttg!gJ8r?kuChq{= 
zD-enu#A<-z@q+x#H-a*S-dre8LP*jG#m~rjSJ*W3><};082pbrjxV%m`nyXR!HDl~ z22)Q2cct?2?o#GDIUg5c2R~&MBnorDkkO52sGWyGD$|ao>#cvqquXnsv_!-tFaq>| zJ3~dX@9zx9F6&PQ8vr67#VB|NM9C29TPAW%^OU<_(c&)iLXS+usr>MTAOZ~ z(rZILT#18&wBq-bfuxGjAS{U|ciESM@xCn7FeJ1=m(G}$&&MCRTrnB=kFFM~!zGO( zaD^IRN?w5J=q3#F_HAnx2_PY zgv5v$Q<fL?s#K1zz*|Dcp(({XCKGM$grWoh&cV&{GTkD zV*GyoDnhCMzI^{TQ2p2k?$-gpH`~xxa zEQ-OlVS{Q|{?$GuQ6#?8D0=I^g97YN7^+p_60{-w{R0W~uOqmL>d|*U@f@KG*3xh(DehE< z(yYILEnGCS+l`wwc*mL;`NaL9kV>4RWGr{?#)kzOyYXs|JRcN7to1P!>kvyGa|x)J z55l-n6}+?>r=`6Qi*{EAHDY698wakYpqVTqm&s@%tSdaX&K}nVbB=4XfD15UT%qp2UZ{%YdJ%IMt4KL6%f3-SMCw=D> znX(G%T<>gqHoC%mQhu%T@pAW zh3H{#C4eTPIgDiy2zcG;dLBB*7W$n>*+%=sJ8N$OcY%Bix%p9@Qz&RW29mvZ(#+MO z{-VO!*T6=^QE{L4-?@h{($AgcIgoDtC3t>AqF1hy7j0`<@3Y=#4|hp6c|KEE?S9$X zHOPMKORCUD@GXur^f50@sv#zt?hLBv@Pc=xJ{w3=**Ad*Cex%4#0IuN8?UhhSSboU zMFmJ(t#5?^1il2A-o?<=zk=izUM{EN$Cu%JTaf_0%~-!K6`>Gw|2NLvU$F951W$46 zWrhUkg}nAO+2fTOl9k^8_f{SSL>h(vHcsk0$9}75UkRT(1au1JN)(wU4Hije>~s z#t*hFq`Aq!oIHVY!IOKTh?|v7eZbC*6PK`9R_cbJ0RK)w&8Z`C>Atx3&*=

    D6yt4bsA^a#OHIE+u-qCB3DRKit=SQ_pMC2t1kjECy8Nki4fn=_dOfeFAAmMBsiPv_e6Z4wGU_}MUn_UP! zLJbpwjxopj4j|%kH?fFkiLK^xvQnZ!3poFpp%QllUbh?Xg6OTM!mo&y0!DyKPUGwq zu@9mz8DP!mB@go9sx?)s6sU*yGp;|C^a5@?MnD95%KoQw06RkHe3=5E$-hg9Kn+=T zlmC3S%f%>}gUrHilr*kk+bX6kWU08ce`6wCoVX%1ZH7r;fpioYsthx>4tRQB%HHJ5 zU1x5w0&G}Z4odCj*#UGp@!BROJTk8!x4V55HBANpF80F8y?I^1O}zmmlz&24w<{R)xN_&U;X)yD9_O9XKHfwD>VVy=J_}ZU=F1uRj$TkeW915 z&YOkbmnZ`|4eW0%C}tSSjuQhX5B}-;4%*hnOK;*#*AviaV2+#ywT@n_-I1Q*kbX%) zR3luI#SKTSFbqsOM4O$aJ=6Duup#f=7G_rjAo{8=DTg05iXz)#W5g%p8!ICV=%h5^ z8}|Y9G;8?*RJ;zJpu`s^nT-)fZV4@qLMYo~{X#-RVsL3HzK0liV8f??rKOt?LK2HG zms}NZek%uI_2dVN9$UcU&Wi@fpHgryq8(dgF~)W$XfZ5~yE0rr`T31@VPVnJNwpv! z)}UVEEK{`WtRVd?lZ2V;;gh}FY0#{7EWn6)BS?|tTb*Knwu)fzs5fY*i(~f;5@A!B z;`4Lrsnrz4c;)zbdA&hN;Tf#M69-2%89pHr*(cy$d57^+;I%NXoyTTFk`&BJ#GJ-& z5dJ;?{Ec+^87EKvvT^td$Zr$ReyO;SMc*a!JE-538K=a)jF&4Ex_aUB=$(WCaCA-l z+g1PQKL4EwWtN$U&oDrcM$^V+Wcf0SlsEo&xlLe$m{09h#1b4z@%fo9JGOM2j>{53 z6CoT>02vrn4Pe47nMZ)!h0@?x^ zaNa(^N}K{+$zJ+92MuYA{^fr3&(jk|y6gKhqN4sMqPi_^V~#5!QO?4Eox4I{nCW%C zkj!q^b$xBH)bK@fHeCpp0Cyvng_e{{&NF;cJo5q!4=pJ7fW@6N5K??~A6{e(Z? zG6G#Ul=Kfl=f-=vQu5W~qX0quKy{)hzl4S}%Y<3%mTTNEh3RzDrv%HyNv{opUSd1K9#c0f1Y5kcC+voFJM%_73VDQQsCf zs81ymaFql_lws%I#iwe`o*>r9vt?8rWh}NJ(P!*^&MN-MDd;QU58T4b9Y$%#VYHTD z{5I5!sfRDUOL^HmSYz6@lNFtihF4}NS?}0FDpOH&R3q> zWUrPG>$;8mGTuUvv3_QpFo-5WHN`|n+$E!Zoi3Npjv*`{)Dk^H5zTU7?5p<;CKiY$ zjyE>YNb0%cN>9?$~%&x+bU=5ugwyqLh-U4kIt;YUEcGHONxIs8>JGL4}|mT)Xy! 
z{JAJ-+JH5mWi?(Z7RRhbk9#s&pphkK^Is7(J<)a6eC_&#$L_%LCL4#wUqnh1Y3*T{ z&zO<%O3WNgD(~UGxzqjFkcYPPpN1F|@DbV%WJ<%-z^4a3E9CPm0BF*RZA!oOaxm(B zdpU?|dw>h19%1z~V9kHQWutd^b%tn(s_h2j94702ElUD)m2Yb}tAKUK(T(kwq=5v} zmEagxO-TXj+XjLa{DqXiu~wVFS+o|1poR5z+`&7~GbrNBA~c<6ryVCOp9U9fT8c4* zC6Lv_xst9Wm~Eh@VCthK;fuBTza;y&=#mt$+^$#v+4wR;{g~w$!A?_$l^kKP-QUv_5}bDSOi~?Y8*Fa2^ppAK^1zJW(dlI}s#B`8dmgfkI&Ed09AwJ;p87^LTA{|)jjghjRCsXYb~v+H1g{vqzCa@0jo#bN<0 z9r@?+k?!b(!KxW#!083QgagRU9(?q9kJfTAlnaqYM7FWRq|)U@K*1VYKxU&vsXXr) zK`vCtFMRl(dOX2KLFvvexK|+RXXZ2;k{5y2Iu+p!e>*OA%@3k;1sulg7J!*cZuvcs zWA$@c1#}f?)s?WMH-gQq->NN!H8OvOSJ&cw(@XgvdCKq$n78>^Rt-T}M@wL+9`g* zQNU{EZ%pc6iW6Gq8COxVgmR`wHbVuhP>?@<#MLCT!y1D!tTf@(CmZk!yK3{X|E#?X^6xiQ=NO`xjBMLtr!m{Byd?FG& zlSxAZm6jp497yvI;RvaT@{{QNyOi?f1<q!s@0f@)Z z^$jAhU4fiK3RL1DjEaWk5&>?tl#d?qTU4;V-4BXKfAwjzjY#JmsAl90?z|fA`5`ha zaRMAnmEKS`PGXeny%oOEb2^>U{+|ROybAmctlki;ROad%i&i%e&nXET}XfiE{@yIIvpfrf?{u9b|hE_&36 z#c1dmf~qx)=Q=eCYS<|AG@GE>ge_(0-#}@S0D2#2-7J!Nz^e|FDfbxsq=KB(g~bJ0 zSr8?whE^NQh-q+1>MP&30I7^=0H>?$ML$H@@BQ11C1|8D#(wCcbPydh|796|aH!%P zaW>VUz}$|A6R~IXVG~G0P3zMMq|jOJ=!$Pn0MTU*@h7N=YN`{SIAuy4 z<4Z;K5!@hEfR&NlAnQ5~y!8d}OuFzEVLCTg9jpJ`Q8j<=C%+vP;F#6IPbLgv;5`jZ z4w)aD=;?NW^fBgH^VrsJ3gAOUSn=UN(&lWTe^S~9wbVHcGa8Znt_0j(2PrUs_Ag-W#x$}JEj+2`qzZ_vrYS!u50rja3c1KS}lq^bBT$#tp<;bE}%`v5PPW9`J1AUF!yxpw+Ny~aKV>n{6) z?3BSKu!{5f6$b=UNbDGaM@a$2r89YAKZlM2s6bE^>_?4!GY`VD;7WUUp_BQ0twu`kxmwoiA$Og zxI0I2Iq!*@J$A<~eUMBLWw2V@&Wp)^Dq(mX+6$xht}{5IifGZeinsgkG(S@Fp^F=> zB@nHrPzLG)&MLqfm1pBEaTluBx7P1+2ZtO%JP3)q05VK5Ca$lG`IP7g<@&v5di@A0 ztX6;_Q=5i3{247h`H6o3SGd3N50H4xSF+@!$aNF-%!LT=B8_KEJ@ki6k;jq0qa@!# z3reE`DJXa3hX#wUO3LrDOp2;1yP%{JS`}SVzo%rs-j{$H_&t(FXB`>6$J2h}o|#m$ zpanbn7mPN$PClHZu4{r`EqZH5TaGzeY*qZ@pTZW=9%wi3xiq?lh})+A?J@d$vMh`g zEQ%E9UA1k}ZLu}F1YI1w|7Dy=`LitiSn)q% z0}26ug^xA$dbihpl)o3lPD#TTW4ix8&~ggz#|Rp+jE%(PUH^fgs9hvHG#P|lXjr~< zOxllJPf8y5gYp9}Yd(XJfE4DG|Hj)XyuTp4GF9u?xMlSozw)zqgM3L1BAJ86f!7iL zKK%ZIQYi!gIv^u=nsWpmp@Vq-Q=~%h4TrZO1b+@a zYuSG^^m&`~zdS~-qn|`m-hoXmGihu6o!W 
zF0FLce4EJ7>W?b5I})5i`Iji?*D6APhM4j+A-|?P5bNTK>5z?R0U6p%&3?son}f_V ziB7ndTR!{NKUD0WRolP5JWl-yk^s-hziWnoIu1))%{|n}aE&d)_kR1l>qpHQ+HZ*m7_MeoClx4f$(NbOA=b`Vag?EP>Y8d{=n(11{ zetuV1WooKdPBGMIj5T%P==Osri8}zx>)ZW68_5z;-mr(4N9H)4$R!a@DnzCnr-G`nTt~@cwQlI+*KCH)7$ zdG{6>3&&m|4^_g zu)w<1n?0Vz_g5f)ffN{Ng9gIw8UnHYTdj957TY8;)aHD;^YYpRL}FKTToqarM`i@? zQxi9m)Xq)M?M*a8ta2aOC*L10dp^*NlJ7=5vY=u*T<6UTPXxt2p4S#%&tNxP8%@bR z=xsa*>(i-}EnM4E3zZa2&iR$Vh^#z?|b$sX4PqW$O5x6d@5YB-9%cM5R8nkCrV+%$YyH6_-om zr|XwHeRdeJGzejM7lqk>(MYjND80^NI;z2CAybKy+Hv>Y8;}dE%;xuO=#;k}+ zn~A8xgS*w+CGm^<+n#c&`mz1=1UrNt(0d{?1J>P%slDal-;$~Pflv9H(Z%m8Gci+c zM~PYNZJw@}>8O`HjKc{IGEJvB)5)Lh5;=UBWG|mMeV9D>8bh~ULScaS8YqIZ>c#8m zV;bLEP8}4ZTF1IUbqq*10JIjymYa(5uR6;lf(93^r^b;kP$t;Dpzs zUPi5}PT$$*e`aCntjE+uh@LcJ{X8y}h%sJ>umGJ$dvf%pQ6Hn_l-Ew=VppsZ@AP-k z5CXc7(JhQ>4YF@BOXRK1$0bVvf9T8A%>DR-^!}vl5mICDWH;qhf}YrSiA4>Jk=JX# zobPlfE)igUV#5RG3SwBJ;zX$ohi1y?hRxFt%dXU@yqI6Y-qWHQa2SLI;E5~Xq`cWd z^Rt|(kf3n%F9T^zCrZVo+&87zy$r*F?zejv+o9A_g(!VNO|Fi^WuNVLsY=cJJ;&=T zRdZpj2A;3CWta<@h(-&T10%@db(UFF_miOasl$-fz5;7epOgJ=>~G9*H@z!W3TNm! 
zyk0KyIx_IP6j?bp459h;eC0Um|3qDC)S*+Vd*b?Gf91z(B|LlP=o2>o0blP++oeus z9;7Rlm-Pw>Q*7VOoOX#=By;BdjkYg+y^WS!^1t5&n9(vvtMI>=(NPR|869xP9L0ANl1P(BS3D}zZj`E)Da%zDB#r(Y1qleXHk?7wog74WGM60 zGE}jtz}ek7X*!Zj%wvWeHlA6QOzw7$Me3&~itD(XU$3U5C~pSJEgGy1W*L?ohE8|V z?ntqV_AR;20fVNO#Zev?d8&cAJ<(TChKHd9ial@#zL+e4=3H+oQeEMoDr8RGxm0C8 zu68>71T8!^tE;H1o0uh9DnaE{`-IdRA}Q;}il*2P9YRMHVO;YM4dKNNOX{7?7Loob_j`t2fUR z!Yw~Cq1kJphTY*$cigx2qzLk4(<{cCgI&xX$Dc5K*6LTFoQWcBth1aFISEK!I&%`2 ztPl=Ds`c0(bdP=T?b1K+B~D6h70MSXu7Vgn?RL2oX1TD`d3z{*sJ-WA^J?=Ov{3f2 zie`dj6hEYqu&$xff$N^!(hZ(o!+g*t?@tj=szL6yTjRH)=Zq<({@Z{Xn zBrqO1FY}>9_fgIz_DPX5pD31ZN01eH0Jkhzzm8ww2Y3^Ou&_igC@%u4T-b-Q4y+e| z2f;f($G2FRn81M1$gD|Mb#U)tV8nf;j!fDRAINo_@_;J1(hAjy9+B8#1@QQ|v29L+ zV_&DuO(SDXP9Z&NQ5sG?{`_=}Ng8+tllm=e)L@}^TUcYH&lNiGs^;t_6Iv&6`*WkQ z&P7Z4ODQf4$(1*oeA$G*62kRz6JQ%)v<01777Azo&=r!q!IQ2d%zAe_ zF7|zEx@1&!a?x~i;Fk5Ngp3L!<13sACJ1Egbi8*)*-mW>)AtFK?K`>XSIL5t3Vi~DvCY`G%I)tdAF$5cxk{_bQVpYxZUSTs*A}_tQ+9+Zn zsAc7~>Bf3KU9U>E28Hg?iWmhc#WWs2bxCNtK?Lv2=3XqwDH{^>eBYeN+`6arpqicx z7qxcARVy-ZuHIX%NS5kjgNk)JFc_X0RzI~jEl9+5ZfV%*d;Ic?e*GSM!&T+RJMPrN z*B1gUixF*kho8*#?1#DUe17fizRr7~aCUlDz;)0!T7(_z%9*z=@}buLZQaG%Q7Ic8 zPcia^#hOb8ws%G!&+v&sM+EBphw0?zvMPhTM`kZ-Zu7hCK-<{u{bZkPCz<`YtmzpR zs0Pi^_y&6W>vH8JBs+vlw5|Rp00tpFn-4p{ZCDV5QBJXMHxSG*U9n=CAN> zj?{HO=H=d5GeqhE%tb=Fr-#VLu1(x2Q|;L;9B8agV}7D?aFMvoxmsL4l=g`#H!jyF z$gEZyM}q}yITF-H{3_j-H?y!*@9k(vha+mq+<7I@V8?9zsBreqOKUn+X))X^8`H`GjeX<&xt-CTJP z)_!`qS_#tt0Rh#&0)ikv5D-Y3WO!%+u4aFdz$;x45GYIvw)X6bY%7(Z+k`p$teoqh z(5Za+xHqd&>ry+Et*+hn+G287LD|OWB~sdk-HjS`gxl=1PCn9O9BrTYBq#B6@3HFx z)b}*lVPX~bQ+0%7Mf>pu*sYZ1v$@#`ZCH0 zh;-A=LoL|<+djIcQt_mY8f9JVW*)WxWK^iw&(Qa z0pFUy-eGa`DyDBON*q)tb6o_#K6ihXg2k4E{jPF;3^2FF;0ZYGc%~psgR)p?i9YnE0@J*f@cp!?MAK8_-Y^5QK zG%xz{mFl{}Y@tNQ^L^n>nO5?!wsKn6y&<}Xnqef)5qKvQ%-Um_s^4w*Bu~AA4wqUx zkb@1L-SwM_x+56f-*_gVOrW~7(dY)*Tk0xUpFUi*V~3py4TJs4YV6*QZrzC<#403b z0x3^5!uJy#>xm+E)Kev)0C#s2dgzZ6~XUoga&8r+2wVegtq6)>Y7pmG%R{%^rp1IkIZ~*pO*yT$6ouE+HdVOVq-Zn 
zZrl$j*u6<*{d4t~y07{-UzRObymBWKGKL)v=MFavG-P+d70VsoBn=mSSsQiSuWw{= z>dwAyJU%;ZG@)RUa`dFqx_V`wC%fz1Z^j6vOr0%v-q_BHcy8MDo0nkN)y}6Y*=%7I zBL;C*eiyeoTQ$n!=2^qR%k=%sx54Cs7e2f2&CsH@4t1tnT_wv)pZH0P)8j6dtx1n> ztQ(F)%fdo;!w46SV%+7tUa+n|{33SkIx?;>;)Pc9jcz$^1=J0@$prRtUsmIdu!E0+ z4Q*Jp!8|Ts!+zCdZi#oiOvTxX&(d~5xoTT1yCeHVV)3zO28~yr$Z&iI z9!jf@m(=&D3`6U#dp%)OJ%**y<&U1 zelz`EYI{y*?CY4c*rW!_ieZ(ltr4Bc@DRA`6kEcLir0*Ktm`SgyAjO$D!SZy?MXu% z=NZGUiq~hAFs}$x0PDw9v*%AU{qMLDcx z@AygG{s_4j(ALCVL{Z-*ZmgP3d9r$S`Gx^@Vq)%Ni0QTO%KU$2^n4YfvWZ`uaTQo7 zDZ5;dZ(mH%u=#=g2@TGu_S&3z~D%ScFU8Y_i1>Pdf zBG8r6-YeM0e^eBtLXk&2agGwopy^p;=bpEA{@7}gmXlbfuv7W=qYJiYOscHV%5Spp zW^0KQh3)>E-mt8;%`MCMu9$gSa!L-$9j3~e4?Iq%ug_zvv2&FjSUTOOZv zIS4kHFyAGU__eM&CEn;hCeNth3)C&&+H_gDNh>eOn$SM$JD?dK`$%WMLQ5TLS61ZU zAE+F*f+|v2(uWFL2)RWkPb)^7aNjrF$!eS*+EbRrkiDQvXy*22Dq_u?K6^fq`Nn92 zFDbMVjstPw^E|bvy(~nXa?!hXuW)nFXu{J=lz?&FaazsEGm-Ymi06AVY1RC2c);ZS z5r6OQo#3kRUKhEV{pc%xPCK88BS=KvYq`b&?niQTPMTA?kFip_9hBReNE z_!zL@_`;4?FURS}Z^58y{dXcl;AfloL?r_leJyUcoctr3tDZPFhFj(N?UcB0)A%aX zGf&oX6p(Al3cBv~npi`#exxv~-p~7ff_~$hCIjB#GLd*&j5vII4fMClJh*{*HhvOB zej3;_70|SLV_@AH<$G7A0MEq;Vb_X%{zv=1^!|irlZIfd7BR$eLPPEgJn`6H+S0R0 z->JQOv-#Aw6LzP&Qt|syj>C#wOv~Gdii?2u!p!CuQ>bw;gWNP#XsC*eThf-T;g(=? 
zY(|&v3|n#$*XI8GI^3L6r>WREmWg(9vQMl`}M$bud~6ik5232Q~iE_ zMWNz(o^@e)Y~HBoaD$LjzW}Zlbo0nO|E;dF|10I!j|%*ovQ?HY?UikO6$*YR2NgJ8 zOyNM20#jv3#Qi&zA5QBukljo-b@R-{t3xuq60~Oa8V%<)wu^e`P$+c(jTz)7SQtO( z%DQA-fn}w7s8AXn(wOV?NS~%`g`k^G_?0`3zdaf+KO-yLQ6| zYM*i9e*re~VYlsjFp<+x%)XNJbQo>I_IF0hCzTZHkh}txW|hH{=Mx4)M@0oVS`sw>JiL>lO45{#)U`YK>MTNji>vNkL#cGlMQCoVzI|7V(n0~6;lr}^%3L3XL+;|y zE_mou;|{GT;7|7xgl&zu(zxw94L82i*0_5-TztBjKt7`!CY2WyBja5IS2<*= z)-NY+B%g8Gi_?8f*UW~5M)Bqv;zig2%(uLB$k_^B*hmAzx_aFa3;!ltTieo+a*}u0@LAu%+RLhN!re!qo(qcvvih7- zX{@=QcQs^jd3VF)=lo+uTZh-q!S1E{^* zvBP$&EvB2`99^~3G||_$24^>m@PZ4gt$91xE1i{?$=*gX<&c3L0ec}v<#&~+uVj|9#+p|5XspUIWhx!W=j1STDpH7KUwGb^ zCY+P9GTv!VA6!7Pkh@s2b{vV!V>nYKBT^b$jyAY|I%n>>+JCuDen zKz-=J{b9jOCowk+!N%ilE(-)?k?W=Mo*m_d+zfZgq|p!h?lb{34>uZh^qOy6Zztzk zHI@@{>jGu`bv>Q-C98CDz3RBYG#d?@;nz1PY8-7Wn21n%8jGQsvqXaL7=}S6aF%F3 z?}?vDtK2n=Pt0=B!c-EvyixAG0LVPtLbv^!30nuu>1+L_wqtWZ($^+DR`{hjzJ7Ovk0L9k_0QGyOnr;h+8Unzu3-B;~kXeh@u;$kC<1eoDd6WWtVrSjmYk;la9oHYgY- zty)+aucKpib#$^)Y28uj=)Pjz*5+Y9Eh5KQ-jLo+-+VUeAD>C@r(U@yaOP0HJqyJV zEL}(&{*XoHX5x-s?tvSvgo(keBGsS&a^1SsEnlOE!c# zS}w^RrrI4Q94*A>me#AVp2pRcYsN0E2v&Y2tgq?i5=V(_s5l~X{O(Dr>0s?Iieapb z)ltRzIhURoo1u8)us%S?IO58sWV;)Jp?Ym4v3U^n>X^uFt)j{@5S9qNK45eeNdBFU zes8!vnVn^CoqPF_S52+QTvbegWn!}Cmovlih_$qbJugryZ%tPDI`FLvPZSx{KG~EC zC~HVJWX!cRMmc`IB zgfy&1*!Hqu*IL^k<#2~%;AoM_d@$GlFkFSED#%l!!PHs-#&(a(|Es#o`juMRj?Ho% z4eYS$a_%zm$Hq|M(xqhD=p$bR0hPg-h*L>(DAMe<*ZbRE;%}|He#FA(I!Gqht~AfQ zu^WJGgtQCRuY>g3b!y^o@OThZPVBd=u740F^cgUu%@rW13T`I%$27x`kRyGa0UArE zRxhld-%D(IO5+yv z8-$~w9uePbjIEUUo|5Rc)nIVVDbC=QoH96aed;uysVVou*1HnUJ?HzBwy;iik6=Ic`(L=U zYwpazR|RwWcPW0`^N`cWFK^+7y>jy;))Kgry`lT~Zb&Y>(+opw$eu2b z*|flF;<863P5O&mnNkEFV?#NA0S=1VV7l}Vnir`HD$$wA={-x1^3Vzxg;N;0ryO&=B0MIKmAeg~dg0=3mG4&((@^=Teb4CeIi zpCOJ7Il}KS#QbIYdmgCj=JzzEYMckigfYv>HK>Svbxr;;tnO(PSlnZKq;qnHQTuw5 ziHBi^$j;L-2;Py*ZCMT*{A5|pcIDlt=?9|5NqNCr1#B)pY3U=I{0<+wDOPzmm>l^(+ zI^^@6SG|SuqVz6l4M#`XP73}r!Id#j>~=&)8!Ol4Csta$bLJ{i7!HooM^#43M|Xya 
zuEq+s4mC(u-3vCSFUC@p;fg6DzcQ{d9~3n4GJz8?&?)5Ns{P1dtVvYTanMLib>J+KY6eA{?m>Hr3t2 zty*X973#x0MUT~$MYlNga2%Sa7*DWayH)PNxxbbdY1cgajfhflH=ArAS4QjRm8d=X zF$W7{&Ims~dwBV%iNnI}sBfNbm1VHT$sw3^ZcnHKx?@9w=>_55d!$Wki6+Qd5p2^Z zFM}bC2|o?@@Y~Y1%6M`E19U~nEzQ$1zNm)j&H+!km@veRGfv<00M#%3*7RsN|7+`s zFKc(I@Q(de<3`r+Z&YPpvo10Ltt5?44Vp=;8x2^*;)hoZ5=4u-&L5oRc9k$EZo95> zPsZUu%@(>E>l9)A=@%|?>#oI|Q&t+aUYB-#8If#8Ye!0*$1d2v7L9NVE(O?h`0@U; zYG_qhUE9%^vDy0z=~U= zXQ5bQ@7j%w_P3r7jeang{7>|Y2!ScU(PrU=gBUfTu)-8{dF<@&r!rT)w?msQ6&UW7t6T{j zp!_Sk{wu^@0&!#P+0bDba=L@SjXE!1Jla>R>ZI>)S!zu9yd15GXH+{#I1HgTBftD6 z>v&WlBICAKqWBo3Ga6QjjQlOE}Nwv>o5R=nJZ-b{EX-bTJ;dP)hR=W}4b>I9BY7aY-9N zd7r9Ivx0s5w;%Q~2;8YyE8|mDt}26Z4NA#*8W1 z_gwJqb^ZQd!2()W10Y~H&Bu@`8sY+^psd^3;fV8PJuKetk5A$Xf3yd`DWi-@ydOEe zP5v_uasw{Vcl%n_ACAEHIto~y{#ua#xlJZb;igrtSMahdK)zi0tJr$HZnKWE*~fA_Y){OiOg{d1W9y3=>UVBRBV+Xw%C zLH|62aYDrQch1WD3nTj1MbLVKr;0AWhWD@U{(3Z}Ov0=QuoFG|e}3Ta@8prdQ^ouE z~>H7R1JoHHq From 11923c985a245f33017f1eefb0e85e8519d117e5 Mon Sep 17 00:00:00 2001 From: louisjoecodes Date: Thu, 17 Oct 2024 14:26:10 +0100 Subject: [PATCH 09/45] chore: remove piece on namespace --- README.md | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/README.md b/README.md index 3e0e5805..e00eb6a5 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,7 @@ play(audio)

    Play - 🎧 **Try it out!** Want to hear our voices in action? Visit the [ElevenLabs Voice Lab](https://elevenlabs.io/voice-lab) to experiment with different voices, languages, and settings. +🎧 **Try it out!** Want to hear our voices in action? Visit the [ElevenLabs Voice Lab](https://elevenlabs.io/voice-lab) to experiment with different voices, languages, and settings.
    @@ -199,12 +199,6 @@ async def print_models() -> None: asyncio.run(print_models()) ``` -## Elevenlabs Namespace - -All of the ElevenLabs models are nested within the elevenlabs module. - - - ## Languages Supported We support 32 languages and 100+ accents. Explore [all languages](https://elevenlabs.io/languages). From 5a2c536514dbddfcd2434ca0d867d0b61a7059ab Mon Sep 17 00:00:00 2001 From: fern <126544928+fern-bot@users.noreply.github.com> Date: Mon, 21 Oct 2024 19:58:10 -0400 Subject: [PATCH 10/45] chore: update .fernignore with latest additions (#383) --- .fernignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.fernignore b/.fernignore index 1fc3a34a..0594fff5 100644 --- a/.fernignore +++ b/.fernignore @@ -11,3 +11,6 @@ README.md assets/ tests/ + +.github/ISSUE_TEMPLATE/ +.github/SECURITY.md From 9a45a3f2fbf86c1b9cdb3d0837afdcdd5f41839d Mon Sep 17 00:00:00 2001 From: Laco Date: Fri, 25 Oct 2024 15:06:21 +0200 Subject: [PATCH 11/45] Implement basic SDK for convai agents (#389) * Implement basic SDK for convai agents Early prototype, subject to change based on user feedback. Takes care of the websocket session and message handling, exposing a simplified audio interface to the client that can be hooked up to the appropriate audio inputs / outputs based on the usecase. Also implements a basic speaker/microphone interface, via optional dependency on pyaudio. 
* Move to `conversational_ai/` and split default_audio_interface * Review fixes --- .fernignore | 1 + poetry.lock | 39 +++- pyproject.toml | 7 + .../conversational_ai/conversation.py | 215 ++++++++++++++++++ .../default_audio_interface.py | 83 +++++++ 5 files changed, 344 insertions(+), 1 deletion(-) create mode 100644 src/elevenlabs/conversational_ai/conversation.py create mode 100644 src/elevenlabs/conversational_ai/default_audio_interface.py diff --git a/.fernignore b/.fernignore index 0594fff5..1a50c353 100644 --- a/.fernignore +++ b/.fernignore @@ -1,6 +1,7 @@ # Specify files that shouldn't be modified by Fern src/elevenlabs/client.py +src/elevenlabs/conversation.py src/elevenlabs/play.py src/elevenlabs/realtime_tts.py diff --git a/poetry.lock b/poetry.lock index 9df66569..5b236f6e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -351,6 +351,29 @@ files = [ dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "pyaudio" +version = "0.2.14" +description = "Cross-platform audio I/O with PortAudio" +optional = true +python-versions = "*" +files = [ + {file = "PyAudio-0.2.14-cp310-cp310-win32.whl", hash = "sha256:126065b5e82a1c03ba16e7c0404d8f54e17368836e7d2d92427358ad44fefe61"}, + {file = "PyAudio-0.2.14-cp310-cp310-win_amd64.whl", hash = "sha256:2a166fc88d435a2779810dd2678354adc33499e9d4d7f937f28b20cc55893e83"}, + {file = "PyAudio-0.2.14-cp311-cp311-win32.whl", hash = "sha256:506b32a595f8693811682ab4b127602d404df7dfc453b499c91a80d0f7bad289"}, + {file = "PyAudio-0.2.14-cp311-cp311-win_amd64.whl", hash = "sha256:bbeb01d36a2f472ae5ee5e1451cacc42112986abe622f735bb870a5db77cf903"}, + {file = "PyAudio-0.2.14-cp312-cp312-win32.whl", hash = "sha256:5fce4bcdd2e0e8c063d835dbe2860dac46437506af509353c7f8114d4bacbd5b"}, + {file = "PyAudio-0.2.14-cp312-cp312-win_amd64.whl", hash = "sha256:12f2f1ba04e06ff95d80700a78967897a489c05e093e3bffa05a84ed9c0a7fa3"}, + {file = "PyAudio-0.2.14-cp38-cp38-win32.whl", hash = 
"sha256:858caf35b05c26d8fc62f1efa2e8f53d5fa1a01164842bd622f70ddc41f55000"}, + {file = "PyAudio-0.2.14-cp38-cp38-win_amd64.whl", hash = "sha256:2dac0d6d675fe7e181ba88f2de88d321059b69abd52e3f4934a8878e03a7a074"}, + {file = "PyAudio-0.2.14-cp39-cp39-win32.whl", hash = "sha256:f745109634a7c19fa4d6b8b7d6967c3123d988c9ade0cd35d4295ee1acdb53e9"}, + {file = "PyAudio-0.2.14-cp39-cp39-win_amd64.whl", hash = "sha256:009f357ee5aa6bc8eb19d69921cd30e98c42cddd34210615d592a71d09c4bd57"}, + {file = "PyAudio-0.2.14.tar.gz", hash = "sha256:78dfff3879b4994d1f4fc6485646a57755c6ee3c19647a491f790a0895bd2f87"}, +] + +[package.extras] +test = ["numpy"] + [[package]] name = "pydantic" version = "2.9.2" @@ -610,6 +633,17 @@ files = [ {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, ] +[[package]] +name = "types-pyaudio" +version = "0.2.16.20240516" +description = "Typing stubs for pyaudio" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-pyaudio-0.2.16.20240516.tar.gz", hash = "sha256:f1c419ccc78b00d26c6c1ae4fcb17f7e4f08af2c2b9b73b12fcbc4a4ffa3a2c7"}, + {file = "types_pyaudio-0.2.16.20240516-py3-none-any.whl", hash = "sha256:40063f13ae15a422cbd4a2a783653eb3e1091bdd23fc7ab8ca3abc21ad0d13f8"}, +] + [[package]] name = "types-python-dateutil" version = "2.9.0.20241003" @@ -744,7 +778,10 @@ files = [ {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"}, ] +[extras] +pyaudio = ["pyaudio"] + [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "a53420244251981fe047bbb97d6005fffb6b63447718cc640562750fffcc8c75" +content-hash = "af57dd0aacaa752d61d29db9f958f2d8d0950d51ab868c925a2a973689de5ff7" diff --git a/pyproject.toml b/pyproject.toml index 416dde7a..162e0135 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,14 +40,21 @@ requests = ">=2.20" typing_extensions = ">= 4.0.0" websockets = ">=11.0" +# Optional extras. 
+pyaudio = { version = ">=0.2.14", optional = true } + [tool.poetry.dev-dependencies] mypy = "1.0.1" pytest = "^7.4.0" pytest-asyncio = "^0.23.5" python-dateutil = "^2.9.0" +types-pyaudio = "^0.2.16.20240516" types-python-dateutil = "^2.9.0.20240316" ruff = "^0.5.6" +[tool.poetry.extras] +pyaudio = ["pyaudio"] + [tool.pytest.ini_options] testpaths = [ "tests" ] asyncio_mode = "auto" diff --git a/src/elevenlabs/conversational_ai/conversation.py b/src/elevenlabs/conversational_ai/conversation.py new file mode 100644 index 00000000..13533ab8 --- /dev/null +++ b/src/elevenlabs/conversational_ai/conversation.py @@ -0,0 +1,215 @@ +from abc import ABC, abstractmethod +import base64 +import json +import threading +from typing import Callable, Optional + +from websockets.sync.client import connect + +from ..base_client import BaseElevenLabs + + +class AudioInterface(ABC): + """AudioInterface provides an abstraction for handling audio input and output.""" + + @abstractmethod + def start(self, input_callback: Callable[[bytes], None]): + """Starts the audio interface. + + Called one time before the conversation starts. + The `input_callback` should be called regularly with input audio chunks from + the user. The audio should be in 16-bit PCM mono format at 16kHz. Recommended + chunk size is 4000 samples (250 milliseconds). + """ + pass + + @abstractmethod + def stop(self): + """Stops the audio interface. + + Called one time after the conversation ends. Should clean up any resources + used by the audio interface and stop any audio streams. Do not call the + `input_callback` from `start` after this method is called. + """ + pass + + @abstractmethod + def output(self, audio: bytes): + """Output audio to the user. + + The `audio` input is in 16-bit PCM mono format at 16kHz. Implementations can + choose to do additional buffering. This method should return quickly and not + block the calling thread. 
+ """ + pass + + @abstractmethod + def interrupt(self): + """Interruption signal to stop any audio output. + + User has interrupted the agent and all previosly buffered audio output should + be stopped. + """ + pass + + +class Conversation: + client: BaseElevenLabs + agent_id: str + requires_auth: bool + + audio_interface: AudioInterface + callback_agent_response: Optional[Callable[[str], None]] + callback_agent_response_correction: Optional[Callable[[str, str], None]] + callback_user_transcript: Optional[Callable[[str], None]] + callback_latency_measurement: Optional[Callable[[int], None]] + + _thread: Optional[threading.Thread] = None + _should_stop: threading.Event = threading.Event() + _conversation_id: Optional[str] = None + _last_interrupt_id: int = 0 + + def __init__( + self, + client: BaseElevenLabs, + agent_id: str, + *, + requires_auth: bool, + audio_interface: AudioInterface, + callback_agent_response: Optional[Callable[[str], None]] = None, + callback_agent_response_correction: Optional[Callable[[str, str], None]] = None, + callback_user_transcript: Optional[Callable[[str], None]] = None, + callback_latency_measurement: Optional[Callable[[int], None]] = None, + ): + """Conversational AI session. + + BETA: This API is subject to change without regard to backwards compatibility. + + Args: + client: The ElevenLabs client to use for the conversation. + agent_id: The ID of the agent to converse with. + requires_auth: Whether the agent requires authentication. + audio_interface: The audio interface to use for input and output. + callback_agent_response: Callback for agent responses. + callback_agent_response_correction: Callback for agent response corrections. + First argument is the original response (previously given to + callback_agent_response), second argument is the corrected response. + callback_user_transcript: Callback for user transcripts. + callback_latency_measurement: Callback for latency measurements (in milliseconds). 
+ """ + + self.client = client + self.agent_id = agent_id + self.requires_auth = requires_auth + + self.audio_interface = audio_interface + self.callback_agent_response = callback_agent_response + self.callback_agent_response_correction = callback_agent_response_correction + self.callback_user_transcript = callback_user_transcript + self.callback_latency_measurement = callback_latency_measurement + + def start_session(self): + """Starts the conversation session. + + Will run in background thread until `end_session` is called. + """ + ws_url = self._get_signed_url() if self.requires_auth else self._get_wss_url() + self._thread = threading.Thread(target=self._run, args=(ws_url,)) + self._thread.start() + + def end_session(self): + """Ends the conversation session.""" + self.audio_interface.stop() + self._should_stop.set() + + def wait_for_session_end(self) -> Optional[str]: + """Waits for the conversation session to end. + + You must call `end_session` before calling this method, otherwise it will block. + + Returns the conversation ID, if available. 
+ """ + if not self._thread: + raise RuntimeError("Session not started.") + self._thread.join() + return self._conversation_id + + def _run(self, ws_url: str): + with connect(ws_url) as ws: + + def input_callback(audio): + ws.send( + json.dumps( + { + "user_audio_chunk": base64.b64encode(audio).decode(), + } + ) + ) + + self.audio_interface.start(input_callback) + while not self._should_stop.is_set(): + try: + message = json.loads(ws.recv(timeout=0.5)) + if self._should_stop.is_set(): + return + self._handle_message(message, ws) + except TimeoutError: + pass + + def _handle_message(self, message, ws): + if message["type"] == "conversation_initiation_metadata": + event = message["conversation_initiation_metadata_event"] + assert self._conversation_id is None + self._conversation_id = event["conversation_id"] + elif message["type"] == "audio": + event = message["audio_event"] + if int(event["event_id"]) <= self._last_interrupt_id: + return + audio = base64.b64decode(event["audio_base_64"]) + self.audio_interface.output(audio) + elif message["type"] == "agent_response": + if self.callback_agent_response: + event = message["agent_response_event"] + self.callback_agent_response(event["agent_response"].strip()) + elif message["type"] == "agent_response_correction": + if self.callback_agent_response_correction: + event = message["agent_response_correction_event"] + self.callback_agent_response_correction( + event["original_agent_response"].strip(), event["corrected_agent_response"].strip() + ) + elif message["type"] == "user_transcript": + if self.callback_user_transcript: + event = message["user_transcription_event"] + self.callback_user_transcript(event["user_transcript"].strip()) + elif message["type"] == "interruption": + event = message["interruption_event"] + self.last_interrupt_id = int(event["event_id"]) + self.audio_interface.interrupt() + elif message["type"] == "ping": + event = message["ping_event"] + ws.send( + json.dumps( + { + "type": "pong", + "event_id": 
event["event_id"], + } + ) + ) + if self.callback_latency_measurement and event["ping_ms"]: + self.callback_latency_measurement(int(event["ping_ms"])) + else: + pass # Ignore all other message types. + + def _get_wss_url(https://github.com/self): + base_url = self.client._client_wrapper._base_url + # Replace http(s) with ws(s). + base_ws_url = base_url.replace("http", "ws", 1) # First occurrence only. + return f"{base_ws_url}/v1/convai/conversation?agent_id={self.agent_id}" + + def _get_signed_url(https://github.com/self): + # TODO: Use generated SDK method once available. + response = self.client._client_wrapper.httpx_client.request( + f"v1/convai/conversation/get_signed_url?agent_id={self.agent_id}", + method="GET", + ) + return response.json()["signed_url"] diff --git a/src/elevenlabs/conversational_ai/default_audio_interface.py b/src/elevenlabs/conversational_ai/default_audio_interface.py new file mode 100644 index 00000000..b1660d85 --- /dev/null +++ b/src/elevenlabs/conversational_ai/default_audio_interface.py @@ -0,0 +1,83 @@ +from typing import Callable +import queue +import threading + +from .conversation import AudioInterface + + +class DefaultAudioInterface(AudioInterface): + INPUT_FRAMES_PER_BUFFER = 4000 # 250ms @ 16kHz + OUTPUT_FRAMES_PER_BUFFER = 1000 # 62.5ms @ 16kHz + + def __init__(self): + try: + import pyaudio + except ImportError: + raise ImportError("To use DefaultAudioInterface you must install pyaudio.") + self.pyaudio = pyaudio + + def start(self, input_callback: Callable[[bytes], None]): + # Audio input is using callbacks from pyaudio which we simply pass through. + self.input_callback = input_callback + + # Audio output is buffered so we can handle interruptions. + # Start a separate thread to handle writing to the output stream. 
+ self.output_queue: queue.Queue[bytes] = queue.Queue() + self.should_stop = threading.Event() + self.output_thread = threading.Thread(target=self._output_thread) + + self.p = self.pyaudio.PyAudio() + self.in_stream = self.p.open( + format=self.pyaudio.paInt16, + channels=1, + rate=16000, + input=True, + stream_callback=self._in_callback, + frames_per_buffer=self.INPUT_FRAMES_PER_BUFFER, + start=True, + ) + self.out_stream = self.p.open( + format=self.pyaudio.paInt16, + channels=1, + rate=16000, + output=True, + frames_per_buffer=self.OUTPUT_FRAMES_PER_BUFFER, + start=True, + ) + + self.output_thread.start() + + def stop(self): + self.should_stop.set() + self.output_thread.join() + self.in_stream.stop_stream() + self.in_stream.close() + self.out_stream.close() + self.p.terminate() + + def output(self, audio: bytes): + self.output_queue.put(audio) + + def interrupt(self): + # Clear the output queue to stop any audio that is currently playing. + # Note: We can't atomically clear the whole queue, but we are doing + # it from the message handling thread so no new audio will be added + # while we are clearing. + try: + while True: + _ = self.output_queue.get(block=False) + except queue.Empty: + pass + + def _output_thread(self): + while not self.should_stop.is_set(): + try: + audio = self.output_queue.get(timeout=0.25) + self.out_stream.write(audio) + except queue.Empty: + pass + + def _in_callback(self, in_data, frame_count, time_info, status): + if self.input_callback: + self.input_callback(in_data) + return (None, self.pyaudio.paContinue) From 04fd89ff3cb5bd8dfeeb92f41ce020f3a6d4ce92 Mon Sep 17 00:00:00 2001 From: Laco Date: Fri, 25 Oct 2024 15:14:11 +0200 Subject: [PATCH 12/45] Bump version to 1.11.0 (#390) Side note, would be nice to inject package version at build time from git tag, and for the SDK version header to read it from package itself. 
--- pyproject.toml | 2 +- src/elevenlabs/core/client_wrapper.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 162e0135..95b99063 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.10.0" +version = "1.11.0" description = "" readme = "README.md" authors = [] diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index 633805d0..ff9aa13a 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.10.0", + "X-Fern-SDK-Version": "1.11.0", } if self._api_key is not None: headers["xi-api-key"] = self._api_key From 0383d9a2cfa6e546e7e60e099e31d7ea053465b2 Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 21:01:43 +0000 Subject: [PATCH 13/45] SDK regeneration --- poetry.lock | 41 +- pyproject.toml | 9 +- reference.md | 1095 ++++++++++++----- src/elevenlabs/__init__.py | 80 +- src/elevenlabs/audio_isolation/client.py | 20 +- src/elevenlabs/base_client.py | 12 +- .../conversational_ai/conversation.py | 215 ---- .../default_audio_interface.py | 83 -- src/elevenlabs/core/client_wrapper.py | 2 +- src/elevenlabs/core/request_options.py | 3 + src/elevenlabs/dubbing/client.py | 30 +- src/elevenlabs/history/client.py | 10 +- src/elevenlabs/projects/__init__.py | 3 + src/elevenlabs/projects/client.py | 254 +++- src/elevenlabs/projects/types/__init__.py | 5 + .../projects_add_request_target_audience.py | 7 + src/elevenlabs/samples/client.py | 10 +- src/elevenlabs/speech_to_speech/client.py | 40 +- .../text_to_sound_effects/client.py | 10 +- src/elevenlabs/text_to_speech/__init__.py | 14 +- src/elevenlabs/text_to_speech/client.py | 128 +- 
.../text_to_speech/types/__init__.py | 20 +- ...id_stream_post_apply_text_normalization.py | 7 + ...imestamps_post_apply_text_normalization.py | 7 + ..._voice_id_post_apply_text_normalization.py | 7 + ...imestamps_post_apply_text_normalization.py | 7 + .../text_to_speech/types/send_message.py | 8 - src/elevenlabs/text_to_voice/__init__.py | 2 + src/elevenlabs/text_to_voice/client.py | 354 ++++++ src/elevenlabs/types/__init__.py | 62 +- ...ction.py => add_chapter_response_model.py} | 10 +- .../types/add_voice_ivc_response_model.py | 20 + src/elevenlabs/types/audio_output.py | 37 - src/elevenlabs/types/breakdown_types.py | 7 + src/elevenlabs/types/category.py | 5 - src/elevenlabs/types/chapter_response.py | 8 +- ...ed_subscription_response_model_currency.py | 5 + src/elevenlabs/types/fine_tuning_response.py | 2 + src/elevenlabs/types/generation_config.py | 40 - src/elevenlabs/types/initialize_connection.py | 38 - .../types/library_voice_response.py | 3 +- .../library_voice_response_model_category.py | 7 + src/elevenlabs/types/model.py | 4 + .../types/model_rates_response_model.py | 19 + .../model_response_model_concurrency_group.py | 5 + src/elevenlabs/types/normalized_alignment.py | 44 - .../types/project_extended_response_model.py | 24 +- ...ct_extended_response_model_access_level.py | 5 + ..._extended_response_model_quality_preset.py | 7 + ...extended_response_model_target_audience.py | 7 + src/elevenlabs/types/project_response.py | 21 +- .../project_response_model_access_level.py | 5 + .../project_response_model_target_audience.py | 7 + .../project_snapshot_upload_response_model.py | 4 +- ...t_snapshot_upload_response_model_status.py | 7 + ...ation_dictionary_version_response_model.py | 24 + .../types/realtime_voice_settings.py | 37 - src/elevenlabs/types/send_text.py | 34 - src/elevenlabs/types/source.py | 5 - .../types/speech_history_item_response.py | 4 +- ...eech_history_item_response_model_source.py | 5 + src/elevenlabs/types/status.py | 5 - 
src/elevenlabs/types/subscription.py | 4 +- src/elevenlabs/types/subscription_response.py | 4 +- .../subscription_response_model_currency.py | 5 + src/elevenlabs/types/voice.py | 6 +- .../types/voice_preview_response_model.py | 21 + .../types/voice_previews_response_model.py | 20 + .../types/voice_response_model_category.py | 7 + .../voice_response_model_safety_control.py | 3 +- ...sharing_moderation_check_response_model.py | 27 + .../types/voice_sharing_response.py | 6 +- .../voice_sharing_response_model_category.py | 7 + src/elevenlabs/usage/__init__.py | 3 - src/elevenlabs/usage/client.py | 16 +- src/elevenlabs/usage/types/__init__.py | 7 - ...rs_usage_metrics_request_breakdown_type.py | 7 - src/elevenlabs/voice_generation/client.py | 20 +- src/elevenlabs/voices/client.py | 51 +- src/elevenlabs/workspace/client.py | 127 ++ 80 files changed, 2260 insertions(+), 1081 deletions(-) delete mode 100644 src/elevenlabs/conversational_ai/conversation.py delete mode 100644 src/elevenlabs/conversational_ai/default_audio_interface.py create mode 100644 src/elevenlabs/projects/types/__init__.py create mode 100644 src/elevenlabs/projects/types/projects_add_request_target_audience.py create mode 100644 src/elevenlabs/text_to_speech/types/body_text_to_speech_streaming_v_1_text_to_speech_voice_id_stream_post_apply_text_normalization.py create mode 100644 src/elevenlabs/text_to_speech/types/body_text_to_speech_streaming_with_timestamps_v_1_text_to_speech_voice_id_stream_with_timestamps_post_apply_text_normalization.py create mode 100644 src/elevenlabs/text_to_speech/types/body_text_to_speech_v_1_text_to_speech_voice_id_post_apply_text_normalization.py create mode 100644 src/elevenlabs/text_to_speech/types/body_text_to_speech_with_timestamps_v_1_text_to_speech_voice_id_with_timestamps_post_apply_text_normalization.py delete mode 100644 src/elevenlabs/text_to_speech/types/send_message.py create mode 100644 src/elevenlabs/text_to_voice/__init__.py create mode 100644 
src/elevenlabs/text_to_voice/client.py rename src/elevenlabs/types/{close_connection.py => add_chapter_response_model.py} (76%) create mode 100644 src/elevenlabs/types/add_voice_ivc_response_model.py delete mode 100644 src/elevenlabs/types/audio_output.py create mode 100644 src/elevenlabs/types/breakdown_types.py delete mode 100644 src/elevenlabs/types/category.py create mode 100644 src/elevenlabs/types/extended_subscription_response_model_currency.py delete mode 100644 src/elevenlabs/types/generation_config.py delete mode 100644 src/elevenlabs/types/initialize_connection.py create mode 100644 src/elevenlabs/types/library_voice_response_model_category.py create mode 100644 src/elevenlabs/types/model_rates_response_model.py create mode 100644 src/elevenlabs/types/model_response_model_concurrency_group.py delete mode 100644 src/elevenlabs/types/normalized_alignment.py create mode 100644 src/elevenlabs/types/project_extended_response_model_access_level.py create mode 100644 src/elevenlabs/types/project_extended_response_model_quality_preset.py create mode 100644 src/elevenlabs/types/project_extended_response_model_target_audience.py create mode 100644 src/elevenlabs/types/project_response_model_access_level.py create mode 100644 src/elevenlabs/types/project_response_model_target_audience.py create mode 100644 src/elevenlabs/types/project_snapshot_upload_response_model_status.py create mode 100644 src/elevenlabs/types/pronunciation_dictionary_version_response_model.py delete mode 100644 src/elevenlabs/types/realtime_voice_settings.py delete mode 100644 src/elevenlabs/types/send_text.py delete mode 100644 src/elevenlabs/types/source.py create mode 100644 src/elevenlabs/types/speech_history_item_response_model_source.py delete mode 100644 src/elevenlabs/types/status.py create mode 100644 src/elevenlabs/types/subscription_response_model_currency.py create mode 100644 src/elevenlabs/types/voice_preview_response_model.py create mode 100644 
src/elevenlabs/types/voice_previews_response_model.py create mode 100644 src/elevenlabs/types/voice_response_model_category.py create mode 100644 src/elevenlabs/types/voice_sharing_moderation_check_response_model.py create mode 100644 src/elevenlabs/types/voice_sharing_response_model_category.py delete mode 100644 src/elevenlabs/usage/types/__init__.py delete mode 100644 src/elevenlabs/usage/types/usage_get_characters_usage_metrics_request_breakdown_type.py diff --git a/poetry.lock b/poetry.lock index 5b236f6e..46e79034 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. [[package]] name = "annotated-types" @@ -351,29 +351,6 @@ files = [ dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] -[[package]] -name = "pyaudio" -version = "0.2.14" -description = "Cross-platform audio I/O with PortAudio" -optional = true -python-versions = "*" -files = [ - {file = "PyAudio-0.2.14-cp310-cp310-win32.whl", hash = "sha256:126065b5e82a1c03ba16e7c0404d8f54e17368836e7d2d92427358ad44fefe61"}, - {file = "PyAudio-0.2.14-cp310-cp310-win_amd64.whl", hash = "sha256:2a166fc88d435a2779810dd2678354adc33499e9d4d7f937f28b20cc55893e83"}, - {file = "PyAudio-0.2.14-cp311-cp311-win32.whl", hash = "sha256:506b32a595f8693811682ab4b127602d404df7dfc453b499c91a80d0f7bad289"}, - {file = "PyAudio-0.2.14-cp311-cp311-win_amd64.whl", hash = "sha256:bbeb01d36a2f472ae5ee5e1451cacc42112986abe622f735bb870a5db77cf903"}, - {file = "PyAudio-0.2.14-cp312-cp312-win32.whl", hash = "sha256:5fce4bcdd2e0e8c063d835dbe2860dac46437506af509353c7f8114d4bacbd5b"}, - {file = "PyAudio-0.2.14-cp312-cp312-win_amd64.whl", hash = "sha256:12f2f1ba04e06ff95d80700a78967897a489c05e093e3bffa05a84ed9c0a7fa3"}, - {file = "PyAudio-0.2.14-cp38-cp38-win32.whl", hash = 
"sha256:858caf35b05c26d8fc62f1efa2e8f53d5fa1a01164842bd622f70ddc41f55000"}, - {file = "PyAudio-0.2.14-cp38-cp38-win_amd64.whl", hash = "sha256:2dac0d6d675fe7e181ba88f2de88d321059b69abd52e3f4934a8878e03a7a074"}, - {file = "PyAudio-0.2.14-cp39-cp39-win32.whl", hash = "sha256:f745109634a7c19fa4d6b8b7d6967c3123d988c9ade0cd35d4295ee1acdb53e9"}, - {file = "PyAudio-0.2.14-cp39-cp39-win_amd64.whl", hash = "sha256:009f357ee5aa6bc8eb19d69921cd30e98c42cddd34210615d592a71d09c4bd57"}, - {file = "PyAudio-0.2.14.tar.gz", hash = "sha256:78dfff3879b4994d1f4fc6485646a57755c6ee3c19647a491f790a0895bd2f87"}, -] - -[package.extras] -test = ["numpy"] - [[package]] name = "pydantic" version = "2.9.2" @@ -633,17 +610,6 @@ files = [ {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, ] -[[package]] -name = "types-pyaudio" -version = "0.2.16.20240516" -description = "Typing stubs for pyaudio" -optional = false -python-versions = ">=3.8" -files = [ - {file = "types-pyaudio-0.2.16.20240516.tar.gz", hash = "sha256:f1c419ccc78b00d26c6c1ae4fcb17f7e4f08af2c2b9b73b12fcbc4a4ffa3a2c7"}, - {file = "types_pyaudio-0.2.16.20240516-py3-none-any.whl", hash = "sha256:40063f13ae15a422cbd4a2a783653eb3e1091bdd23fc7ab8ca3abc21ad0d13f8"}, -] - [[package]] name = "types-python-dateutil" version = "2.9.0.20241003" @@ -778,10 +744,7 @@ files = [ {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"}, ] -[extras] -pyaudio = ["pyaudio"] - [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "af57dd0aacaa752d61d29db9f958f2d8d0950d51ab868c925a2a973689de5ff7" +content-hash = "a53420244251981fe047bbb97d6005fffb6b63447718cc640562750fffcc8c75" diff --git a/pyproject.toml b/pyproject.toml index 95b99063..60127203 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.11.0" +version = "1.12.0-beta0" description = "" readme = 
"README.md" authors = [] @@ -40,21 +40,14 @@ requests = ">=2.20" typing_extensions = ">= 4.0.0" websockets = ">=11.0" -# Optional extras. -pyaudio = { version = ">=0.2.14", optional = true } - [tool.poetry.dev-dependencies] mypy = "1.0.1" pytest = "^7.4.0" pytest-asyncio = "^0.23.5" python-dateutil = "^2.9.0" -types-pyaudio = "^0.2.16.20240516" types-python-dateutil = "^2.9.0.20240316" ruff = "^0.5.6" -[tool.poetry.extras] -pyaudio = ["pyaudio"] - [tool.pytest.ini_options] testpaths = [ "tests" ] asyncio_mode = "auto" diff --git a/reference.md b/reference.md index 682c52cc..3fa8ba09 100644 --- a/reference.md +++ b/reference.md @@ -285,7 +285,7 @@ client.history.get_audio(
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
    @@ -452,7 +452,7 @@ client.text_to_sound_effects.convert(
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
    @@ -523,7 +523,7 @@ core.File` — See core.File for more documentation
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
    @@ -593,7 +593,7 @@ core.File` — See core.File for more documentation
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
    @@ -752,7 +752,7 @@ client.samples.get_audio(
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
    @@ -935,7 +935,23 @@ client.text_to_speech.convert(
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**use_pvc_as_ivc:** `typing.Optional[bool]` — If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions. + +
    +
    + +
    +
    + +**apply_text_normalization:** `typing.Optional[BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
    @@ -1110,6 +1126,24 @@ client.text_to_speech.convert_with_timestamps(
    +**use_pvc_as_ivc:** `typing.Optional[bool]` — If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions. + +
    +
    + +
    +
    + +**apply_text_normalization:** `typing.Optional[ + BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization +]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model. + +
    +
    + +
    +
    + **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -1292,7 +1326,25 @@ client.text_to_speech.convert_as_stream(
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**use_pvc_as_ivc:** `typing.Optional[bool]` — If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions. + +
    +
    + +
    +
    + +**apply_text_normalization:** `typing.Optional[ + BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization +]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
    @@ -1467,6 +1519,24 @@ client.text_to_speech.stream_with_timestamps(
    +**use_pvc_as_ivc:** `typing.Optional[bool]` — If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions. + +
    +
    + +
    +
    + +**apply_text_normalization:** `typing.Optional[ + BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization +]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model. + +
    +
    + +
    +
    + **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -1599,7 +1669,15 @@ core.File` — See core.File for more documentation
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**remove_background_noise:** `typing.Optional[bool]` — If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
    @@ -1743,7 +1821,15 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**remove_background_noise:** `typing.Optional[bool]` — If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
    @@ -1910,7 +1996,7 @@ client.voice_generation.generate(
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
    @@ -1998,6 +2084,14 @@ client.voice_generation.create_a_previously_generated_voice(
    +**played_not_selected_voice_ids:** `typing.Optional[typing.Sequence[str]]` — List of voice ids that the user has played but not selected. Used for RLHF. + +
    +
    + +
    +
    + **labels:** `typing.Optional[typing.Dict[str, str]]` — Optional, metadata to add to the created voice. Defaults to None.
    @@ -2018,8 +2112,8 @@ client.voice_generation.create_a_previously_generated_voice(
    -## User -
    client.user.get_subscription() +## TextToVoice +
    client.text_to_voice.create_previews(...)
    @@ -2031,7 +2125,7 @@ client.voice_generation.create_a_previously_generated_voice(
    -Gets extended information about the users subscription +Generate a custom voice based on voice description. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. If you like the a voice previewand want to create the voice call /v1/text-to-voice/create-voice-from-preview with the generated_voice_id to create the voice.
    @@ -2051,7 +2145,10 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.user.get_subscription() +client.text_to_voice.create_previews( + voice_description="voice_description", + text="text", +) ``` @@ -2067,6 +2164,22 @@ client.user.get_subscription()
    +**voice_description:** `str` — Description to use for the created voice. + +
    +
    + +
    +
    + +**text:** `str` — Text to generate, text length has to be between 100 and 1000. + +
    +
    + +
    +
    + **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -2079,7 +2192,7 @@ client.user.get_subscription()
    -
    client.user.get() +
    client.text_to_voice.create_voice_from_preview(...)
    @@ -2091,7 +2204,7 @@ client.user.get_subscription()
    -Gets information about the user +Create a voice from previously generated voice preview. This endpoint should be called after you fetched a generated_voice_id using /v1/text-to-voice/create-previews.
    @@ -2111,7 +2224,11 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.user.get() +client.text_to_voice.create_voice_from_preview( + voice_name="voice_name", + voice_description="voice_description", + generated_voice_id="generated_voice_id", +) ``` @@ -2127,6 +2244,46 @@ client.user.get()
    +**voice_name:** `str` — Name to use for the created voice. + +
    +
    + +
    +
    + +**voice_description:** `str` — Description to use for the created voice. + +
    +
    + +
    +
    + +**generated_voice_id:** `str` — The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet. + +
    +
    + +
    +
    + +**labels:** `typing.Optional[typing.Dict[str, str]]` — Optional, metadata to add to the created voice. Defaults to None. + +
    +
    + +
    +
    + +**played_not_selected_voice_ids:** `typing.Optional[typing.Sequence[str]]` — List of voice ids that the user has played but not selected. Used for RLHF. + +
    +
    + +
    +
    + **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -2139,8 +2296,8 @@ client.user.get()
    -## voices -
    client.voices.get_all(...) +## User +
    client.user.get_subscription()
    @@ -2152,7 +2309,7 @@ client.user.get()
    -Gets a list of all available voices for a user. +Gets extended information about the users subscription
    @@ -2172,7 +2329,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.get_all() +client.user.get_subscription() ``` @@ -2188,14 +2345,6 @@ client.voices.get_all()
    -**show_legacy:** `typing.Optional[bool]` — If set to true, legacy premade voices will be included in responses from /v1/voices - -
    -
    - -
    -
    - **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -2208,7 +2357,7 @@ client.voices.get_all()
    -
    client.voices.get_default_settings() +
    client.user.get()
    @@ -2220,7 +2369,7 @@ client.voices.get_all()
    -Gets the default settings for voices. "similarity_boost" corresponds to"Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app. +Gets information about the user
    @@ -2240,7 +2389,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.get_default_settings() +client.user.get() ``` @@ -2268,7 +2417,8 @@ client.voices.get_default_settings()
    -
    client.voices.get_settings(...) +## voices +
    client.voices.get_all(...)
    @@ -2280,7 +2430,7 @@ client.voices.get_default_settings()
    -Returns the settings for a specific voice. "similarity_boost" corresponds to"Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app. +Gets a list of all available voices for a user.
    @@ -2300,9 +2450,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.get_settings( - voice_id="2EiwWnXFnvU5JabPnv8n", -) +client.voices.get_all() ``` @@ -2318,7 +2466,7 @@ client.voices.get_settings(
    -**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. +**show_legacy:** `typing.Optional[bool]` — If set to true, legacy premade voices will be included in responses from /v1/voices
    @@ -2338,7 +2486,7 @@ client.voices.get_settings(
    -
    client.voices.get(...) +
    client.voices.get_default_settings()
    @@ -2350,7 +2498,7 @@ client.voices.get_settings(
    -Returns metadata about a specific voice. +Gets the default settings for voices. "similarity_boost" corresponds to"Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app.
    @@ -2370,9 +2518,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.get( - voice_id="29vD33N1CtxCmqQRPOHJ", -) +client.voices.get_default_settings() ``` @@ -2388,22 +2534,6 @@ client.voices.get(
    -**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. - -
    -
    - -
    -
    - -**with_settings:** `typing.Optional[bool]` — If set will return settings information corresponding to the voice, requires authorization. - -
    -
    - -
    -
    - **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -2416,7 +2546,7 @@ client.voices.get(
    -
    client.voices.delete(...) +
    client.voices.get_settings(...)
    @@ -2428,7 +2558,7 @@ client.voices.get(
    -Deletes a voice by its ID. +Returns the settings for a specific voice. "similarity_boost" corresponds to"Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app.
    @@ -2448,8 +2578,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.delete( - voice_id="29vD33N1CtxCmqQRPOHJ", +client.voices.get_settings( + voice_id="2EiwWnXFnvU5JabPnv8n", ) ``` @@ -2486,7 +2616,7 @@ client.voices.delete(
    -
    client.voices.edit_settings(...) +
    client.voices.get(...)
    @@ -2498,7 +2628,7 @@ client.voices.delete(
    -Edit your settings for a specific voice. "similarity_boost" corresponds to"Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app. +Returns metadata about a specific voice.
    @@ -2513,18 +2643,13 @@ Edit your settings for a specific voice. "similarity_boost" corresponds to"Clari
    ```python -from elevenlabs import ElevenLabs, VoiceSettings +from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.edit_settings( +client.voices.get( voice_id="29vD33N1CtxCmqQRPOHJ", - request=VoiceSettings( - stability=0.1, - similarity_boost=0.3, - style=0.2, - ), ) ``` @@ -2549,7 +2674,7 @@ client.voices.edit_settings(
    -**request:** `VoiceSettings` +**with_settings:** `typing.Optional[bool]` — If set will return settings information corresponding to the voice, requires authorization.
    @@ -2569,7 +2694,7 @@ client.voices.edit_settings(
    -
    client.voices.add(...) +
    client.voices.delete(...)
    @@ -2581,7 +2706,7 @@ client.voices.edit_settings(
    -Add a new voice to your collection of voices in VoiceLab. +Deletes a voice by its ID.
    @@ -2601,8 +2726,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.add( - name="Alex", +client.voices.delete( + voice_id="29vD33N1CtxCmqQRPOHJ", ) ``` @@ -2619,7 +2744,7 @@ client.voices.add(
    -**name:** `str` — The name that identifies this voice. This will be displayed in the dropdown of the website. +**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
    @@ -2627,26 +2752,187 @@ client.voices.add(
    -**files:** `from __future__ import annotations - -typing.List[core.File]` — See core.File for more documentation +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    + +
    -
    -
    -**description:** `typing.Optional[str]` — How would you describe the voice? -
    +
    +
    client.voices.edit_settings(...)
    -**labels:** `typing.Optional[str]` — Serialized labels dictionary for the voice. - +#### 📝 Description + +
    +
    + +
    +
    + +Edit your settings for a specific voice. "similarity_boost" corresponds to"Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app. +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs, VoiceSettings + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.voices.edit_settings( + voice_id="29vD33N1CtxCmqQRPOHJ", + request=VoiceSettings( + stability=0.1, + similarity_boost=0.3, + style=0.2, + ), +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. + +
    +
    + +
    +
    + +**request:** `VoiceSettings` + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
    +
    +
    +
    + + +
    +
    +
    + +
    client.voices.add(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Add a new voice to your collection of voices in VoiceLab. +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.voices.add( + name="Alex", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**name:** `str` — The name that identifies this voice. This will be displayed in the dropdown of the website. + +
    +
    + +
    +
    + +**files:** `from __future__ import annotations + +typing.List[core.File]` — See core.File for more documentation + +
    +
    + +
    +
    + +**remove_background_noise:** `typing.Optional[bool]` — If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse. + +
    +
    + +
    +
    + +**description:** `typing.Optional[str]` — How would you describe the voice? + +
    +
    + +
    +
    + +**labels:** `typing.Optional[str]` — Serialized labels dictionary for the voice. +
    @@ -2742,6 +3028,14 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
    +**remove_background_noise:** `typing.Optional[bool]` — If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse. + +
    +
    + +
    +
    + **description:** `typing.Optional[str]` — How would you describe the voice?
    @@ -2846,14 +3140,6 @@ client.voices.add_sharing_voice(
    -**xi_app_check_token:** `typing.Optional[str]` — Your app check token. - -
    -
    - -
    -
    - **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -3366,9 +3652,9 @@ typing.Optional[core.File]` — See core.File for more documentation Output quality of the generated audio. Must be one of: standard - standard output format, 128kbps with 44.1kHz sample rate. -high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the character cost by 20%. -ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the character cost by 50%. -ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the character cost by 100%. +high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the credit cost by 20%. +ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the credit cost by 50%. +ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the credit cost by 100%.
    @@ -3393,6 +3679,62 @@ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate
    +**description:** `typing.Optional[str]` — An optional description of the project. + +
    +
    + +
    +
    + +**genres:** `typing.Optional[typing.List[str]]` — An optional list of genres associated with the project. + +
    +
    + +
    +
    + +**target_audience:** `typing.Optional[ProjectsAddRequestTargetAudience]` — An optional target audience of the project. + +
    +
    + +
    +
    + +**language:** `typing.Optional[str]` — An optional language of the project. Two-letter language code (ISO 639-1). + +
    +
    + +
    +
    + +**content_type:** `typing.Optional[str]` — An optional content type of the project. + +
    +
    + +
    +
    + +**original_publication_date:** `typing.Optional[str]` — An optional original publication date of the project, in the format YYYY-MM-DD or YYYY. + +
    +
    + +
    +
    + +**mature_content:** `typing.Optional[bool]` — An optional mature content of the project. + +
    +
    + +
    +
    + **isbn_number:** `typing.Optional[str]` — An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download.
    @@ -3922,7 +4264,7 @@ client.projects.stream_audio(
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
    @@ -4013,7 +4355,7 @@ client.projects.stream_archive(
    -
    client.projects.update_pronunciation_dictionaries(...) +
    client.projects.add_chapter_to_a_project(...)
    @@ -4025,7 +4367,7 @@ client.projects.stream_archive(
    -Updates the set of pronunciation dictionaries acting on a project. This will automatically mark text within this project as requiring reconverting where the new dictionary would apply or the old one no longer does. +Creates a new chapter either as blank or from a URL.
    @@ -4040,19 +4382,14 @@ Updates the set of pronunciation dictionaries acting on a project. This will aut
    ```python -from elevenlabs import ElevenLabs, PronunciationDictionaryVersionLocator +from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.projects.update_pronunciation_dictionaries( +client.projects.add_chapter_to_a_project( project_id="21m00Tcm4TlvDq8ikWAM", - pronunciation_dictionary_locators=[ - PronunciationDictionaryVersionLocator( - pronunciation_dictionary_id="pronunciation_dictionary_id", - version_id="version_id", - ) - ], + name="name", ) ``` @@ -4077,7 +4414,15 @@ client.projects.update_pronunciation_dictionaries(
    -**pronunciation_dictionary_locators:** `typing.Sequence[PronunciationDictionaryVersionLocator]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first. +**name:** `str` — The name of the chapter, used for identification only. + +
    +
    + +
    +
    + +**from_url:** `typing.Optional[str]` — An optional URL from which we will extract content to initialize the project. If this is set, 'from_url' must be null. If neither 'from_url' or 'from_document' are provided we will initialize the project as blank.
    @@ -4097,8 +4442,7 @@ client.projects.update_pronunciation_dictionaries(
    -## Chapters -
    client.chapters.get_all(...) +
    client.projects.update_pronunciation_dictionaries(...)
    @@ -4110,7 +4454,7 @@ client.projects.update_pronunciation_dictionaries(
    -Returns a list of your chapters for a project together and its metadata. +Updates the set of pronunciation dictionaries acting on a project. This will automatically mark text within this project as requiring reconverting where the new dictionary would apply or the old one no longer does.
    @@ -4125,7 +4469,92 @@ Returns a list of your chapters for a project together and its metadata.
    ```python -from elevenlabs import ElevenLabs +from elevenlabs import ElevenLabs, PronunciationDictionaryVersionLocator + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.projects.update_pronunciation_dictionaries( + project_id="21m00Tcm4TlvDq8ikWAM", + pronunciation_dictionary_locators=[ + PronunciationDictionaryVersionLocator( + pronunciation_dictionary_id="pronunciation_dictionary_id", + version_id="version_id", + ) + ], +) + +``` +
    +
    + +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. + +
    +
    + +
    +
    + +**pronunciation_dictionary_locators:** `typing.Sequence[PronunciationDictionaryVersionLocator]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
    +
    +
    +
    + + + +
    + + +## Chapters +
    client.chapters.get_all(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Returns a list of your chapters for a project together and its metadata. +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", @@ -4713,6 +5142,22 @@ typing.Optional[core.File]` — See core.File for more documentation
    +**drop_background_audio:** `typing.Optional[bool]` — An advanced setting. Whether to drop background audio from the final dub. This can improve dub quality where it's known that audio shouldn't have a background track such as for speeches or monologues. + +
    +
    + +
    +
    + +**use_profanity_filter:** `typing.Optional[bool]` — [BETA] Whether transcripts should have profanities censored with the words '[censored]' + +
    +
    + +
    +
    + **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -4932,7 +5377,7 @@ client.dubbing.get_dubbed_file(
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
    @@ -5033,25 +5478,11 @@ client.dubbing.get_transcript_for_dub(
    -## Models -
    client.models.get_all() -
    -
    - -#### 📝 Description - -
    -
    - +## Workspace +
    client.workspace.get_sso_provider_admin(...)
    -Gets a list of available models. -
    -
    -
    -
    - #### 🔌 Usage
    @@ -5066,7 +5497,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.models.get_all() +client.workspace.get_sso_provider_admin( + workspace_id="workspace_id", +) ```
    @@ -5082,6 +5515,14 @@ client.models.get_all()
    +**workspace_id:** `str` + +
    +
    + +
    +
    + **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -5094,8 +5535,7 @@ client.models.get_all()
    -## AudioNative -
    client.audio_native.create(...) +
    client.workspace.invite_user(...)
    @@ -5107,7 +5547,7 @@ client.models.get_all()
    -Creates AudioNative enabled project, optionally starts conversion and returns project id and embeddable html snippet. +Sends an email invitation to join your workspace to the provided email. If the user doesn't have an account they will be prompted to create one. If the user accepts this invite they will be added as a user to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators.
    @@ -5127,8 +5567,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.audio_native.create( - name="name", +client.workspace.invite_user( + email="email", ) ``` @@ -5145,7 +5585,7 @@ client.audio_native.create(
    -**name:** `str` — Project name. +**email:** `str` — Email of the target user.
    @@ -5153,89 +5593,69 @@ client.audio_native.create(
    -**image:** `typing.Optional[str]` — Image URL used in the player. If not provided, default image set in the Player settings is used. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    - -
    -
    - -**author:** `typing.Optional[str]` — Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used. -
    -
    -
    -**title:** `typing.Optional[str]` — Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used. -
    +
    +
    client.workspace.delete_existing_invitation(...)
    -**small:** `typing.Optional[bool]` — Whether to use small player or not. If not provided, default value set in the Player settings is used. - -
    -
    +#### 📝 Description
    -**text_color:** `typing.Optional[str]` — Text color used in the player. If not provided, default text color set in the Player settings is used. - -
    -
    -
    -**background_color:** `typing.Optional[str]` — Background color used in the player. If not provided, default background color set in the Player settings is used. - +Invalidates an existing email invitation. The invitation will still show up in the inbox it has been delivered to, but activating it to join the workspace won't work. This endpoint may only be called by workspace administrators.
    - -
    -
    - -**sessionization:** `typing.Optional[int]` — Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used. -
    +#### 🔌 Usage +
    -**voice_id:** `typing.Optional[str]` — Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used. - -
    -
    -
    -**model_id:** `typing.Optional[str]` — TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used. - +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.workspace.delete_existing_invitation( + email="email", +) + +``` +
    +
    +#### ⚙️ Parameters +
    -**file:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation - -
    -
    -
    -**auto_convert:** `typing.Optional[bool]` — Whether to auto convert the project to audio or not. +**email:** `str` — Email of the target user.
    @@ -5255,8 +5675,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -## Usage -
    client.usage.get_characters_usage_metrics(...) +
    client.workspace.update_member(...)
    @@ -5268,7 +5687,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -Returns the characters usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis. +Updates attributes of a workspace member. Apart from the email identifier, all parameters will remain unchanged unless specified. This endpoint may only be called by workspace administrators.
    @@ -5288,9 +5707,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.usage.get_characters_usage_metrics( - start_unix=1, - end_unix=1, +client.workspace.update_member( + email="email", ) ``` @@ -5307,7 +5725,7 @@ client.usage.get_characters_usage_metrics(
    -**start_unix:** `int` — UTC Unix timestamp for the start of the usage window, in milliseconds. To include the first day of the window, the timestamp should be at 00:00:00 of that day. +**email:** `str` — Email of the target user.
    @@ -5315,7 +5733,7 @@ client.usage.get_characters_usage_metrics(
    -**end_unix:** `int` — UTC Unix timestamp for the end of the usage window, in milliseconds. To include the last day of the window, the timestamp should be at 23:59:59 of that day. +**is_locked:** `typing.Optional[bool]` — Whether to lock or unlock the user account.
    @@ -5323,7 +5741,7 @@ client.usage.get_characters_usage_metrics(
    -**include_workspace_metrics:** `typing.Optional[bool]` — Whether or not to include the statistics of the entire workspace. +**workspace_role:** `typing.Optional[BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole]` — Role dictating permissions in the workspace.
    @@ -5331,10 +5749,63 @@ client.usage.get_characters_usage_metrics(
    -**breakdown_type:** `typing.Optional[UsageGetCharactersUsageMetricsRequestBreakdownType]` — How to break down the information. Cannot be "user" if include_workspace_metrics is False. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    + +
    + + + + +
    + +## Models +
    client.models.get_all() +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Gets a list of available models. +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.models.get_all() + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    @@ -5351,8 +5822,8 @@ client.usage.get_characters_usage_metrics(
    -## PronunciationDictionary -
    client.pronunciation_dictionary.add_from_file(...) +## AudioNative +
    client.audio_native.create(...)
    @@ -5364,7 +5835,7 @@ client.usage.get_characters_usage_metrics(
    -Creates a new pronunciation dictionary from a lexicon .PLS file +Creates AudioNative enabled project, optionally starts conversion and returns project id and embeddable html snippet.
    @@ -5384,7 +5855,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.add_from_file( +client.audio_native.create( name="name", ) @@ -5402,7 +5873,7 @@ client.pronunciation_dictionary.add_from_file(
    -**name:** `str` — The name of the pronunciation dictionary, used for identification only. +**name:** `str` — Project name.
    @@ -5410,9 +5881,7 @@ client.pronunciation_dictionary.add_from_file(
    -**file:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation +**image:** `typing.Optional[str]` — Image URL used in the player. If not provided, default image set in the Player settings is used.
    @@ -5420,7 +5889,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -**description:** `typing.Optional[str]` — A description of the pronunciation dictionary, used for identification only. +**author:** `typing.Optional[str]` — Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used.
    @@ -5428,7 +5897,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -**workspace_access:** `typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess]` — Should be one of 'editor' or 'viewer'. If not provided, defaults to no access. +**title:** `typing.Optional[str]` — Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used.
    @@ -5436,79 +5905,57 @@ typing.Optional[core.File]` — See core.File for more documentation
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**small:** `typing.Optional[bool]` — Whether to use small player or not. If not provided, default value set in the Player settings is used.
    - -
    +
    +
    +**text_color:** `typing.Optional[str]` — Text color used in the player. If not provided, default text color set in the Player settings is used. +
    -
    -
    client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(...)
    -#### 📝 Description - -
    -
    +**background_color:** `typing.Optional[str]` — Background color used in the player. If not provided, default background color set in the Player settings is used. + +
    +
    -Add rules to the pronunciation dictionary -
    -
    +**sessionization:** `typing.Optional[int]` — Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used. +
    -#### 🔌 Usage -
    +**voice_id:** `typing.Optional[str]` — Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used. + +
    +
    +
    -```python -from elevenlabs import ElevenLabs -from elevenlabs.pronunciation_dictionary import ( - PronunciationDictionaryRule_Phoneme, -) - -client = ElevenLabs( - api_key="YOUR_API_KEY", -) -client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary( - pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", - rules=[ - PronunciationDictionaryRule_Phoneme( - string_to_replace="rules", - phoneme="rules", - alphabet="rules", - ) - ], -) - -``` -
    -
    +**model_id:** `typing.Optional[str]` — TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used. + -#### ⚙️ Parameters -
    -
    -
    +**file:** `from __future__ import annotations -**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary +typing.Optional[core.File]` — See core.File for more documentation
    @@ -5516,11 +5963,7 @@ client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(
    -**rules:** `typing.Sequence[PronunciationDictionaryRule]` - -List of pronunciation rules. Rule can be either: - an alias rule: {'string_to_replace': 'a', 'type': 'alias', 'alias': 'b', } - or a phoneme rule: {'string_to_replace': 'a', 'type': 'phoneme', 'phoneme': 'b', 'alphabet': 'ipa' } +**auto_convert:** `typing.Optional[bool]` — Whether to auto convert the project to audio or not.
    @@ -5540,7 +5983,8 @@ List of pronunciation rules. Rule can be either:
    -
    client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(...) +## Usage +
    client.usage.get_characters_usage_metrics(...)
    @@ -5552,7 +5996,7 @@ List of pronunciation rules. Rule can be either:
    -Remove rules from the pronunciation dictionary +Returns the credit usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis.
    @@ -5572,9 +6016,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary( - pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", - rule_strings=["rule_strings"], +client.usage.get_characters_usage_metrics( + start_unix=1, + end_unix=1, ) ``` @@ -5591,7 +6035,7 @@ client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
    -**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary +**start_unix:** `int` — UTC Unix timestamp for the start of the usage window, in milliseconds. To include the first day of the window, the timestamp should be at 00:00:00 of that day.
    @@ -5599,7 +6043,23 @@ client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
    -**rule_strings:** `typing.Sequence[str]` — List of strings to remove from the pronunciation dictionary. +**end_unix:** `int` — UTC Unix timestamp for the end of the usage window, in milliseconds. To include the last day of the window, the timestamp should be at 23:59:59 of that day. + +
    +
    + +
    +
    + +**include_workspace_metrics:** `typing.Optional[bool]` — Whether or not to include the statistics of the entire workspace. + +
    +
    + +
    +
    + +**breakdown_type:** `typing.Optional[BreakdownTypes]` — How to break down the information. Cannot be "user" if include_workspace_metrics is False.
    @@ -5619,7 +6079,8 @@ client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
    -
    client.pronunciation_dictionary.download(...) +## PronunciationDictionary +
    client.pronunciation_dictionary.add_from_file(...)
    @@ -5631,7 +6092,7 @@ client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
    -Get PLS file with a pronunciation dictionary version rules +Creates a new pronunciation dictionary from a lexicon .PLS file
    @@ -5651,9 +6112,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.download( - dictionary_id="Fm6AvNgS53NXe6Kqxp3e", - version_id="KZFyRUq3R6kaqhKI146w", +client.pronunciation_dictionary.add_from_file( + name="name", ) ``` @@ -5670,7 +6130,7 @@ client.pronunciation_dictionary.download(
    -**dictionary_id:** `str` — The id of the pronunciation dictionary +**name:** `str` — The name of the pronunciation dictionary, used for identification only.
    @@ -5678,7 +6138,25 @@ client.pronunciation_dictionary.download(
    -**version_id:** `str` — The id of the version of the pronunciation dictionary +**file:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation + +
    +
    + +
    +
    + +**description:** `typing.Optional[str]` — A description of the pronunciation dictionary, used for identification only. + +
    +
    + +
    +
    + +**workspace_access:** `typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess]` — Should be one of 'editor' or 'viewer'. If not provided, defaults to no access.
    @@ -5698,7 +6176,7 @@ client.pronunciation_dictionary.download(
    -
    client.pronunciation_dictionary.get(...) +
    client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(...)
    @@ -5710,7 +6188,7 @@ client.pronunciation_dictionary.download(
    -Get metadata for a pronunciation dictionary +Add rules to the pronunciation dictionary
    @@ -5726,12 +6204,22 @@ Get metadata for a pronunciation dictionary ```python from elevenlabs import ElevenLabs +from elevenlabs.pronunciation_dictionary import ( + PronunciationDictionaryRule_Phoneme, +) client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.get( - pronunciation_dictionary_id="Fm6AvNgS53NXe6Kqxp3e", +client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary( + pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", + rules=[ + PronunciationDictionaryRule_Phoneme( + string_to_replace="rules", + phoneme="rules", + alphabet="rules", + ) + ], ) ``` @@ -5756,6 +6244,18 @@ client.pronunciation_dictionary.get(
    +**rules:** `typing.Sequence[PronunciationDictionaryRule]` + +List of pronunciation rules. Rule can be either: + an alias rule: {'string_to_replace': 'a', 'type': 'alias', 'alias': 'b', } + or a phoneme rule: {'string_to_replace': 'a', 'type': 'phoneme', 'phoneme': 'b', 'alphabet': 'ipa' } + +
    +
    + +
    +
    + **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -5768,7 +6268,7 @@ client.pronunciation_dictionary.get(
    -
    client.pronunciation_dictionary.get_all(...) +
    client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(...)
    @@ -5780,7 +6280,7 @@ client.pronunciation_dictionary.get(
    -Get a list of the pronunciation dictionaries you have access to and their metadata +Remove rules from the pronunciation dictionary
    @@ -5800,8 +6300,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.get_all( - page_size=1, +client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary( + pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", + rule_strings=["rule_strings"], ) ``` @@ -5818,7 +6319,7 @@ client.pronunciation_dictionary.get_all(
    -**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response. +**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
    @@ -5826,7 +6327,7 @@ client.pronunciation_dictionary.get_all(
    -**page_size:** `typing.Optional[int]` — How many pronunciation dictionaries to return at maximum. Can not exceed 100, defaults to 30. +**rule_strings:** `typing.Sequence[str]` — List of strings to remove from the pronunciation dictionary.
    @@ -5846,8 +6347,7 @@ client.pronunciation_dictionary.get_all(
    -## Workspace -
    client.workspace.invite_user(...) +
    client.pronunciation_dictionary.download(...)
    @@ -5859,7 +6359,7 @@ client.pronunciation_dictionary.get_all(
    -Sends an email invitation to join your workspace to the provided email. If the user doesn't have an account they will be prompted to create one. If the user accepts this invite they will be added as a user to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators. +Get PLS file with a pronunciation dictionary version rules
    @@ -5879,8 +6379,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.workspace.invite_user( - email="email", +client.pronunciation_dictionary.download( + dictionary_id="Fm6AvNgS53NXe6Kqxp3e", + version_id="KZFyRUq3R6kaqhKI146w", ) ``` @@ -5897,7 +6398,15 @@ client.workspace.invite_user(
    -**email:** `str` — Email of the target user. +**dictionary_id:** `str` — The id of the pronunciation dictionary + +
    +
    + +
    +
    + +**version_id:** `str` — The id of the version of the pronunciation dictionary
    @@ -5917,7 +6426,7 @@ client.workspace.invite_user(
    -
    client.workspace.delete_existing_invitation(...) +
    client.pronunciation_dictionary.get(...)
    @@ -5929,7 +6438,7 @@ client.workspace.invite_user(
    -Invalidates an existing email invitation. The invitation will still show up in the inbox it has been delivered to, but activating it to join the workspace won't work. This endpoint may only be called by workspace administrators. +Get metadata for a pronunciation dictionary
    @@ -5949,8 +6458,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.workspace.delete_existing_invitation( - email="email", +client.pronunciation_dictionary.get( + pronunciation_dictionary_id="Fm6AvNgS53NXe6Kqxp3e", ) ``` @@ -5967,7 +6476,7 @@ client.workspace.delete_existing_invitation(
    -**email:** `str` — Email of the target user. +**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
    @@ -5987,7 +6496,7 @@ client.workspace.delete_existing_invitation(
    -
    client.workspace.update_member(...) +
    client.pronunciation_dictionary.get_all(...)
    @@ -5999,7 +6508,7 @@ client.workspace.delete_existing_invitation(
    -Updates attributes of a workspace member. Apart from the email identifier, all parameters will remain unchanged unless specified. This endpoint may only be called by workspace administrators. +Get a list of the pronunciation dictionaries you have access to and their metadata
    @@ -6019,8 +6528,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.workspace.update_member( - email="email", +client.pronunciation_dictionary.get_all( + page_size=1, ) ``` @@ -6037,15 +6546,7 @@ client.workspace.update_member(
    -**email:** `str` — Email of the target user. - -
    -
    - -
    -
    - -**is_locked:** `typing.Optional[bool]` — Whether to lock or unlock the user account. +**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
    @@ -6053,7 +6554,7 @@ client.workspace.update_member(
    -**workspace_role:** `typing.Optional[BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole]` — Role dictating permissions in the workspace. +**page_size:** `typing.Optional[int]` — How many pronunciation dictionaries to return at maximum. Can not exceed 100, defaults to 30.
    diff --git a/src/elevenlabs/__init__.py b/src/elevenlabs/__init__.py index 8ca7e4ed..d6225664 100644 --- a/src/elevenlabs/__init__.py +++ b/src/elevenlabs/__init__.py @@ -2,32 +2,32 @@ from .types import ( Accent, + AddChapterResponseModel, AddProjectResponseModel, AddPronunciationDictionaryResponseModel, AddPronunciationDictionaryRulesResponseModel, + AddVoiceIvcResponseModel, AddVoiceResponseModel, Age, AudioNativeCreateProjectResponseModel, AudioNativeGetEmbedCodeResponseModel, - AudioOutput, - Category, + BreakdownTypes, ChapterResponse, ChapterSnapshotResponse, ChapterSnapshotsResponse, ChapterState, ChapterStatisticsResponse, - CloseConnection, Currency, DoDubbingResponse, DubbingMetadataResponse, EditProjectResponseModel, ExtendedSubscriptionResponseModelBillingPeriod, ExtendedSubscriptionResponseModelCharacterRefreshPeriod, + ExtendedSubscriptionResponseModelCurrency, FeedbackItem, FineTuningResponse, FineTuningResponseModelStateValue, Gender, - GenerationConfig, GetChaptersResponse, GetLibraryVoicesResponse, GetProjectsResponse, @@ -40,41 +40,47 @@ HistoryAlignmentsResponseModel, HistoryItem, HttpValidationError, - InitializeConnection, Invoice, LanguageResponse, LibraryVoiceResponse, + LibraryVoiceResponseModelCategory, ManualVerificationFileResponse, ManualVerificationResponse, Model, - NormalizedAlignment, + ModelRatesResponseModel, + ModelResponseModelConcurrencyGroup, OptimizeStreamingLatency, OutputFormat, ProfilePageResponseModel, ProjectExtendedResponseModel, + ProjectExtendedResponseModelAccessLevel, + ProjectExtendedResponseModelQualityPreset, + ProjectExtendedResponseModelTargetAudience, ProjectResponse, + ProjectResponseModelAccessLevel, + ProjectResponseModelTargetAudience, ProjectSnapshotResponse, ProjectSnapshotUploadResponseModel, + ProjectSnapshotUploadResponseModelStatus, ProjectSnapshotsResponse, ProjectState, PronunciationDictionaryAliasRuleRequestModel, PronunciationDictionaryPhonemeRuleRequestModel, 
PronunciationDictionaryVersionLocator, - RealtimeVoiceSettings, + PronunciationDictionaryVersionResponseModel, RecordingResponse, RemovePronunciationDictionaryRulesResponseModel, ReviewStatus, - SendText, - Source, SpeechHistoryItemResponse, + SpeechHistoryItemResponseModelSource, SpeechHistoryItemResponseModelVoiceCategory, SsoProviderResponseModel, SsoProviderResponseModelProviderType, - Status, Subscription, SubscriptionResponse, SubscriptionResponseModelBillingPeriod, SubscriptionResponseModelCharacterRefreshPeriod, + SubscriptionResponseModelCurrency, SubscriptionStatus, TextToSpeechAsStreamRequest, UsageCharactersResponseModel, @@ -85,10 +91,15 @@ Voice, VoiceGenerationParameterOptionResponse, VoiceGenerationParameterResponse, + VoicePreviewResponseModel, + VoicePreviewsResponseModel, + VoiceResponseModelCategory, VoiceResponseModelSafetyControl, VoiceSample, VoiceSettings, + VoiceSharingModerationCheckResponseModel, VoiceSharingResponse, + VoiceSharingResponseModelCategory, VoiceSharingState, VoiceVerificationResponse, ) @@ -106,6 +117,7 @@ speech_to_speech, text_to_sound_effects, text_to_speech, + text_to_voice, usage, user, voice_generation, @@ -116,36 +128,45 @@ from .dubbing import GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType from .environment import ElevenLabsEnvironment from .play import play, save, stream +from .projects import ProjectsAddRequestTargetAudience from .pronunciation_dictionary import ( PronunciationDictionaryAddFromFileRequestWorkspaceAccess, PronunciationDictionaryRule, PronunciationDictionaryRule_Alias, PronunciationDictionaryRule_Phoneme, ) -from .text_to_speech import SendMessage -from .usage import UsageGetCharactersUsageMetricsRequestBreakdownType +from .text_to_speech import ( + BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization, + BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization, + 
BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization, + BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization, +) from .version import __version__ from .workspace import BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole __all__ = [ "Accent", + "AddChapterResponseModel", "AddProjectResponseModel", "AddPronunciationDictionaryResponseModel", "AddPronunciationDictionaryRulesResponseModel", + "AddVoiceIvcResponseModel", "AddVoiceResponseModel", "Age", "AsyncElevenLabs", "AudioNativeCreateProjectResponseModel", "AudioNativeGetEmbedCodeResponseModel", - "AudioOutput", + "BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization", + "BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization", + "BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization", + "BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization", "BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole", - "Category", + "BreakdownTypes", "ChapterResponse", "ChapterSnapshotResponse", "ChapterSnapshotsResponse", "ChapterState", "ChapterStatisticsResponse", - "CloseConnection", "Currency", "DoDubbingResponse", "DubbingMetadataResponse", @@ -154,11 +175,11 @@ "ElevenLabsEnvironment", "ExtendedSubscriptionResponseModelBillingPeriod", "ExtendedSubscriptionResponseModelCharacterRefreshPeriod", + "ExtendedSubscriptionResponseModelCurrency", "FeedbackItem", "FineTuningResponse", "FineTuningResponseModelStateValue", "Gender", - "GenerationConfig", "GetChaptersResponse", "GetLibraryVoicesResponse", "GetProjectsResponse", @@ -172,23 +193,31 @@ "HistoryAlignmentsResponseModel", "HistoryItem", "HttpValidationError", - "InitializeConnection", "Invoice", "LanguageResponse", "LibraryVoiceResponse", + "LibraryVoiceResponseModelCategory", "ManualVerificationFileResponse", "ManualVerificationResponse", "Model", - "NormalizedAlignment", + "ModelRatesResponseModel", + 
"ModelResponseModelConcurrencyGroup", "OptimizeStreamingLatency", "OutputFormat", "ProfilePageResponseModel", "ProjectExtendedResponseModel", + "ProjectExtendedResponseModelAccessLevel", + "ProjectExtendedResponseModelQualityPreset", + "ProjectExtendedResponseModelTargetAudience", "ProjectResponse", + "ProjectResponseModelAccessLevel", + "ProjectResponseModelTargetAudience", "ProjectSnapshotResponse", "ProjectSnapshotUploadResponseModel", + "ProjectSnapshotUploadResponseModelStatus", "ProjectSnapshotsResponse", "ProjectState", + "ProjectsAddRequestTargetAudience", "PronunciationDictionaryAddFromFileRequestWorkspaceAccess", "PronunciationDictionaryAliasRuleRequestModel", "PronunciationDictionaryPhonemeRuleRequestModel", @@ -196,27 +225,24 @@ "PronunciationDictionaryRule_Alias", "PronunciationDictionaryRule_Phoneme", "PronunciationDictionaryVersionLocator", - "RealtimeVoiceSettings", + "PronunciationDictionaryVersionResponseModel", "RecordingResponse", "RemovePronunciationDictionaryRulesResponseModel", "ReviewStatus", - "SendMessage", - "SendText", - "Source", "SpeechHistoryItemResponse", + "SpeechHistoryItemResponseModelSource", "SpeechHistoryItemResponseModelVoiceCategory", "SsoProviderResponseModel", "SsoProviderResponseModelProviderType", - "Status", "Subscription", "SubscriptionResponse", "SubscriptionResponseModelBillingPeriod", "SubscriptionResponseModelCharacterRefreshPeriod", + "SubscriptionResponseModelCurrency", "SubscriptionStatus", "TextToSpeechAsStreamRequest", "UnprocessableEntityError", "UsageCharactersResponseModel", - "UsageGetCharactersUsageMetricsRequestBreakdownType", "User", "ValidationError", "ValidationErrorLocItem", @@ -224,10 +250,15 @@ "Voice", "VoiceGenerationParameterOptionResponse", "VoiceGenerationParameterResponse", + "VoicePreviewResponseModel", + "VoicePreviewsResponseModel", + "VoiceResponseModelCategory", "VoiceResponseModelSafetyControl", "VoiceSample", "VoiceSettings", + "VoiceSharingModerationCheckResponseModel", 
"VoiceSharingResponse", + "VoiceSharingResponseModelCategory", "VoiceSharingState", "VoiceVerificationResponse", "__version__", @@ -246,6 +277,7 @@ "stream", "text_to_sound_effects", "text_to_speech", + "text_to_voice", "usage", "user", "voice_generation", diff --git a/src/elevenlabs/audio_isolation/client.py b/src/elevenlabs/audio_isolation/client.py index 2e831108..cae2e69a 100644 --- a/src/elevenlabs/audio_isolation/client.py +++ b/src/elevenlabs/audio_isolation/client.py @@ -31,7 +31,7 @@ def audio_isolation( See core.File for more documentation request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. Yields ------ @@ -59,7 +59,8 @@ def audio_isolation( ) as _response: try: if 200 <= _response.status_code < 300: - for _chunk in _response.iter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + for _chunk in _response.iter_bytes(chunk_size=_chunk_size): yield _chunk return _response.read() @@ -90,7 +91,7 @@ def audio_isolation_stream( See core.File for more documentation request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. Yields ------ @@ -118,7 +119,8 @@ def audio_isolation_stream( ) as _response: try: if 200 <= _response.status_code < 300: - for _chunk in _response.iter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + for _chunk in _response.iter_bytes(chunk_size=_chunk_size): yield _chunk return _response.read() @@ -154,7 +156,7 @@ async def audio_isolation( See core.File for more documentation request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. 
You can pass in configuration such as `chunk_size`, and more to customize the request and response. Yields ------ @@ -190,7 +192,8 @@ async def main() -> None: ) as _response: try: if 200 <= _response.status_code < 300: - async for _chunk in _response.aiter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size): yield _chunk return await _response.aread() @@ -221,7 +224,7 @@ async def audio_isolation_stream( See core.File for more documentation request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. Yields ------ @@ -257,7 +260,8 @@ async def main() -> None: ) as _response: try: if 200 <= _response.status_code < 300: - async for _chunk in _response.aiter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size): yield _chunk return await _response.aread() diff --git a/src/elevenlabs/base_client.py b/src/elevenlabs/base_client.py index 977ed580..50a0d659 100644 --- a/src/elevenlabs/base_client.py +++ b/src/elevenlabs/base_client.py @@ -12,16 +12,17 @@ from .text_to_speech.client import TextToSpeechClient from .speech_to_speech.client import SpeechToSpeechClient from .voice_generation.client import VoiceGenerationClient +from .text_to_voice.client import TextToVoiceClient from .user.client import UserClient from .voices.client import VoicesClient from .projects.client import ProjectsClient from .chapters.client import ChaptersClient from .dubbing.client import DubbingClient +from .workspace.client import WorkspaceClient from .models.client import ModelsClient from .audio_native.client import AudioNativeClient from .usage.client import UsageClient from 
.pronunciation_dictionary.client import PronunciationDictionaryClient -from .workspace.client import WorkspaceClient from .core.client_wrapper import AsyncClientWrapper from .history.client import AsyncHistoryClient from .text_to_sound_effects.client import AsyncTextToSoundEffectsClient @@ -30,16 +31,17 @@ from .text_to_speech.client import AsyncTextToSpeechClient from .speech_to_speech.client import AsyncSpeechToSpeechClient from .voice_generation.client import AsyncVoiceGenerationClient +from .text_to_voice.client import AsyncTextToVoiceClient from .user.client import AsyncUserClient from .voices.client import AsyncVoicesClient from .projects.client import AsyncProjectsClient from .chapters.client import AsyncChaptersClient from .dubbing.client import AsyncDubbingClient +from .workspace.client import AsyncWorkspaceClient from .models.client import AsyncModelsClient from .audio_native.client import AsyncAudioNativeClient from .usage.client import AsyncUsageClient from .pronunciation_dictionary.client import AsyncPronunciationDictionaryClient -from .workspace.client import AsyncWorkspaceClient class BaseElevenLabs: @@ -107,16 +109,17 @@ def __init__( self.text_to_speech = TextToSpeechClient(client_wrapper=self._client_wrapper) self.speech_to_speech = SpeechToSpeechClient(client_wrapper=self._client_wrapper) self.voice_generation = VoiceGenerationClient(client_wrapper=self._client_wrapper) + self.text_to_voice = TextToVoiceClient(client_wrapper=self._client_wrapper) self.user = UserClient(client_wrapper=self._client_wrapper) self.voices = VoicesClient(client_wrapper=self._client_wrapper) self.projects = ProjectsClient(client_wrapper=self._client_wrapper) self.chapters = ChaptersClient(client_wrapper=self._client_wrapper) self.dubbing = DubbingClient(client_wrapper=self._client_wrapper) + self.workspace = WorkspaceClient(client_wrapper=self._client_wrapper) self.models = ModelsClient(client_wrapper=self._client_wrapper) self.audio_native = 
AudioNativeClient(client_wrapper=self._client_wrapper) self.usage = UsageClient(client_wrapper=self._client_wrapper) self.pronunciation_dictionary = PronunciationDictionaryClient(client_wrapper=self._client_wrapper) - self.workspace = WorkspaceClient(client_wrapper=self._client_wrapper) class AsyncBaseElevenLabs: @@ -184,16 +187,17 @@ def __init__( self.text_to_speech = AsyncTextToSpeechClient(client_wrapper=self._client_wrapper) self.speech_to_speech = AsyncSpeechToSpeechClient(client_wrapper=self._client_wrapper) self.voice_generation = AsyncVoiceGenerationClient(client_wrapper=self._client_wrapper) + self.text_to_voice = AsyncTextToVoiceClient(client_wrapper=self._client_wrapper) self.user = AsyncUserClient(client_wrapper=self._client_wrapper) self.voices = AsyncVoicesClient(client_wrapper=self._client_wrapper) self.projects = AsyncProjectsClient(client_wrapper=self._client_wrapper) self.chapters = AsyncChaptersClient(client_wrapper=self._client_wrapper) self.dubbing = AsyncDubbingClient(client_wrapper=self._client_wrapper) + self.workspace = AsyncWorkspaceClient(client_wrapper=self._client_wrapper) self.models = AsyncModelsClient(client_wrapper=self._client_wrapper) self.audio_native = AsyncAudioNativeClient(client_wrapper=self._client_wrapper) self.usage = AsyncUsageClient(client_wrapper=self._client_wrapper) self.pronunciation_dictionary = AsyncPronunciationDictionaryClient(client_wrapper=self._client_wrapper) - self.workspace = AsyncWorkspaceClient(client_wrapper=self._client_wrapper) def _get_base_url(https://github.com/*,%20base_url:%20typing.Optional[str]%20=%20None,%20environment:%20ElevenLabsEnvironment) -> str: diff --git a/src/elevenlabs/conversational_ai/conversation.py b/src/elevenlabs/conversational_ai/conversation.py deleted file mode 100644 index 13533ab8..00000000 --- a/src/elevenlabs/conversational_ai/conversation.py +++ /dev/null @@ -1,215 +0,0 @@ -from abc import ABC, abstractmethod -import base64 -import json -import threading -from 
typing import Callable, Optional - -from websockets.sync.client import connect - -from ..base_client import BaseElevenLabs - - -class AudioInterface(ABC): - """AudioInterface provides an abstraction for handling audio input and output.""" - - @abstractmethod - def start(self, input_callback: Callable[[bytes], None]): - """Starts the audio interface. - - Called one time before the conversation starts. - The `input_callback` should be called regularly with input audio chunks from - the user. The audio should be in 16-bit PCM mono format at 16kHz. Recommended - chunk size is 4000 samples (250 milliseconds). - """ - pass - - @abstractmethod - def stop(self): - """Stops the audio interface. - - Called one time after the conversation ends. Should clean up any resources - used by the audio interface and stop any audio streams. Do not call the - `input_callback` from `start` after this method is called. - """ - pass - - @abstractmethod - def output(self, audio: bytes): - """Output audio to the user. - - The `audio` input is in 16-bit PCM mono format at 16kHz. Implementations can - choose to do additional buffering. This method should return quickly and not - block the calling thread. - """ - pass - - @abstractmethod - def interrupt(self): - """Interruption signal to stop any audio output. - - User has interrupted the agent and all previosly buffered audio output should - be stopped. 
- """ - pass - - -class Conversation: - client: BaseElevenLabs - agent_id: str - requires_auth: bool - - audio_interface: AudioInterface - callback_agent_response: Optional[Callable[[str], None]] - callback_agent_response_correction: Optional[Callable[[str, str], None]] - callback_user_transcript: Optional[Callable[[str], None]] - callback_latency_measurement: Optional[Callable[[int], None]] - - _thread: Optional[threading.Thread] = None - _should_stop: threading.Event = threading.Event() - _conversation_id: Optional[str] = None - _last_interrupt_id: int = 0 - - def __init__( - self, - client: BaseElevenLabs, - agent_id: str, - *, - requires_auth: bool, - audio_interface: AudioInterface, - callback_agent_response: Optional[Callable[[str], None]] = None, - callback_agent_response_correction: Optional[Callable[[str, str], None]] = None, - callback_user_transcript: Optional[Callable[[str], None]] = None, - callback_latency_measurement: Optional[Callable[[int], None]] = None, - ): - """Conversational AI session. - - BETA: This API is subject to change without regard to backwards compatibility. - - Args: - client: The ElevenLabs client to use for the conversation. - agent_id: The ID of the agent to converse with. - requires_auth: Whether the agent requires authentication. - audio_interface: The audio interface to use for input and output. - callback_agent_response: Callback for agent responses. - callback_agent_response_correction: Callback for agent response corrections. - First argument is the original response (previously given to - callback_agent_response), second argument is the corrected response. - callback_user_transcript: Callback for user transcripts. - callback_latency_measurement: Callback for latency measurements (in milliseconds). 
- """ - - self.client = client - self.agent_id = agent_id - self.requires_auth = requires_auth - - self.audio_interface = audio_interface - self.callback_agent_response = callback_agent_response - self.callback_agent_response_correction = callback_agent_response_correction - self.callback_user_transcript = callback_user_transcript - self.callback_latency_measurement = callback_latency_measurement - - def start_session(self): - """Starts the conversation session. - - Will run in background thread until `end_session` is called. - """ - ws_url = self._get_signed_url() if self.requires_auth else self._get_wss_url() - self._thread = threading.Thread(target=self._run, args=(ws_url,)) - self._thread.start() - - def end_session(self): - """Ends the conversation session.""" - self.audio_interface.stop() - self._should_stop.set() - - def wait_for_session_end(self) -> Optional[str]: - """Waits for the conversation session to end. - - You must call `end_session` before calling this method, otherwise it will block. - - Returns the conversation ID, if available. 
- """ - if not self._thread: - raise RuntimeError("Session not started.") - self._thread.join() - return self._conversation_id - - def _run(self, ws_url: str): - with connect(ws_url) as ws: - - def input_callback(audio): - ws.send( - json.dumps( - { - "user_audio_chunk": base64.b64encode(audio).decode(), - } - ) - ) - - self.audio_interface.start(input_callback) - while not self._should_stop.is_set(): - try: - message = json.loads(ws.recv(timeout=0.5)) - if self._should_stop.is_set(): - return - self._handle_message(message, ws) - except TimeoutError: - pass - - def _handle_message(self, message, ws): - if message["type"] == "conversation_initiation_metadata": - event = message["conversation_initiation_metadata_event"] - assert self._conversation_id is None - self._conversation_id = event["conversation_id"] - elif message["type"] == "audio": - event = message["audio_event"] - if int(event["event_id"]) <= self._last_interrupt_id: - return - audio = base64.b64decode(event["audio_base_64"]) - self.audio_interface.output(audio) - elif message["type"] == "agent_response": - if self.callback_agent_response: - event = message["agent_response_event"] - self.callback_agent_response(event["agent_response"].strip()) - elif message["type"] == "agent_response_correction": - if self.callback_agent_response_correction: - event = message["agent_response_correction_event"] - self.callback_agent_response_correction( - event["original_agent_response"].strip(), event["corrected_agent_response"].strip() - ) - elif message["type"] == "user_transcript": - if self.callback_user_transcript: - event = message["user_transcription_event"] - self.callback_user_transcript(event["user_transcript"].strip()) - elif message["type"] == "interruption": - event = message["interruption_event"] - self.last_interrupt_id = int(event["event_id"]) - self.audio_interface.interrupt() - elif message["type"] == "ping": - event = message["ping_event"] - ws.send( - json.dumps( - { - "type": "pong", - "event_id": 
event["event_id"], - } - ) - ) - if self.callback_latency_measurement and event["ping_ms"]: - self.callback_latency_measurement(int(event["ping_ms"])) - else: - pass # Ignore all other message types. - - def _get_wss_url(https://github.com/self): - base_url = self.client._client_wrapper._base_url - # Replace http(s) with ws(s). - base_ws_url = base_url.replace("http", "ws", 1) # First occurrence only. - return f"{base_ws_url}/v1/convai/conversation?agent_id={self.agent_id}" - - def _get_signed_url(https://github.com/self): - # TODO: Use generated SDK method once available. - response = self.client._client_wrapper.httpx_client.request( - f"v1/convai/conversation/get_signed_url?agent_id={self.agent_id}", - method="GET", - ) - return response.json()["signed_url"] diff --git a/src/elevenlabs/conversational_ai/default_audio_interface.py b/src/elevenlabs/conversational_ai/default_audio_interface.py deleted file mode 100644 index b1660d85..00000000 --- a/src/elevenlabs/conversational_ai/default_audio_interface.py +++ /dev/null @@ -1,83 +0,0 @@ -from typing import Callable -import queue -import threading - -from .conversation import AudioInterface - - -class DefaultAudioInterface(AudioInterface): - INPUT_FRAMES_PER_BUFFER = 4000 # 250ms @ 16kHz - OUTPUT_FRAMES_PER_BUFFER = 1000 # 62.5ms @ 16kHz - - def __init__(self): - try: - import pyaudio - except ImportError: - raise ImportError("To use DefaultAudioInterface you must install pyaudio.") - self.pyaudio = pyaudio - - def start(self, input_callback: Callable[[bytes], None]): - # Audio input is using callbacks from pyaudio which we simply pass through. - self.input_callback = input_callback - - # Audio output is buffered so we can handle interruptions. - # Start a separate thread to handle writing to the output stream. 
- self.output_queue: queue.Queue[bytes] = queue.Queue() - self.should_stop = threading.Event() - self.output_thread = threading.Thread(target=self._output_thread) - - self.p = self.pyaudio.PyAudio() - self.in_stream = self.p.open( - format=self.pyaudio.paInt16, - channels=1, - rate=16000, - input=True, - stream_callback=self._in_callback, - frames_per_buffer=self.INPUT_FRAMES_PER_BUFFER, - start=True, - ) - self.out_stream = self.p.open( - format=self.pyaudio.paInt16, - channels=1, - rate=16000, - output=True, - frames_per_buffer=self.OUTPUT_FRAMES_PER_BUFFER, - start=True, - ) - - self.output_thread.start() - - def stop(self): - self.should_stop.set() - self.output_thread.join() - self.in_stream.stop_stream() - self.in_stream.close() - self.out_stream.close() - self.p.terminate() - - def output(self, audio: bytes): - self.output_queue.put(audio) - - def interrupt(self): - # Clear the output queue to stop any audio that is currently playing. - # Note: We can't atomically clear the whole queue, but we are doing - # it from the message handling thread so no new audio will be added - # while we are clearing. 
- try: - while True: - _ = self.output_queue.get(block=False) - except queue.Empty: - pass - - def _output_thread(self): - while not self.should_stop.is_set(): - try: - audio = self.output_queue.get(timeout=0.25) - self.out_stream.write(audio) - except queue.Empty: - pass - - def _in_callback(self, in_data, frame_count, time_info, status): - if self.input_callback: - self.input_callback(in_data) - return (None, self.pyaudio.paContinue) diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index ff9aa13a..ae429e14 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.11.0", + "X-Fern-SDK-Version": "1.12.0-beta0", } if self._api_key is not None: headers["xi-api-key"] = self._api_key diff --git a/src/elevenlabs/core/request_options.py b/src/elevenlabs/core/request_options.py index d0bf0dbc..1b388044 100644 --- a/src/elevenlabs/core/request_options.py +++ b/src/elevenlabs/core/request_options.py @@ -23,6 +23,8 @@ class RequestOptions(typing.TypedDict, total=False): - additional_query_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's query parameters dict - additional_body_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's body parameters dict + + - chunk_size: int. The size, in bytes, to process each chunk of data being streamed back within the response. This equates to leveraging `chunk_size` within `requests` or `httpx`, and is only leveraged for file downloads. 
""" timeout_in_seconds: NotRequired[int] @@ -30,3 +32,4 @@ class RequestOptions(typing.TypedDict, total=False): additional_headers: NotRequired[typing.Dict[str, typing.Any]] additional_query_parameters: NotRequired[typing.Dict[str, typing.Any]] additional_body_parameters: NotRequired[typing.Dict[str, typing.Any]] + chunk_size: NotRequired[int] diff --git a/src/elevenlabs/dubbing/client.py b/src/elevenlabs/dubbing/client.py index 3fdeae18..f9e3e4ab 100644 --- a/src/elevenlabs/dubbing/client.py +++ b/src/elevenlabs/dubbing/client.py @@ -38,6 +38,8 @@ def dub_a_video_or_an_audio_file( start_time: typing.Optional[int] = OMIT, end_time: typing.Optional[int] = OMIT, highest_resolution: typing.Optional[bool] = OMIT, + drop_background_audio: typing.Optional[bool] = OMIT, + use_profanity_filter: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> DoDubbingResponse: """ @@ -75,6 +77,12 @@ def dub_a_video_or_an_audio_file( highest_resolution : typing.Optional[bool] Whether to use the highest resolution available. + drop_background_audio : typing.Optional[bool] + An advanced setting. Whether to drop background audio from the final dub. This can improve dub quality where it's known that audio shouldn't have a background track such as for speeches or monologues. + + use_profanity_filter : typing.Optional[bool] + [BETA] Whether transcripts should have profanities censored with the words '[censored]' + request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -107,6 +115,8 @@ def dub_a_video_or_an_audio_file( "start_time": start_time, "end_time": end_time, "highest_resolution": highest_resolution, + "drop_background_audio": drop_background_audio, + "use_profanity_filter": use_profanity_filter, }, files={ "file": file, @@ -271,7 +281,7 @@ def get_dubbed_file( ID of the language. request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. 
You can pass in configuration such as `chunk_size`, and more to customize the request and response. Yields ------ @@ -297,7 +307,8 @@ def get_dubbed_file( ) as _response: try: if 200 <= _response.status_code < 300: - for _chunk in _response.iter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + for _chunk in _response.iter_bytes(chunk_size=_chunk_size): yield _chunk return _response.read() @@ -410,6 +421,8 @@ async def dub_a_video_or_an_audio_file( start_time: typing.Optional[int] = OMIT, end_time: typing.Optional[int] = OMIT, highest_resolution: typing.Optional[bool] = OMIT, + drop_background_audio: typing.Optional[bool] = OMIT, + use_profanity_filter: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> DoDubbingResponse: """ @@ -447,6 +460,12 @@ async def dub_a_video_or_an_audio_file( highest_resolution : typing.Optional[bool] Whether to use the highest resolution available. + drop_background_audio : typing.Optional[bool] + An advanced setting. Whether to drop background audio from the final dub. This can improve dub quality where it's known that audio shouldn't have a background track such as for speeches or monologues. + + use_profanity_filter : typing.Optional[bool] + [BETA] Whether transcripts should have profanities censored with the words '[censored]' + request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -487,6 +506,8 @@ async def main() -> None: "start_time": start_time, "end_time": end_time, "highest_resolution": highest_resolution, + "drop_background_audio": drop_background_audio, + "use_profanity_filter": use_profanity_filter, }, files={ "file": file, @@ -667,7 +688,7 @@ async def get_dubbed_file( ID of the language. request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. 
You can pass in configuration such as `chunk_size`, and more to customize the request and response. Yields ------ @@ -701,7 +722,8 @@ async def main() -> None: ) as _response: try: if 200 <= _response.status_code < 300: - async for _chunk in _response.aiter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size): yield _chunk return await _response.aread() diff --git a/src/elevenlabs/history/client.py b/src/elevenlabs/history/client.py index ce259e3f..6bb1f6f1 100644 --- a/src/elevenlabs/history/client.py +++ b/src/elevenlabs/history/client.py @@ -227,7 +227,7 @@ def get_audio( History item ID to be used, you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs. request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. Yields ------ @@ -252,7 +252,8 @@ def get_audio( ) as _response: try: if 200 <= _response.status_code < 300: - for _chunk in _response.iter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + for _chunk in _response.iter_bytes(chunk_size=_chunk_size): yield _chunk return _response.read() @@ -570,7 +571,7 @@ async def get_audio( History item ID to be used, you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs. request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
Yields ------ @@ -603,7 +604,8 @@ async def main() -> None: ) as _response: try: if 200 <= _response.status_code < 300: - async for _chunk in _response.aiter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size): yield _chunk return await _response.aread() diff --git a/src/elevenlabs/projects/__init__.py b/src/elevenlabs/projects/__init__.py index f3ea2659..7bdfec7e 100644 --- a/src/elevenlabs/projects/__init__.py +++ b/src/elevenlabs/projects/__init__.py @@ -1,2 +1,5 @@ # This file was auto-generated by Fern from our API Definition. +from .types import ProjectsAddRequestTargetAudience + +__all__ = ["ProjectsAddRequestTargetAudience"] diff --git a/src/elevenlabs/projects/client.py b/src/elevenlabs/projects/client.py index 604abcb4..5a944a87 100644 --- a/src/elevenlabs/projects/client.py +++ b/src/elevenlabs/projects/client.py @@ -10,11 +10,13 @@ from json.decoder import JSONDecodeError from ..core.api_error import ApiError from .. 
import core +from .types.projects_add_request_target_audience import ProjectsAddRequestTargetAudience from ..types.add_project_response_model import AddProjectResponseModel from ..types.project_extended_response_model import ProjectExtendedResponseModel from ..core.jsonable_encoder import jsonable_encoder from ..types.edit_project_response_model import EditProjectResponseModel from ..types.project_snapshots_response import ProjectSnapshotsResponse +from ..types.add_chapter_response_model import AddChapterResponseModel from ..types.pronunciation_dictionary_version_locator import PronunciationDictionaryVersionLocator from ..core.serialization import convert_and_respect_annotation_metadata from ..core.client_wrapper import AsyncClientWrapper @@ -91,6 +93,13 @@ def add( quality_preset: typing.Optional[str] = OMIT, title: typing.Optional[str] = OMIT, author: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + genres: typing.Optional[typing.List[str]] = OMIT, + target_audience: typing.Optional[ProjectsAddRequestTargetAudience] = OMIT, + language: typing.Optional[str] = OMIT, + content_type: typing.Optional[str] = OMIT, + original_publication_date: typing.Optional[str] = OMIT, + mature_content: typing.Optional[bool] = OMIT, isbn_number: typing.Optional[str] = OMIT, acx_volume_normalization: typing.Optional[bool] = OMIT, volume_normalization: typing.Optional[bool] = OMIT, @@ -123,9 +132,9 @@ def add( quality_preset : typing.Optional[str] Output quality of the generated audio. Must be one of: standard - standard output format, 128kbps with 44.1kHz sample rate. - high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the character cost by 20%. - ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the character cost by 50%. 
- ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the character cost by 100%. + high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the credit cost by 20%. + ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the credit cost by 50%. + ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the credit cost by 100%. title : typing.Optional[str] @@ -134,6 +143,27 @@ def add( author : typing.Optional[str] An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download. + description : typing.Optional[str] + An optional description of the project. + + genres : typing.Optional[typing.List[str]] + An optional list of genres associated with the project. + + target_audience : typing.Optional[ProjectsAddRequestTargetAudience] + An optional target audience of the project. + + language : typing.Optional[str] + An optional language of the project. Two-letter language code (ISO 639-1). + + content_type : typing.Optional[str] + An optional content type of the project. + + original_publication_date : typing.Optional[str] + An optional original publication date of the project, in the format YYYY-MM-DD or YYYY. + + mature_content : typing.Optional[bool] + An optional mature content of the project. + isbn_number : typing.Optional[str] An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download. 
@@ -180,6 +210,13 @@ def add( "quality_preset": quality_preset, "title": title, "author": author, + "description": description, + "genres": genres, + "target_audience": target_audience, + "language": language, + "content_type": content_type, + "original_publication_date": original_publication_date, + "mature_content": mature_content, "isbn_number": isbn_number, "acx_volume_normalization": acx_volume_normalization, "volume_normalization": volume_normalization, @@ -577,7 +614,7 @@ def stream_audio( Whether to convert the audio to mpeg format. request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. Yields ------ @@ -608,7 +645,8 @@ def stream_audio( ) as _response: try: if 200 <= _response.status_code < 300: - for _chunk in _response.iter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + for _chunk in _response.iter_bytes(chunk_size=_chunk_size): yield _chunk return _response.read() @@ -683,6 +721,82 @@ def stream_archive( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + def add_chapter_to_a_project( + self, + project_id: str, + *, + name: str, + from_url: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AddChapterResponseModel: + """ + Creates a new chapter either as blank or from a URL. + + Parameters + ---------- + project_id : str + The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. + + name : str + The name of the chapter, used for identification only. + + from_url : typing.Optional[str] + An optional URL from which we will extract content to initialize the project. If this is set, 'from_url' must be null. 
If neither 'from_url' or 'from_document' are provided we will initialize the project as blank. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AddChapterResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.projects.add_chapter_to_a_project( + project_id="21m00Tcm4TlvDq8ikWAM", + name="name", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/chapters/add", + method="POST", + json={ + "name": name, + "from_url": from_url, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AddChapterResponseModel, + construct_type( + type_=AddChapterResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + def update_pronunciation_dictionaries( self, project_id: str, @@ -840,6 +954,13 @@ async def add( quality_preset: typing.Optional[str] = OMIT, title: typing.Optional[str] = OMIT, author: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + genres: typing.Optional[typing.List[str]] = OMIT, + target_audience: typing.Optional[ProjectsAddRequestTargetAudience] = OMIT, + language: typing.Optional[str] = OMIT, + content_type: typing.Optional[str] = OMIT, + original_publication_date: typing.Optional[str] = OMIT, + mature_content: typing.Optional[bool] = OMIT, isbn_number: typing.Optional[str] = OMIT, acx_volume_normalization: 
typing.Optional[bool] = OMIT, volume_normalization: typing.Optional[bool] = OMIT, @@ -872,9 +993,9 @@ async def add( quality_preset : typing.Optional[str] Output quality of the generated audio. Must be one of: standard - standard output format, 128kbps with 44.1kHz sample rate. - high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the character cost by 20%. - ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the character cost by 50%. - ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the character cost by 100%. + high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the credit cost by 20%. + ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the credit cost by 50%. + ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the credit cost by 100%. title : typing.Optional[str] @@ -883,6 +1004,27 @@ async def add( author : typing.Optional[str] An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download. + description : typing.Optional[str] + An optional description of the project. + + genres : typing.Optional[typing.List[str]] + An optional list of genres associated with the project. + + target_audience : typing.Optional[ProjectsAddRequestTargetAudience] + An optional target audience of the project. + + language : typing.Optional[str] + An optional language of the project. Two-letter language code (ISO 639-1). 
+ + content_type : typing.Optional[str] + An optional content type of the project. + + original_publication_date : typing.Optional[str] + An optional original publication date of the project, in the format YYYY-MM-DD or YYYY. + + mature_content : typing.Optional[bool] + An optional mature content of the project. + isbn_number : typing.Optional[str] An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download. @@ -937,6 +1079,13 @@ async def main() -> None: "quality_preset": quality_preset, "title": title, "author": author, + "description": description, + "genres": genres, + "target_audience": target_audience, + "language": language, + "content_type": content_type, + "original_publication_date": original_publication_date, + "mature_content": mature_content, "isbn_number": isbn_number, "acx_volume_normalization": acx_volume_normalization, "volume_normalization": volume_normalization, @@ -1374,7 +1523,7 @@ async def stream_audio( Whether to convert the audio to mpeg format. request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
Yields ------ @@ -1413,7 +1562,8 @@ async def main() -> None: ) as _response: try: if 200 <= _response.status_code < 300: - async for _chunk in _response.aiter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size): yield _chunk return await _response.aread() @@ -1496,6 +1646,90 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + async def add_chapter_to_a_project( + self, + project_id: str, + *, + name: str, + from_url: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AddChapterResponseModel: + """ + Creates a new chapter either as blank or from a URL. + + Parameters + ---------- + project_id : str + The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. + + name : str + The name of the chapter, used for identification only. + + from_url : typing.Optional[str] + An optional URL from which we will extract content to initialize the project. If this is set, 'from_url' must be null. If neither 'from_url' or 'from_document' are provided we will initialize the project as blank. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AddChapterResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.projects.add_chapter_to_a_project( + project_id="21m00Tcm4TlvDq8ikWAM", + name="name", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/chapters/add", + method="POST", + json={ + "name": name, + "from_url": from_url, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AddChapterResponseModel, + construct_type( + type_=AddChapterResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + async def update_pronunciation_dictionaries( self, project_id: str, diff --git a/src/elevenlabs/projects/types/__init__.py b/src/elevenlabs/projects/types/__init__.py new file mode 100644 index 00000000..42c21d40 --- /dev/null +++ b/src/elevenlabs/projects/types/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .projects_add_request_target_audience import ProjectsAddRequestTargetAudience + +__all__ = ["ProjectsAddRequestTargetAudience"] diff --git a/src/elevenlabs/projects/types/projects_add_request_target_audience.py b/src/elevenlabs/projects/types/projects_add_request_target_audience.py new file mode 100644 index 00000000..74c8b589 --- /dev/null +++ b/src/elevenlabs/projects/types/projects_add_request_target_audience.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ProjectsAddRequestTargetAudience = typing.Union[ + typing.Literal["children", "young adult", "adult", "all ages"], typing.Any +] diff --git a/src/elevenlabs/samples/client.py b/src/elevenlabs/samples/client.py index 37a35981..6014a8f8 100644 --- a/src/elevenlabs/samples/client.py +++ b/src/elevenlabs/samples/client.py @@ -94,7 +94,7 @@ def get_audio( Sample ID to be used, you can use GET https://api.elevenlabs.io/v1/voices/{voice_id} to list all the available samples for a voice. request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. Yields ------ @@ -120,7 +120,8 @@ def get_audio( ) as _response: try: if 200 <= _response.status_code < 300: - for _chunk in _response.iter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + for _chunk in _response.iter_bytes(chunk_size=_chunk_size): yield _chunk return _response.read() @@ -230,7 +231,7 @@ async def get_audio( Sample ID to be used, you can use GET https://api.elevenlabs.io/v1/voices/{voice_id} to list all the available samples for a voice. request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
Yields ------ @@ -264,7 +265,8 @@ async def main() -> None: ) as _response: try: if 200 <= _response.status_code < 300: - async for _chunk in _response.aiter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size): yield _chunk return await _response.aread() diff --git a/src/elevenlabs/speech_to_speech/client.py b/src/elevenlabs/speech_to_speech/client.py index 5aa8427f..981ca920 100644 --- a/src/elevenlabs/speech_to_speech/client.py +++ b/src/elevenlabs/speech_to_speech/client.py @@ -33,6 +33,7 @@ def convert( model_id: typing.Optional[str] = OMIT, voice_settings: typing.Optional[str] = OMIT, seed: typing.Optional[int] = OMIT, + remove_background_noise: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.Iterator[bytes]: """ @@ -64,8 +65,11 @@ def convert( seed : typing.Optional[int] If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. + remove_background_noise : typing.Optional[bool] + If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. + request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
Yields ------ @@ -98,6 +102,7 @@ def convert( "model_id": model_id, "voice_settings": voice_settings, "seed": seed, + "remove_background_noise": remove_background_noise, }, files={ "audio": audio, @@ -107,7 +112,8 @@ def convert( ) as _response: try: if 200 <= _response.status_code < 300: - for _chunk in _response.iter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + for _chunk in _response.iter_bytes(chunk_size=_chunk_size): yield _chunk return _response.read() @@ -137,6 +143,7 @@ def convert_as_stream( model_id: typing.Optional[str] = OMIT, voice_settings: typing.Optional[str] = OMIT, seed: typing.Optional[int] = OMIT, + remove_background_noise: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.Iterator[bytes]: """ @@ -179,8 +186,11 @@ def convert_as_stream( seed : typing.Optional[int] If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. + remove_background_noise : typing.Optional[bool] + If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. + request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
Yields ------ @@ -213,6 +223,7 @@ def convert_as_stream( "model_id": model_id, "voice_settings": voice_settings, "seed": seed, + "remove_background_noise": remove_background_noise, }, files={ "audio": audio, @@ -222,7 +233,8 @@ def convert_as_stream( ) as _response: try: if 200 <= _response.status_code < 300: - for _chunk in _response.iter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + for _chunk in _response.iter_bytes(chunk_size=_chunk_size): yield _chunk return _response.read() @@ -257,6 +269,7 @@ async def convert( model_id: typing.Optional[str] = OMIT, voice_settings: typing.Optional[str] = OMIT, seed: typing.Optional[int] = OMIT, + remove_background_noise: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.AsyncIterator[bytes]: """ @@ -288,8 +301,11 @@ async def convert( seed : typing.Optional[int] If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. + remove_background_noise : typing.Optional[bool] + If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. + request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
Yields ------ @@ -330,6 +346,7 @@ async def main() -> None: "model_id": model_id, "voice_settings": voice_settings, "seed": seed, + "remove_background_noise": remove_background_noise, }, files={ "audio": audio, @@ -339,7 +356,8 @@ async def main() -> None: ) as _response: try: if 200 <= _response.status_code < 300: - async for _chunk in _response.aiter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size): yield _chunk return await _response.aread() @@ -369,6 +387,7 @@ async def convert_as_stream( model_id: typing.Optional[str] = OMIT, voice_settings: typing.Optional[str] = OMIT, seed: typing.Optional[int] = OMIT, + remove_background_noise: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.AsyncIterator[bytes]: """ @@ -411,8 +430,11 @@ async def convert_as_stream( seed : typing.Optional[int] If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. + remove_background_noise : typing.Optional[bool] + If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. + request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
Yields ------ @@ -453,6 +475,7 @@ async def main() -> None: "model_id": model_id, "voice_settings": voice_settings, "seed": seed, + "remove_background_noise": remove_background_noise, }, files={ "audio": audio, @@ -462,7 +485,8 @@ async def main() -> None: ) as _response: try: if 200 <= _response.status_code < 300: - async for _chunk in _response.aiter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size): yield _chunk return await _response.aread() diff --git a/src/elevenlabs/text_to_sound_effects/client.py b/src/elevenlabs/text_to_sound_effects/client.py index b9eb26df..af994638 100644 --- a/src/elevenlabs/text_to_sound_effects/client.py +++ b/src/elevenlabs/text_to_sound_effects/client.py @@ -41,7 +41,7 @@ def convert( A higher prompt influence makes your generation follow the prompt more closely while also making generations less variable. Must be a value between 0 and 1. Defaults to 0.3. request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. Yields ------ @@ -74,7 +74,8 @@ def convert( ) as _response: try: if 200 <= _response.status_code < 300: - for _chunk in _response.iter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + for _chunk in _response.iter_bytes(chunk_size=_chunk_size): yield _chunk return _response.read() @@ -121,7 +122,7 @@ async def convert( A higher prompt influence makes your generation follow the prompt more closely while also making generations less variable. Must be a value between 0 and 1. Defaults to 0.3. request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. 
You can pass in configuration such as `chunk_size`, and more to customize the request and response. Yields ------ @@ -162,7 +163,8 @@ async def main() -> None: ) as _response: try: if 200 <= _response.status_code < 300: - async for _chunk in _response.aiter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size): yield _chunk return await _response.aread() diff --git a/src/elevenlabs/text_to_speech/__init__.py b/src/elevenlabs/text_to_speech/__init__.py index 518f9a32..3fadc417 100644 --- a/src/elevenlabs/text_to_speech/__init__.py +++ b/src/elevenlabs/text_to_speech/__init__.py @@ -1,5 +1,15 @@ # This file was auto-generated by Fern from our API Definition. -from .types import SendMessage +from .types import ( + BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization, + BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization, + BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization, + BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization, +) -__all__ = ["SendMessage"] +__all__ = [ + "BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization", + "BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization", + "BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization", + "BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization", +] diff --git a/src/elevenlabs/text_to_speech/client.py b/src/elevenlabs/text_to_speech/client.py index 87e8193e..6056554e 100644 --- a/src/elevenlabs/text_to_speech/client.py +++ b/src/elevenlabs/text_to_speech/client.py @@ -6,6 +6,9 @@ from ..types.output_format import OutputFormat from ..types.voice_settings import VoiceSettings from ..types.pronunciation_dictionary_version_locator import 
PronunciationDictionaryVersionLocator +from .types.body_text_to_speech_v_1_text_to_speech_voice_id_post_apply_text_normalization import ( + BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization, +) from ..core.request_options import RequestOptions from ..core.jsonable_encoder import jsonable_encoder from ..core.serialization import convert_and_respect_annotation_metadata @@ -14,6 +17,15 @@ from ..core.unchecked_base_model import construct_type from json.decoder import JSONDecodeError from ..core.api_error import ApiError +from .types.body_text_to_speech_with_timestamps_v_1_text_to_speech_voice_id_with_timestamps_post_apply_text_normalization import ( + BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization, +) +from .types.body_text_to_speech_streaming_v_1_text_to_speech_voice_id_stream_post_apply_text_normalization import ( + BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization, +) +from .types.body_text_to_speech_streaming_with_timestamps_v_1_text_to_speech_voice_id_stream_with_timestamps_post_apply_text_normalization import ( + BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization, +) from ..core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters @@ -43,6 +55,10 @@ def convert( next_text: typing.Optional[str] = OMIT, previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT, next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT, + use_pvc_as_ivc: typing.Optional[bool] = OMIT, + apply_text_normalization: typing.Optional[ + BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization + ] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.Iterator[bytes]: """ @@ -92,8 +108,14 @@ def convert( next_request_ids : typing.Optional[typing.Sequence[str]] A list of request_id of the samples that were generated before this generation. 
Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send. + use_pvc_as_ivc : typing.Optional[bool] + If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions. + + apply_text_normalization : typing.Optional[BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization] + This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model. + request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
Yields ------ @@ -144,13 +166,16 @@ def convert( "next_text": next_text, "previous_request_ids": previous_request_ids, "next_request_ids": next_request_ids, + "use_pvc_as_ivc": use_pvc_as_ivc, + "apply_text_normalization": apply_text_normalization, }, request_options=request_options, omit=OMIT, ) as _response: try: if 200 <= _response.status_code < 300: - for _chunk in _response.iter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + for _chunk in _response.iter_bytes(chunk_size=_chunk_size): yield _chunk return _response.read() @@ -188,6 +213,10 @@ def convert_with_timestamps( next_text: typing.Optional[str] = OMIT, previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT, next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT, + use_pvc_as_ivc: typing.Optional[bool] = OMIT, + apply_text_normalization: typing.Optional[ + BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization + ] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.Optional[typing.Any]: """ @@ -237,6 +266,12 @@ def convert_with_timestamps( next_request_ids : typing.Optional[typing.Sequence[str]] A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send. + use_pvc_as_ivc : typing.Optional[bool] + If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions. 
+ + apply_text_normalization : typing.Optional[BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization] + This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model. + request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -282,6 +317,8 @@ def convert_with_timestamps( "next_text": next_text, "previous_request_ids": previous_request_ids, "next_request_ids": next_request_ids, + "use_pvc_as_ivc": use_pvc_as_ivc, + "apply_text_normalization": apply_text_normalization, }, request_options=request_options, omit=OMIT, @@ -329,6 +366,10 @@ def convert_as_stream( next_text: typing.Optional[str] = OMIT, previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT, next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT, + use_pvc_as_ivc: typing.Optional[bool] = OMIT, + apply_text_normalization: typing.Optional[ + BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization + ] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.Iterator[bytes]: """ @@ -378,8 +419,14 @@ def convert_as_stream( next_request_ids : typing.Optional[typing.Sequence[str]] A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send. + use_pvc_as_ivc : typing.Optional[bool] + If true, we won't use PVC version of the voice for the generation but the IVC version. 
This is a temporary workaround for higher latency in PVC versions. + + apply_text_normalization : typing.Optional[BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization] + This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model. + request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. Yields ------ @@ -430,13 +477,16 @@ def convert_as_stream( "next_text": next_text, "previous_request_ids": previous_request_ids, "next_request_ids": next_request_ids, + "use_pvc_as_ivc": use_pvc_as_ivc, + "apply_text_normalization": apply_text_normalization, }, request_options=request_options, omit=OMIT, ) as _response: try: if 200 <= _response.status_code < 300: - for _chunk in _response.iter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + for _chunk in _response.iter_bytes(chunk_size=_chunk_size): yield _chunk return _response.read() @@ -474,6 +524,10 @@ def stream_with_timestamps( next_text: typing.Optional[str] = OMIT, previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT, next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT, + use_pvc_as_ivc: typing.Optional[bool] = OMIT, + apply_text_normalization: typing.Optional[ + BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization + ] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> None: """ @@ -523,6 +577,12 @@ def stream_with_timestamps( next_request_ids : typing.Optional[typing.Sequence[str]] A list of 
request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send. + use_pvc_as_ivc : typing.Optional[bool] + If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions. + + apply_text_normalization : typing.Optional[BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization] + This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
@@ -567,6 +627,8 @@ def stream_with_timestamps( "next_text": next_text, "previous_request_ids": previous_request_ids, "next_request_ids": next_request_ids, + "use_pvc_as_ivc": use_pvc_as_ivc, + "apply_text_normalization": apply_text_normalization, }, request_options=request_options, omit=OMIT, @@ -613,6 +675,10 @@ async def convert( next_text: typing.Optional[str] = OMIT, previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT, next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT, + use_pvc_as_ivc: typing.Optional[bool] = OMIT, + apply_text_normalization: typing.Optional[ + BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization + ] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.AsyncIterator[bytes]: """ @@ -662,8 +728,14 @@ async def convert( next_request_ids : typing.Optional[typing.Sequence[str]] A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send. + use_pvc_as_ivc : typing.Optional[bool] + If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions. + + apply_text_normalization : typing.Optional[BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization] + This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model. + request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. Yields ------ @@ -722,13 +794,16 @@ async def main() -> None: "next_text": next_text, "previous_request_ids": previous_request_ids, "next_request_ids": next_request_ids, + "use_pvc_as_ivc": use_pvc_as_ivc, + "apply_text_normalization": apply_text_normalization, }, request_options=request_options, omit=OMIT, ) as _response: try: if 200 <= _response.status_code < 300: - async for _chunk in _response.aiter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size): yield _chunk return await _response.aread() @@ -766,6 +841,10 @@ async def convert_with_timestamps( next_text: typing.Optional[str] = OMIT, previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT, next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT, + use_pvc_as_ivc: typing.Optional[bool] = OMIT, + apply_text_normalization: typing.Optional[ + BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization + ] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.Optional[typing.Any]: """ @@ -815,6 +894,12 @@ async def convert_with_timestamps( next_request_ids : typing.Optional[typing.Sequence[str]] A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send. + use_pvc_as_ivc : typing.Optional[bool] + If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions. 
+ + apply_text_normalization : typing.Optional[BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization] + This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model. + request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -868,6 +953,8 @@ async def main() -> None: "next_text": next_text, "previous_request_ids": previous_request_ids, "next_request_ids": next_request_ids, + "use_pvc_as_ivc": use_pvc_as_ivc, + "apply_text_normalization": apply_text_normalization, }, request_options=request_options, omit=OMIT, @@ -915,6 +1002,10 @@ async def convert_as_stream( next_text: typing.Optional[str] = OMIT, previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT, next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT, + use_pvc_as_ivc: typing.Optional[bool] = OMIT, + apply_text_normalization: typing.Optional[ + BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization + ] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.AsyncIterator[bytes]: """ @@ -964,8 +1055,14 @@ async def convert_as_stream( next_request_ids : typing.Optional[typing.Sequence[str]] A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send. + use_pvc_as_ivc : typing.Optional[bool] + If true, we won't use PVC version of the voice for the generation but the IVC version. 
This is a temporary workaround for higher latency in PVC versions. + + apply_text_normalization : typing.Optional[BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization] + This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model. + request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. Yields ------ @@ -1024,13 +1121,16 @@ async def main() -> None: "next_text": next_text, "previous_request_ids": previous_request_ids, "next_request_ids": next_request_ids, + "use_pvc_as_ivc": use_pvc_as_ivc, + "apply_text_normalization": apply_text_normalization, }, request_options=request_options, omit=OMIT, ) as _response: try: if 200 <= _response.status_code < 300: - async for _chunk in _response.aiter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size): yield _chunk return await _response.aread() @@ -1068,6 +1168,10 @@ async def stream_with_timestamps( next_text: typing.Optional[str] = OMIT, previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT, next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT, + use_pvc_as_ivc: typing.Optional[bool] = OMIT, + apply_text_normalization: typing.Optional[ + BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization + ] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> None: """ @@ -1117,6 +1221,12 @@ async def stream_with_timestamps( next_request_ids : 
typing.Optional[typing.Sequence[str]] A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send. + use_pvc_as_ivc : typing.Optional[bool] + If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions. + + apply_text_normalization : typing.Optional[BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization] + This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model. + request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -1169,6 +1279,8 @@ async def main() -> None: "next_text": next_text, "previous_request_ids": previous_request_ids, "next_request_ids": next_request_ids, + "use_pvc_as_ivc": use_pvc_as_ivc, + "apply_text_normalization": apply_text_normalization, }, request_options=request_options, omit=OMIT, diff --git a/src/elevenlabs/text_to_speech/types/__init__.py b/src/elevenlabs/text_to_speech/types/__init__.py index d770d9b9..b05354ee 100644 --- a/src/elevenlabs/text_to_speech/types/__init__.py +++ b/src/elevenlabs/text_to_speech/types/__init__.py @@ -1,5 +1,21 @@ # This file was auto-generated by Fern from our API Definition. 
-from .send_message import SendMessage +from .body_text_to_speech_streaming_v_1_text_to_speech_voice_id_stream_post_apply_text_normalization import ( + BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization, +) +from .body_text_to_speech_streaming_with_timestamps_v_1_text_to_speech_voice_id_stream_with_timestamps_post_apply_text_normalization import ( + BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization, +) +from .body_text_to_speech_v_1_text_to_speech_voice_id_post_apply_text_normalization import ( + BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization, +) +from .body_text_to_speech_with_timestamps_v_1_text_to_speech_voice_id_with_timestamps_post_apply_text_normalization import ( + BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization, +) -__all__ = ["SendMessage"] +__all__ = [ + "BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization", + "BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization", + "BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization", + "BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization", +] diff --git a/src/elevenlabs/text_to_speech/types/body_text_to_speech_streaming_v_1_text_to_speech_voice_id_stream_post_apply_text_normalization.py b/src/elevenlabs/text_to_speech/types/body_text_to_speech_streaming_v_1_text_to_speech_voice_id_stream_post_apply_text_normalization.py new file mode 100644 index 00000000..42f98101 --- /dev/null +++ b/src/elevenlabs/text_to_speech/types/body_text_to_speech_streaming_v_1_text_to_speech_voice_id_stream_post_apply_text_normalization.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization = typing.Union[ + typing.Literal["auto", "on", "off"], typing.Any +] diff --git a/src/elevenlabs/text_to_speech/types/body_text_to_speech_streaming_with_timestamps_v_1_text_to_speech_voice_id_stream_with_timestamps_post_apply_text_normalization.py b/src/elevenlabs/text_to_speech/types/body_text_to_speech_streaming_with_timestamps_v_1_text_to_speech_voice_id_stream_with_timestamps_post_apply_text_normalization.py new file mode 100644 index 00000000..7cbbc3b7 --- /dev/null +++ b/src/elevenlabs/text_to_speech/types/body_text_to_speech_streaming_with_timestamps_v_1_text_to_speech_voice_id_stream_with_timestamps_post_apply_text_normalization.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization = ( + typing.Union[typing.Literal["auto", "on", "off"], typing.Any] +) diff --git a/src/elevenlabs/text_to_speech/types/body_text_to_speech_v_1_text_to_speech_voice_id_post_apply_text_normalization.py b/src/elevenlabs/text_to_speech/types/body_text_to_speech_v_1_text_to_speech_voice_id_post_apply_text_normalization.py new file mode 100644 index 00000000..42f873cf --- /dev/null +++ b/src/elevenlabs/text_to_speech/types/body_text_to_speech_v_1_text_to_speech_voice_id_post_apply_text_normalization.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization = typing.Union[ + typing.Literal["auto", "on", "off"], typing.Any +] diff --git a/src/elevenlabs/text_to_speech/types/body_text_to_speech_with_timestamps_v_1_text_to_speech_voice_id_with_timestamps_post_apply_text_normalization.py b/src/elevenlabs/text_to_speech/types/body_text_to_speech_with_timestamps_v_1_text_to_speech_voice_id_with_timestamps_post_apply_text_normalization.py new file mode 100644 index 00000000..b2e12a10 --- /dev/null +++ b/src/elevenlabs/text_to_speech/types/body_text_to_speech_with_timestamps_v_1_text_to_speech_voice_id_with_timestamps_post_apply_text_normalization.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization = typing.Union[ + typing.Literal["auto", "on", "off"], typing.Any +] diff --git a/src/elevenlabs/text_to_speech/types/send_message.py b/src/elevenlabs/text_to_speech/types/send_message.py deleted file mode 100644 index d4d0d409..00000000 --- a/src/elevenlabs/text_to_speech/types/send_message.py +++ /dev/null @@ -1,8 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from ...types.initialize_connection import InitializeConnection -from ...types.send_text import SendText -from ...types.close_connection import CloseConnection - -SendMessage = typing.Union[InitializeConnection, SendText, CloseConnection] diff --git a/src/elevenlabs/text_to_voice/__init__.py b/src/elevenlabs/text_to_voice/__init__.py new file mode 100644 index 00000000..f3ea2659 --- /dev/null +++ b/src/elevenlabs/text_to_voice/__init__.py @@ -0,0 +1,2 @@ +# This file was auto-generated by Fern from our API Definition. 
+ diff --git a/src/elevenlabs/text_to_voice/client.py b/src/elevenlabs/text_to_voice/client.py new file mode 100644 index 00000000..5af3934c --- /dev/null +++ b/src/elevenlabs/text_to_voice/client.py @@ -0,0 +1,354 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..core.request_options import RequestOptions +from ..types.voice_previews_response_model import VoicePreviewsResponseModel +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.voice import Voice +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class TextToVoiceClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def create_previews( + self, *, voice_description: str, text: str, request_options: typing.Optional[RequestOptions] = None + ) -> VoicePreviewsResponseModel: + """ + Generate a custom voice based on voice description. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. If you like the a voice previewand want to create the voice call /v1/text-to-voice/create-voice-from-preview with the generated_voice_id to create the voice. + + Parameters + ---------- + voice_description : str + Description to use for the created voice. + + text : str + Text to generate, text length has to be between 100 and 1000. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + VoicePreviewsResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.text_to_voice.create_previews( + voice_description="voice_description", + text="text", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v1/text-to-voice/create-previews", + method="POST", + json={ + "voice_description": voice_description, + "text": text, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + VoicePreviewsResponseModel, + construct_type( + type_=VoicePreviewsResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_voice_from_preview( + self, + *, + voice_name: str, + voice_description: str, + generated_voice_id: str, + labels: typing.Optional[typing.Dict[str, str]] = OMIT, + played_not_selected_voice_ids: typing.Optional[typing.Sequence[str]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> Voice: + """ + Create a voice from previously generated voice preview. This endpoint should be called after you fetched a generated_voice_id using /v1/text-to-voice/create-previews. + + Parameters + ---------- + voice_name : str + Name to use for the created voice. + + voice_description : str + Description to use for the created voice. 
+ + generated_voice_id : str + The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet. + + labels : typing.Optional[typing.Dict[str, str]] + Optional, metadata to add to the created voice. Defaults to None. + + played_not_selected_voice_ids : typing.Optional[typing.Sequence[str]] + List of voice ids that the user has played but not selected. Used for RLHF. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + Voice + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.text_to_voice.create_voice_from_preview( + voice_name="voice_name", + voice_description="voice_description", + generated_voice_id="generated_voice_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v1/text-to-voice/create-voice-from-preview", + method="POST", + json={ + "voice_name": voice_name, + "voice_description": voice_description, + "generated_voice_id": generated_voice_id, + "labels": labels, + "played_not_selected_voice_ids": played_not_selected_voice_ids, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Voice, + construct_type( + type_=Voice, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncTextToVoiceClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + 
async def create_previews( + self, *, voice_description: str, text: str, request_options: typing.Optional[RequestOptions] = None + ) -> VoicePreviewsResponseModel: + """ + Generate a custom voice based on voice description. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. If you like the a voice previewand want to create the voice call /v1/text-to-voice/create-voice-from-preview with the generated_voice_id to create the voice. + + Parameters + ---------- + voice_description : str + Description to use for the created voice. + + text : str + Text to generate, text length has to be between 100 and 1000. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + VoicePreviewsResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.text_to_voice.create_previews( + voice_description="voice_description", + text="text", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v1/text-to-voice/create-previews", + method="POST", + json={ + "voice_description": voice_description, + "text": text, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + VoicePreviewsResponseModel, + construct_type( + type_=VoicePreviewsResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise 
ApiError(status_code=_response.status_code, body=_response_json) + + async def create_voice_from_preview( + self, + *, + voice_name: str, + voice_description: str, + generated_voice_id: str, + labels: typing.Optional[typing.Dict[str, str]] = OMIT, + played_not_selected_voice_ids: typing.Optional[typing.Sequence[str]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> Voice: + """ + Create a voice from previously generated voice preview. This endpoint should be called after you fetched a generated_voice_id using /v1/text-to-voice/create-previews. + + Parameters + ---------- + voice_name : str + Name to use for the created voice. + + voice_description : str + Description to use for the created voice. + + generated_voice_id : str + The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet. + + labels : typing.Optional[typing.Dict[str, str]] + Optional, metadata to add to the created voice. Defaults to None. + + played_not_selected_voice_ids : typing.Optional[typing.Sequence[str]] + List of voice ids that the user has played but not selected. Used for RLHF. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + Voice + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.text_to_voice.create_voice_from_preview( + voice_name="voice_name", + voice_description="voice_description", + generated_voice_id="generated_voice_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v1/text-to-voice/create-voice-from-preview", + method="POST", + json={ + "voice_name": voice_name, + "voice_description": voice_description, + "generated_voice_id": generated_voice_id, + "labels": labels, + "played_not_selected_voice_ids": played_not_selected_voice_ids, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Voice, + construct_type( + type_=Voice, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/elevenlabs/types/__init__.py b/src/elevenlabs/types/__init__.py index 3cfa2ee3..2363ce41 100644 --- a/src/elevenlabs/types/__init__.py +++ b/src/elevenlabs/types/__init__.py @@ -1,21 +1,21 @@ # This file was auto-generated by Fern from our API Definition. 
from .accent import Accent +from .add_chapter_response_model import AddChapterResponseModel from .add_project_response_model import AddProjectResponseModel from .add_pronunciation_dictionary_response_model import AddPronunciationDictionaryResponseModel from .add_pronunciation_dictionary_rules_response_model import AddPronunciationDictionaryRulesResponseModel +from .add_voice_ivc_response_model import AddVoiceIvcResponseModel from .add_voice_response_model import AddVoiceResponseModel from .age import Age from .audio_native_create_project_response_model import AudioNativeCreateProjectResponseModel from .audio_native_get_embed_code_response_model import AudioNativeGetEmbedCodeResponseModel -from .audio_output import AudioOutput -from .category import Category +from .breakdown_types import BreakdownTypes from .chapter_response import ChapterResponse from .chapter_snapshot_response import ChapterSnapshotResponse from .chapter_snapshots_response import ChapterSnapshotsResponse from .chapter_state import ChapterState from .chapter_statistics_response import ChapterStatisticsResponse -from .close_connection import CloseConnection from .currency import Currency from .do_dubbing_response import DoDubbingResponse from .dubbing_metadata_response import DubbingMetadataResponse @@ -24,11 +24,11 @@ from .extended_subscription_response_model_character_refresh_period import ( ExtendedSubscriptionResponseModelCharacterRefreshPeriod, ) +from .extended_subscription_response_model_currency import ExtendedSubscriptionResponseModelCurrency from .feedback_item import FeedbackItem from .fine_tuning_response import FineTuningResponse from .fine_tuning_response_model_state_value import FineTuningResponseModelStateValue from .gender import Gender -from .generation_config import GenerationConfig from .get_chapters_response import GetChaptersResponse from .get_library_voices_response import GetLibraryVoicesResponse from .get_projects_response import GetProjectsResponse @@ -41,41 +41,47 @@ from 
.history_alignments_response_model import HistoryAlignmentsResponseModel from .history_item import HistoryItem from .http_validation_error import HttpValidationError -from .initialize_connection import InitializeConnection from .invoice import Invoice from .language_response import LanguageResponse from .library_voice_response import LibraryVoiceResponse +from .library_voice_response_model_category import LibraryVoiceResponseModelCategory from .manual_verification_file_response import ManualVerificationFileResponse from .manual_verification_response import ManualVerificationResponse from .model import Model -from .normalized_alignment import NormalizedAlignment +from .model_rates_response_model import ModelRatesResponseModel +from .model_response_model_concurrency_group import ModelResponseModelConcurrencyGroup from .optimize_streaming_latency import OptimizeStreamingLatency from .output_format import OutputFormat from .profile_page_response_model import ProfilePageResponseModel from .project_extended_response_model import ProjectExtendedResponseModel +from .project_extended_response_model_access_level import ProjectExtendedResponseModelAccessLevel +from .project_extended_response_model_quality_preset import ProjectExtendedResponseModelQualityPreset +from .project_extended_response_model_target_audience import ProjectExtendedResponseModelTargetAudience from .project_response import ProjectResponse +from .project_response_model_access_level import ProjectResponseModelAccessLevel +from .project_response_model_target_audience import ProjectResponseModelTargetAudience from .project_snapshot_response import ProjectSnapshotResponse from .project_snapshot_upload_response_model import ProjectSnapshotUploadResponseModel +from .project_snapshot_upload_response_model_status import ProjectSnapshotUploadResponseModelStatus from .project_snapshots_response import ProjectSnapshotsResponse from .project_state import ProjectState from 
.pronunciation_dictionary_alias_rule_request_model import PronunciationDictionaryAliasRuleRequestModel from .pronunciation_dictionary_phoneme_rule_request_model import PronunciationDictionaryPhonemeRuleRequestModel from .pronunciation_dictionary_version_locator import PronunciationDictionaryVersionLocator -from .realtime_voice_settings import RealtimeVoiceSettings +from .pronunciation_dictionary_version_response_model import PronunciationDictionaryVersionResponseModel from .recording_response import RecordingResponse from .remove_pronunciation_dictionary_rules_response_model import RemovePronunciationDictionaryRulesResponseModel from .review_status import ReviewStatus -from .send_text import SendText -from .source import Source from .speech_history_item_response import SpeechHistoryItemResponse +from .speech_history_item_response_model_source import SpeechHistoryItemResponseModelSource from .speech_history_item_response_model_voice_category import SpeechHistoryItemResponseModelVoiceCategory from .sso_provider_response_model import SsoProviderResponseModel from .sso_provider_response_model_provider_type import SsoProviderResponseModelProviderType -from .status import Status from .subscription import Subscription from .subscription_response import SubscriptionResponse from .subscription_response_model_billing_period import SubscriptionResponseModelBillingPeriod from .subscription_response_model_character_refresh_period import SubscriptionResponseModelCharacterRefreshPeriod +from .subscription_response_model_currency import SubscriptionResponseModelCurrency from .subscription_status import SubscriptionStatus from .text_to_speech_as_stream_request import TextToSpeechAsStreamRequest from .usage_characters_response_model import UsageCharactersResponseModel @@ -86,41 +92,46 @@ from .voice import Voice from .voice_generation_parameter_option_response import VoiceGenerationParameterOptionResponse from .voice_generation_parameter_response import 
VoiceGenerationParameterResponse +from .voice_preview_response_model import VoicePreviewResponseModel +from .voice_previews_response_model import VoicePreviewsResponseModel +from .voice_response_model_category import VoiceResponseModelCategory from .voice_response_model_safety_control import VoiceResponseModelSafetyControl from .voice_sample import VoiceSample from .voice_settings import VoiceSettings +from .voice_sharing_moderation_check_response_model import VoiceSharingModerationCheckResponseModel from .voice_sharing_response import VoiceSharingResponse +from .voice_sharing_response_model_category import VoiceSharingResponseModelCategory from .voice_sharing_state import VoiceSharingState from .voice_verification_response import VoiceVerificationResponse __all__ = [ "Accent", + "AddChapterResponseModel", "AddProjectResponseModel", "AddPronunciationDictionaryResponseModel", "AddPronunciationDictionaryRulesResponseModel", + "AddVoiceIvcResponseModel", "AddVoiceResponseModel", "Age", "AudioNativeCreateProjectResponseModel", "AudioNativeGetEmbedCodeResponseModel", - "AudioOutput", - "Category", + "BreakdownTypes", "ChapterResponse", "ChapterSnapshotResponse", "ChapterSnapshotsResponse", "ChapterState", "ChapterStatisticsResponse", - "CloseConnection", "Currency", "DoDubbingResponse", "DubbingMetadataResponse", "EditProjectResponseModel", "ExtendedSubscriptionResponseModelBillingPeriod", "ExtendedSubscriptionResponseModelCharacterRefreshPeriod", + "ExtendedSubscriptionResponseModelCurrency", "FeedbackItem", "FineTuningResponse", "FineTuningResponseModelStateValue", "Gender", - "GenerationConfig", "GetChaptersResponse", "GetLibraryVoicesResponse", "GetProjectsResponse", @@ -133,41 +144,47 @@ "HistoryAlignmentsResponseModel", "HistoryItem", "HttpValidationError", - "InitializeConnection", "Invoice", "LanguageResponse", "LibraryVoiceResponse", + "LibraryVoiceResponseModelCategory", "ManualVerificationFileResponse", "ManualVerificationResponse", "Model", - 
"NormalizedAlignment", + "ModelRatesResponseModel", + "ModelResponseModelConcurrencyGroup", "OptimizeStreamingLatency", "OutputFormat", "ProfilePageResponseModel", "ProjectExtendedResponseModel", + "ProjectExtendedResponseModelAccessLevel", + "ProjectExtendedResponseModelQualityPreset", + "ProjectExtendedResponseModelTargetAudience", "ProjectResponse", + "ProjectResponseModelAccessLevel", + "ProjectResponseModelTargetAudience", "ProjectSnapshotResponse", "ProjectSnapshotUploadResponseModel", + "ProjectSnapshotUploadResponseModelStatus", "ProjectSnapshotsResponse", "ProjectState", "PronunciationDictionaryAliasRuleRequestModel", "PronunciationDictionaryPhonemeRuleRequestModel", "PronunciationDictionaryVersionLocator", - "RealtimeVoiceSettings", + "PronunciationDictionaryVersionResponseModel", "RecordingResponse", "RemovePronunciationDictionaryRulesResponseModel", "ReviewStatus", - "SendText", - "Source", "SpeechHistoryItemResponse", + "SpeechHistoryItemResponseModelSource", "SpeechHistoryItemResponseModelVoiceCategory", "SsoProviderResponseModel", "SsoProviderResponseModelProviderType", - "Status", "Subscription", "SubscriptionResponse", "SubscriptionResponseModelBillingPeriod", "SubscriptionResponseModelCharacterRefreshPeriod", + "SubscriptionResponseModelCurrency", "SubscriptionStatus", "TextToSpeechAsStreamRequest", "UsageCharactersResponseModel", @@ -178,10 +195,15 @@ "Voice", "VoiceGenerationParameterOptionResponse", "VoiceGenerationParameterResponse", + "VoicePreviewResponseModel", + "VoicePreviewsResponseModel", + "VoiceResponseModelCategory", "VoiceResponseModelSafetyControl", "VoiceSample", "VoiceSettings", + "VoiceSharingModerationCheckResponseModel", "VoiceSharingResponse", + "VoiceSharingResponseModelCategory", "VoiceSharingState", "VoiceVerificationResponse", ] diff --git a/src/elevenlabs/types/close_connection.py b/src/elevenlabs/types/add_chapter_response_model.py similarity index 76% rename from src/elevenlabs/types/close_connection.py rename to 
src/elevenlabs/types/add_chapter_response_model.py index 42a428f2..c6401365 100644 --- a/src/elevenlabs/types/close_connection.py +++ b/src/elevenlabs/types/add_chapter_response_model.py @@ -1,16 +1,14 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .chapter_response import ChapterResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -class CloseConnection(UncheckedBaseModel): - text: typing.Literal[""] = pydantic.Field(default="") - """ - End the stream with an empty string - """ +class AddChapterResponseModel(UncheckedBaseModel): + chapter: ChapterResponse if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/add_voice_ivc_response_model.py b/src/elevenlabs/types/add_voice_ivc_response_model.py new file mode 100644 index 00000000..0c85c00e --- /dev/null +++ b/src/elevenlabs/types/add_voice_ivc_response_model.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class AddVoiceIvcResponseModel(UncheckedBaseModel): + voice_id: str + requires_verification: bool + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/audio_output.py b/src/elevenlabs/types/audio_output.py deleted file mode 100644 index ac0c7c13..00000000 --- a/src/elevenlabs/types/audio_output.py +++ /dev/null @@ -1,37 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from ..core.unchecked_base_model import UncheckedBaseModel -import typing -import pydantic -import typing_extensions -from ..core.serialization import FieldMetadata -from .normalized_alignment import NormalizedAlignment -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class AudioOutput(UncheckedBaseModel): - audio: typing.Optional[str] = pydantic.Field(default=None) - """ - A generated partial audio chunk, encoded using the selected output_format, by default this - is MP3 encoded as a base64 string. - """ - - is_final: typing_extensions.Annotated[typing.Optional[bool], FieldMetadata(alias="isFinal")] = pydantic.Field( - default=None - ) - """ - Indicates if the generation is complete. If set to `True`, `audio` will be null. - """ - - normalized_alignment: typing_extensions.Annotated[ - typing.Optional[NormalizedAlignment], FieldMetadata(alias="normalizedAlignment") - ] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/breakdown_types.py b/src/elevenlabs/types/breakdown_types.py new file mode 100644 index 00000000..addda63e --- /dev/null +++ b/src/elevenlabs/types/breakdown_types.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +BreakdownTypes = typing.Union[ + typing.Literal["none", "voice", "user", "api_keys", "all_api_keys", "product_type", "model"], typing.Any +] diff --git a/src/elevenlabs/types/category.py b/src/elevenlabs/types/category.py deleted file mode 100644 index eca957fd..00000000 --- a/src/elevenlabs/types/category.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -Category = typing.Union[typing.Literal["generated", "professional", "high_quality", "famous"], typing.Any] diff --git a/src/elevenlabs/types/chapter_response.py b/src/elevenlabs/types/chapter_response.py index 1d3b652b..7193b118 100644 --- a/src/elevenlabs/types/chapter_response.py +++ b/src/elevenlabs/types/chapter_response.py @@ -1,21 +1,21 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import typing from .chapter_state import ChapterState from .chapter_statistics_response import ChapterStatisticsResponse from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import typing import pydantic class ChapterResponse(UncheckedBaseModel): chapter_id: str name: str - last_conversion_date_unix: int - conversion_progress: float + last_conversion_date_unix: typing.Optional[int] = None + conversion_progress: typing.Optional[float] = None can_be_downloaded: bool state: ChapterState - statistics: ChapterStatisticsResponse + statistics: typing.Optional[ChapterStatisticsResponse] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/extended_subscription_response_model_currency.py b/src/elevenlabs/types/extended_subscription_response_model_currency.py new file mode 100644 index 00000000..3f566794 --- /dev/null +++ b/src/elevenlabs/types/extended_subscription_response_model_currency.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +ExtendedSubscriptionResponseModelCurrency = typing.Union[typing.Literal["usd", "eur"], typing.Any] diff --git a/src/elevenlabs/types/fine_tuning_response.py b/src/elevenlabs/types/fine_tuning_response.py index 4bb159b8..398fb73c 100644 --- a/src/elevenlabs/types/fine_tuning_response.py +++ b/src/elevenlabs/types/fine_tuning_response.py @@ -22,6 +22,8 @@ class FineTuningResponse(UncheckedBaseModel): verification_attempts: typing.Optional[typing.List[VerificationAttemptResponse]] = None slice_ids: typing.Optional[typing.List[str]] = None manual_verification: typing.Optional[ManualVerificationResponse] = None + max_verification_attempts: typing.Optional[int] = None + next_max_verification_attempts_reset_unix_ms: typing.Optional[int] = None finetuning_state: typing.Optional[typing.Optional[typing.Any]] = None if IS_PYDANTIC_V2: diff --git a/src/elevenlabs/types/generation_config.py b/src/elevenlabs/types/generation_config.py deleted file mode 100644 index 8d5491a5..00000000 --- a/src/elevenlabs/types/generation_config.py +++ /dev/null @@ -1,40 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.unchecked_base_model import UncheckedBaseModel -import typing -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class GenerationConfig(UncheckedBaseModel): - chunk_length_schedule: typing.Optional[typing.List[float]] = pydantic.Field(default=None) - """ - This is an advanced setting that most users shouldn't need to use. It relates to our - generation schedule explained [here](https://elevenlabs.io/docs/api-reference/websockets#understanding-how-our-websockets-buffer-text). - - Determines the minimum amount of text that needs to be sent and present in our - buffer before audio starts being generated. This is to maximise the amount of context available to - the model to improve audio quality, whilst balancing latency of the returned audio chunks. - - The default value is: [120, 160, 250, 290]. 
- - This means that the first chunk of audio will not be generated until you send text that - totals at least 120 characters long. The next chunk of audio will only be generated once a - further 160 characters have been sent. The third audio chunk will be generated after the - next 250 characters. Then the fourth, and beyond, will be generated in sets of at least 290 characters. - - Customize this array to suit your needs. If you want to generate audio more frequently - to optimise latency, you can reduce the values in the array. Note that setting the values - too low may result in lower quality audio. Please test and adjust as needed. - - Each item should be in the range 50-500. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/initialize_connection.py b/src/elevenlabs/types/initialize_connection.py deleted file mode 100644 index efc1d88f..00000000 --- a/src/elevenlabs/types/initialize_connection.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.unchecked_base_model import UncheckedBaseModel -import typing -import pydantic -from .realtime_voice_settings import RealtimeVoiceSettings -from .generation_config import GenerationConfig -import typing_extensions -from ..core.serialization import FieldMetadata -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class InitializeConnection(UncheckedBaseModel): - text: typing.Literal[" "] = pydantic.Field(default=" ") - """ - The initial text that must be sent is a blank space. - """ - - voice_settings: typing.Optional[RealtimeVoiceSettings] = None - generation_config: typing.Optional[GenerationConfig] = pydantic.Field(default=None) - """ - This property should only be provided in the first message you send. 
- """ - - xi_api_key: typing_extensions.Annotated[str, FieldMetadata(alias="xi-api-key")] = pydantic.Field() - """ - Your ElevenLabs API key. This is a required parameter that should be provided in the first message you send. - You can find your API key in the [API Keys section](https://elevenlabs.io/docs/api-reference/websockets#api-keys). - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/library_voice_response.py b/src/elevenlabs/types/library_voice_response.py index bba76f22..84de7efb 100644 --- a/src/elevenlabs/types/library_voice_response.py +++ b/src/elevenlabs/types/library_voice_response.py @@ -1,6 +1,7 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .library_voice_response_model_category import LibraryVoiceResponseModelCategory import typing_extensions from ..core.serialization import FieldMetadata import typing @@ -18,7 +19,7 @@ class LibraryVoiceResponse(UncheckedBaseModel): age: str descriptive: str use_case: str - category: str + category: LibraryVoiceResponseModelCategory language: str description: str preview_url: str diff --git a/src/elevenlabs/types/library_voice_response_model_category.py b/src/elevenlabs/types/library_voice_response_model_category.py new file mode 100644 index 00000000..7d3d40fe --- /dev/null +++ b/src/elevenlabs/types/library_voice_response_model_category.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +LibraryVoiceResponseModelCategory = typing.Union[ + typing.Literal["generated", "cloned", "premade", "professional", "famous", "high_quality"], typing.Any +] diff --git a/src/elevenlabs/types/model.py b/src/elevenlabs/types/model.py index 11545b16..74fb3815 100644 --- a/src/elevenlabs/types/model.py +++ b/src/elevenlabs/types/model.py @@ -3,6 +3,8 @@ from ..core.unchecked_base_model import UncheckedBaseModel import typing from .language_response import LanguageResponse +from .model_rates_response_model import ModelRatesResponseModel +from .model_response_model_concurrency_group import ModelResponseModelConcurrencyGroup from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic @@ -23,6 +25,8 @@ class Model(UncheckedBaseModel): max_characters_request_subscribed_user: typing.Optional[int] = None maximum_text_length_per_request: typing.Optional[int] = None languages: typing.Optional[typing.List[LanguageResponse]] = None + model_rates: typing.Optional[ModelRatesResponseModel] = None + concurrency_group: typing.Optional[ModelResponseModelConcurrencyGroup] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/model_rates_response_model.py b/src/elevenlabs/types/model_rates_response_model.py new file mode 100644 index 00000000..a695d91b --- /dev/null +++ b/src/elevenlabs/types/model_rates_response_model.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class ModelRatesResponseModel(UncheckedBaseModel): + character_cost_multiplier: float + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/model_response_model_concurrency_group.py b/src/elevenlabs/types/model_response_model_concurrency_group.py new file mode 100644 index 00000000..1f25a1e0 --- /dev/null +++ b/src/elevenlabs/types/model_response_model_concurrency_group.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ModelResponseModelConcurrencyGroup = typing.Union[typing.Literal["standard", "turbo"], typing.Any] diff --git a/src/elevenlabs/types/normalized_alignment.py b/src/elevenlabs/types/normalized_alignment.py deleted file mode 100644 index cac3e0c6..00000000 --- a/src/elevenlabs/types/normalized_alignment.py +++ /dev/null @@ -1,44 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.unchecked_base_model import UncheckedBaseModel -import typing -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class NormalizedAlignment(UncheckedBaseModel): - """ - Alignment information for the generated audio given the input normalized text sequence. - """ - - char_start_times_ms: typing.Optional[typing.List[int]] = pydantic.Field(default=None) - """ - A list of starting times (in milliseconds) for each character in the normalized text as it - corresponds to the audio. For instance, the character 'H' starts at time 0 ms in the audio. - Note these times are relative to the returned chunk from the model, and not the - full audio response. 
- """ - - chars_durations_ms: typing.Optional[typing.List[int]] = pydantic.Field(default=None) - """ - A list of durations (in milliseconds) for each character in the normalized text as it - corresponds to the audio. For instance, the character 'H' lasts for 3 ms in the audio. - Note these times are relative to the returned chunk from the model, and not the - full audio response. - """ - - chars: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - A list of characters in the normalized text sequence. For instance, the first character is 'H'. - Note that this list may contain spaces, punctuation, and other special characters. - The length of this list should be the same as the lengths of `char_start_times_ms` and `chars_durations_ms`. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/project_extended_response_model.py b/src/elevenlabs/types/project_extended_response_model.py index 847f5d23..87b9dcab 100644 --- a/src/elevenlabs/types/project_extended_response_model.py +++ b/src/elevenlabs/types/project_extended_response_model.py @@ -1,9 +1,13 @@ # This file was auto-generated by Fern from our API Definition. 
from ..core.unchecked_base_model import UncheckedBaseModel -from .project_state import ProjectState import typing +from .project_extended_response_model_target_audience import ProjectExtendedResponseModelTargetAudience +from .project_state import ProjectState +from .project_extended_response_model_access_level import ProjectExtendedResponseModelAccessLevel +from .project_extended_response_model_quality_preset import ProjectExtendedResponseModelQualityPreset from .chapter_response import ChapterResponse +from .pronunciation_dictionary_version_response_model import PronunciationDictionaryVersionResponseModel from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic @@ -15,10 +19,26 @@ class ProjectExtendedResponseModel(UncheckedBaseModel): default_title_voice_id: str default_paragraph_voice_id: str default_model_id: str - last_conversion_date_unix: int + last_conversion_date_unix: typing.Optional[int] = None can_be_downloaded: bool + title: typing.Optional[str] = None + author: typing.Optional[str] = None + description: typing.Optional[str] = None + genres: typing.Optional[typing.List[str]] = None + cover_image_url: typing.Optional[str] = None + target_audience: typing.Optional[ProjectExtendedResponseModelTargetAudience] = None + language: typing.Optional[str] = None + content_type: typing.Optional[str] = None + original_publication_date: typing.Optional[str] = None + mature_content: typing.Optional[bool] = None + isbn_number: typing.Optional[str] = None + volume_normalization: bool state: ProjectState + access_level: ProjectExtendedResponseModelAccessLevel + quality_preset: ProjectExtendedResponseModelQualityPreset chapters: typing.List[ChapterResponse] + pronunciation_dictionary_versions: typing.List[PronunciationDictionaryVersionResponseModel] + experimental: typing.Dict[str, typing.Optional[typing.Any]] if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 
diff --git a/src/elevenlabs/types/project_extended_response_model_access_level.py b/src/elevenlabs/types/project_extended_response_model_access_level.py new file mode 100644 index 00000000..53427425 --- /dev/null +++ b/src/elevenlabs/types/project_extended_response_model_access_level.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ProjectExtendedResponseModelAccessLevel = typing.Union[typing.Literal["admin", "editor", "viewer"], typing.Any] diff --git a/src/elevenlabs/types/project_extended_response_model_quality_preset.py b/src/elevenlabs/types/project_extended_response_model_quality_preset.py new file mode 100644 index 00000000..8b10a5f9 --- /dev/null +++ b/src/elevenlabs/types/project_extended_response_model_quality_preset.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ProjectExtendedResponseModelQualityPreset = typing.Union[ + typing.Literal["standard", "high", "highest", "ultra", "ultra_lossless"], typing.Any +] diff --git a/src/elevenlabs/types/project_extended_response_model_target_audience.py b/src/elevenlabs/types/project_extended_response_model_target_audience.py new file mode 100644 index 00000000..5fa6dc33 --- /dev/null +++ b/src/elevenlabs/types/project_extended_response_model_target_audience.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ProjectExtendedResponseModelTargetAudience = typing.Union[ + typing.Literal["children", "young adult", "adult", "all ages"], typing.Any +] diff --git a/src/elevenlabs/types/project_response.py b/src/elevenlabs/types/project_response.py index 735032d7..5da3d836 100644 --- a/src/elevenlabs/types/project_response.py +++ b/src/elevenlabs/types/project_response.py @@ -1,9 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .project_response_model_target_audience import ProjectResponseModelTargetAudience from .project_state import ProjectState +from .project_response_model_access_level import ProjectResponseModelAccessLevel from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import typing import pydantic @@ -14,13 +16,22 @@ class ProjectResponse(UncheckedBaseModel): default_title_voice_id: str default_paragraph_voice_id: str default_model_id: str - last_conversion_date_unix: int + last_conversion_date_unix: typing.Optional[int] = None can_be_downloaded: bool - title: str - author: str - isbn_number: str + title: typing.Optional[str] = None + author: typing.Optional[str] = None + description: typing.Optional[str] = None + genres: typing.Optional[typing.List[str]] = None + cover_image_url: typing.Optional[str] = None + target_audience: typing.Optional[ProjectResponseModelTargetAudience] = None + language: typing.Optional[str] = None + content_type: typing.Optional[str] = None + original_publication_date: typing.Optional[str] = None + mature_content: typing.Optional[bool] = None + isbn_number: typing.Optional[str] = None volume_normalization: bool state: ProjectState + access_level: ProjectResponseModelAccessLevel if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/project_response_model_access_level.py b/src/elevenlabs/types/project_response_model_access_level.py new file mode 100644 index 00000000..b5d62265 --- /dev/null +++ b/src/elevenlabs/types/project_response_model_access_level.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +ProjectResponseModelAccessLevel = typing.Union[typing.Literal["admin", "editor", "viewer"], typing.Any] diff --git a/src/elevenlabs/types/project_response_model_target_audience.py b/src/elevenlabs/types/project_response_model_target_audience.py new file mode 100644 index 00000000..f235fc35 --- /dev/null +++ b/src/elevenlabs/types/project_response_model_target_audience.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ProjectResponseModelTargetAudience = typing.Union[ + typing.Literal["children", "young adult", "adult", "all ages"], typing.Any +] diff --git a/src/elevenlabs/types/project_snapshot_upload_response_model.py b/src/elevenlabs/types/project_snapshot_upload_response_model.py index 58493d00..c19ca85c 100644 --- a/src/elevenlabs/types/project_snapshot_upload_response_model.py +++ b/src/elevenlabs/types/project_snapshot_upload_response_model.py @@ -1,14 +1,14 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel -from .status import Status +from .project_snapshot_upload_response_model_status import ProjectSnapshotUploadResponseModelStatus import typing from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic class ProjectSnapshotUploadResponseModel(UncheckedBaseModel): - status: Status + status: ProjectSnapshotUploadResponseModelStatus acx_volume_normalization: typing.Optional[bool] = None if IS_PYDANTIC_V2: diff --git a/src/elevenlabs/types/project_snapshot_upload_response_model_status.py b/src/elevenlabs/types/project_snapshot_upload_response_model_status.py new file mode 100644 index 00000000..884059e0 --- /dev/null +++ b/src/elevenlabs/types/project_snapshot_upload_response_model_status.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +ProjectSnapshotUploadResponseModelStatus = typing.Union[ + typing.Literal["success", "in_queue", "pending", "failed"], typing.Any +] diff --git a/src/elevenlabs/types/pronunciation_dictionary_version_response_model.py b/src/elevenlabs/types/pronunciation_dictionary_version_response_model.py new file mode 100644 index 00000000..21150c87 --- /dev/null +++ b/src/elevenlabs/types/pronunciation_dictionary_version_response_model.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class PronunciationDictionaryVersionResponseModel(UncheckedBaseModel): + version_id: str + pronunciation_dictionary_id: str + dictionary_name: str + version_name: str + created_by: str + creation_time_unix: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/realtime_voice_settings.py b/src/elevenlabs/types/realtime_voice_settings.py deleted file mode 100644 index 983efc51..00000000 --- a/src/elevenlabs/types/realtime_voice_settings.py +++ /dev/null @@ -1,37 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.unchecked_base_model import UncheckedBaseModel -import pydantic -import typing -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class RealtimeVoiceSettings(UncheckedBaseModel): - stability: float = pydantic.Field() - """ - Defines the stability for voice settings. - """ - - similarity_boost: float = pydantic.Field() - """ - Defines the similarity boost for voice settings. - """ - - style: typing.Optional[float] = pydantic.Field(default=None) - """ - Defines the style for voice settings. 
This parameter is available on V2+ models. - """ - - use_speaker_boost: typing.Optional[bool] = pydantic.Field(default=None) - """ - Defines the use speaker boost for voice settings. This parameter is available on V2+ models. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/send_text.py b/src/elevenlabs/types/send_text.py deleted file mode 100644 index e1a391e5..00000000 --- a/src/elevenlabs/types/send_text.py +++ /dev/null @@ -1,34 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.unchecked_base_model import UncheckedBaseModel -import typing -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class SendText(UncheckedBaseModel): - text: str - try_trigger_generation: typing.Optional[bool] = pydantic.Field(default=None) - """ - This is an advanced setting that most users shouldn't need to use. It relates to our generation schedule - explained [here](#understanding-how-our-websockets-buffer-text). - - Use this to attempt to immediately trigger the generation of audio, overriding the `chunk_length_schedule`. - Unlike flush, `try_trigger_generation` will only generate audio if our - buffer contains more than a minimum - threshold of characters, this is to ensure a higher quality response from our model. - - Note that overriding the chunk schedule to generate small amounts of - text may result in lower quality audio, therefore, only use this parameter if you - really need text to be processed immediately. We generally recommend keeping the default value of - `false` and adjusting the `chunk_length_schedule` in the `generation_config` instead. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/source.py b/src/elevenlabs/types/source.py deleted file mode 100644 index d5e1b55e..00000000 --- a/src/elevenlabs/types/source.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -Source = typing.Union[typing.Literal["TTS", "STS"], typing.Any] diff --git a/src/elevenlabs/types/speech_history_item_response.py b/src/elevenlabs/types/speech_history_item_response.py index 21addda4..783c55d0 100644 --- a/src/elevenlabs/types/speech_history_item_response.py +++ b/src/elevenlabs/types/speech_history_item_response.py @@ -4,7 +4,7 @@ import typing from .speech_history_item_response_model_voice_category import SpeechHistoryItemResponseModelVoiceCategory from .feedback_item import FeedbackItem -from .source import Source +from .speech_history_item_response_model_source import SpeechHistoryItemResponseModelSource from .history_alignments_response_model import HistoryAlignmentsResponseModel from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic @@ -26,7 +26,7 @@ class SpeechHistoryItemResponse(UncheckedBaseModel): settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None feedback: typing.Optional[FeedbackItem] = None share_link_id: typing.Optional[str] = None - source: typing.Optional[Source] = None + source: typing.Optional[SpeechHistoryItemResponseModelSource] = None alignments: typing.Optional[HistoryAlignmentsResponseModel] = None if IS_PYDANTIC_V2: diff --git a/src/elevenlabs/types/speech_history_item_response_model_source.py b/src/elevenlabs/types/speech_history_item_response_model_source.py new file mode 100644 index 00000000..7ed84840 --- /dev/null +++ 
b/src/elevenlabs/types/speech_history_item_response_model_source.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SpeechHistoryItemResponseModelSource = typing.Union[typing.Literal["TTS", "STS"], typing.Any] diff --git a/src/elevenlabs/types/status.py b/src/elevenlabs/types/status.py deleted file mode 100644 index 7023643d..00000000 --- a/src/elevenlabs/types/status.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -Status = typing.Union[typing.Literal["success", "in_queue", "pending", "failed"], typing.Any] diff --git a/src/elevenlabs/types/subscription.py b/src/elevenlabs/types/subscription.py index a89f632b..b480e2c4 100644 --- a/src/elevenlabs/types/subscription.py +++ b/src/elevenlabs/types/subscription.py @@ -2,7 +2,7 @@ from ..core.unchecked_base_model import UncheckedBaseModel import typing -from .currency import Currency +from .extended_subscription_response_model_currency import ExtendedSubscriptionResponseModelCurrency from .subscription_status import SubscriptionStatus from .extended_subscription_response_model_billing_period import ExtendedSubscriptionResponseModelBillingPeriod from .extended_subscription_response_model_character_refresh_period import ( @@ -27,7 +27,7 @@ class Subscription(UncheckedBaseModel): can_extend_voice_limit: bool can_use_instant_voice_cloning: bool can_use_professional_voice_cloning: bool - currency: typing.Optional[Currency] = None + currency: typing.Optional[ExtendedSubscriptionResponseModelCurrency] = None status: typing.Optional[SubscriptionStatus] = None billing_period: typing.Optional[ExtendedSubscriptionResponseModelBillingPeriod] = None character_refresh_period: typing.Optional[ExtendedSubscriptionResponseModelCharacterRefreshPeriod] = None diff --git a/src/elevenlabs/types/subscription_response.py b/src/elevenlabs/types/subscription_response.py index 0513a818..75bc496f 100644 --- 
a/src/elevenlabs/types/subscription_response.py +++ b/src/elevenlabs/types/subscription_response.py @@ -1,7 +1,7 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel -from .currency import Currency +from .subscription_response_model_currency import SubscriptionResponseModelCurrency from .subscription_status import SubscriptionStatus from .subscription_response_model_billing_period import SubscriptionResponseModelBillingPeriod from .subscription_response_model_character_refresh_period import SubscriptionResponseModelCharacterRefreshPeriod @@ -24,7 +24,7 @@ class SubscriptionResponse(UncheckedBaseModel): can_extend_voice_limit: bool can_use_instant_voice_cloning: bool can_use_professional_voice_cloning: bool - currency: Currency + currency: SubscriptionResponseModelCurrency status: SubscriptionStatus billing_period: SubscriptionResponseModelBillingPeriod character_refresh_period: SubscriptionResponseModelCharacterRefreshPeriod diff --git a/src/elevenlabs/types/subscription_response_model_currency.py b/src/elevenlabs/types/subscription_response_model_currency.py new file mode 100644 index 00000000..9cba2c8d --- /dev/null +++ b/src/elevenlabs/types/subscription_response_model_currency.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +SubscriptionResponseModelCurrency = typing.Union[typing.Literal["usd", "eur"], typing.Any] diff --git a/src/elevenlabs/types/voice.py b/src/elevenlabs/types/voice.py index f3d1e570..ea2a4222 100644 --- a/src/elevenlabs/types/voice.py +++ b/src/elevenlabs/types/voice.py @@ -3,6 +3,7 @@ from ..core.unchecked_base_model import UncheckedBaseModel import typing from .voice_sample import VoiceSample +from .voice_response_model_category import VoiceResponseModelCategory from .fine_tuning_response import FineTuningResponse from .voice_settings import VoiceSettings from .voice_sharing_response import VoiceSharingResponse @@ -16,7 +17,7 @@ class Voice(UncheckedBaseModel): voice_id: str name: typing.Optional[str] = None samples: typing.Optional[typing.List[VoiceSample]] = None - category: typing.Optional[str] = None + category: typing.Optional[VoiceResponseModelCategory] = None fine_tuning: typing.Optional[FineTuningResponse] = None labels: typing.Optional[typing.Dict[str, str]] = None description: typing.Optional[str] = None @@ -27,9 +28,10 @@ class Voice(UncheckedBaseModel): high_quality_base_model_ids: typing.Optional[typing.List[str]] = None safety_control: typing.Optional[VoiceResponseModelSafetyControl] = None voice_verification: typing.Optional[VoiceVerificationResponse] = None - owner_id: typing.Optional[str] = None permission_on_resource: typing.Optional[str] = None + is_owner: typing.Optional[bool] = None is_legacy: typing.Optional[bool] = None + is_mixed: typing.Optional[bool] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/voice_preview_response_model.py b/src/elevenlabs/types/voice_preview_response_model.py new file mode 100644 index 00000000..c4384182 --- /dev/null +++ b/src/elevenlabs/types/voice_preview_response_model.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class VoicePreviewResponseModel(UncheckedBaseModel): + audio_base_64: str + generated_voice_id: str + media_type: typing.Optional[typing.Literal["audio/mpeg"]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/voice_previews_response_model.py b/src/elevenlabs/types/voice_previews_response_model.py new file mode 100644 index 00000000..721505b4 --- /dev/null +++ b/src/elevenlabs/types/voice_previews_response_model.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .voice_preview_response_model import VoicePreviewResponseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class VoicePreviewsResponseModel(UncheckedBaseModel): + previews: typing.List[VoicePreviewResponseModel] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/voice_response_model_category.py b/src/elevenlabs/types/voice_response_model_category.py new file mode 100644 index 00000000..2743db78 --- /dev/null +++ b/src/elevenlabs/types/voice_response_model_category.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +VoiceResponseModelCategory = typing.Union[ + typing.Literal["generated", "cloned", "premade", "professional", "famous", "high_quality"], typing.Any +] diff --git a/src/elevenlabs/types/voice_response_model_safety_control.py b/src/elevenlabs/types/voice_response_model_safety_control.py index 29bd845a..8c887cd5 100644 --- a/src/elevenlabs/types/voice_response_model_safety_control.py +++ b/src/elevenlabs/types/voice_response_model_safety_control.py @@ -3,5 +3,6 @@ import typing VoiceResponseModelSafetyControl = typing.Union[ - typing.Literal["NONE", "BAN", "CAPTCHA", "CAPTCHA_AND_MODERATION"], typing.Any + typing.Literal["NONE", "BAN", "CAPTCHA", "CAPTCHA_AND_MODERATION", "ENTERPRISE_BAN", "ENTERPRISE_CAPTCHA"], + typing.Any, ] diff --git a/src/elevenlabs/types/voice_sharing_moderation_check_response_model.py b/src/elevenlabs/types/voice_sharing_moderation_check_response_model.py new file mode 100644 index 00000000..cdd6875b --- /dev/null +++ b/src/elevenlabs/types/voice_sharing_moderation_check_response_model.py @@ -0,0 +1,27 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class VoiceSharingModerationCheckResponseModel(UncheckedBaseModel): + date_checked_unix: typing.Optional[int] = None + name_value: typing.Optional[str] = None + name_check: typing.Optional[bool] = None + description_value: typing.Optional[str] = None + description_check: typing.Optional[bool] = None + sample_ids: typing.Optional[typing.List[str]] = None + sample_checks: typing.Optional[typing.List[float]] = None + captcha_ids: typing.Optional[typing.List[str]] = None + captcha_checks: typing.Optional[typing.List[float]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/voice_sharing_response.py b/src/elevenlabs/types/voice_sharing_response.py index beb346c8..365a560d 100644 --- a/src/elevenlabs/types/voice_sharing_response.py +++ b/src/elevenlabs/types/voice_sharing_response.py @@ -3,8 +3,9 @@ from ..core.unchecked_base_model import UncheckedBaseModel import typing from .voice_sharing_state import VoiceSharingState -from .category import Category +from .voice_sharing_response_model_category import VoiceSharingResponseModelCategory from .review_status import ReviewStatus +from .voice_sharing_moderation_check_response_model import VoiceSharingModerationCheckResponseModel from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic @@ -24,7 +25,7 @@ class VoiceSharingResponse(UncheckedBaseModel): disable_at_unix: typing.Optional[int] = None voice_mixing_allowed: typing.Optional[bool] = None featured: typing.Optional[bool] = None - category: typing.Optional[Category] = None + category: typing.Optional[VoiceSharingResponseModelCategory] = None reader_app_enabled: 
typing.Optional[bool] = None image_url: typing.Optional[str] = None ban_reason: typing.Optional[str] = None @@ -40,6 +41,7 @@ class VoiceSharingResponse(UncheckedBaseModel): twitter_username: typing.Optional[str] = None youtube_username: typing.Optional[str] = None tiktok_username: typing.Optional[str] = None + moderation_check: typing.Optional[VoiceSharingModerationCheckResponseModel] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/voice_sharing_response_model_category.py b/src/elevenlabs/types/voice_sharing_response_model_category.py new file mode 100644 index 00000000..8439ad79 --- /dev/null +++ b/src/elevenlabs/types/voice_sharing_response_model_category.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +VoiceSharingResponseModelCategory = typing.Union[ + typing.Literal["generated", "professional", "high_quality", "famous"], typing.Any +] diff --git a/src/elevenlabs/usage/__init__.py b/src/elevenlabs/usage/__init__.py index 27d1e497..f3ea2659 100644 --- a/src/elevenlabs/usage/__init__.py +++ b/src/elevenlabs/usage/__init__.py @@ -1,5 +1,2 @@ # This file was auto-generated by Fern from our API Definition. 
-from .types import UsageGetCharactersUsageMetricsRequestBreakdownType - -__all__ = ["UsageGetCharactersUsageMetricsRequestBreakdownType"] diff --git a/src/elevenlabs/usage/client.py b/src/elevenlabs/usage/client.py index a6c75588..0d4c04c1 100644 --- a/src/elevenlabs/usage/client.py +++ b/src/elevenlabs/usage/client.py @@ -2,9 +2,7 @@ from ..core.client_wrapper import SyncClientWrapper import typing -from .types.usage_get_characters_usage_metrics_request_breakdown_type import ( - UsageGetCharactersUsageMetricsRequestBreakdownType, -) +from ..types.breakdown_types import BreakdownTypes from ..core.request_options import RequestOptions from ..types.usage_characters_response_model import UsageCharactersResponseModel from ..core.unchecked_base_model import construct_type @@ -25,11 +23,11 @@ def get_characters_usage_metrics( start_unix: int, end_unix: int, include_workspace_metrics: typing.Optional[bool] = None, - breakdown_type: typing.Optional[UsageGetCharactersUsageMetricsRequestBreakdownType] = None, + breakdown_type: typing.Optional[BreakdownTypes] = None, request_options: typing.Optional[RequestOptions] = None, ) -> UsageCharactersResponseModel: """ - Returns the characters usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis. + Returns the credit usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis. 
Parameters ---------- @@ -42,7 +40,7 @@ def get_characters_usage_metrics( include_workspace_metrics : typing.Optional[bool] Whether or not to include the statistics of the entire workspace. - breakdown_type : typing.Optional[UsageGetCharactersUsageMetricsRequestBreakdownType] + breakdown_type : typing.Optional[BreakdownTypes] How to break down the information. Cannot be "user" if include_workspace_metrics is False. request_options : typing.Optional[RequestOptions] @@ -111,11 +109,11 @@ async def get_characters_usage_metrics( start_unix: int, end_unix: int, include_workspace_metrics: typing.Optional[bool] = None, - breakdown_type: typing.Optional[UsageGetCharactersUsageMetricsRequestBreakdownType] = None, + breakdown_type: typing.Optional[BreakdownTypes] = None, request_options: typing.Optional[RequestOptions] = None, ) -> UsageCharactersResponseModel: """ - Returns the characters usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis. + Returns the credit usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis. Parameters ---------- @@ -128,7 +126,7 @@ async def get_characters_usage_metrics( include_workspace_metrics : typing.Optional[bool] Whether or not to include the statistics of the entire workspace. - breakdown_type : typing.Optional[UsageGetCharactersUsageMetricsRequestBreakdownType] + breakdown_type : typing.Optional[BreakdownTypes] How to break down the information. 
Cannot be "user" if include_workspace_metrics is False. request_options : typing.Optional[RequestOptions] diff --git a/src/elevenlabs/usage/types/__init__.py b/src/elevenlabs/usage/types/__init__.py deleted file mode 100644 index 93750b4c..00000000 --- a/src/elevenlabs/usage/types/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from .usage_get_characters_usage_metrics_request_breakdown_type import ( - UsageGetCharactersUsageMetricsRequestBreakdownType, -) - -__all__ = ["UsageGetCharactersUsageMetricsRequestBreakdownType"] diff --git a/src/elevenlabs/usage/types/usage_get_characters_usage_metrics_request_breakdown_type.py b/src/elevenlabs/usage/types/usage_get_characters_usage_metrics_request_breakdown_type.py deleted file mode 100644 index 5275386a..00000000 --- a/src/elevenlabs/usage/types/usage_get_characters_usage_metrics_request_breakdown_type.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -UsageGetCharactersUsageMetricsRequestBreakdownType = typing.Union[ - typing.Literal["none", "voice", "user", "api_keys"], typing.Any -] diff --git a/src/elevenlabs/voice_generation/client.py b/src/elevenlabs/voice_generation/client.py index d02804a4..d79d9664 100644 --- a/src/elevenlabs/voice_generation/client.py +++ b/src/elevenlabs/voice_generation/client.py @@ -97,7 +97,7 @@ def generate( Text to generate, text length has to be between 100 and 1000. request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
Yields ------ @@ -134,7 +134,8 @@ def generate( ) as _response: try: if 200 <= _response.status_code < 300: - for _chunk in _response.iter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + for _chunk in _response.iter_bytes(chunk_size=_chunk_size): yield _chunk return _response.read() @@ -159,6 +160,7 @@ def create_a_previously_generated_voice( voice_name: str, voice_description: str, generated_voice_id: str, + played_not_selected_voice_ids: typing.Optional[typing.Sequence[str]] = OMIT, labels: typing.Optional[typing.Dict[str, str]] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> Voice: @@ -176,6 +178,9 @@ def create_a_previously_generated_voice( generated_voice_id : str The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet. + played_not_selected_voice_ids : typing.Optional[typing.Sequence[str]] + List of voice ids that the user has played but not selected. Used for RLHF. + labels : typing.Optional[typing.Dict[str, str]] Optional, metadata to add to the created voice. Defaults to None. @@ -207,6 +212,7 @@ def create_a_previously_generated_voice( "voice_name": voice_name, "voice_description": voice_description, "generated_voice_id": generated_voice_id, + "played_not_selected_voice_ids": played_not_selected_voice_ids, "labels": labels, }, request_options=request_options, @@ -324,7 +330,7 @@ async def generate( Text to generate, text length has to be between 100 and 1000. request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
Yields ------ @@ -369,7 +375,8 @@ async def main() -> None: ) as _response: try: if 200 <= _response.status_code < 300: - async for _chunk in _response.aiter_bytes(): + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size): yield _chunk return await _response.aread() @@ -394,6 +401,7 @@ async def create_a_previously_generated_voice( voice_name: str, voice_description: str, generated_voice_id: str, + played_not_selected_voice_ids: typing.Optional[typing.Sequence[str]] = OMIT, labels: typing.Optional[typing.Dict[str, str]] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> Voice: @@ -411,6 +419,9 @@ async def create_a_previously_generated_voice( generated_voice_id : str The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet. + played_not_selected_voice_ids : typing.Optional[typing.Sequence[str]] + List of voice ids that the user has played but not selected. Used for RLHF. + labels : typing.Optional[typing.Dict[str, str]] Optional, metadata to add to the created voice. Defaults to None. @@ -450,6 +461,7 @@ async def main() -> None: "voice_name": voice_name, "voice_description": voice_description, "generated_voice_id": generated_voice_id, + "played_not_selected_voice_ids": played_not_selected_voice_ids, "labels": labels, }, request_options=request_options, diff --git a/src/elevenlabs/voices/client.py b/src/elevenlabs/voices/client.py index 40f3f645..e0e5a7e0 100644 --- a/src/elevenlabs/voices/client.py +++ b/src/elevenlabs/voices/client.py @@ -14,6 +14,7 @@ from ..types.voice import Voice from ..core.serialization import convert_and_respect_annotation_metadata from .. 
import core +from ..types.add_voice_ivc_response_model import AddVoiceIvcResponseModel from ..types.add_voice_response_model import AddVoiceResponseModel from ..types.get_library_voices_response import GetLibraryVoicesResponse from ..types.profile_page_response_model import ProfilePageResponseModel @@ -387,10 +388,11 @@ def add( *, name: str, files: typing.List[core.File], + remove_background_noise: typing.Optional[bool] = OMIT, description: typing.Optional[str] = OMIT, labels: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> AddVoiceResponseModel: + ) -> AddVoiceIvcResponseModel: """ Add a new voice to your collection of voices in VoiceLab. @@ -402,6 +404,9 @@ def add( files : typing.List[core.File] See core.File for more documentation + remove_background_noise : typing.Optional[bool] + If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse. + description : typing.Optional[str] How would you describe the voice? 
@@ -413,7 +418,7 @@ def add( Returns ------- - AddVoiceResponseModel + AddVoiceIvcResponseModel Successful Response Examples @@ -432,6 +437,7 @@ def add( method="POST", data={ "name": name, + "remove_background_noise": remove_background_noise, "description": description, "labels": labels, }, @@ -444,9 +450,9 @@ def add( try: if 200 <= _response.status_code < 300: return typing.cast( - AddVoiceResponseModel, + AddVoiceIvcResponseModel, construct_type( - type_=AddVoiceResponseModel, # type: ignore + type_=AddVoiceIvcResponseModel, # type: ignore object_=_response.json(), ), ) @@ -471,6 +477,7 @@ def edit( *, name: str, files: typing.Optional[typing.List[core.File]] = OMIT, + remove_background_noise: typing.Optional[bool] = OMIT, description: typing.Optional[str] = OMIT, labels: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, @@ -489,6 +496,9 @@ def edit( files : typing.Optional[typing.List[core.File]] See core.File for more documentation + remove_background_noise : typing.Optional[bool] + If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse. + description : typing.Optional[str] How would you describe the voice? @@ -520,6 +530,7 @@ def edit( method="POST", data={ "name": name, + "remove_background_noise": remove_background_noise, "description": description, "labels": labels, }, @@ -559,7 +570,6 @@ def add_sharing_voice( voice_id: str, *, new_name: str, - xi_app_check_token: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None, ) -> AddVoiceResponseModel: """ @@ -576,9 +586,6 @@ def add_sharing_voice( new_name : str The name that identifies this voice. This will be displayed in the dropdown of the website. - xi_app_check_token : typing.Optional[str] - Your app check token. - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
@@ -606,9 +613,6 @@ def add_sharing_voice( json={ "new_name": new_name, }, - headers={ - "xi-app-check-token": str(xi_app_check_token) if xi_app_check_token is not None else None, - }, request_options=request_options, omit=OMIT, ) @@ -1317,10 +1321,11 @@ async def add( *, name: str, files: typing.List[core.File], + remove_background_noise: typing.Optional[bool] = OMIT, description: typing.Optional[str] = OMIT, labels: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> AddVoiceResponseModel: + ) -> AddVoiceIvcResponseModel: """ Add a new voice to your collection of voices in VoiceLab. @@ -1332,6 +1337,9 @@ async def add( files : typing.List[core.File] See core.File for more documentation + remove_background_noise : typing.Optional[bool] + If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse. + description : typing.Optional[str] How would you describe the voice? 
@@ -1343,7 +1351,7 @@ async def add( Returns ------- - AddVoiceResponseModel + AddVoiceIvcResponseModel Successful Response Examples @@ -1370,6 +1378,7 @@ async def main() -> None: method="POST", data={ "name": name, + "remove_background_noise": remove_background_noise, "description": description, "labels": labels, }, @@ -1382,9 +1391,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - AddVoiceResponseModel, + AddVoiceIvcResponseModel, construct_type( - type_=AddVoiceResponseModel, # type: ignore + type_=AddVoiceIvcResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1409,6 +1418,7 @@ async def edit( *, name: str, files: typing.Optional[typing.List[core.File]] = OMIT, + remove_background_noise: typing.Optional[bool] = OMIT, description: typing.Optional[str] = OMIT, labels: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, @@ -1427,6 +1437,9 @@ async def edit( files : typing.Optional[typing.List[core.File]] See core.File for more documentation + remove_background_noise : typing.Optional[bool] + If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse. + description : typing.Optional[str] How would you describe the voice? @@ -1466,6 +1479,7 @@ async def main() -> None: method="POST", data={ "name": name, + "remove_background_noise": remove_background_noise, "description": description, "labels": labels, }, @@ -1505,7 +1519,6 @@ async def add_sharing_voice( voice_id: str, *, new_name: str, - xi_app_check_token: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None, ) -> AddVoiceResponseModel: """ @@ -1522,9 +1535,6 @@ async def add_sharing_voice( new_name : str The name that identifies this voice. This will be displayed in the dropdown of the website. - xi_app_check_token : typing.Optional[str] - Your app check token. 
- request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -1560,9 +1570,6 @@ async def main() -> None: json={ "new_name": new_name, }, - headers={ - "xi-app-check-token": str(xi_app_check_token) if xi_app_check_token is not None else None, - }, request_options=request_options, omit=OMIT, ) diff --git a/src/elevenlabs/workspace/client.py b/src/elevenlabs/workspace/client.py index ff9411cc..b56b77b2 100644 --- a/src/elevenlabs/workspace/client.py +++ b/src/elevenlabs/workspace/client.py @@ -3,6 +3,7 @@ import typing from ..core.client_wrapper import SyncClientWrapper from ..core.request_options import RequestOptions +from ..types.sso_provider_response_model import SsoProviderResponseModel from ..core.unchecked_base_model import construct_type from ..errors.unprocessable_entity_error import UnprocessableEntityError from ..types.http_validation_error import HttpValidationError @@ -21,6 +22,65 @@ class WorkspaceClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper + def get_sso_provider_admin( + self, *, workspace_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> SsoProviderResponseModel: + """ + Parameters + ---------- + workspace_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + SsoProviderResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.workspace.get_sso_provider_admin( + workspace_id="workspace_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "admin/n8enylacgd/sso-provider", + method="GET", + params={ + "workspace_id": workspace_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SsoProviderResponseModel, + construct_type( + type_=SsoProviderResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + def invite_user( self, *, email: str, request_options: typing.Optional[RequestOptions] = None ) -> typing.Optional[typing.Any]: @@ -228,6 +288,73 @@ class AsyncWorkspaceClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper + async def get_sso_provider_admin( + self, *, workspace_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> SsoProviderResponseModel: + """ + Parameters + ---------- + workspace_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + SsoProviderResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.workspace.get_sso_provider_admin( + workspace_id="workspace_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "admin/n8enylacgd/sso-provider", + method="GET", + params={ + "workspace_id": workspace_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SsoProviderResponseModel, + construct_type( + type_=SsoProviderResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + async def invite_user( self, *, email: str, request_options: typing.Optional[RequestOptions] = None ) -> typing.Optional[typing.Any]: From 42998d76dc5d1b56c2d3e1f1d9f3a820c2ebb1b7 Mon Sep 17 00:00:00 2001 From: "fern-api[bot]" <115122769+fern-api[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 10:01:13 +0100 Subject: [PATCH 14/45] SDK regeneration (#398) Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com> --- pyproject.toml | 2 +- src/elevenlabs/core/client_wrapper.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 60127203..1ea79694 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.12.0-beta0" +version = "1.12.0" description = "" readme = "README.md" 
authors = [] diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index ae429e14..5663aec2 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.12.0-beta0", + "X-Fern-SDK-Version": "1.12.0", } if self._api_key is not None: headers["xi-api-key"] = self._api_key From 7fee6d88c722be84cc61ef8a3f94c4f94b35d771 Mon Sep 17 00:00:00 2001 From: Laco Date: Wed, 30 Oct 2024 18:36:13 +0100 Subject: [PATCH 15/45] Restore conversational_ai client and fix .fernignore (#399) This was removed by automatic SDK regen since the path in fernignore file was not updated when renaming the package :/ --- .fernignore | 6 +- poetry.lock | 41 +++- pyproject.toml | 9 +- .../conversational_ai/conversation.py | 215 ++++++++++++++++++ .../default_audio_interface.py | 83 +++++++ src/elevenlabs/core/client_wrapper.py | 2 +- 6 files changed, 351 insertions(+), 5 deletions(-) create mode 100644 src/elevenlabs/conversational_ai/conversation.py create mode 100644 src/elevenlabs/conversational_ai/default_audio_interface.py diff --git a/.fernignore b/.fernignore index 1a50c353..4669f044 100644 --- a/.fernignore +++ b/.fernignore @@ -1,10 +1,14 @@ # Specify files that shouldn't be modified by Fern src/elevenlabs/client.py -src/elevenlabs/conversation.py +src/elevenlabs/conversational_ai/conversation.py +src/elevenlabs/conversational_ai/default_audio_interface.py src/elevenlabs/play.py src/elevenlabs/realtime_tts.py +pyproject.toml +poetry.lock + .github/workflows/ci.yml .github/workflows/tests.yml diff --git a/poetry.lock b/poetry.lock index 46e79034..1e679e68 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. 
+# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "annotated-types" @@ -351,6 +351,29 @@ files = [ dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "pyaudio" +version = "0.2.14" +description = "Cross-platform audio I/O with PortAudio" +optional = true +python-versions = "*" +files = [ + {file = "PyAudio-0.2.14-cp310-cp310-win32.whl", hash = "sha256:126065b5e82a1c03ba16e7c0404d8f54e17368836e7d2d92427358ad44fefe61"}, + {file = "PyAudio-0.2.14-cp310-cp310-win_amd64.whl", hash = "sha256:2a166fc88d435a2779810dd2678354adc33499e9d4d7f937f28b20cc55893e83"}, + {file = "PyAudio-0.2.14-cp311-cp311-win32.whl", hash = "sha256:506b32a595f8693811682ab4b127602d404df7dfc453b499c91a80d0f7bad289"}, + {file = "PyAudio-0.2.14-cp311-cp311-win_amd64.whl", hash = "sha256:bbeb01d36a2f472ae5ee5e1451cacc42112986abe622f735bb870a5db77cf903"}, + {file = "PyAudio-0.2.14-cp312-cp312-win32.whl", hash = "sha256:5fce4bcdd2e0e8c063d835dbe2860dac46437506af509353c7f8114d4bacbd5b"}, + {file = "PyAudio-0.2.14-cp312-cp312-win_amd64.whl", hash = "sha256:12f2f1ba04e06ff95d80700a78967897a489c05e093e3bffa05a84ed9c0a7fa3"}, + {file = "PyAudio-0.2.14-cp38-cp38-win32.whl", hash = "sha256:858caf35b05c26d8fc62f1efa2e8f53d5fa1a01164842bd622f70ddc41f55000"}, + {file = "PyAudio-0.2.14-cp38-cp38-win_amd64.whl", hash = "sha256:2dac0d6d675fe7e181ba88f2de88d321059b69abd52e3f4934a8878e03a7a074"}, + {file = "PyAudio-0.2.14-cp39-cp39-win32.whl", hash = "sha256:f745109634a7c19fa4d6b8b7d6967c3123d988c9ade0cd35d4295ee1acdb53e9"}, + {file = "PyAudio-0.2.14-cp39-cp39-win_amd64.whl", hash = "sha256:009f357ee5aa6bc8eb19d69921cd30e98c42cddd34210615d592a71d09c4bd57"}, + {file = "PyAudio-0.2.14.tar.gz", hash = "sha256:78dfff3879b4994d1f4fc6485646a57755c6ee3c19647a491f790a0895bd2f87"}, +] + +[package.extras] +test = ["numpy"] + [[package]] name = "pydantic" version = "2.9.2" @@ -610,6 +633,17 @@ files = [ {file = "tomli-2.0.2.tar.gz", 
hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, ] +[[package]] +name = "types-pyaudio" +version = "0.2.16.20240516" +description = "Typing stubs for pyaudio" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-pyaudio-0.2.16.20240516.tar.gz", hash = "sha256:f1c419ccc78b00d26c6c1ae4fcb17f7e4f08af2c2b9b73b12fcbc4a4ffa3a2c7"}, + {file = "types_pyaudio-0.2.16.20240516-py3-none-any.whl", hash = "sha256:40063f13ae15a422cbd4a2a783653eb3e1091bdd23fc7ab8ca3abc21ad0d13f8"}, +] + [[package]] name = "types-python-dateutil" version = "2.9.0.20241003" @@ -744,7 +778,10 @@ files = [ {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"}, ] +[extras] +pyaudio = ["pyaudio"] + [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "a53420244251981fe047bbb97d6005fffb6b63447718cc640562750fffcc8c75" +content-hash = "af57dd0aacaa752d61d29db9f958f2d8d0950d51ab868c925a2a973689de5ff7" diff --git a/pyproject.toml b/pyproject.toml index 1ea79694..b45f567f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.12.0" +version = "1.12.1" description = "" readme = "README.md" authors = [] @@ -40,14 +40,21 @@ requests = ">=2.20" typing_extensions = ">= 4.0.0" websockets = ">=11.0" +# Optional extras. 
+pyaudio = { version = ">=0.2.14", optional = true } + [tool.poetry.dev-dependencies] mypy = "1.0.1" pytest = "^7.4.0" pytest-asyncio = "^0.23.5" python-dateutil = "^2.9.0" +types-pyaudio = "^0.2.16.20240516" types-python-dateutil = "^2.9.0.20240316" ruff = "^0.5.6" +[tool.poetry.extras] +pyaudio = ["pyaudio"] + [tool.pytest.ini_options] testpaths = [ "tests" ] asyncio_mode = "auto" diff --git a/src/elevenlabs/conversational_ai/conversation.py b/src/elevenlabs/conversational_ai/conversation.py new file mode 100644 index 00000000..13533ab8 --- /dev/null +++ b/src/elevenlabs/conversational_ai/conversation.py @@ -0,0 +1,215 @@ +from abc import ABC, abstractmethod +import base64 +import json +import threading +from typing import Callable, Optional + +from websockets.sync.client import connect + +from ..base_client import BaseElevenLabs + + +class AudioInterface(ABC): + """AudioInterface provides an abstraction for handling audio input and output.""" + + @abstractmethod + def start(self, input_callback: Callable[[bytes], None]): + """Starts the audio interface. + + Called one time before the conversation starts. + The `input_callback` should be called regularly with input audio chunks from + the user. The audio should be in 16-bit PCM mono format at 16kHz. Recommended + chunk size is 4000 samples (250 milliseconds). + """ + pass + + @abstractmethod + def stop(self): + """Stops the audio interface. + + Called one time after the conversation ends. Should clean up any resources + used by the audio interface and stop any audio streams. Do not call the + `input_callback` from `start` after this method is called. + """ + pass + + @abstractmethod + def output(self, audio: bytes): + """Output audio to the user. + + The `audio` input is in 16-bit PCM mono format at 16kHz. Implementations can + choose to do additional buffering. This method should return quickly and not + block the calling thread. 
+ """ + pass + + @abstractmethod + def interrupt(self): + """Interruption signal to stop any audio output. + + User has interrupted the agent and all previosly buffered audio output should + be stopped. + """ + pass + + +class Conversation: + client: BaseElevenLabs + agent_id: str + requires_auth: bool + + audio_interface: AudioInterface + callback_agent_response: Optional[Callable[[str], None]] + callback_agent_response_correction: Optional[Callable[[str, str], None]] + callback_user_transcript: Optional[Callable[[str], None]] + callback_latency_measurement: Optional[Callable[[int], None]] + + _thread: Optional[threading.Thread] = None + _should_stop: threading.Event = threading.Event() + _conversation_id: Optional[str] = None + _last_interrupt_id: int = 0 + + def __init__( + self, + client: BaseElevenLabs, + agent_id: str, + *, + requires_auth: bool, + audio_interface: AudioInterface, + callback_agent_response: Optional[Callable[[str], None]] = None, + callback_agent_response_correction: Optional[Callable[[str, str], None]] = None, + callback_user_transcript: Optional[Callable[[str], None]] = None, + callback_latency_measurement: Optional[Callable[[int], None]] = None, + ): + """Conversational AI session. + + BETA: This API is subject to change without regard to backwards compatibility. + + Args: + client: The ElevenLabs client to use for the conversation. + agent_id: The ID of the agent to converse with. + requires_auth: Whether the agent requires authentication. + audio_interface: The audio interface to use for input and output. + callback_agent_response: Callback for agent responses. + callback_agent_response_correction: Callback for agent response corrections. + First argument is the original response (previously given to + callback_agent_response), second argument is the corrected response. + callback_user_transcript: Callback for user transcripts. + callback_latency_measurement: Callback for latency measurements (in milliseconds). 
+ """ + + self.client = client + self.agent_id = agent_id + self.requires_auth = requires_auth + + self.audio_interface = audio_interface + self.callback_agent_response = callback_agent_response + self.callback_agent_response_correction = callback_agent_response_correction + self.callback_user_transcript = callback_user_transcript + self.callback_latency_measurement = callback_latency_measurement + + def start_session(self): + """Starts the conversation session. + + Will run in background thread until `end_session` is called. + """ + ws_url = self._get_signed_url() if self.requires_auth else self._get_wss_url() + self._thread = threading.Thread(target=self._run, args=(ws_url,)) + self._thread.start() + + def end_session(self): + """Ends the conversation session.""" + self.audio_interface.stop() + self._should_stop.set() + + def wait_for_session_end(self) -> Optional[str]: + """Waits for the conversation session to end. + + You must call `end_session` before calling this method, otherwise it will block. + + Returns the conversation ID, if available. 
+ """ + if not self._thread: + raise RuntimeError("Session not started.") + self._thread.join() + return self._conversation_id + + def _run(self, ws_url: str): + with connect(ws_url) as ws: + + def input_callback(audio): + ws.send( + json.dumps( + { + "user_audio_chunk": base64.b64encode(audio).decode(), + } + ) + ) + + self.audio_interface.start(input_callback) + while not self._should_stop.is_set(): + try: + message = json.loads(ws.recv(timeout=0.5)) + if self._should_stop.is_set(): + return + self._handle_message(message, ws) + except TimeoutError: + pass + + def _handle_message(self, message, ws): + if message["type"] == "conversation_initiation_metadata": + event = message["conversation_initiation_metadata_event"] + assert self._conversation_id is None + self._conversation_id = event["conversation_id"] + elif message["type"] == "audio": + event = message["audio_event"] + if int(event["event_id"]) <= self._last_interrupt_id: + return + audio = base64.b64decode(event["audio_base_64"]) + self.audio_interface.output(audio) + elif message["type"] == "agent_response": + if self.callback_agent_response: + event = message["agent_response_event"] + self.callback_agent_response(event["agent_response"].strip()) + elif message["type"] == "agent_response_correction": + if self.callback_agent_response_correction: + event = message["agent_response_correction_event"] + self.callback_agent_response_correction( + event["original_agent_response"].strip(), event["corrected_agent_response"].strip() + ) + elif message["type"] == "user_transcript": + if self.callback_user_transcript: + event = message["user_transcription_event"] + self.callback_user_transcript(event["user_transcript"].strip()) + elif message["type"] == "interruption": + event = message["interruption_event"] + self.last_interrupt_id = int(event["event_id"]) + self.audio_interface.interrupt() + elif message["type"] == "ping": + event = message["ping_event"] + ws.send( + json.dumps( + { + "type": "pong", + "event_id": 
event["event_id"], + } + ) + ) + if self.callback_latency_measurement and event["ping_ms"]: + self.callback_latency_measurement(int(event["ping_ms"])) + else: + pass # Ignore all other message types. + + def _get_wss_url(https://github.com/self): + base_url = self.client._client_wrapper._base_url + # Replace http(s) with ws(s). + base_ws_url = base_url.replace("http", "ws", 1) # First occurrence only. + return f"{base_ws_url}/v1/convai/conversation?agent_id={self.agent_id}" + + def _get_signed_url(https://github.com/self): + # TODO: Use generated SDK method once available. + response = self.client._client_wrapper.httpx_client.request( + f"v1/convai/conversation/get_signed_url?agent_id={self.agent_id}", + method="GET", + ) + return response.json()["signed_url"] diff --git a/src/elevenlabs/conversational_ai/default_audio_interface.py b/src/elevenlabs/conversational_ai/default_audio_interface.py new file mode 100644 index 00000000..b1660d85 --- /dev/null +++ b/src/elevenlabs/conversational_ai/default_audio_interface.py @@ -0,0 +1,83 @@ +from typing import Callable +import queue +import threading + +from .conversation import AudioInterface + + +class DefaultAudioInterface(AudioInterface): + INPUT_FRAMES_PER_BUFFER = 4000 # 250ms @ 16kHz + OUTPUT_FRAMES_PER_BUFFER = 1000 # 62.5ms @ 16kHz + + def __init__(self): + try: + import pyaudio + except ImportError: + raise ImportError("To use DefaultAudioInterface you must install pyaudio.") + self.pyaudio = pyaudio + + def start(self, input_callback: Callable[[bytes], None]): + # Audio input is using callbacks from pyaudio which we simply pass through. + self.input_callback = input_callback + + # Audio output is buffered so we can handle interruptions. + # Start a separate thread to handle writing to the output stream. 
+ self.output_queue: queue.Queue[bytes] = queue.Queue() + self.should_stop = threading.Event() + self.output_thread = threading.Thread(target=self._output_thread) + + self.p = self.pyaudio.PyAudio() + self.in_stream = self.p.open( + format=self.pyaudio.paInt16, + channels=1, + rate=16000, + input=True, + stream_callback=self._in_callback, + frames_per_buffer=self.INPUT_FRAMES_PER_BUFFER, + start=True, + ) + self.out_stream = self.p.open( + format=self.pyaudio.paInt16, + channels=1, + rate=16000, + output=True, + frames_per_buffer=self.OUTPUT_FRAMES_PER_BUFFER, + start=True, + ) + + self.output_thread.start() + + def stop(self): + self.should_stop.set() + self.output_thread.join() + self.in_stream.stop_stream() + self.in_stream.close() + self.out_stream.close() + self.p.terminate() + + def output(self, audio: bytes): + self.output_queue.put(audio) + + def interrupt(self): + # Clear the output queue to stop any audio that is currently playing. + # Note: We can't atomically clear the whole queue, but we are doing + # it from the message handling thread so no new audio will be added + # while we are clearing. 
+ try: + while True: + _ = self.output_queue.get(block=False) + except queue.Empty: + pass + + def _output_thread(self): + while not self.should_stop.is_set(): + try: + audio = self.output_queue.get(timeout=0.25) + self.out_stream.write(audio) + except queue.Empty: + pass + + def _in_callback(self, in_data, frame_count, time_info, status): + if self.input_callback: + self.input_callback(in_data) + return (None, self.pyaudio.paContinue) diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index 5663aec2..41aea407 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.12.0", + "X-Fern-SDK-Version": "1.12.1", } if self._api_key is not None: headers["xi-api-key"] = self._api_key From 88b8a967069d45fcb576cde1a76eb47160bd43a6 Mon Sep 17 00:00:00 2001 From: Laco Date: Thu, 31 Oct 2024 12:41:25 +0100 Subject: [PATCH 16/45] Remove pyproject.toml + poetry.lock from .fernignore (#401) Need to have that generated, should support the features we need for the manually added stuff in #399 (and #389). But need to merge this first before being able to test the generated SDK preview. 
--- .fernignore | 3 --- 1 file changed, 3 deletions(-) diff --git a/.fernignore b/.fernignore index 4669f044..21514ae5 100644 --- a/.fernignore +++ b/.fernignore @@ -6,9 +6,6 @@ src/elevenlabs/conversational_ai/default_audio_interface.py src/elevenlabs/play.py src/elevenlabs/realtime_tts.py -pyproject.toml -poetry.lock - .github/workflows/ci.yml .github/workflows/tests.yml From 243f851d7a9af79dbb615a6ccd8bd053db07ae66 Mon Sep 17 00:00:00 2001 From: "fern-api[bot]" <115122769+fern-api[bot]@users.noreply.github.com> Date: Thu, 31 Oct 2024 12:47:06 +0100 Subject: [PATCH 17/45] SDK regeneration (#402) Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com> --- poetry.lock | 2 +- pyproject.toml | 12 +++++------- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/poetry.lock b/poetry.lock index 1e679e68..a4c918eb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. [[package]] name = "annotated-types" diff --git a/pyproject.toml b/pyproject.toml index b45f567f..7bc3f825 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,26 +34,21 @@ Repository = 'https://github.com/elevenlabs/elevenlabs-python' [tool.poetry.dependencies] python = "^3.8" httpx = ">=0.21.2" +pyaudio = { version = ">=0.2.14", optional = true} pydantic = ">= 1.9.2" pydantic-core = "^2.18.2" requests = ">=2.20" typing_extensions = ">= 4.0.0" websockets = ">=11.0" -# Optional extras. 
-pyaudio = { version = ">=0.2.14", optional = true } - [tool.poetry.dev-dependencies] mypy = "1.0.1" pytest = "^7.4.0" pytest-asyncio = "^0.23.5" python-dateutil = "^2.9.0" -types-pyaudio = "^0.2.16.20240516" types-python-dateutil = "^2.9.0.20240316" ruff = "^0.5.6" - -[tool.poetry.extras] -pyaudio = ["pyaudio"] +types-pyaudio = "^0.2.16.20240516" [tool.pytest.ini_options] testpaths = [ "tests" ] @@ -69,3 +64,6 @@ line-length = 120 [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" + +[tool.poetry.extras] +pyaudio=["pyaudio"] From ed5d3b739327471243516ff7ad26e85ef7b079a4 Mon Sep 17 00:00:00 2001 From: Hikmet Demir Date: Thu, 21 Nov 2024 14:05:45 +0000 Subject: [PATCH 18/45] Conversation.py update for elevenlabs_extra_body (#403) * Conversation.py update for elevenlabs_extra_body * updates * naming * addressing * change * lint * addressed compile error * ksdjfghksdjhdf --- .../conversational_ai/conversation.py | 28 +++++++++++++++++-- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/src/elevenlabs/conversational_ai/conversation.py b/src/elevenlabs/conversational_ai/conversation.py index 13533ab8..6913f99a 100644 --- a/src/elevenlabs/conversational_ai/conversation.py +++ b/src/elevenlabs/conversational_ai/conversation.py @@ -52,12 +52,21 @@ def interrupt(self): """ pass - +class ConversationConfig: + """Configuration options for the Conversation.""" + def __init__( + self, + extra_body: Optional[dict] = None, + conversation_config_override: Optional[dict] = None, + ): + self.extra_body = extra_body or {} + self.conversation_config_override = conversation_config_override or {} + class Conversation: client: BaseElevenLabs agent_id: str requires_auth: bool - + config: ConversationConfig audio_interface: AudioInterface callback_agent_response: Optional[Callable[[str], None]] callback_agent_response_correction: Optional[Callable[[str, str], None]] @@ -76,6 +85,8 @@ def __init__( *, requires_auth: bool, audio_interface: 
AudioInterface, + config: Optional[ConversationConfig] = None, + callback_agent_response: Optional[Callable[[str], None]] = None, callback_agent_response_correction: Optional[Callable[[str, str], None]] = None, callback_user_transcript: Optional[Callable[[str], None]] = None, @@ -104,6 +115,7 @@ def __init__( self.audio_interface = audio_interface self.callback_agent_response = callback_agent_response + self.config = config or ConversationConfig() self.callback_agent_response_correction = callback_agent_response_correction self.callback_user_transcript = callback_user_transcript self.callback_latency_measurement = callback_latency_measurement @@ -136,6 +148,15 @@ def wait_for_session_end(self) -> Optional[str]: def _run(self, ws_url: str): with connect(ws_url) as ws: + ws.send( + json.dumps( + { + "type": "conversation_initiation_client_data", + "custom_llm_extra_body": self.config.extra_body, + "conversation_config_override": self.config.conversation_config_override, + } + ) + ) def input_callback(audio): ws.send( @@ -161,6 +182,7 @@ def _handle_message(self, message, ws): event = message["conversation_initiation_metadata_event"] assert self._conversation_id is None self._conversation_id = event["conversation_id"] + elif message["type"] == "audio": event = message["audio_event"] if int(event["event_id"]) <= self._last_interrupt_id: @@ -212,4 +234,4 @@ def _get_signed_url(https://github.com/self): f"v1/convai/conversation/get_signed_url?agent_id={self.agent_id}", method="GET", ) - return response.json()["signed_url"] + return response.json()["signed_url"] \ No newline at end of file From 8ae881ef60aad4c0c9b77439551813a1f3ab81de Mon Sep 17 00:00:00 2001 From: Laco Date: Thu, 21 Nov 2024 15:12:40 +0100 Subject: [PATCH 19/45] Bump to version 1.13.0 (#404) --- pyproject.toml | 2 +- src/elevenlabs/core/client_wrapper.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7bc3f825..287f8aec 100644 --- 
a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.12.1" +version = "1.13.0" description = "" readme = "README.md" authors = [] diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index 41aea407..043753fc 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.12.1", + "X-Fern-SDK-Version": "1.13.0", } if self._api_key is not None: headers["xi-api-key"] = self._api_key From 521192bfff0186993ee141f7e9e71e66c9fc2884 Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Fri, 29 Nov 2024 11:20:32 +0000 Subject: [PATCH 20/45] SDK regeneration --- poetry.lock | 261 +- pyproject.toml | 2 +- reference.md | 2937 ++++++++++------- src/elevenlabs/__init__.py | 211 +- src/elevenlabs/audio_isolation/client.py | 52 - src/elevenlabs/base_client.py | 12 +- src/elevenlabs/conversational_ai/__init__.py | 13 + src/elevenlabs/conversational_ai/client.py | 2209 +++++++++++++ .../conversational_ai/types/__init__.py | 13 + ...nvai_agents_agent_id_patch_secrets_item.py | 48 + src/elevenlabs/core/client_wrapper.py | 2 +- src/elevenlabs/dubbing/client.py | 32 - src/elevenlabs/history/__init__.py | 3 + src/elevenlabs/history/client.py | 21 + src/elevenlabs/history/types/__init__.py | 5 + .../types/history_get_all_request_source.py | 5 + src/elevenlabs/projects/__init__.py | 4 +- src/elevenlabs/projects/client.py | 65 +- src/elevenlabs/projects/types/__init__.py | 3 +- .../types/projects_add_request_fiction.py | 5 + src/elevenlabs/speech_to_speech/client.py | 80 +- .../text_to_sound_effects/client.py | 34 - src/elevenlabs/text_to_speech/client.py | 16 +- src/elevenlabs/text_to_voice/__init__.py | 3 + 
src/elevenlabs/text_to_voice/client.py | 79 +- .../text_to_voice/types/__init__.py | 5 + ...e_create_previews_request_output_format.py | 20 + src/elevenlabs/types/__init__.py | 196 +- ....py => add_agent_secret_response_model.py} | 10 +- .../add_knowledge_base_response_model.py | 19 + src/elevenlabs/types/agent_ban.py | 22 + src/elevenlabs/types/agent_config.py | 30 + src/elevenlabs/types/agent_config_override.py | 22 + .../types/agent_metadata_response_model.py | 19 + .../types/agent_platform_settings.py | 28 + .../types/agent_summary_response_model.py | 21 + src/elevenlabs/types/allowlist_item.py | 19 + .../types/array_json_schema_property.py | 30 + .../types/array_json_schema_property_items.py | 13 + .../types/asr_conversational_config.py | 25 + src/elevenlabs/types/asr_input_format.py | 7 + src/elevenlabs/types/asr_provider.py | 5 + src/elevenlabs/types/asr_quality.py | 5 + src/elevenlabs/types/auth_settings.py | 22 + src/elevenlabs/types/authorization_method.py | 7 + src/elevenlabs/types/ban_reason_type.py | 5 + src/elevenlabs/types/breakdown_types.py | 2 +- src/elevenlabs/types/chapter_response.py | 1 + src/elevenlabs/types/client_event.py | 21 + src/elevenlabs/types/client_tool_config.py | 35 + .../types/conv_ai_new_secret_config.py | 20 + .../types/conv_ai_secret_locator.py | 23 + .../types/conv_ai_stored_secret_config.py | 20 + .../conversation_charging_common_model.py | 19 + src/elevenlabs/types/conversation_config.py | 21 + .../conversation_config_client_override.py | 22 + ...versation_history_analysis_common_model.py | 29 + ...evaluation_criteria_result_common_model.py | 22 + ...versation_history_metadata_common_model.py | 25 + ...rsation_history_transcript_common_model.py | 26 + ...on_history_transcript_common_model_role.py | 5 + ...story_transcript_tool_call_common_model.py | 22 + ...ory_transcript_tool_result_common_model.py | 23 + .../conversation_initiation_client_data.py | 21 + .../conversation_signed_url_response_model.py | 19 + 
.../conversation_summary_response_model.py | 28 + ...versation_summary_response_model_status.py | 5 + .../types/conversation_token_db_model.py | 23 + .../types/conversation_token_purpose.py | 5 + src/elevenlabs/types/conversational_config.py | 36 + .../types/create_agent_response_model.py | 19 + src/elevenlabs/types/custom_llm.py | 22 + .../data_collection_result_common_model.py | 23 + src/elevenlabs/types/embed_config.py | 38 + src/elevenlabs/types/embed_config_avatar.py | 58 + src/elevenlabs/types/embed_variant.py | 5 + src/elevenlabs/types/evaluation_settings.py | 25 + .../types/evaluation_success_result.py | 5 + .../types/get_agent_embed_response_model.py | 21 + .../types/get_agent_link_response_model.py | 21 + .../types/get_agent_response_model.py | 36 + .../types/get_agents_page_response_model.py | 22 + .../types/get_conversation_response_model.py | 30 + .../get_conversation_response_model_status.py | 5 + .../get_conversations_page_response_model.py | 22 + .../types/get_knowledge_base_reponse_model.py | 22 + .../get_knowledge_base_reponse_model_type.py | 5 + src/elevenlabs/types/image_avatar.py | 19 + .../types/knowledge_base_locator.py | 22 + .../types/knowledge_base_locator_type.py | 5 + .../types/literal_json_schema_property.py | 21 + .../literal_json_schema_property_type.py | 5 + src/elevenlabs/types/llm.py | 21 + .../types/object_json_schema_property.py | 29 + ...t_json_schema_property_properties_value.py | 13 + src/elevenlabs/types/orb_avatar.py | 20 + .../types/post_agent_avatar_response_model.py | 20 + .../types/project_extended_response_model.py | 6 + ...response_model_apply_text_normalization.py | 7 + ...project_extended_response_model_fiction.py | 5 + src/elevenlabs/types/project_response.py | 4 + .../types/project_response_model_fiction.py | 5 + src/elevenlabs/types/prompt_agent.py | 37 + src/elevenlabs/types/prompt_agent_override.py | 19 + .../types/prompt_agent_tools_item.py | 56 + .../types/prompt_evaluation_criteria.py | 26 + 
...ronunciation_dictionary_version_locator.py | 26 + .../types/query_params_json_schema.py | 21 + .../types/reader_resource_response_model.py | 21 + ...r_resource_response_model_resource_type.py | 5 + ...o_provider_response_model_provider_type.py | 5 - .../types/tts_conversational_config.py | 31 + .../tts_conversational_config_override.py | 19 + .../types/tts_conversational_model.py | 5 + .../types/tts_optimize_streaming_latency.py | 3 + src/elevenlabs/types/tts_output_format.py | 7 + src/elevenlabs/types/turn_config.py | 21 + src/elevenlabs/types/turn_mode.py | 5 + src/elevenlabs/types/url_avatar.py | 19 + src/elevenlabs/types/user.py | 2 + src/elevenlabs/types/voice.py | 1 + .../types/voice_preview_response_model.py | 5 +- .../types/voice_previews_response_model.py | 1 + .../types/voice_sharing_response.py | 2 + .../types/webhook_tool_api_schema_config.py | 40 + .../webhook_tool_api_schema_config_method.py | 5 + ...api_schema_config_request_headers_value.py | 6 + src/elevenlabs/types/webhook_tool_config.py | 34 + src/elevenlabs/voice_generation/client.py | 4 +- src/elevenlabs/workspace/client.py | 127 - 130 files changed, 6529 insertions(+), 1723 deletions(-) create mode 100644 src/elevenlabs/conversational_ai/__init__.py create mode 100644 src/elevenlabs/conversational_ai/client.py create mode 100644 src/elevenlabs/conversational_ai/types/__init__.py create mode 100644 src/elevenlabs/conversational_ai/types/body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item.py create mode 100644 src/elevenlabs/history/types/__init__.py create mode 100644 src/elevenlabs/history/types/history_get_all_request_source.py create mode 100644 src/elevenlabs/projects/types/projects_add_request_fiction.py create mode 100644 src/elevenlabs/text_to_voice/types/__init__.py create mode 100644 src/elevenlabs/text_to_voice/types/text_to_voice_create_previews_request_output_format.py rename src/elevenlabs/types/{sso_provider_response_model.py => 
add_agent_secret_response_model.py} (66%) create mode 100644 src/elevenlabs/types/add_knowledge_base_response_model.py create mode 100644 src/elevenlabs/types/agent_ban.py create mode 100644 src/elevenlabs/types/agent_config.py create mode 100644 src/elevenlabs/types/agent_config_override.py create mode 100644 src/elevenlabs/types/agent_metadata_response_model.py create mode 100644 src/elevenlabs/types/agent_platform_settings.py create mode 100644 src/elevenlabs/types/agent_summary_response_model.py create mode 100644 src/elevenlabs/types/allowlist_item.py create mode 100644 src/elevenlabs/types/array_json_schema_property.py create mode 100644 src/elevenlabs/types/array_json_schema_property_items.py create mode 100644 src/elevenlabs/types/asr_conversational_config.py create mode 100644 src/elevenlabs/types/asr_input_format.py create mode 100644 src/elevenlabs/types/asr_provider.py create mode 100644 src/elevenlabs/types/asr_quality.py create mode 100644 src/elevenlabs/types/auth_settings.py create mode 100644 src/elevenlabs/types/authorization_method.py create mode 100644 src/elevenlabs/types/ban_reason_type.py create mode 100644 src/elevenlabs/types/client_event.py create mode 100644 src/elevenlabs/types/client_tool_config.py create mode 100644 src/elevenlabs/types/conv_ai_new_secret_config.py create mode 100644 src/elevenlabs/types/conv_ai_secret_locator.py create mode 100644 src/elevenlabs/types/conv_ai_stored_secret_config.py create mode 100644 src/elevenlabs/types/conversation_charging_common_model.py create mode 100644 src/elevenlabs/types/conversation_config.py create mode 100644 src/elevenlabs/types/conversation_config_client_override.py create mode 100644 src/elevenlabs/types/conversation_history_analysis_common_model.py create mode 100644 src/elevenlabs/types/conversation_history_evaluation_criteria_result_common_model.py create mode 100644 src/elevenlabs/types/conversation_history_metadata_common_model.py create mode 100644 
src/elevenlabs/types/conversation_history_transcript_common_model.py create mode 100644 src/elevenlabs/types/conversation_history_transcript_common_model_role.py create mode 100644 src/elevenlabs/types/conversation_history_transcript_tool_call_common_model.py create mode 100644 src/elevenlabs/types/conversation_history_transcript_tool_result_common_model.py create mode 100644 src/elevenlabs/types/conversation_initiation_client_data.py create mode 100644 src/elevenlabs/types/conversation_signed_url_response_model.py create mode 100644 src/elevenlabs/types/conversation_summary_response_model.py create mode 100644 src/elevenlabs/types/conversation_summary_response_model_status.py create mode 100644 src/elevenlabs/types/conversation_token_db_model.py create mode 100644 src/elevenlabs/types/conversation_token_purpose.py create mode 100644 src/elevenlabs/types/conversational_config.py create mode 100644 src/elevenlabs/types/create_agent_response_model.py create mode 100644 src/elevenlabs/types/custom_llm.py create mode 100644 src/elevenlabs/types/data_collection_result_common_model.py create mode 100644 src/elevenlabs/types/embed_config.py create mode 100644 src/elevenlabs/types/embed_config_avatar.py create mode 100644 src/elevenlabs/types/embed_variant.py create mode 100644 src/elevenlabs/types/evaluation_settings.py create mode 100644 src/elevenlabs/types/evaluation_success_result.py create mode 100644 src/elevenlabs/types/get_agent_embed_response_model.py create mode 100644 src/elevenlabs/types/get_agent_link_response_model.py create mode 100644 src/elevenlabs/types/get_agent_response_model.py create mode 100644 src/elevenlabs/types/get_agents_page_response_model.py create mode 100644 src/elevenlabs/types/get_conversation_response_model.py create mode 100644 src/elevenlabs/types/get_conversation_response_model_status.py create mode 100644 src/elevenlabs/types/get_conversations_page_response_model.py create mode 100644 
src/elevenlabs/types/get_knowledge_base_reponse_model.py create mode 100644 src/elevenlabs/types/get_knowledge_base_reponse_model_type.py create mode 100644 src/elevenlabs/types/image_avatar.py create mode 100644 src/elevenlabs/types/knowledge_base_locator.py create mode 100644 src/elevenlabs/types/knowledge_base_locator_type.py create mode 100644 src/elevenlabs/types/literal_json_schema_property.py create mode 100644 src/elevenlabs/types/literal_json_schema_property_type.py create mode 100644 src/elevenlabs/types/llm.py create mode 100644 src/elevenlabs/types/object_json_schema_property.py create mode 100644 src/elevenlabs/types/object_json_schema_property_properties_value.py create mode 100644 src/elevenlabs/types/orb_avatar.py create mode 100644 src/elevenlabs/types/post_agent_avatar_response_model.py create mode 100644 src/elevenlabs/types/project_extended_response_model_apply_text_normalization.py create mode 100644 src/elevenlabs/types/project_extended_response_model_fiction.py create mode 100644 src/elevenlabs/types/project_response_model_fiction.py create mode 100644 src/elevenlabs/types/prompt_agent.py create mode 100644 src/elevenlabs/types/prompt_agent_override.py create mode 100644 src/elevenlabs/types/prompt_agent_tools_item.py create mode 100644 src/elevenlabs/types/prompt_evaluation_criteria.py create mode 100644 src/elevenlabs/types/pydantic_pronunciation_dictionary_version_locator.py create mode 100644 src/elevenlabs/types/query_params_json_schema.py create mode 100644 src/elevenlabs/types/reader_resource_response_model.py create mode 100644 src/elevenlabs/types/reader_resource_response_model_resource_type.py delete mode 100644 src/elevenlabs/types/sso_provider_response_model_provider_type.py create mode 100644 src/elevenlabs/types/tts_conversational_config.py create mode 100644 src/elevenlabs/types/tts_conversational_config_override.py create mode 100644 src/elevenlabs/types/tts_conversational_model.py create mode 100644 
src/elevenlabs/types/tts_optimize_streaming_latency.py create mode 100644 src/elevenlabs/types/tts_output_format.py create mode 100644 src/elevenlabs/types/turn_config.py create mode 100644 src/elevenlabs/types/turn_mode.py create mode 100644 src/elevenlabs/types/url_avatar.py create mode 100644 src/elevenlabs/types/webhook_tool_api_schema_config.py create mode 100644 src/elevenlabs/types/webhook_tool_api_schema_config_method.py create mode 100644 src/elevenlabs/types/webhook_tool_api_schema_config_request_headers_value.py create mode 100644 src/elevenlabs/types/webhook_tool_config.py diff --git a/poetry.lock b/poetry.lock index a4c918eb..31db4bac 100644 --- a/poetry.lock +++ b/poetry.lock @@ -199,13 +199,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.6" +version = "1.0.7" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"}, - {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"}, + {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, + {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, ] [package.dependencies] @@ -220,13 +220,13 @@ trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httpx" -version = "0.27.2" +version = "0.28.0" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" files = [ - {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, - {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, + {file = "httpx-0.28.0-py3-none-any.whl", hash = "sha256:dc0b419a0cfeb6e8b34e85167c0da2671206f5095f1baa9663d23bcfd6b535fc"}, + {file = "httpx-0.28.0.tar.gz", hash = "sha256:0858d3bab51ba7e386637f22a61d8ccddaeec5f3fe4209da3a6168dbb91573e0"}, ] [package.dependencies] @@ -234,7 +234,6 @@ anyio = "*" certifi = "*" httpcore = "==1.*" idna = "*" -sniffio = "*" [package.extras] brotli = ["brotli", "brotlicffi"] @@ -327,13 +326,13 @@ files = [ [[package]] name = "packaging" -version = "24.1" +version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] [[package]] @@ -364,6 +363,8 @@ files = [ {file = "PyAudio-0.2.14-cp311-cp311-win_amd64.whl", hash = "sha256:bbeb01d36a2f472ae5ee5e1451cacc42112986abe622f735bb870a5db77cf903"}, {file = "PyAudio-0.2.14-cp312-cp312-win32.whl", hash = "sha256:5fce4bcdd2e0e8c063d835dbe2860dac46437506af509353c7f8114d4bacbd5b"}, {file = "PyAudio-0.2.14-cp312-cp312-win_amd64.whl", hash = "sha256:12f2f1ba04e06ff95d80700a78967897a489c05e093e3bffa05a84ed9c0a7fa3"}, + {file = "PyAudio-0.2.14-cp313-cp313-win32.whl", hash = "sha256:95328285b4dab57ea8c52a4a996cb52be6d629353315be5bfda403d15932a497"}, + {file = 
"PyAudio-0.2.14-cp313-cp313-win_amd64.whl", hash = "sha256:692d8c1446f52ed2662120bcd9ddcb5aa2b71f38bda31e58b19fb4672fffba69"}, {file = "PyAudio-0.2.14-cp38-cp38-win32.whl", hash = "sha256:858caf35b05c26d8fc62f1efa2e8f53d5fa1a01164842bd622f70ddc41f55000"}, {file = "PyAudio-0.2.14-cp38-cp38-win_amd64.whl", hash = "sha256:2dac0d6d675fe7e181ba88f2de88d321059b69abd52e3f4934a8878e03a7a074"}, {file = "PyAudio-0.2.14-cp39-cp39-win32.whl", hash = "sha256:f745109634a7c19fa4d6b8b7d6967c3123d988c9ade0cd35d4295ee1acdb53e9"}, @@ -376,22 +377,19 @@ test = ["numpy"] [[package]] name = "pydantic" -version = "2.9.2" +version = "2.10.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, - {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, + {file = "pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e"}, + {file = "pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.23.4" -typing-extensions = [ - {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, - {version = ">=4.6.1", markers = "python_version < \"3.13\""}, -] +pydantic-core = "2.27.1" +typing-extensions = ">=4.12.2" [package.extras] email = ["email-validator (>=2.0.0)"] @@ -399,100 +397,111 @@ timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.23.4" +version = "2.27.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, - {file = 
"pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, - {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, - {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, - {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, - {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, - {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, - {file = 
"pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"}, - {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"}, - {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"}, - {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"}, - {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"}, - {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"}, - {file = 
"pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"}, - {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"}, - {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"}, - {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"}, - {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"}, - {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"}, - {file = 
"pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"}, - {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"}, - {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, - {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, - {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, - {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"}, - {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", 
hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"}, - {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"}, - {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"}, - {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"}, - {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"}, - {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"}, - {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"}, - {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"}, - {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"}, - {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"}, - {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"}, - {file = 
"pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"}, - {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, + {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"}, + {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"}, + {file = 
"pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"}, + {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"}, + {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"}, + {file = "pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"}, + {file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"}, + {file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"}, + {file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"}, + {file = 
"pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"}, + {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"}, + {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"}, + {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"}, + {file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"}, + {file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"}, + {file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"}, + {file = "pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"}, + {file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"}, + {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"}, + {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"}, + {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"}, + {file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"}, + {file = "pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"}, + {file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"}, + {file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"}, + {file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"}, + {file = 
"pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"}, + {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"}, + {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"}, + {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"}, + {file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"}, + {file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"}, + {file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"}, + {file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"}, + {file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"}, + {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"}, + {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"}, + {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"}, + {file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"}, + {file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"}, + {file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"}, + {file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"}, + {file = 
"pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"}, + {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"}, + {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"}, + {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"}, + {file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"}, + {file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", 
hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"}, + {file = 
"pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"}, + {file = "pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"}, ] [package.dependencies] @@ -624,13 +633,43 @@ files = [ [[package]] name = "tomli" -version = "2.0.2" +version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" files = [ - {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, - {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, + {file = 
"tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, + {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, + {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, + 
{file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, + {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, + {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, + {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, + {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, + {file = "tomli-2.2.1-py3-none-any.whl", hash = 
"sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, + {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, ] [[package]] diff --git a/pyproject.toml b/pyproject.toml index 287f8aec..3a07be86 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.13.0" +version = "1.13.1" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index 3fa8ba09..5ab4d727 100644 --- a/reference.md +++ b/reference.md @@ -75,6 +75,22 @@ client.history.get_all(
    +**search:** `typing.Optional[str]` — search term used for filtering + +
    +
    + +
    +
    + +**source:** `typing.Optional[HistoryGetAllRequestSource]` — Source of the generated history item + +
    +
    + +
    +
    + **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -376,235 +392,7 @@ client.history.download(
    ## TextToSoundEffects -
    client.text_to_sound_effects.convert(...) -
    -
    - -#### 📝 Description - -
    -
    - -
    -
    - -Converts a text of your choice into sound -
    -
    -
    -
    - -#### 🔌 Usage - -
    -
    - -
    -
    - -```python -from elevenlabs import ElevenLabs - -client = ElevenLabs( - api_key="YOUR_API_KEY", -) -client.text_to_sound_effects.convert( - text="string", - duration_seconds=1.1, - prompt_influence=1.1, -) - -``` -
    -
    -
    -
    - -#### ⚙️ Parameters - -
    -
    - -
    -
    - -**text:** `str` — The text that will get converted into a sound effect. - -
    -
    - -
    -
    - -**duration_seconds:** `typing.Optional[float]` — The duration of the sound which will be generated in seconds. Must be at least 0.5 and at most 22. If set to None we will guess the optimal duration using the prompt. Defaults to None. - -
    -
    - -
    -
    - -**prompt_influence:** `typing.Optional[float]` — A higher prompt influence makes your generation follow the prompt more closely while also making generations less variable. Must be a value between 0 and 1. Defaults to 0.3. - -
    -
    - -
    -
    - -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. - -
    -
    -
    -
    - - -
    -
    -
    - ## AudioIsolation -
    client.audio_isolation.audio_isolation(...) -
    -
    - -#### 📝 Description - -
    -
    - -
    -
    - -Removes background noise from audio -
    -
    -
    -
    - -#### 🔌 Usage - -
    -
    - -
    -
    - -```python -from elevenlabs import ElevenLabs - -client = ElevenLabs( - api_key="YOUR_API_KEY", -) -client.audio_isolation.audio_isolation() - -``` -
    -
    -
    -
    - -#### ⚙️ Parameters - -
    -
    - -
    -
    - -**audio:** `from __future__ import annotations - -core.File` — See core.File for more documentation - -
    -
    - -
    -
    - -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. - -
    -
    -
    -
    - - -
    -
    -
    - -
    client.audio_isolation.audio_isolation_stream(...) -
    -
    - -#### 📝 Description - -
    -
    - -
    -
    - -Removes background noise from audio and streams the result -
    -
    -
    -
    - -#### 🔌 Usage - -
    -
    - -
    -
    - -```python -from elevenlabs import ElevenLabs - -client = ElevenLabs( - api_key="YOUR_API_KEY", -) -client.audio_isolation.audio_isolation_stream() - -``` -
    -
    -
    -
    - -#### ⚙️ Parameters - -
    -
    - -
    -
    - -**audio:** `from __future__ import annotations - -core.File` — See core.File for more documentation - -
    -
    - -
    -
    - -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. - -
    -
    -
    -
    - - -
    -
    -
    - ## Samples
    client.samples.delete(...)
    @@ -895,7 +683,7 @@ client.text_to_speech.convert(
    -**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. +**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
    @@ -1086,7 +874,7 @@ client.text_to_speech.convert_with_timestamps(
    -**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. +**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
    @@ -1286,7 +1074,7 @@ client.text_to_speech.convert_as_stream(
    -**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. +**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
    @@ -1479,7 +1267,7 @@ client.text_to_speech.stream_with_timestamps(
    -**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. +**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
    @@ -1550,7 +1338,8 @@ client.text_to_speech.stream_with_timestamps(
    ## SpeechToSpeech -
    client.speech_to_speech.convert(...) +## VoiceGeneration +
    client.voice_generation.generate_parameters()
    @@ -1562,7 +1351,7 @@ client.text_to_speech.stream_with_timestamps(
    -Create speech by combining the content and emotion of the uploaded audio with a voice of your choice. +Get possible parameters for the /v1/voice-generation/generate-voice endpoint.
    @@ -1582,12 +1371,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.speech_to_speech.convert( - voice_id="string", - enable_logging=True, - optimize_streaming_latency="0", - output_format="mp3_22050_32", -) +client.voice_generation.generate_parameters() ``` @@ -1603,49 +1387,81 @@ client.speech_to_speech.convert(
    -**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    + +
    -
    -
    - -**audio:** `from __future__ import annotations -core.File` — See core.File for more documentation -
    +
    +
    client.voice_generation.generate(...)
    -**enable_logging:** `typing.Optional[bool]` — When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers. - -
    -
    +#### 📝 Description
    -**optimize_streaming_latency:** `typing.Optional[OptimizeStreamingLatency]` — You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. - -
    -
    -
    -**output_format:** `typing.Optional[OutputFormat]` — The output format of the generated audio. - +Generate a random voice based on parameters. This method returns a generated_voice_id in the response header, and a sample of the voice in the body. If you like the generated voice call /v1/voice-generation/create-voice with the generated_voice_id to create the voice. +
    +
    + + + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.voice_generation.generate( + gender="female", + accent="american", + age="middle_aged", + accent_strength=2.0, + text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**gender:** `Gender` — Category code corresponding to the gender of the generated voice. Possible values: female, male. +
    -**model_id:** `typing.Optional[str]` — Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property. +**accent:** `str` — Category code corresponding to the accent of the generated voice. Possible values: british, american, african, australian, indian.
    @@ -1653,7 +1469,7 @@ core.File` — See core.File for more documentation
    -**voice_settings:** `typing.Optional[str]` — Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string. +**age:** `Age` — Category code corresponding to the age of the generated voice. Possible values: young, middle_aged, old.
    @@ -1661,7 +1477,7 @@ core.File` — See core.File for more documentation
    -**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. +**accent_strength:** `float` — The strength of the accent of the generated voice. Has to be between 0.3 and 2.0.
    @@ -1669,7 +1485,7 @@ core.File` — See core.File for more documentation
    -**remove_background_noise:** `typing.Optional[bool]` — If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. +**text:** `str` — Text to generate, text length has to be between 100 and 1000.
    @@ -1689,7 +1505,7 @@ core.File` — See core.File for more documentation
    -
    client.speech_to_speech.convert_as_stream(...) +
    client.voice_generation.create_a_previously_generated_voice(...)
    @@ -1701,7 +1517,7 @@ core.File` — See core.File for more documentation
    -Create speech by combining the content and emotion of the uploaded audio with a voice of your choice and returns an audio stream. +Create a previously generated voice. This endpoint should be called after you fetched a generated_voice_id using /v1/voice-generation/generate-voice.
    @@ -1721,11 +1537,10 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.speech_to_speech.convert_as_stream( - voice_id="string", - enable_logging="0", - optimize_streaming_latency="mp3_22050_32", - output_format="string", +client.voice_generation.create_a_previously_generated_voice( + voice_name="Alex", + voice_description="Middle-aged American woman", + generated_voice_id="rbVJFu6SGRD1dbWpKnWl", ) ``` @@ -1742,54 +1557,7 @@ client.speech_to_speech.convert_as_stream(
    -**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. - -
    -
    - -
    -
    - -**audio:** `from __future__ import annotations - -core.File` — See core.File for more documentation - -
    -
    - -
    -
    - -**enable_logging:** `typing.Optional[OptimizeStreamingLatency]` — You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. - -
    -
    - -
    -
    - -**optimize_streaming_latency:** `typing.Optional[OutputFormat]` — The output format of the generated audio. - -
    -
    - -
    -
    - -**output_format:** `typing.Optional[str]` - -Output format of the generated audio. Must be one of: -mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps. -mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps. -mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps. -mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps. -mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps. -mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above. -pcm_16000 - PCM format (S16LE) with 16kHz sample rate. -pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate. -pcm_24000 - PCM format (S16LE) with 24kHz sample rate. -pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above. -ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs. +**voice_name:** `str` — Name to use for the created voice.
    @@ -1797,7 +1565,7 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
    -**model_id:** `typing.Optional[str]` — Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property. +**voice_description:** `str` — Description to use for the created voice.
    @@ -1805,7 +1573,7 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
-**voice_settings:** `typing.Optional[str]` — Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string. +**generated_voice_id:** `str` — The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if you don't have one yet.
    @@ -1813,7 +1581,7 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
    -**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. +**played_not_selected_voice_ids:** `typing.Optional[typing.Sequence[str]]` — List of voice ids that the user has played but not selected. Used for RLHF.
    @@ -1821,7 +1589,7 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
    -**remove_background_noise:** `typing.Optional[bool]` — If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. +**labels:** `typing.Optional[typing.Dict[str, str]]` — Optional, metadata to add to the created voice. Defaults to None.
    @@ -1829,7 +1597,7 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -1841,8 +1609,8 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
    -## VoiceGeneration -
    client.voice_generation.generate_parameters() +## TextToVoice +
    client.text_to_voice.create_previews(...)
    @@ -1854,7 +1622,7 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
-Get possible parameters for the /v1/voice-generation/generate-voice endpoint. +Generate a custom voice based on voice description. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. If you like a voice preview and want to create the voice, call /v1/text-to-voice/create-voice-from-preview with the generated_voice_id to create the voice.
    @@ -1874,7 +1642,10 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voice_generation.generate_parameters() +client.text_to_voice.create_previews( + voice_description="A sassy little squeaky mouse", + text="Every act of kindness, no matter how small, carries value and can make a difference, as no gesture of goodwill is ever wasted.", +) ``` @@ -1890,6 +1661,51 @@ client.voice_generation.generate_parameters()
    +**voice_description:** `str` — Description to use for the created voice. + +
    +
    + +
    +
    + +**text:** `str` — Text to generate, text length has to be between 100 and 1000. + +
    +
    + +
    +
    + +**output_format:** `typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat]` + +Output format of the generated audio. Must be one of: +mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps. +mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps. +mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps. +mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps. +mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps. +mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above. +pcm_16000 - PCM format (S16LE) with 16kHz sample rate. +pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate. +pcm_24000 - PCM format (S16LE) with 24kHz sample rate. +pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above. +ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs. + +
    +
    + +
    +
    + +**auto_generate_text:** `typing.Optional[bool]` — Whether to automatically generate a text suitable for the voice description. + +
    +
    + +
    +
    + **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -1902,7 +1718,7 @@ client.voice_generation.generate_parameters()
    -
    client.voice_generation.generate(...) +
    client.text_to_voice.create_voice_from_preview(...)
    @@ -1914,7 +1730,7 @@ client.voice_generation.generate_parameters()
    -Generate a random voice based on parameters. This method returns a generated_voice_id in the response header, and a sample of the voice in the body. If you like the generated voice call /v1/voice-generation/create-voice with the generated_voice_id to create the voice. +Create a voice from previously generated voice preview. This endpoint should be called after you fetched a generated_voice_id using /v1/text-to-voice/create-previews.
    @@ -1934,12 +1750,10 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voice_generation.generate( - gender="female", - accent="american", - age="middle_aged", - accent_strength=2.0, - text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”", +client.text_to_voice.create_voice_from_preview( + voice_name="Little squeaky mouse", + voice_description="A sassy little squeaky mouse", + generated_voice_id="37HceQefKmEi3bGovXjL", ) ``` @@ -1956,7 +1770,7 @@ client.voice_generation.generate(
    -**gender:** `Gender` — Category code corresponding to the gender of the generated voice. Possible values: female, male. +**voice_name:** `str` — Name to use for the created voice.
    @@ -1964,7 +1778,7 @@ client.voice_generation.generate(
    -**accent:** `str` — Category code corresponding to the accent of the generated voice. Possible values: american, british, african, australian, indian. +**voice_description:** `str` — Description to use for the created voice.
    @@ -1972,7 +1786,7 @@ client.voice_generation.generate(
-**age:** `Age` — Category code corresponding to the age of the generated voice. Possible values: young, middle_aged, old. +**generated_voice_id:** `str` — The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if you don't have one yet.
    @@ -1980,7 +1794,7 @@ client.voice_generation.generate(
    -**accent_strength:** `float` — The strength of the accent of the generated voice. Has to be between 0.3 and 2.0. +**labels:** `typing.Optional[typing.Dict[str, str]]` — Optional, metadata to add to the created voice. Defaults to None.
    @@ -1988,7 +1802,7 @@ client.voice_generation.generate(
    -**text:** `str` — Text to generate, text length has to be between 100 and 1000. +**played_not_selected_voice_ids:** `typing.Optional[typing.Sequence[str]]` — List of voice ids that the user has played but not selected. Used for RLHF.
    @@ -1996,7 +1810,7 @@ client.voice_generation.generate(
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -2008,7 +1822,8 @@ client.voice_generation.generate(
    -
    client.voice_generation.create_a_previously_generated_voice(...) +## User +
    client.user.get_subscription()
    @@ -2020,7 +1835,7 @@ client.voice_generation.generate(
    -Create a previously generated voice. This endpoint should be called after you fetched a generated_voice_id using /v1/voice-generation/generate-voice. +Gets extended information about the users subscription
    @@ -2040,11 +1855,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voice_generation.create_a_previously_generated_voice( - voice_name="Alex", - voice_description="Middle-aged American woman", - generated_voice_id="rbVJFu6SGRD1dbWpKnWl", -) +client.user.get_subscription() ``` @@ -2060,42 +1871,62 @@ client.voice_generation.create_a_previously_generated_voice(
    -**voice_name:** `str` — Name to use for the created voice. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    + +
    -
    -
    -**voice_description:** `str` — Description to use for the created voice. -
    +
    +
    client.user.get()
    -**generated_voice_id:** `str` — The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet. - -
    -
    +#### 📝 Description
    -**played_not_selected_voice_ids:** `typing.Optional[typing.Sequence[str]]` — List of voice ids that the user has played but not selected. Used for RLHF. - +
    +
    + +Gets information about the user +
    +
    +#### 🔌 Usage +
    -**labels:** `typing.Optional[typing.Dict[str, str]]` — Optional, metadata to add to the created voice. Defaults to None. - +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.user.get() + +```
    +
    +
    + +#### ⚙️ Parameters + +
    +
    @@ -2112,8 +1943,8 @@ client.voice_generation.create_a_previously_generated_voice(
    -## TextToVoice -
    client.text_to_voice.create_previews(...) +## voices +
    client.voices.get_all(...)
    @@ -2125,7 +1956,7 @@ client.voice_generation.create_a_previously_generated_voice(
    -Generate a custom voice based on voice description. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. If you like the a voice previewand want to create the voice call /v1/text-to-voice/create-voice-from-preview with the generated_voice_id to create the voice. +Gets a list of all available voices for a user.
    @@ -2145,10 +1976,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.text_to_voice.create_previews( - voice_description="voice_description", - text="text", -) +client.voices.get_all() ``` @@ -2164,15 +1992,7 @@ client.text_to_voice.create_previews(
    -**voice_description:** `str` — Description to use for the created voice. - -
    -
    - -
    -
    - -**text:** `str` — Text to generate, text length has to be between 100 and 1000. +**show_legacy:** `typing.Optional[bool]` — If set to true, legacy premade voices will be included in responses from /v1/voices
    @@ -2192,7 +2012,7 @@ client.text_to_voice.create_previews(
    -
    client.text_to_voice.create_voice_from_preview(...) +
    client.voices.get_default_settings()
    @@ -2204,7 +2024,7 @@ client.text_to_voice.create_previews(
-Create a voice from previously generated voice preview. This endpoint should be called after you fetched a generated_voice_id using /v1/text-to-voice/create-previews. +Gets the default settings for voices. "similarity_boost" corresponds to "Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app.
    @@ -2224,11 +2044,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.text_to_voice.create_voice_from_preview( - voice_name="voice_name", - voice_description="voice_description", - generated_voice_id="generated_voice_id", -) +client.voices.get_default_settings() ``` @@ -2244,46 +2060,6 @@ client.text_to_voice.create_voice_from_preview(
    -**voice_name:** `str` — Name to use for the created voice. - -
    -
    - -
    -
    - -**voice_description:** `str` — Description to use for the created voice. - -
    -
    - -
    -
    - -**generated_voice_id:** `str` — The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet. - -
    -
    - -
    -
    - -**labels:** `typing.Optional[typing.Dict[str, str]]` — Optional, metadata to add to the created voice. Defaults to None. - -
    -
    - -
    -
    - -**played_not_selected_voice_ids:** `typing.Optional[typing.Sequence[str]]` — List of voice ids that the user has played but not selected. Used for RLHF. - -
    -
    - -
    -
    - **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -2296,8 +2072,7 @@ client.text_to_voice.create_voice_from_preview(
    -## User -
    client.user.get_subscription() +
    client.voices.get_settings(...)
    @@ -2309,7 +2084,7 @@ client.text_to_voice.create_voice_from_preview(
-Gets extended information about the users subscription +Returns the settings for a specific voice. "similarity_boost" corresponds to "Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app.
    @@ -2329,7 +2104,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.user.get_subscription() +client.voices.get_settings( + voice_id="2EiwWnXFnvU5JabPnv8n", +) ``` @@ -2345,6 +2122,14 @@ client.user.get_subscription()
    +**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. + +
    +
    + +
    +
    + **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -2357,7 +2142,7 @@ client.user.get_subscription()
    -
    client.user.get() +
    client.voices.get(...)
    @@ -2369,7 +2154,7 @@ client.user.get_subscription()
    -Gets information about the user +Returns metadata about a specific voice.
    @@ -2389,7 +2174,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.user.get() +client.voices.get( + voice_id="29vD33N1CtxCmqQRPOHJ", +) ``` @@ -2405,6 +2192,22 @@ client.user.get()
    +**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. + +
    +
    + +
    +
    + +**with_settings:** `typing.Optional[bool]` — If set will return settings information corresponding to the voice, requires authorization. + +
    +
    + +
    +
    + **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -2417,8 +2220,7 @@ client.user.get()
    -## voices -
    client.voices.get_all(...) +
    client.voices.delete(...)
    @@ -2430,7 +2232,7 @@ client.user.get()
    -Gets a list of all available voices for a user. +Deletes a voice by its ID.
    @@ -2450,7 +2252,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.get_all() +client.voices.delete( + voice_id="29vD33N1CtxCmqQRPOHJ", +) ``` @@ -2466,7 +2270,7 @@ client.voices.get_all()
    -**show_legacy:** `typing.Optional[bool]` — If set to true, legacy premade voices will be included in responses from /v1/voices +**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
    @@ -2486,7 +2290,7 @@ client.voices.get_all()
    -
    client.voices.get_default_settings() +
    client.voices.edit_settings(...)
    @@ -2498,7 +2302,7 @@ client.voices.get_all()
-Gets the default settings for voices. "similarity_boost" corresponds to"Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app. +Edit your settings for a specific voice. "similarity_boost" corresponds to "Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app.
    @@ -2513,12 +2317,19 @@ Gets the default settings for voices. "similarity_boost" corresponds to"Clarity
    ```python -from elevenlabs import ElevenLabs +from elevenlabs import ElevenLabs, VoiceSettings client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.get_default_settings() +client.voices.edit_settings( + voice_id="29vD33N1CtxCmqQRPOHJ", + request=VoiceSettings( + stability=0.1, + similarity_boost=0.3, + style=0.2, + ), +) ```
    @@ -2534,6 +2345,22 @@ client.voices.get_default_settings()
    +**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. + +
    +
    + +
    +
    + +**request:** `VoiceSettings` + +
    +
    + +
    +
    + **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -2546,7 +2373,7 @@ client.voices.get_default_settings()
    -
    client.voices.get_settings(...) +
    client.voices.add(...)
    @@ -2558,7 +2385,7 @@ client.voices.get_default_settings()
    -Returns the settings for a specific voice. "similarity_boost" corresponds to"Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app. +Add a new voice to your collection of voices in VoiceLab.
    @@ -2578,8 +2405,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.get_settings( - voice_id="2EiwWnXFnvU5JabPnv8n", +client.voices.add( + name="Alex", ) ``` @@ -2596,7 +2423,41 @@ client.voices.get_settings(
    -**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. +**name:** `str` — The name that identifies this voice. This will be displayed in the dropdown of the website. + +
    +
    + +
    +
    + +**files:** `from __future__ import annotations + +typing.List[core.File]` — See core.File for more documentation + +
    +
    + +
    +
    + +**remove_background_noise:** `typing.Optional[bool]` — If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse. + +
    +
    + +
    +
    + +**description:** `typing.Optional[str]` — How would you describe the voice? + +
    +
    + +
    +
    + +**labels:** `typing.Optional[str]` — Serialized labels dictionary for the voice.
    @@ -2616,7 +2477,7 @@ client.voices.get_settings(
    -
    client.voices.get(...) +
    client.voices.edit(...)
    @@ -2628,7 +2489,7 @@ client.voices.get_settings(
    -Returns metadata about a specific voice. +Edit a voice created by you.
    @@ -2648,8 +2509,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.get( - voice_id="29vD33N1CtxCmqQRPOHJ", +client.voices.edit( + voice_id="JBFqnCBsd6RMkjVDRZzb", + name="George", ) ``` @@ -2674,7 +2536,41 @@ client.voices.get(
    -**with_settings:** `typing.Optional[bool]` — If set will return settings information corresponding to the voice, requires authorization. +**name:** `str` — The name that identifies this voice. This will be displayed in the dropdown of the website. + +
    +
    + +
    +
    + +**files:** `from __future__ import annotations + +typing.Optional[typing.List[core.File]]` — See core.File for more documentation + +
    +
    + +
    +
    + +**remove_background_noise:** `typing.Optional[bool]` — If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse. + +
    +
    + +
    +
    + +**description:** `typing.Optional[str]` — How would you describe the voice? + +
    +
    + +
    +
    + +**labels:** `typing.Optional[str]` — Serialized labels dictionary for the voice.
    @@ -2694,7 +2590,7 @@ client.voices.get(
    -
    client.voices.delete(...) +
    client.voices.add_sharing_voice(...)
    @@ -2706,7 +2602,7 @@ client.voices.get(
    -Deletes a voice by its ID. +Add a sharing voice to your collection of voices in VoiceLab.
    @@ -2726,8 +2622,10 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.delete( - voice_id="29vD33N1CtxCmqQRPOHJ", +client.voices.add_sharing_voice( + public_user_id="63e84100a6bf7874ba37a1bab9a31828a379ec94b891b401653b655c5110880f", + voice_id="sB1b5zUrxQVAFl2PhZFp", + new_name="Alita", ) ``` @@ -2744,6 +2642,14 @@ client.voices.delete(
    +**public_user_id:** `str` — Public user ID used to publicly identify ElevenLabs users. + +
    +
    + +
    +
    + **voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
    @@ -2752,6 +2658,14 @@ client.voices.delete(
    +**new_name:** `str` — The name that identifies this voice. This will be displayed in the dropdown of the website. + +
    +
    + +
    +
    + **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -2764,7 +2678,7 @@ client.voices.delete(
    -
    client.voices.edit_settings(...) +
    client.voices.get_shared(...)
    @@ -2776,7 +2690,7 @@ client.voices.delete(
    -Edit your settings for a specific voice. "similarity_boost" corresponds to"Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app. +Gets a list of shared voices.
    @@ -2791,18 +2705,15 @@ Edit your settings for a specific voice. "similarity_boost" corresponds to"Clari
    ```python -from elevenlabs import ElevenLabs, VoiceSettings +from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.edit_settings( - voice_id="29vD33N1CtxCmqQRPOHJ", - request=VoiceSettings( - stability=0.1, - similarity_boost=0.3, - style=0.2, - ), +client.voices.get_shared( + page_size=1, + gender="female", + language="en", ) ``` @@ -2819,7 +2730,7 @@ client.voices.edit_settings(
    -**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. +**page_size:** `typing.Optional[int]` — How many shared voices to return at maximum. Can not exceed 100, defaults to 30.
    @@ -2827,7 +2738,7 @@ client.voices.edit_settings(
    -**request:** `VoiceSettings` +**category:** `typing.Optional[str]` — voice category used for filtering
    @@ -2835,69 +2746,63 @@ client.voices.edit_settings(
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**gender:** `typing.Optional[str]` — gender used for filtering
    -
    -
    +
    +
    +**age:** `typing.Optional[str]` — age used for filtering +
    -
    -
    client.voices.add(...)
    -#### 📝 Description - -
    -
    +**accent:** `typing.Optional[str]` — accent used for filtering + +
    +
    -Add a new voice to your collection of voices in VoiceLab. -
    -
    +**language:** `typing.Optional[str]` — language used for filtering +
    -#### 🔌 Usage -
    +**search:** `typing.Optional[str]` — search term used for filtering + +
    +
    +
    -```python -from elevenlabs import ElevenLabs - -client = ElevenLabs( - api_key="YOUR_API_KEY", -) -client.voices.add( - name="Alex", -) - -``` -
    -
    +**use_cases:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — use-case used for filtering + -#### ⚙️ Parameters -
    +**descriptives:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — search term used for filtering + +
    +
    +
    -**name:** `str` — The name that identifies this voice. This will be displayed in the dropdown of the website. +**featured:** `typing.Optional[bool]` — Filter featured voices
    @@ -2905,9 +2810,7 @@ client.voices.add(
    -**files:** `from __future__ import annotations - -typing.List[core.File]` — See core.File for more documentation +**reader_app_enabled:** `typing.Optional[bool]` — Filter voices that are enabled for the reader app
    @@ -2915,7 +2818,7 @@ typing.List[core.File]` — See core.File for more documentation
    -**remove_background_noise:** `typing.Optional[bool]` — If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse. +**owner_id:** `typing.Optional[str]` — Filter voices by public owner ID
    @@ -2923,7 +2826,7 @@ typing.List[core.File]` — See core.File for more documentation
    -**description:** `typing.Optional[str]` — How would you describe the voice? +**sort:** `typing.Optional[str]` — sort criteria
    @@ -2931,7 +2834,7 @@ typing.List[core.File]` — See core.File for more documentation
    -**labels:** `typing.Optional[str]` — Serialized labels dictionary for the voice. +**page:** `typing.Optional[int]`
    @@ -2951,7 +2854,7 @@ typing.List[core.File]` — See core.File for more documentation
    -
    client.voices.edit(...) +
    client.voices.get_similar_library_voices(...)
    @@ -2963,7 +2866,7 @@ typing.List[core.File]` — See core.File for more documentation
    -Edit a voice created by you. +Returns a list of shared voices similar to the provided audio sample. If neither similarity_threshold nor top_k is provided, we will apply default values.
    @@ -2983,10 +2886,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.edit( - voice_id="JBFqnCBsd6RMkjVDRZzb", - name="George", -) +client.voices.get_similar_library_voices() ``` @@ -3002,33 +2902,9 @@ client.voices.edit(
    -**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. - -
    -
    - -
    -
    - -**name:** `str` — The name that identifies this voice. This will be displayed in the dropdown of the website. - -
    -
    - -
    -
    - -**files:** `from __future__ import annotations - -typing.Optional[typing.List[core.File]]` — See core.File for more documentation - -
    -
    - -
    -
    +**audio_file:** `from __future__ import annotations -**remove_background_noise:** `typing.Optional[bool]` — If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse. +typing.Optional[core.File]` — See core.File for more documentation
    @@ -3036,7 +2912,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
    -**description:** `typing.Optional[str]` — How would you describe the voice? +**similarity_threshold:** `typing.Optional[float]` — Threshold for voice similarity between provided sample and library voices. Must be in range <0, 2>. The smaller the value the more similar voices will be returned.
    @@ -3044,7 +2920,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
    -**labels:** `typing.Optional[str]` — Serialized labels dictionary for the voice. +**top_k:** `typing.Optional[int]` — Number of most similar voices to return. If similarity_threshold is provided, less than this number of voices may be returned. Must be in range <1, 100>.
    @@ -3064,7 +2940,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
    -
    client.voices.add_sharing_voice(...) +
    client.voices.get_a_profile_page(...)
    @@ -3076,7 +2952,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
    -Add a sharing voice to your collection of voices in VoiceLab. +Gets a profile page based on a handle
    @@ -3096,10 +2972,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.add_sharing_voice( - public_user_id="63e84100a6bf7874ba37a1bab9a31828a379ec94b891b401653b655c5110880f", - voice_id="sB1b5zUrxQVAFl2PhZFp", - new_name="Alita", +client.voices.get_a_profile_page( + handle="talexgeorge", ) ``` @@ -3116,7 +2990,7 @@ client.voices.add_sharing_voice(
    -**public_user_id:** `str` — Public user ID used to publicly identify ElevenLabs users. +**handle:** `str` — Handle for a VA's profile page
    @@ -3124,19 +2998,64 @@ client.voices.add_sharing_voice(
    -**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    + +
    + + + + +
    + +## Projects +
    client.projects.get_all() +
    +
    + +#### 📝 Description
    -**new_name:** `str` — The name that identifies this voice. This will be displayed in the dropdown of the website. - +
    +
    + +Returns a list of your projects together and its metadata. +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.projects.get_all() + +``` +
    +
    +#### ⚙️ Parameters + +
    +
    +
    @@ -3152,7 +3071,7 @@ client.voices.add_sharing_voice(
    -
    client.voices.get_shared(...) +
    client.projects.add(...)
    @@ -3164,7 +3083,7 @@ client.voices.add_sharing_voice(
    -Gets a list of shared voices. +Creates a new project, it can be either initialized as blank, from a document or from a URL.
    @@ -3184,10 +3103,11 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.get_shared( - page_size=1, - gender="female", - language="en", +client.projects.add( + name="name", + default_title_voice_id="default_title_voice_id", + default_paragraph_voice_id="default_paragraph_voice_id", + default_model_id="default_model_id", ) ``` @@ -3204,7 +3124,7 @@ client.voices.get_shared(
    -**page_size:** `typing.Optional[int]` — How many shared voices to return at maximum. Can not exceed 100, defaults to 30. +**name:** `str` — The name of the project, used for identification only.
    @@ -3212,7 +3132,7 @@ client.voices.get_shared(
    -**category:** `typing.Optional[str]` — voice category used for filtering +**default_title_voice_id:** `str` — The voice_id that corresponds to the default voice used for new titles.
    @@ -3220,7 +3140,7 @@ client.voices.get_shared(
    -**gender:** `typing.Optional[str]` — gender used for filtering +**default_paragraph_voice_id:** `str` — The voice_id that corresponds to the default voice used for new paragraphs.
    @@ -3228,7 +3148,7 @@ client.voices.get_shared(
    -**age:** `typing.Optional[str]` — age used for filtering +**default_model_id:** `str` — The model_id of the model to be used for this project, you can query GET https://api.elevenlabs.io/v1/models to list all available models.
    @@ -3236,7 +3156,7 @@ client.voices.get_shared(
-**accent:** `typing.Optional[str]` — accent used for filtering +**from_url:** `typing.Optional[str]` — An optional URL from which we will extract content to initialize the project. If this is set, 'from_document' must be null. If neither 'from_url' nor 'from_document' are provided we will initialize the project as blank.
    @@ -3244,7 +3164,9 @@ client.voices.get_shared(
    -**language:** `typing.Optional[str]` — language used for filtering +**from_document:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation
    @@ -3252,7 +3174,14 @@ client.voices.get_shared(
    -**search:** `typing.Optional[str]` — search term used for filtering +**quality_preset:** `typing.Optional[str]` + +Output quality of the generated audio. Must be one of: +standard - standard output format, 128kbps with 44.1kHz sample rate. +high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the credit cost by 20%. +ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the credit cost by 50%. +ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the credit cost by 100%. +
    @@ -3260,7 +3189,7 @@ client.voices.get_shared(
-**use_cases:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — use-case used for filtering +**title:** `typing.Optional[str]` — An optional title of the project, this will be added as metadata to the mp3 file on project / chapter download.
    @@ -3268,7 +3197,7 @@ client.voices.get_shared(
    -**descriptives:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — search term used for filtering +**author:** `typing.Optional[str]` — An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download.
    @@ -3276,7 +3205,7 @@ client.voices.get_shared(
    -**featured:** `typing.Optional[bool]` — Filter featured voices +**description:** `typing.Optional[str]` — An optional description of the project.
    @@ -3284,7 +3213,7 @@ client.voices.get_shared(
    -**reader_app_enabled:** `typing.Optional[bool]` — Filter voices that are enabled for the reader app +**genres:** `typing.Optional[typing.List[str]]` — An optional list of genres associated with the project.
    @@ -3292,7 +3221,7 @@ client.voices.get_shared(
    -**owner_id:** `typing.Optional[str]` — Filter voices by public owner ID +**target_audience:** `typing.Optional[ProjectsAddRequestTargetAudience]` — An optional target audience of the project.
    @@ -3300,7 +3229,7 @@ client.voices.get_shared(
    -**sort:** `typing.Optional[str]` — sort criteria +**language:** `typing.Optional[str]` — An optional language of the project. Two-letter language code (ISO 639-1).
    @@ -3308,7 +3237,7 @@ client.voices.get_shared(
    -**page:** `typing.Optional[int]` +**content_type:** `typing.Optional[str]` — An optional content type of the project.
    @@ -3316,69 +3245,47 @@ client.voices.get_shared(
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**original_publication_date:** `typing.Optional[str]` — An optional original publication date of the project, in the format YYYY-MM-DD or YYYY.
    - -
    +
    +
    +**mature_content:** `typing.Optional[bool]` — An optional mature content of the project. +
    -
    -
    client.voices.get_similar_library_voices(...)
    -#### 📝 Description +**isbn_number:** `typing.Optional[str]` — An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download. + +
    +
    +**acx_volume_normalization:** `typing.Optional[bool]` — [Deprecated] When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements + +
    +
    +
    -Returns a list of shared voices similar to the provided audio sample. If neither similarity_threshold nor top_k is provided, we will apply default values. -
    -
    +**volume_normalization:** `typing.Optional[bool]` — When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements + -#### 🔌 Usage -
    -
    -
    - -```python -from elevenlabs import ElevenLabs - -client = ElevenLabs( - api_key="YOUR_API_KEY", -) -client.voices.get_similar_library_voices() - -``` -
    -
    -
    -
    - -#### ⚙️ Parameters - -
    -
    - -
    -
    - -**audio_file:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation +**pronunciation_dictionary_locators:** `typing.Optional[typing.List[str]]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
    @@ -3386,7 +3293,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -**similarity_threshold:** `typing.Optional[float]` — Threshold for voice similarity between provided sample and library voices. Must be in range <0, 2>. The smaller the value the more similar voices will be returned. +**fiction:** `typing.Optional[ProjectsAddRequestFiction]` — An optional fiction of the project.
    @@ -3394,7 +3301,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -**top_k:** `typing.Optional[int]` — Number of most similar voices to return. If similarity_threshold is provided, less than this number of voices may be returned. Must be in range <1, 100>. +**quality_check_on:** `typing.Optional[bool]` — Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
    @@ -3414,7 +3321,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -
    client.voices.get_a_profile_page(...) +
    client.projects.get(...)
    @@ -3426,7 +3333,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -Gets a profile page based on a handle +Returns information about a specific project. This endpoint returns more detailed information about a project than GET api.elevenlabs.io/v1/projects.
    @@ -3446,8 +3353,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.voices.get_a_profile_page( - handle="talexgeorge", +client.projects.get( + project_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -3464,7 +3371,7 @@ client.voices.get_a_profile_page(
    -**handle:** `str` — Handle for a VA's profile page +**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
    @@ -3484,8 +3391,7 @@ client.voices.get_a_profile_page(
    -## Projects -
    client.projects.get_all() +
    client.projects.edit_basic_project_info(...)
    @@ -3497,7 +3403,7 @@ client.voices.get_a_profile_page(
    -Returns a list of your projects together and its metadata. +Edits basic project info.
    @@ -3517,7 +3423,12 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.projects.get_all() +client.projects.edit_basic_project_info( + project_id="21m00Tcm4TlvDq8ikWAM", + name="name", + default_title_voice_id="default_title_voice_id", + default_paragraph_voice_id="default_paragraph_voice_id", +) ``` @@ -3533,72 +3444,47 @@ client.projects.get_all()
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
    - -
    +
    +
    +**name:** `str` — The name of the project, used for identification only. +
    -
    -
    client.projects.add(...)
    -#### 📝 Description - -
    -
    +**default_title_voice_id:** `str` — The voice_id that corresponds to the default voice used for new titles. + +
    +
    -Creates a new project, it can be either initialized as blank, from a document or from a URL. -
    -
    +**default_paragraph_voice_id:** `str` — The voice_id that corresponds to the default voice used for new paragraphs. +
    -#### 🔌 Usage -
    -
    -
    - -```python -from elevenlabs import ElevenLabs - -client = ElevenLabs( - api_key="YOUR_API_KEY", -) -client.projects.add( - name="name", - default_title_voice_id="default_title_voice_id", - default_paragraph_voice_id="default_paragraph_voice_id", - default_model_id="default_model_id", -) - -``` -
    -
+**title:** `typing.Optional[str]` — An optional title of the project, this will be added as metadata to the mp3 file on project / chapter download. +
    -#### ⚙️ Parameters - -
    -
    -
    -**name:** `str` — The name of the project, used for identification only. +**author:** `typing.Optional[str]` — An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download.
    @@ -3606,7 +3492,7 @@ client.projects.add(
    -**default_title_voice_id:** `str` — The voice_id that corresponds to the default voice used for new titles. +**isbn_number:** `typing.Optional[str]` — An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download.
    @@ -3614,7 +3500,7 @@ client.projects.add(
    -**default_paragraph_voice_id:** `str` — The voice_id that corresponds to the default voice used for new paragraphs. +**volume_normalization:** `typing.Optional[bool]` — When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
    @@ -3622,7 +3508,7 @@ client.projects.add(
    -**default_model_id:** `str` — The model_id of the model to be used for this project, you can query GET https://api.elevenlabs.io/v1/models to list all available models. +**quality_check_on:** `typing.Optional[bool]` — Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
    @@ -3630,72 +3516,69 @@ client.projects.add(
    -**from_url:** `typing.Optional[str]` — An optional URL from which we will extract content to initialize the project. If this is set, 'from_url' must be null. If neither 'from_url' or 'from_document' are provided we will initialize the project as blank. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    +
    +
    -
    -
    - -**from_document:** `from __future__ import annotations -typing.Optional[core.File]` — See core.File for more documentation -
    +
    +
    client.projects.delete(...)
    -**quality_preset:** `typing.Optional[str]` - -Output quality of the generated audio. Must be one of: -standard - standard output format, 128kbps with 44.1kHz sample rate. -high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the credit cost by 20%. -ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the credit cost by 50%. -ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the credit cost by 100%. +#### 📝 Description - -
    -
    +
    +
    -**title:** `typing.Optional[str]` — An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download. - +Delete a project by its project_id. +
    +
    +#### 🔌 Usage +
    -**author:** `typing.Optional[str]` — An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download. - -
    -
    -
    -**description:** `typing.Optional[str]` — An optional description of the project. - +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.projects.delete( + project_id="21m00Tcm4TlvDq8ikWAM", +) + +``` +
    +
    +#### ⚙️ Parameters +
    -**genres:** `typing.Optional[typing.List[str]]` — An optional list of genres associated with the project. - -
    -
    -
    -**target_audience:** `typing.Optional[ProjectsAddRequestTargetAudience]` — An optional target audience of the project. +**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
    @@ -3703,63 +3586,69 @@ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate
    -**language:** `typing.Optional[str]` — An optional language of the project. Two-letter language code (ISO 639-1). +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    + + -
    -
    -**content_type:** `typing.Optional[str]` — An optional content type of the project. -
    +
    +
    client.projects.convert(...)
    -**original_publication_date:** `typing.Optional[str]` — An optional original publication date of the project, in the format YYYY-MM-DD or YYYY. - -
    -
    +#### 📝 Description
    -**mature_content:** `typing.Optional[bool]` — An optional mature content of the project. - -
    -
    -
    -**isbn_number:** `typing.Optional[str]` — An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download. - +Starts conversion of a project and all of its chapters. +
    +
    +#### 🔌 Usage +
    -**acx_volume_normalization:** `typing.Optional[bool]` — [Deprecated] When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements - -
    -
    -
    -**volume_normalization:** `typing.Optional[bool]` — When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements - +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.projects.convert( + project_id="21m00Tcm4TlvDq8ikWAM", +) + +``` +
    +
    +#### ⚙️ Parameters +
    -**pronunciation_dictionary_locators:** `typing.Optional[typing.List[str]]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first. +
    +
    + +**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
    @@ -3779,7 +3668,7 @@ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate
    -
    client.projects.get(...) +
    client.projects.get_snapshots(...)
    @@ -3791,7 +3680,7 @@ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate
    -Returns information about a specific project. This endpoint returns more detailed information about a project than GET api.elevenlabs.io/v1/projects. +Gets the snapshots of a project.
    @@ -3811,7 +3700,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.projects.get( +client.projects.get_snapshots( project_id="21m00Tcm4TlvDq8ikWAM", ) @@ -3849,7 +3738,7 @@ client.projects.get(
    -
    client.projects.edit_basic_project_info(...) +
    client.projects.stream_archive(...)
    @@ -3861,7 +3750,7 @@ client.projects.get(
    -Edits basic project info. +Streams archive with project audio.
    @@ -3881,11 +3770,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.projects.edit_basic_project_info( +client.projects.stream_archive( project_id="21m00Tcm4TlvDq8ikWAM", - name="name", - default_title_voice_id="default_title_voice_id", - default_paragraph_voice_id="default_paragraph_voice_id", + project_snapshot_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -3910,7 +3797,7 @@ client.projects.edit_basic_project_info(
    -**name:** `str` — The name of the project, used for identification only. +**project_snapshot_id:** `str` — The project_snapshot_id of the project snapshot. You can query GET /v1/projects/{project_id}/snapshots to list all available snapshots for a project.
    @@ -3918,31 +3805,478 @@ client.projects.edit_basic_project_info(
    -**default_title_voice_id:** `str` — The voice_id that corresponds to the default voice used for new titles. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    - -
    -
    - -**default_paragraph_voice_id:** `str` — The voice_id that corresponds to the default voice used for new paragraphs. -
    -
    -
    -**title:** `typing.Optional[str]` — An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download. -
    +
    +
    client.projects.add_chapter_to_a_project(...)
    -**author:** `typing.Optional[str]` — An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download. +#### 📝 Description + +
    +
    + +
    +
    + +Creates a new chapter either as blank or from a URL. +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.projects.add_chapter_to_a_project( + project_id="21m00Tcm4TlvDq8ikWAM", + name="name", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. + +
    +
    + +
    +
    + +**name:** `str` — The name of the chapter, used for identification only. + +
    +
    + +
    +
+ +**from_url:** `typing.Optional[str]` — An optional URL from which we will extract content to initialize the chapter. If this is set, 'from_document' must be null. If neither 'from_url' nor 'from_document' are provided we will initialize the chapter as blank. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
    +
    +
    +
    + + +
    +
    +
    + +
    client.projects.update_pronunciation_dictionaries(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Updates the set of pronunciation dictionaries acting on a project. This will automatically mark text within this project as requiring reconverting where the new dictionary would apply or the old one no longer does. +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs, PronunciationDictionaryVersionLocator + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.projects.update_pronunciation_dictionaries( + project_id="21m00Tcm4TlvDq8ikWAM", + pronunciation_dictionary_locators=[ + PronunciationDictionaryVersionLocator( + pronunciation_dictionary_id="pronunciation_dictionary_id", + version_id="version_id", + ) + ], +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. + +
    +
    + +
    +
    + +**pronunciation_dictionary_locators:** `typing.Sequence[PronunciationDictionaryVersionLocator]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
    +
    +
    +
    + + +
    +
    +
    + +## Chapters +
    client.chapters.get_all(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
+ +Returns a list of your chapters for a project together with their metadata. +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.chapters.get_all( + project_id="21m00Tcm4TlvDq8ikWAM", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
    +
    +
    +
    + + +
    +
    +
    + +
    client.chapters.get(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Returns information about a specific chapter. +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.chapters.get( + project_id="21m00Tcm4TlvDq8ikWAM", + chapter_id="21m00Tcm4TlvDq8ikWAM", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. + +
    +
    + +
    +
    + +**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
    +
    +
    +
    + + +
    +
    +
    + +
    client.chapters.delete(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Delete a chapter by its chapter_id. +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.chapters.delete( + project_id="21m00Tcm4TlvDq8ikWAM", + chapter_id="21m00Tcm4TlvDq8ikWAM", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. + +
    +
    + +
    +
    + +**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
    +
    +
    +
    + + +
    +
    +
    + +
    client.chapters.convert(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Starts conversion of a specific chapter. +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.chapters.convert( + project_id="21m00Tcm4TlvDq8ikWAM", + chapter_id="21m00Tcm4TlvDq8ikWAM", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. + +
    +
    + +
    +
    + +**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
    @@ -3950,15 +4284,174 @@ client.projects.edit_basic_project_info(
    -**isbn_number:** `typing.Optional[str]` — An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    +
    +
    + + +
    +
    +
    +
    client.chapters.get_all_snapshots(...)
    -**volume_normalization:** `typing.Optional[bool]` — When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements +#### 📝 Description + +
    +
    + +
    +
    + +Gets information about all the snapshots of a chapter, each snapshot corresponds can be downloaded as audio. Whenever a chapter is converted a snapshot will be automatically created. +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.chapters.get_all_snapshots( + project_id="21m00Tcm4TlvDq8ikWAM", + chapter_id="21m00Tcm4TlvDq8ikWAM", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. + +
    +
    + +
    +
    + +**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
    +
    +
    +
    + + +
    +
    +
    + +
    client.chapters.stream_snapshot(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Stream the audio from a chapter snapshot. Use `GET /v1/projects/{project_id}/chapters/{chapter_id}/snapshots` to return the chapter snapshots of a chapter. +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.chapters.stream_snapshot( + project_id="21m00Tcm4TlvDq8ikWAM", + chapter_id="21m00Tcm4TlvDq8ikWAM", + chapter_snapshot_id="21m00Tcm4TlvDq8ikWAM", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. + +
    +
    + +
    +
    + +**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project. + +
    +
    + +
    +
+ +**chapter_snapshot_id:** `str` — The chapter_snapshot_id of the chapter snapshot. You can query GET /v1/projects/{project_id}/chapters/{chapter_id}/snapshots to list all available snapshots for a chapter. + +
    +
    + +
    +
    + +**convert_to_mpeg:** `typing.Optional[bool]` — Whether to convert the audio to mpeg format.
    @@ -3978,57 +4471,148 @@ client.projects.edit_basic_project_info(
    -
    client.projects.delete(...) +## Dubbing +
    client.dubbing.dub_a_video_or_an_audio_file(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Dubs provided audio or video file into given language. +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.dubbing.dub_a_video_or_an_audio_file( + target_lang="target_lang", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**target_lang:** `str` — The Target language to dub the content into. + +
    +
    + +
    +
    + +**file:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation + +
    +
    + +
    +
    + +**name:** `typing.Optional[str]` — Name of the dubbing project. + +
    +
    + +
    +
    + +**source_url:** `typing.Optional[str]` — URL of the source video/audio file. + +
    +
    + +
    +
    + +**source_lang:** `typing.Optional[str]` — Source language. + +
    +
    +
    -#### 📝 Description +**num_speakers:** `typing.Optional[int]` — Number of speakers to use for the dubbing. Set to 0 to automatically detect the number of speakers + +
    +
    +**watermark:** `typing.Optional[bool]` — Whether to apply watermark to the output video. + +
    +
    +
    -Delete a project by its project_id. -
    -
    +**start_time:** `typing.Optional[int]` — Start time of the source video/audio file. +
    -#### 🔌 Usage -
    +**end_time:** `typing.Optional[int]` — End time of the source video/audio file. + +
    +
    +
    -```python -from elevenlabs import ElevenLabs - -client = ElevenLabs( - api_key="YOUR_API_KEY", -) -client.projects.delete( - project_id="21m00Tcm4TlvDq8ikWAM", -) - -``` -
    -
    +**highest_resolution:** `typing.Optional[bool]` — Whether to use the highest resolution available. +
    -#### ⚙️ Parameters -
    +**drop_background_audio:** `typing.Optional[bool]` — An advanced setting. Whether to drop background audio from the final dub. This can improve dub quality where it's known that audio shouldn't have a background track such as for speeches or monologues. + +
    +
    +
    -**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. +**use_profanity_filter:** `typing.Optional[bool]` — [BETA] Whether transcripts should have profanities censored with the words '[censored]'
    @@ -4048,7 +4632,7 @@ client.projects.delete(
    -
    client.projects.convert(...) +
    client.dubbing.get_dubbing_project_metadata(...)
    @@ -4060,7 +4644,7 @@ client.projects.delete(
    -Starts conversion of a project and all of its chapters. +Returns metadata about a dubbing project, including whether it's still in progress or not
    @@ -4080,8 +4664,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.projects.convert( - project_id="21m00Tcm4TlvDq8ikWAM", +client.dubbing.get_dubbing_project_metadata( + dubbing_id="dubbing_id", ) ``` @@ -4098,7 +4682,7 @@ client.projects.convert(
    -**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. +**dubbing_id:** `str` — ID of the dubbing project.
    @@ -4118,7 +4702,7 @@ client.projects.convert(
    -
    client.projects.get_snapshots(...) +
    client.dubbing.delete_dubbing_project(...)
    @@ -4130,7 +4714,7 @@ client.projects.convert(
    -Gets the snapshots of a project. +Deletes a dubbing project.
    @@ -4150,8 +4734,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.projects.get_snapshots( - project_id="21m00Tcm4TlvDq8ikWAM", +client.dubbing.delete_dubbing_project( + dubbing_id="dubbing_id", ) ``` @@ -4168,7 +4752,7 @@ client.projects.get_snapshots(
    -**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. +**dubbing_id:** `str` — ID of the dubbing project.
    @@ -4188,7 +4772,7 @@ client.projects.get_snapshots(
    -
    client.projects.stream_audio(...) +
    client.dubbing.get_transcript_for_dub(...)
    @@ -4200,7 +4784,7 @@ client.projects.get_snapshots(
    -Stream the audio from a project snapshot. +Returns transcript for the dub as an SRT file.
    @@ -4220,10 +4804,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.projects.stream_audio( - project_id="string", - project_snapshot_id="string", - convert_to_mpeg=True, +client.dubbing.get_transcript_for_dub( + dubbing_id="dubbing_id", + language_code="language_code", ) ``` @@ -4240,7 +4823,7 @@ client.projects.stream_audio(
    -**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. +**dubbing_id:** `str` — ID of the dubbing project.
    @@ -4248,7 +4831,7 @@ client.projects.stream_audio(
    -**project_snapshot_id:** `str` — The project_snapshot_id of the project snapshot. You can query GET /v1/projects/{project_id}/snapshots to list all available snapshots for a project. +**language_code:** `str` — ID of the language.
    @@ -4256,7 +4839,9 @@ client.projects.stream_audio(
    -**convert_to_mpeg:** `typing.Optional[bool]` — Whether to convert the audio to mpeg format. +**format_type:** `typing.Optional[ + GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType +]` — Format to use for the subtitle file, either 'srt' or 'webvtt'
    @@ -4264,7 +4849,7 @@ client.projects.stream_audio(
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -4276,7 +4861,8 @@ client.projects.stream_audio(
    -
    client.projects.stream_archive(...) +## Models +
    client.models.get_all()
    @@ -4288,7 +4874,7 @@ client.projects.stream_audio(
    -Streams archive with project audio. +Gets a list of available models.
    @@ -4308,10 +4894,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.projects.stream_archive( - project_id="21m00Tcm4TlvDq8ikWAM", - project_snapshot_id="21m00Tcm4TlvDq8ikWAM", -) +client.models.get_all() ``` @@ -4327,22 +4910,6 @@ client.projects.stream_archive(
    -**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. - -
    -
    - -
    -
    - -**project_snapshot_id:** `str` — The project_snapshot_id of the project snapshot. You can query GET /v1/projects/{project_id}/snapshots to list all available snapshots for a project. - -
    -
    - -
    -
    - **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -4355,7 +4922,8 @@ client.projects.stream_archive(
    -
    client.projects.add_chapter_to_a_project(...) +## AudioNative +
    client.audio_native.create(...)
    @@ -4367,7 +4935,7 @@ client.projects.stream_archive(
    -Creates a new chapter either as blank or from a URL. +Creates AudioNative enabled project, optionally starts conversion and returns project id and embeddable html snippet.
    @@ -4387,8 +4955,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.projects.add_chapter_to_a_project( - project_id="21m00Tcm4TlvDq8ikWAM", +client.audio_native.create( name="name", ) @@ -4406,7 +4973,7 @@ client.projects.add_chapter_to_a_project(
    -**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. +**name:** `str` — Project name.
    @@ -4414,7 +4981,7 @@ client.projects.add_chapter_to_a_project(
    -**name:** `str` — The name of the chapter, used for identification only. +**image:** `typing.Optional[str]` — Image URL used in the player. If not provided, default image set in the Player settings is used.
    @@ -4422,7 +4989,7 @@ client.projects.add_chapter_to_a_project(
    -**from_url:** `typing.Optional[str]` — An optional URL from which we will extract content to initialize the project. If this is set, 'from_url' must be null. If neither 'from_url' or 'from_document' are provided we will initialize the project as blank. +**author:** `typing.Optional[str]` — Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used.
    @@ -4430,75 +4997,65 @@ client.projects.add_chapter_to_a_project(
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**title:** `typing.Optional[str]` — Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used.
    - -
    +
    +
    +**small:** `typing.Optional[bool]` — Whether to use small player or not. If not provided, default value set in the Player settings is used. +
    -
    -
    client.projects.update_pronunciation_dictionaries(...)
    -#### 📝 Description +**text_color:** `typing.Optional[str]` — Text color used in the player. If not provided, default text color set in the Player settings is used. + +
    +
    +**background_color:** `typing.Optional[str]` — Background color used in the player. If not provided, default background color set in the Player settings is used. + +
    +
    +
    -Updates the set of pronunciation dictionaries acting on a project. This will automatically mark text within this project as requiring reconverting where the new dictionary would apply or the old one no longer does. -
    -
    +**sessionization:** `typing.Optional[int]` — Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used. + -#### 🔌 Usage -
    +**voice_id:** `typing.Optional[str]` — Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used. + +
    +
    +
    -```python -from elevenlabs import ElevenLabs, PronunciationDictionaryVersionLocator - -client = ElevenLabs( - api_key="YOUR_API_KEY", -) -client.projects.update_pronunciation_dictionaries( - project_id="21m00Tcm4TlvDq8ikWAM", - pronunciation_dictionary_locators=[ - PronunciationDictionaryVersionLocator( - pronunciation_dictionary_id="pronunciation_dictionary_id", - version_id="version_id", - ) - ], -) - -``` -
    -
    +**model_id:** `typing.Optional[str]` — TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used. + -#### ⚙️ Parameters -
    -
    -
    +**file:** `from __future__ import annotations -**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. +typing.Optional[core.File]` — See core.File for more documentation
    @@ -4506,7 +5063,7 @@ client.projects.update_pronunciation_dictionaries(
    -**pronunciation_dictionary_locators:** `typing.Sequence[PronunciationDictionaryVersionLocator]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first. +**auto_convert:** `typing.Optional[bool]` — Whether to auto convert the project to audio or not.
    @@ -4526,8 +5083,8 @@ client.projects.update_pronunciation_dictionaries(
    -## Chapters -
    client.chapters.get_all(...) +## Usage +
    client.usage.get_characters_usage_metrics(...)
    @@ -4539,7 +5096,7 @@ client.projects.update_pronunciation_dictionaries(
    -Returns a list of your chapters for a project together and its metadata. +Returns the credit usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis.
    @@ -4559,8 +5116,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.chapters.get_all( - project_id="21m00Tcm4TlvDq8ikWAM", +client.usage.get_characters_usage_metrics( + start_unix=1, + end_unix=1, ) ``` @@ -4577,7 +5135,7 @@ client.chapters.get_all(
    -**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. +**start_unix:** `int` — UTC Unix timestamp for the start of the usage window, in milliseconds. To include the first day of the window, the timestamp should be at 00:00:00 of that day.
    @@ -4585,70 +5143,15 @@ client.chapters.get_all(
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**end_unix:** `int` — UTC Unix timestamp for the end of the usage window, in milliseconds. To include the last day of the window, the timestamp should be at 23:59:59 of that day. -
    -
    - -
    - - - - -
    - -
    client.chapters.get(...) -
    -
    - -#### 📝 Description - -
    -
    - -
    -
    - -Returns information about a specific chapter. -
    -
    -#### 🔌 Usage - -
    -
    - -
    -
    - -```python -from elevenlabs import ElevenLabs - -client = ElevenLabs( - api_key="YOUR_API_KEY", -) -client.chapters.get( - project_id="21m00Tcm4TlvDq8ikWAM", - chapter_id="21m00Tcm4TlvDq8ikWAM", -) - -``` -
    -
    -
    -
    - -#### ⚙️ Parameters - -
    -
    -
    -**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. +**include_workspace_metrics:** `typing.Optional[bool]` — Whether or not to include the statistics of the entire workspace.
    @@ -4656,7 +5159,7 @@ client.chapters.get(
    -**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project. +**breakdown_type:** `typing.Optional[BreakdownTypes]` — How to break down the information. Cannot be "user" if include_workspace_metrics is False.
    @@ -4676,7 +5179,8 @@ client.chapters.get(
    -
    client.chapters.delete(...) +## PronunciationDictionary +
    client.pronunciation_dictionary.add_from_file(...)
    @@ -4688,7 +5192,7 @@ client.chapters.get(
    -Delete a chapter by its chapter_id. +Creates a new pronunciation dictionary from a lexicon .PLS file
    @@ -4708,9 +5212,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.chapters.delete( - project_id="21m00Tcm4TlvDq8ikWAM", - chapter_id="21m00Tcm4TlvDq8ikWAM", +client.pronunciation_dictionary.add_from_file( + name="name", ) ``` @@ -4727,7 +5230,7 @@ client.chapters.delete(
    -**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. +**name:** `str` — The name of the pronunciation dictionary, used for identification only.
    @@ -4735,7 +5238,25 @@ client.chapters.delete(
    -**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project. +**file:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation + +
    +
    + +
    +
    + +**description:** `typing.Optional[str]` — A description of the pronunciation dictionary, used for identification only. + +
    +
    + +
    +
    + +**workspace_access:** `typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess]` — Should be one of 'editor' or 'viewer'. If not provided, defaults to no access.
    @@ -4755,7 +5276,7 @@ client.chapters.delete(
    -
    client.chapters.convert(...) +
    client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(...)
    @@ -4767,7 +5288,7 @@ client.chapters.delete(
    -Starts conversion of a specific chapter. +Add rules to the pronunciation dictionary
    @@ -4783,13 +5304,22 @@ Starts conversion of a specific chapter. ```python from elevenlabs import ElevenLabs +from elevenlabs.pronunciation_dictionary import ( + PronunciationDictionaryRule_Phoneme, +) client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.chapters.convert( - project_id="21m00Tcm4TlvDq8ikWAM", - chapter_id="21m00Tcm4TlvDq8ikWAM", +client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary( + pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", + rules=[ + PronunciationDictionaryRule_Phoneme( + string_to_replace="rules", + phoneme="rules", + alphabet="rules", + ) + ], ) ``` @@ -4806,7 +5336,7 @@ client.chapters.convert(
    -**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. +**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
    @@ -4814,7 +5344,11 @@ client.chapters.convert(
    -**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project. +**rules:** `typing.Sequence[PronunciationDictionaryRule]` + +List of pronunciation rules. Rule can be either: + an alias rule: {'string_to_replace': 'a', 'type': 'alias', 'alias': 'b', } + or a phoneme rule: {'string_to_replace': 'a', 'type': 'phoneme', 'phoneme': 'b', 'alphabet': 'ipa' }
    @@ -4834,7 +5368,7 @@ client.chapters.convert(
    -
    client.chapters.get_all_snapshots(...) +
    client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(...)
    @@ -4846,7 +5380,7 @@ client.chapters.convert(
    -Gets information about all the snapshots of a chapter, each snapshot corresponds can be downloaded as audio. Whenever a chapter is converted a snapshot will be automatically created. +Remove rules from the pronunciation dictionary
    @@ -4866,9 +5400,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.chapters.get_all_snapshots( - project_id="21m00Tcm4TlvDq8ikWAM", - chapter_id="21m00Tcm4TlvDq8ikWAM", +client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary( + pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", + rule_strings=["rule_strings"], ) ``` @@ -4885,7 +5419,7 @@ client.chapters.get_all_snapshots(
    -**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. +**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
    @@ -4893,7 +5427,7 @@ client.chapters.get_all_snapshots(
    -**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project. +**rule_strings:** `typing.Sequence[str]` — List of strings to remove from the pronunciation dictionary.
    @@ -4913,7 +5447,7 @@ client.chapters.get_all_snapshots(
    -
    client.chapters.stream_snapshot(...) +
    client.pronunciation_dictionary.download(...)
    @@ -4925,7 +5459,7 @@ client.chapters.get_all_snapshots(
    -Stream the audio from a chapter snapshot. Use `GET /v1/projects/{project_id}/chapters/{chapter_id}/snapshots` to return the chapter snapshots of a chapter. +Get PLS file with a pronunciation dictionary version rules
    @@ -4945,10 +5479,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.chapters.stream_snapshot( - project_id="21m00Tcm4TlvDq8ikWAM", - chapter_id="21m00Tcm4TlvDq8ikWAM", - chapter_snapshot_id="21m00Tcm4TlvDq8ikWAM", +client.pronunciation_dictionary.download( + dictionary_id="Fm6AvNgS53NXe6Kqxp3e", + version_id="KZFyRUq3R6kaqhKI146w", ) ``` @@ -4965,23 +5498,7 @@ client.chapters.stream_snapshot(
    -**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. - -
    -
    - -
    -
    - -**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project. - -
    -
    - -
    -
    - -**chapter_snapshot_id:** `str` — The chapter_snapshot_id of the chapter snapshot. You can query GET /v1/projects/{project_id}/chapters/{chapter_id}/snapshots to the all available snapshots for a chapter. +**dictionary_id:** `str` — The id of the pronunciation dictionary
    @@ -4989,7 +5506,7 @@ client.chapters.stream_snapshot(
    -**convert_to_mpeg:** `typing.Optional[bool]` — Whether to convert the audio to mpeg format. +**version_id:** `str` — The id of the version of the pronunciation dictionary
    @@ -5009,8 +5526,7 @@ client.chapters.stream_snapshot(
    -## Dubbing -
    client.dubbing.dub_a_video_or_an_audio_file(...) +
    client.pronunciation_dictionary.get(...)
    @@ -5022,7 +5538,7 @@ client.chapters.stream_snapshot(
    -Dubs provided audio or video file into given language. +Get metadata for a pronunciation dictionary
    @@ -5042,8 +5558,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.dubbing.dub_a_video_or_an_audio_file( - target_lang="target_lang", +client.pronunciation_dictionary.get( + pronunciation_dictionary_id="Fm6AvNgS53NXe6Kqxp3e", ) ``` @@ -5060,7 +5576,7 @@ client.dubbing.dub_a_video_or_an_audio_file(
    -**target_lang:** `str` — The Target language to dub the content into. +**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
    @@ -5068,81 +5584,69 @@ client.dubbing.dub_a_video_or_an_audio_file(
    -**file:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    - -
    -
    - -**name:** `typing.Optional[str]` — Name of the dubbing project. -
    -
    -
    -**source_url:** `typing.Optional[str]` — URL of the source video/audio file. -
    +
    +
    client.pronunciation_dictionary.get_all(...)
    -**source_lang:** `typing.Optional[str]` — Source language. - -
    -
    +#### 📝 Description
    -**num_speakers:** `typing.Optional[int]` — Number of speakers to use for the dubbing. Set to 0 to automatically detect the number of speakers - -
    -
    -
    -**watermark:** `typing.Optional[bool]` — Whether to apply watermark to the output video. - +Get a list of the pronunciation dictionaries you have access to and their metadata +
    +
    +#### 🔌 Usage +
    -**start_time:** `typing.Optional[int]` — Start time of the source video/audio file. - -
    -
    -
    -**end_time:** `typing.Optional[int]` — End time of the source video/audio file. - +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.pronunciation_dictionary.get_all( + page_size=1, +) + +```
    + + + +#### ⚙️ Parameters
    -**highest_resolution:** `typing.Optional[bool]` — Whether to use the highest resolution available. - -
    -
    -
    -**drop_background_audio:** `typing.Optional[bool]` — An advanced setting. Whether to drop background audio from the final dub. This can improve dub quality where it's known that audio shouldn't have a background track such as for speeches or monologues. +**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
    @@ -5150,7 +5654,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -**use_profanity_filter:** `typing.Optional[bool]` — [BETA] Whether transcripts should have profanities censored with the words '[censored]' +**page_size:** `typing.Optional[int]` — How many pronunciation dictionaries to return at maximum. Can not exceed 100, defaults to 30.
    @@ -5170,7 +5674,8 @@ typing.Optional[core.File]` — See core.File for more documentation
    -
    client.dubbing.get_dubbing_project_metadata(...) +## Workspace +
    client.workspace.invite_user(...)
    @@ -5182,7 +5687,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -Returns metadata about a dubbing project, including whether it's still in progress or not +Sends an email invitation to join your workspace to the provided email. If the user doesn't have an account they will be prompted to create one. If the user accepts this invite they will be added as a user to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators.
    @@ -5202,8 +5707,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.dubbing.get_dubbing_project_metadata( - dubbing_id="dubbing_id", +client.workspace.invite_user( + email="email", ) ``` @@ -5220,7 +5725,7 @@ client.dubbing.get_dubbing_project_metadata(
    -**dubbing_id:** `str` — ID of the dubbing project. +**email:** `str` — Email of the target user.
    @@ -5240,7 +5745,7 @@ client.dubbing.get_dubbing_project_metadata(
    -
    client.dubbing.delete_dubbing_project(...) +
    client.workspace.delete_existing_invitation(...)
    @@ -5252,7 +5757,7 @@ client.dubbing.get_dubbing_project_metadata(
    -Deletes a dubbing project. +Invalidates an existing email invitation. The invitation will still show up in the inbox it has been delivered to, but activating it to join the workspace won't work. This endpoint may only be called by workspace administrators.
    @@ -5272,8 +5777,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.dubbing.delete_dubbing_project( - dubbing_id="dubbing_id", +client.workspace.delete_existing_invitation( + email="email", ) ``` @@ -5290,7 +5795,7 @@ client.dubbing.delete_dubbing_project(
    -**dubbing_id:** `str` — ID of the dubbing project. +**email:** `str` — Email of the target user.
    @@ -5310,7 +5815,7 @@ client.dubbing.delete_dubbing_project(
    -
    client.dubbing.get_dubbed_file(...) +
    client.workspace.update_member(...)
    @@ -5322,7 +5827,7 @@ client.dubbing.delete_dubbing_project(
    -Returns dubbed file as a streamed file. Videos will be returned in MP4 format and audio only dubs will be returned in MP3. +Updates attributes of a workspace member. Apart from the email identifier, all parameters will remain unchanged unless specified. This endpoint may only be called by workspace administrators.
    @@ -5342,9 +5847,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.dubbing.get_dubbed_file( - dubbing_id="string", - language_code="string", +client.workspace.update_member( + email="email", ) ``` @@ -5361,7 +5865,7 @@ client.dubbing.get_dubbed_file(
    -**dubbing_id:** `str` — ID of the dubbing project. +**email:** `str` — Email of the target user.
    @@ -5369,7 +5873,7 @@ client.dubbing.get_dubbed_file(
    -**language_code:** `str` — ID of the language. +**is_locked:** `typing.Optional[bool]` — Whether to lock or unlock the user account.
    @@ -5377,7 +5881,15 @@ client.dubbing.get_dubbed_file(
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. +**workspace_role:** `typing.Optional[BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole]` — Role dictating permissions in the workspace. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -5389,7 +5901,8 @@ client.dubbing.get_dubbed_file(
    -
    client.dubbing.get_transcript_for_dub(...) +## ConversationalAi +
    client.conversational_ai.get_signed_url(...)
    @@ -5401,7 +5914,7 @@ client.dubbing.get_dubbed_file(
    -Returns transcript for the dub as an SRT file. +Get a signed url to start a conversation with an agent with an agent that requires authorization
    @@ -5421,9 +5934,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.dubbing.get_transcript_for_dub( - dubbing_id="dubbing_id", - language_code="language_code", +client.conversational_ai.get_signed_url( + agent_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -5440,7 +5952,7 @@ client.dubbing.get_transcript_for_dub(
    -**dubbing_id:** `str` — ID of the dubbing project. +**agent_id:** `str` — The id of the agent you're taking the action on.
    @@ -5448,41 +5960,36 @@ client.dubbing.get_transcript_for_dub(
    -**language_code:** `str` — ID of the language. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    + +
    -
    -
    -**format_type:** `typing.Optional[ - GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType -]` — Format to use for the subtitle file, either 'srt' or 'webvtt' -
    +
    +
    client.conversational_ai.create_agent(...)
    -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
    -
    - - - +#### 📝 Description - - -
    +
    +
    -## Workspace -
    client.workspace.get_sso_provider_admin(...)
    +Create an agent from a config object +
    +
    +
    +
    + #### 🔌 Usage
    @@ -5492,13 +5999,13 @@ client.dubbing.get_transcript_for_dub(
    ```python -from elevenlabs import ElevenLabs +from elevenlabs import ConversationalConfig, ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.workspace.get_sso_provider_admin( - workspace_id="workspace_id", +client.conversational_ai.create_agent( + conversation_config=ConversationalConfig(), ) ``` @@ -5515,7 +6022,23 @@ client.workspace.get_sso_provider_admin(
    -**workspace_id:** `str` +**conversation_config:** `ConversationalConfig` — Conversation configuration for an agent + +
    +
    + +
    +
    + +**platform_settings:** `typing.Optional[AgentPlatformSettings]` — Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. + +
    +
    + +
    +
    + +**name:** `typing.Optional[str]` — A name to make the agent easier to find
    @@ -5535,7 +6058,7 @@ client.workspace.get_sso_provider_admin(
    -
    client.workspace.invite_user(...) +
    client.conversational_ai.get_agent(...)
    @@ -5547,7 +6070,7 @@ client.workspace.get_sso_provider_admin(
    -Sends an email invitation to join your workspace to the provided email. If the user doesn't have an account they will be prompted to create one. If the user accepts this invite they will be added as a user to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators. +Retrieve config for an agent
    @@ -5567,8 +6090,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.workspace.invite_user( - email="email", +client.conversational_ai.get_agent( + agent_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -5585,7 +6108,7 @@ client.workspace.invite_user(
    -**email:** `str` — Email of the target user. +**agent_id:** `str` — The id of an agent. This is returned on agent creation.
    @@ -5605,7 +6128,7 @@ client.workspace.invite_user(
    -
    client.workspace.delete_existing_invitation(...) +
    client.conversational_ai.delete_agent(...)
    @@ -5617,7 +6140,7 @@ client.workspace.invite_user(
    -Invalidates an existing email invitation. The invitation will still show up in the inbox it has been delivered to, but activating it to join the workspace won't work. This endpoint may only be called by workspace administrators. +Delete an agent
    @@ -5637,8 +6160,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.workspace.delete_existing_invitation( - email="email", +client.conversational_ai.delete_agent( + agent_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -5655,7 +6178,7 @@ client.workspace.delete_existing_invitation(
    -**email:** `str` — Email of the target user. +**agent_id:** `str` — The id of an agent. This is returned on agent creation.
    @@ -5675,7 +6198,7 @@ client.workspace.delete_existing_invitation(
    -
    client.workspace.update_member(...) +
    client.conversational_ai.update_agent(...)
    @@ -5687,7 +6210,7 @@ client.workspace.delete_existing_invitation(
    -Updates attributes of a workspace member. Apart from the email identifier, all parameters will remain unchanged unless specified. This endpoint may only be called by workspace administrators. +Patches an Agent settings
    @@ -5707,8 +6230,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.workspace.update_member( - email="email", +client.conversational_ai.update_agent( + agent_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -5725,7 +6248,7 @@ client.workspace.update_member(
    -**email:** `str` — Email of the target user. +**agent_id:** `str` — The id of an agent. This is returned on agent creation.
    @@ -5733,7 +6256,7 @@ client.workspace.update_member(
    -**is_locked:** `typing.Optional[bool]` — Whether to lock or unlock the user account. +**conversation_config:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Conversation configuration for an agent
    @@ -5741,7 +6264,27 @@ client.workspace.update_member(
    -**workspace_role:** `typing.Optional[BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole]` — Role dictating permissions in the workspace. +**platform_settings:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. + +
    +
    + +
    +
    + +**secrets:** `typing.Optional[ + typing.Sequence[ + BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem + ] +]` — A list of secrets for the agent. Can be used to add new secrets or update and delete the existing ones + +
    +
    + +
    +
    + +**name:** `typing.Optional[str]` — A name to make the agent easier to find
    @@ -5761,8 +6304,7 @@ client.workspace.update_member(
    -## Models -
    client.models.get_all() +
    client.conversational_ai.get_widget(...)
    @@ -5774,7 +6316,7 @@ client.workspace.update_member(
    -Gets a list of available models. +Retrieve the widget configuration for an agent
    @@ -5794,7 +6336,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.models.get_all() +client.conversational_ai.get_widget( + agent_id="21m00Tcm4TlvDq8ikWAM", +) ``` @@ -5810,6 +6354,22 @@ client.models.get_all()
    +**agent_id:** `str` — The id of an agent. This is returned on agent creation. + +
    +
    + +
    +
    + +**conversation_signature:** `typing.Optional[str]` — An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint + +
    +
    + +
    +
    + **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -5822,8 +6382,7 @@ client.models.get_all()
    -## AudioNative -
    client.audio_native.create(...) +
    client.conversational_ai.get_link(...)
    @@ -5835,7 +6394,7 @@ client.models.get_all()
    -Creates AudioNative enabled project, optionally starts conversion and returns project id and embeddable html snippet. +Get the current link used to share the agent with others
    @@ -5855,8 +6414,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.audio_native.create( - name="name", +client.conversational_ai.get_link( + agent_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -5873,7 +6432,7 @@ client.audio_native.create(
    -**name:** `str` — Project name. +**agent_id:** `str` — The id of an agent. This is returned on agent creation.
    @@ -5881,81 +6440,69 @@ client.audio_native.create(
    -**image:** `typing.Optional[str]` — Image URL used in the player. If not provided, default image set in the Player settings is used. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    - -
    -
    - -**author:** `typing.Optional[str]` — Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used. -
    -
    -
    -**title:** `typing.Optional[str]` — Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used. -
    +
    +
    client.conversational_ai.post_avatar(...)
    -**small:** `typing.Optional[bool]` — Whether to use small player or not. If not provided, default value set in the Player settings is used. - -
    -
    +#### 📝 Description
    -**text_color:** `typing.Optional[str]` — Text color used in the player. If not provided, default text color set in the Player settings is used. - -
    -
    -
    -**background_color:** `typing.Optional[str]` — Background color used in the player. If not provided, default background color set in the Player settings is used. - +Sets the avatar for an agent displayed in the widget
    - -
    -
    - -**sessionization:** `typing.Optional[int]` — Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used. -
    +#### 🔌 Usage +
    -**voice_id:** `typing.Optional[str]` — Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used. - -
    -
    -
    -**model_id:** `typing.Optional[str]` — TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used. - +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.conversational_ai.post_avatar( + agent_id="21m00Tcm4TlvDq8ikWAM", +) + +```
    + + + +#### ⚙️ Parameters
    -**file:** `from __future__ import annotations +
    +
    -typing.Optional[core.File]` — See core.File for more documentation +**agent_id:** `str` — The id of an agent. This is returned on agent creation.
    @@ -5963,7 +6510,9 @@ typing.Optional[core.File]` — See core.File for more documentation
    -**auto_convert:** `typing.Optional[bool]` — Whether to auto convert the project to audio or not. +**avatar_file:** `from __future__ import annotations + +core.File` — See core.File for more documentation
    @@ -5983,8 +6532,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -## Usage -
    client.usage.get_characters_usage_metrics(...) +
    client.conversational_ai.get_knowledge_base_document(...)
    @@ -5996,7 +6544,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -Returns the credit usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis. +Get details about a specific documentation making up the agent's knowledge base
    @@ -6016,9 +6564,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.usage.get_characters_usage_metrics( - start_unix=1, - end_unix=1, +client.conversational_ai.get_knowledge_base_document( + agent_id="21m00Tcm4TlvDq8ikWAM", + documentation_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -6035,23 +6583,7 @@ client.usage.get_characters_usage_metrics(
    -**start_unix:** `int` — UTC Unix timestamp for the start of the usage window, in milliseconds. To include the first day of the window, the timestamp should be at 00:00:00 of that day. - -
    -
    - -
    -
    - -**end_unix:** `int` — UTC Unix timestamp for the end of the usage window, in milliseconds. To include the last day of the window, the timestamp should be at 23:59:59 of that day. - -
    -
    - -
    -
    - -**include_workspace_metrics:** `typing.Optional[bool]` — Whether or not to include the statistics of the entire workspace. +**agent_id:** `str` — The id of an agent. This is returned on agent creation.
    @@ -6059,7 +6591,7 @@ client.usage.get_characters_usage_metrics(
    -**breakdown_type:** `typing.Optional[BreakdownTypes]` — How to break down the information. Cannot be "user" if include_workspace_metrics is False. +**documentation_id:** `str` — The id of a document from the agent's knowledge base. This is returned on document addition.
    @@ -6079,8 +6611,7 @@ client.usage.get_characters_usage_metrics(
    -## PronunciationDictionary -
    client.pronunciation_dictionary.add_from_file(...) +
    client.conversational_ai.add_agent_secret(...)
    @@ -6092,7 +6623,7 @@ client.usage.get_characters_usage_metrics(
    -Creates a new pronunciation dictionary from a lexicon .PLS file +Uploads a file or reference a webpage for the agent to use as part of it's knowledge base
    @@ -6112,8 +6643,10 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.add_from_file( +client.conversational_ai.add_agent_secret( + agent_id="21m00Tcm4TlvDq8ikWAM", name="name", + secret_value="secret_value", ) ``` @@ -6130,17 +6663,7 @@ client.pronunciation_dictionary.add_from_file(
    -**name:** `str` — The name of the pronunciation dictionary, used for identification only. - -
    -
    - -
    -
    - -**file:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation +**agent_id:** `str` — The id of an agent. This is returned on agent creation.
    @@ -6148,7 +6671,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -**description:** `typing.Optional[str]` — A description of the pronunciation dictionary, used for identification only. +**name:** `str` — A name to help identify a particular agent secret
    @@ -6156,7 +6679,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -**workspace_access:** `typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess]` — Should be one of 'editor' or 'viewer'. If not provided, defaults to no access. +**secret_value:** `str` — A value to be encrypted and used by the agent
    @@ -6176,7 +6699,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -
    client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(...) +
    client.conversational_ai.create_knowledge_base_document(...)
    @@ -6188,7 +6711,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -Add rules to the pronunciation dictionary +Uploads a file or reference a webpage for the agent to use as part of it's knowledge base
    @@ -6204,22 +6727,12 @@ Add rules to the pronunciation dictionary ```python from elevenlabs import ElevenLabs -from elevenlabs.pronunciation_dictionary import ( - PronunciationDictionaryRule_Phoneme, -) client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary( - pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", - rules=[ - PronunciationDictionaryRule_Phoneme( - string_to_replace="rules", - phoneme="rules", - alphabet="rules", - ) - ], +client.conversational_ai.create_knowledge_base_document( + agent_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -6236,7 +6749,7 @@ client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(
    -**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary +**agent_id:** `str` — The id of an agent. This is returned on agent creation.
    @@ -6244,11 +6757,17 @@ client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(
    -**rules:** `typing.Sequence[PronunciationDictionaryRule]` +**url:** `typing.Optional[str]` — URL to a page of documentation that the agent will have access to in order to interact with users. + +
    +
    -List of pronunciation rules. Rule can be either: - an alias rule: {'string_to_replace': 'a', 'type': 'alias', 'alias': 'b', } - or a phoneme rule: {'string_to_replace': 'a', 'type': 'phoneme', 'phoneme': 'b', 'alphabet': 'ipa' } +
    +
    + +**file:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation
    @@ -6268,7 +6787,7 @@ List of pronunciation rules. Rule can be either:
    -
    client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(...) +
    client.conversational_ai.get_agents(...)
    @@ -6280,7 +6799,7 @@ List of pronunciation rules. Rule can be either:
    -Remove rules from the pronunciation dictionary +Returns a page of your agents and their metadata.
    @@ -6300,10 +6819,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary( - pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", - rule_strings=["rule_strings"], -) +client.conversational_ai.get_agents() ``` @@ -6319,7 +6835,7 @@ client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
    -**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary +**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
    @@ -6327,7 +6843,15 @@ client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
    -**rule_strings:** `typing.Sequence[str]` — List of strings to remove from the pronunciation dictionary. +**page_size:** `typing.Optional[int]` — How many Agents to return at maximum. Can not exceed 100, defaults to 30. + +
    +
    + +
    +
    + +**search:** `typing.Optional[str]` — Search by agents name.
    @@ -6347,7 +6871,7 @@ client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
    -
    client.pronunciation_dictionary.download(...) +
    client.conversational_ai.get_conversations(...)
    @@ -6359,7 +6883,7 @@ client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
    -Get PLS file with a pronunciation dictionary version rules +Get all conversations of agents that user owns. With option to restrict to a specific agent.
    @@ -6379,9 +6903,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.download( - dictionary_id="Fm6AvNgS53NXe6Kqxp3e", - version_id="KZFyRUq3R6kaqhKI146w", +client.conversational_ai.get_conversations( + agent_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -6398,7 +6921,7 @@ client.pronunciation_dictionary.download(
    -**dictionary_id:** `str` — The id of the pronunciation dictionary +**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
    @@ -6406,7 +6929,23 @@ client.pronunciation_dictionary.download(
    -**version_id:** `str` — The id of the version of the pronunciation dictionary +**agent_id:** `typing.Optional[str]` — The id of the agent you're taking the action on. + +
    +
    + +
    +
    + +**call_successful:** `typing.Optional[EvaluationSuccessResult]` — The result of the success evaluation + +
    +
    + +
    +
    + +**page_size:** `typing.Optional[int]` — How many conversations to return at maximum. Can not exceed 100, defaults to 30.
    @@ -6426,7 +6965,7 @@ client.pronunciation_dictionary.download(
    -
    client.pronunciation_dictionary.get(...) +
    client.conversational_ai.get_conversation(...)
    @@ -6438,7 +6977,7 @@ client.pronunciation_dictionary.download(
    -Get metadata for a pronunciation dictionary +Get the details of a particular conversation
    @@ -6458,8 +6997,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.get( - pronunciation_dictionary_id="Fm6AvNgS53NXe6Kqxp3e", +client.conversational_ai.get_conversation( + conversation_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -6476,7 +7015,7 @@ client.pronunciation_dictionary.get(
    -**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary +**conversation_id:** `str` — The id of the conversation you're taking the action on.
    @@ -6496,7 +7035,7 @@ client.pronunciation_dictionary.get(
    -
    client.pronunciation_dictionary.get_all(...) +
    client.conversational_ai.get_conversation_audio(...)
    @@ -6508,7 +7047,7 @@ client.pronunciation_dictionary.get(
    -Get a list of the pronunciation dictionaries you have access to and their metadata +Get the audio recording of a particular conversation
    @@ -6528,8 +7067,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.get_all( - page_size=1, +client.conversational_ai.get_conversation_audio( + conversation_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -6546,15 +7085,7 @@ client.pronunciation_dictionary.get_all(
    -**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response. - -
    -
    - -
    -
    - -**page_size:** `typing.Optional[int]` — How many pronunciation dictionaries to return at maximum. Can not exceed 100, defaults to 30. +**conversation_id:** `str` — The id of the conversation you're taking the action on.
    diff --git a/src/elevenlabs/__init__.py b/src/elevenlabs/__init__.py index d6225664..98bee6ba 100644 --- a/src/elevenlabs/__init__.py +++ b/src/elevenlabs/__init__.py @@ -2,25 +2,76 @@ from .types import ( Accent, + AddAgentSecretResponseModel, AddChapterResponseModel, + AddKnowledgeBaseResponseModel, AddProjectResponseModel, AddPronunciationDictionaryResponseModel, AddPronunciationDictionaryRulesResponseModel, AddVoiceIvcResponseModel, AddVoiceResponseModel, Age, + AgentBan, + AgentConfig, + AgentConfigOverride, + AgentMetadataResponseModel, + AgentPlatformSettings, + AgentSummaryResponseModel, + AllowlistItem, + ArrayJsonSchemaProperty, + ArrayJsonSchemaPropertyItems, + AsrConversationalConfig, + AsrInputFormat, + AsrProvider, + AsrQuality, AudioNativeCreateProjectResponseModel, AudioNativeGetEmbedCodeResponseModel, + AuthSettings, + AuthorizationMethod, + BanReasonType, BreakdownTypes, ChapterResponse, ChapterSnapshotResponse, ChapterSnapshotsResponse, ChapterState, ChapterStatisticsResponse, + ClientEvent, + ClientToolConfig, + ConvAiNewSecretConfig, + ConvAiSecretLocator, + ConvAiStoredSecretConfig, + ConversationChargingCommonModel, + ConversationConfig, + ConversationConfigClientOverride, + ConversationHistoryAnalysisCommonModel, + ConversationHistoryEvaluationCriteriaResultCommonModel, + ConversationHistoryMetadataCommonModel, + ConversationHistoryTranscriptCommonModel, + ConversationHistoryTranscriptCommonModelRole, + ConversationHistoryTranscriptToolCallCommonModel, + ConversationHistoryTranscriptToolResultCommonModel, + ConversationInitiationClientData, + ConversationSignedUrlResponseModel, + ConversationSummaryResponseModel, + ConversationSummaryResponseModelStatus, + ConversationTokenDbModel, + ConversationTokenPurpose, + ConversationalConfig, + CreateAgentResponseModel, Currency, + CustomLlm, + DataCollectionResultCommonModel, DoDubbingResponse, DubbingMetadataResponse, EditProjectResponseModel, + EmbedConfig, + EmbedConfigAvatar, + 
EmbedConfigAvatar_Image, + EmbedConfigAvatar_Orb, + EmbedConfigAvatar_Url, + EmbedVariant, + EvaluationSettings, + EvaluationSuccessResult, ExtendedSubscriptionResponseModelBillingPeriod, ExtendedSubscriptionResponseModelCharacterRefreshPeriod, ExtendedSubscriptionResponseModelCurrency, @@ -28,7 +79,16 @@ FineTuningResponse, FineTuningResponseModelStateValue, Gender, + GetAgentEmbedResponseModel, + GetAgentLinkResponseModel, + GetAgentResponseModel, + GetAgentsPageResponseModel, GetChaptersResponse, + GetConversationResponseModel, + GetConversationResponseModelStatus, + GetConversationsPageResponseModel, + GetKnowledgeBaseReponseModel, + GetKnowledgeBaseReponseModelType, GetLibraryVoicesResponse, GetProjectsResponse, GetPronunciationDictionariesMetadataResponseModel, @@ -40,42 +100,63 @@ HistoryAlignmentsResponseModel, HistoryItem, HttpValidationError, + ImageAvatar, Invoice, + KnowledgeBaseLocator, + KnowledgeBaseLocatorType, LanguageResponse, LibraryVoiceResponse, LibraryVoiceResponseModelCategory, + LiteralJsonSchemaProperty, + LiteralJsonSchemaPropertyType, + Llm, ManualVerificationFileResponse, ManualVerificationResponse, Model, ModelRatesResponseModel, ModelResponseModelConcurrencyGroup, + ObjectJsonSchemaProperty, + ObjectJsonSchemaPropertyPropertiesValue, OptimizeStreamingLatency, + OrbAvatar, OutputFormat, + PostAgentAvatarResponseModel, ProfilePageResponseModel, ProjectExtendedResponseModel, ProjectExtendedResponseModelAccessLevel, + ProjectExtendedResponseModelApplyTextNormalization, + ProjectExtendedResponseModelFiction, ProjectExtendedResponseModelQualityPreset, ProjectExtendedResponseModelTargetAudience, ProjectResponse, ProjectResponseModelAccessLevel, + ProjectResponseModelFiction, ProjectResponseModelTargetAudience, ProjectSnapshotResponse, ProjectSnapshotUploadResponseModel, ProjectSnapshotUploadResponseModelStatus, ProjectSnapshotsResponse, ProjectState, + PromptAgent, + PromptAgentOverride, + PromptAgentToolsItem, + PromptAgentToolsItem_Client, 
+ PromptAgentToolsItem_Webhook, + PromptEvaluationCriteria, PronunciationDictionaryAliasRuleRequestModel, PronunciationDictionaryPhonemeRuleRequestModel, PronunciationDictionaryVersionLocator, PronunciationDictionaryVersionResponseModel, + PydanticPronunciationDictionaryVersionLocator, + QueryParamsJsonSchema, + ReaderResourceResponseModel, + ReaderResourceResponseModelResourceType, RecordingResponse, RemovePronunciationDictionaryRulesResponseModel, ReviewStatus, SpeechHistoryItemResponse, SpeechHistoryItemResponseModelSource, SpeechHistoryItemResponseModelVoiceCategory, - SsoProviderResponseModel, - SsoProviderResponseModelProviderType, Subscription, SubscriptionResponse, SubscriptionResponseModelBillingPeriod, @@ -83,6 +164,14 @@ SubscriptionResponseModelCurrency, SubscriptionStatus, TextToSpeechAsStreamRequest, + TtsConversationalConfig, + TtsConversationalConfigOverride, + TtsConversationalModel, + TtsOptimizeStreamingLatency, + TtsOutputFormat, + TurnConfig, + TurnMode, + UrlAvatar, UsageCharactersResponseModel, User, ValidationError, @@ -102,12 +191,17 @@ VoiceSharingResponseModelCategory, VoiceSharingState, VoiceVerificationResponse, + WebhookToolApiSchemaConfig, + WebhookToolApiSchemaConfigMethod, + WebhookToolApiSchemaConfigRequestHeadersValue, + WebhookToolConfig, ) from .errors import UnprocessableEntityError from . 
import ( audio_isolation, audio_native, chapters, + conversational_ai, dubbing, history, models, @@ -125,10 +219,16 @@ workspace, ) from .client import AsyncElevenLabs, ElevenLabs +from .conversational_ai import ( + BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem, + BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New, + BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored, +) from .dubbing import GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType from .environment import ElevenLabsEnvironment +from .history import HistoryGetAllRequestSource from .play import play, save, stream -from .projects import ProjectsAddRequestTargetAudience +from .projects import ProjectsAddRequestFiction, ProjectsAddRequestTargetAudience from .pronunciation_dictionary import ( PronunciationDictionaryAddFromFileRequestWorkspaceAccess, PronunciationDictionaryRule, @@ -141,21 +241,43 @@ BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization, BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization, ) +from .text_to_voice import TextToVoiceCreatePreviewsRequestOutputFormat from .version import __version__ from .workspace import BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole __all__ = [ "Accent", + "AddAgentSecretResponseModel", "AddChapterResponseModel", + "AddKnowledgeBaseResponseModel", "AddProjectResponseModel", "AddPronunciationDictionaryResponseModel", "AddPronunciationDictionaryRulesResponseModel", "AddVoiceIvcResponseModel", "AddVoiceResponseModel", "Age", + "AgentBan", + "AgentConfig", + "AgentConfigOverride", + "AgentMetadataResponseModel", + "AgentPlatformSettings", + "AgentSummaryResponseModel", + "AllowlistItem", + "ArrayJsonSchemaProperty", + "ArrayJsonSchemaPropertyItems", + "AsrConversationalConfig", + "AsrInputFormat", + "AsrProvider", + "AsrQuality", "AsyncElevenLabs", "AudioNativeCreateProjectResponseModel", "AudioNativeGetEmbedCodeResponseModel", + 
"AuthSettings", + "AuthorizationMethod", + "BanReasonType", + "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem", + "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New", + "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored", "BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization", "BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization", "BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization", @@ -167,12 +289,45 @@ "ChapterSnapshotsResponse", "ChapterState", "ChapterStatisticsResponse", + "ClientEvent", + "ClientToolConfig", + "ConvAiNewSecretConfig", + "ConvAiSecretLocator", + "ConvAiStoredSecretConfig", + "ConversationChargingCommonModel", + "ConversationConfig", + "ConversationConfigClientOverride", + "ConversationHistoryAnalysisCommonModel", + "ConversationHistoryEvaluationCriteriaResultCommonModel", + "ConversationHistoryMetadataCommonModel", + "ConversationHistoryTranscriptCommonModel", + "ConversationHistoryTranscriptCommonModelRole", + "ConversationHistoryTranscriptToolCallCommonModel", + "ConversationHistoryTranscriptToolResultCommonModel", + "ConversationInitiationClientData", + "ConversationSignedUrlResponseModel", + "ConversationSummaryResponseModel", + "ConversationSummaryResponseModelStatus", + "ConversationTokenDbModel", + "ConversationTokenPurpose", + "ConversationalConfig", + "CreateAgentResponseModel", "Currency", + "CustomLlm", + "DataCollectionResultCommonModel", "DoDubbingResponse", "DubbingMetadataResponse", "EditProjectResponseModel", "ElevenLabs", "ElevenLabsEnvironment", + "EmbedConfig", + "EmbedConfigAvatar", + "EmbedConfigAvatar_Image", + "EmbedConfigAvatar_Orb", + "EmbedConfigAvatar_Url", + "EmbedVariant", + "EvaluationSettings", + "EvaluationSuccessResult", "ExtendedSubscriptionResponseModelBillingPeriod", "ExtendedSubscriptionResponseModelCharacterRefreshPeriod", 
"ExtendedSubscriptionResponseModelCurrency", @@ -180,7 +335,16 @@ "FineTuningResponse", "FineTuningResponseModelStateValue", "Gender", + "GetAgentEmbedResponseModel", + "GetAgentLinkResponseModel", + "GetAgentResponseModel", + "GetAgentsPageResponseModel", "GetChaptersResponse", + "GetConversationResponseModel", + "GetConversationResponseModelStatus", + "GetConversationsPageResponseModel", + "GetKnowledgeBaseReponseModel", + "GetKnowledgeBaseReponseModelType", "GetLibraryVoicesResponse", "GetProjectsResponse", "GetPronunciationDictionariesMetadataResponseModel", @@ -191,33 +355,54 @@ "History", "HistoryAlignmentResponseModel", "HistoryAlignmentsResponseModel", + "HistoryGetAllRequestSource", "HistoryItem", "HttpValidationError", + "ImageAvatar", "Invoice", + "KnowledgeBaseLocator", + "KnowledgeBaseLocatorType", "LanguageResponse", "LibraryVoiceResponse", "LibraryVoiceResponseModelCategory", + "LiteralJsonSchemaProperty", + "LiteralJsonSchemaPropertyType", + "Llm", "ManualVerificationFileResponse", "ManualVerificationResponse", "Model", "ModelRatesResponseModel", "ModelResponseModelConcurrencyGroup", + "ObjectJsonSchemaProperty", + "ObjectJsonSchemaPropertyPropertiesValue", "OptimizeStreamingLatency", + "OrbAvatar", "OutputFormat", + "PostAgentAvatarResponseModel", "ProfilePageResponseModel", "ProjectExtendedResponseModel", "ProjectExtendedResponseModelAccessLevel", + "ProjectExtendedResponseModelApplyTextNormalization", + "ProjectExtendedResponseModelFiction", "ProjectExtendedResponseModelQualityPreset", "ProjectExtendedResponseModelTargetAudience", "ProjectResponse", "ProjectResponseModelAccessLevel", + "ProjectResponseModelFiction", "ProjectResponseModelTargetAudience", "ProjectSnapshotResponse", "ProjectSnapshotUploadResponseModel", "ProjectSnapshotUploadResponseModelStatus", "ProjectSnapshotsResponse", "ProjectState", + "ProjectsAddRequestFiction", "ProjectsAddRequestTargetAudience", + "PromptAgent", + "PromptAgentOverride", + "PromptAgentToolsItem", + 
"PromptAgentToolsItem_Client", + "PromptAgentToolsItem_Webhook", + "PromptEvaluationCriteria", "PronunciationDictionaryAddFromFileRequestWorkspaceAccess", "PronunciationDictionaryAliasRuleRequestModel", "PronunciationDictionaryPhonemeRuleRequestModel", @@ -226,14 +411,16 @@ "PronunciationDictionaryRule_Phoneme", "PronunciationDictionaryVersionLocator", "PronunciationDictionaryVersionResponseModel", + "PydanticPronunciationDictionaryVersionLocator", + "QueryParamsJsonSchema", + "ReaderResourceResponseModel", + "ReaderResourceResponseModelResourceType", "RecordingResponse", "RemovePronunciationDictionaryRulesResponseModel", "ReviewStatus", "SpeechHistoryItemResponse", "SpeechHistoryItemResponseModelSource", "SpeechHistoryItemResponseModelVoiceCategory", - "SsoProviderResponseModel", - "SsoProviderResponseModelProviderType", "Subscription", "SubscriptionResponse", "SubscriptionResponseModelBillingPeriod", @@ -241,7 +428,16 @@ "SubscriptionResponseModelCurrency", "SubscriptionStatus", "TextToSpeechAsStreamRequest", + "TextToVoiceCreatePreviewsRequestOutputFormat", + "TtsConversationalConfig", + "TtsConversationalConfigOverride", + "TtsConversationalModel", + "TtsOptimizeStreamingLatency", + "TtsOutputFormat", + "TurnConfig", + "TurnMode", "UnprocessableEntityError", + "UrlAvatar", "UsageCharactersResponseModel", "User", "ValidationError", @@ -261,10 +457,15 @@ "VoiceSharingResponseModelCategory", "VoiceSharingState", "VoiceVerificationResponse", + "WebhookToolApiSchemaConfig", + "WebhookToolApiSchemaConfigMethod", + "WebhookToolApiSchemaConfigRequestHeadersValue", + "WebhookToolConfig", "__version__", "audio_isolation", "audio_native", "chapters", + "conversational_ai", "dubbing", "history", "models", diff --git a/src/elevenlabs/audio_isolation/client.py b/src/elevenlabs/audio_isolation/client.py index cae2e69a..b52b3203 100644 --- a/src/elevenlabs/audio_isolation/client.py +++ b/src/elevenlabs/audio_isolation/client.py @@ -37,15 +37,6 @@ def audio_isolation( ------ 
typing.Iterator[bytes] Successful Response - - Examples - -------- - from elevenlabs import ElevenLabs - - client = ElevenLabs( - api_key="YOUR_API_KEY", - ) - client.audio_isolation.audio_isolation() """ with self._client_wrapper.httpx_client.stream( "v1/audio-isolation", @@ -97,15 +88,6 @@ def audio_isolation_stream( ------ typing.Iterator[bytes] Successful Response - - Examples - -------- - from elevenlabs import ElevenLabs - - client = ElevenLabs( - api_key="YOUR_API_KEY", - ) - client.audio_isolation.audio_isolation_stream() """ with self._client_wrapper.httpx_client.stream( "v1/audio-isolation/stream", @@ -162,23 +144,6 @@ async def audio_isolation( ------ typing.AsyncIterator[bytes] Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.audio_isolation.audio_isolation() - - - asyncio.run(main()) """ async with self._client_wrapper.httpx_client.stream( "v1/audio-isolation", @@ -230,23 +195,6 @@ async def audio_isolation_stream( ------ typing.AsyncIterator[bytes] Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.audio_isolation.audio_isolation_stream() - - - asyncio.run(main()) """ async with self._client_wrapper.httpx_client.stream( "v1/audio-isolation/stream", diff --git a/src/elevenlabs/base_client.py b/src/elevenlabs/base_client.py index 50a0d659..a67c7e0e 100644 --- a/src/elevenlabs/base_client.py +++ b/src/elevenlabs/base_client.py @@ -18,11 +18,12 @@ from .projects.client import ProjectsClient from .chapters.client import ChaptersClient from .dubbing.client import DubbingClient -from .workspace.client import WorkspaceClient from .models.client import ModelsClient from .audio_native.client import AudioNativeClient from .usage.client import 
UsageClient from .pronunciation_dictionary.client import PronunciationDictionaryClient +from .workspace.client import WorkspaceClient +from .conversational_ai.client import ConversationalAiClient from .core.client_wrapper import AsyncClientWrapper from .history.client import AsyncHistoryClient from .text_to_sound_effects.client import AsyncTextToSoundEffectsClient @@ -37,11 +38,12 @@ from .projects.client import AsyncProjectsClient from .chapters.client import AsyncChaptersClient from .dubbing.client import AsyncDubbingClient -from .workspace.client import AsyncWorkspaceClient from .models.client import AsyncModelsClient from .audio_native.client import AsyncAudioNativeClient from .usage.client import AsyncUsageClient from .pronunciation_dictionary.client import AsyncPronunciationDictionaryClient +from .workspace.client import AsyncWorkspaceClient +from .conversational_ai.client import AsyncConversationalAiClient class BaseElevenLabs: @@ -115,11 +117,12 @@ def __init__( self.projects = ProjectsClient(client_wrapper=self._client_wrapper) self.chapters = ChaptersClient(client_wrapper=self._client_wrapper) self.dubbing = DubbingClient(client_wrapper=self._client_wrapper) - self.workspace = WorkspaceClient(client_wrapper=self._client_wrapper) self.models = ModelsClient(client_wrapper=self._client_wrapper) self.audio_native = AudioNativeClient(client_wrapper=self._client_wrapper) self.usage = UsageClient(client_wrapper=self._client_wrapper) self.pronunciation_dictionary = PronunciationDictionaryClient(client_wrapper=self._client_wrapper) + self.workspace = WorkspaceClient(client_wrapper=self._client_wrapper) + self.conversational_ai = ConversationalAiClient(client_wrapper=self._client_wrapper) class AsyncBaseElevenLabs: @@ -193,11 +196,12 @@ def __init__( self.projects = AsyncProjectsClient(client_wrapper=self._client_wrapper) self.chapters = AsyncChaptersClient(client_wrapper=self._client_wrapper) self.dubbing = AsyncDubbingClient(client_wrapper=self._client_wrapper) - 
self.workspace = AsyncWorkspaceClient(client_wrapper=self._client_wrapper) self.models = AsyncModelsClient(client_wrapper=self._client_wrapper) self.audio_native = AsyncAudioNativeClient(client_wrapper=self._client_wrapper) self.usage = AsyncUsageClient(client_wrapper=self._client_wrapper) self.pronunciation_dictionary = AsyncPronunciationDictionaryClient(client_wrapper=self._client_wrapper) + self.workspace = AsyncWorkspaceClient(client_wrapper=self._client_wrapper) + self.conversational_ai = AsyncConversationalAiClient(client_wrapper=self._client_wrapper) def _get_base_url(https://github.com/*,%20base_url:%20typing.Optional[str]%20=%20None,%20environment:%20ElevenLabsEnvironment) -> str: diff --git a/src/elevenlabs/conversational_ai/__init__.py b/src/elevenlabs/conversational_ai/__init__.py new file mode 100644 index 00000000..a05e4b59 --- /dev/null +++ b/src/elevenlabs/conversational_ai/__init__.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +from .types import ( + BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem, + BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New, + BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored, +) + +__all__ = [ + "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem", + "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New", + "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored", +] diff --git a/src/elevenlabs/conversational_ai/client.py b/src/elevenlabs/conversational_ai/client.py new file mode 100644 index 00000000..da92c7f9 --- /dev/null +++ b/src/elevenlabs/conversational_ai/client.py @@ -0,0 +1,2209 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..core.request_options import RequestOptions +from ..types.conversation_signed_url_response_model import ConversationSignedUrlResponseModel +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.conversational_config import ConversationalConfig +from ..types.agent_platform_settings import AgentPlatformSettings +from ..types.create_agent_response_model import CreateAgentResponseModel +from ..core.serialization import convert_and_respect_annotation_metadata +from ..types.get_agent_response_model import GetAgentResponseModel +from ..core.jsonable_encoder import jsonable_encoder +from .types.body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item import ( + BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem, +) +from ..types.get_agent_embed_response_model import GetAgentEmbedResponseModel +from ..types.get_agent_link_response_model import GetAgentLinkResponseModel +from .. 
import core +from ..types.post_agent_avatar_response_model import PostAgentAvatarResponseModel +from ..types.get_knowledge_base_reponse_model import GetKnowledgeBaseReponseModel +from ..types.add_agent_secret_response_model import AddAgentSecretResponseModel +from ..types.add_knowledge_base_response_model import AddKnowledgeBaseResponseModel +from ..types.get_agents_page_response_model import GetAgentsPageResponseModel +from ..types.evaluation_success_result import EvaluationSuccessResult +from ..types.get_conversations_page_response_model import GetConversationsPageResponseModel +from ..types.get_conversation_response_model import GetConversationResponseModel +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class ConversationalAiClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def get_signed_url( + self, *, agent_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> ConversationSignedUrlResponseModel: + """ + Get a signed url to start a conversation with an agent with an agent that requires authorization + + Parameters + ---------- + agent_id : str + The id of the agent you're taking the action on. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ConversationSignedUrlResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.get_signed_url( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v1/convai/conversation/get_signed_url", + method="GET", + params={ + "agent_id": agent_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConversationSignedUrlResponseModel, + construct_type( + type_=ConversationSignedUrlResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_agent( + self, + *, + conversation_config: ConversationalConfig, + platform_settings: typing.Optional[AgentPlatformSettings] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateAgentResponseModel: + """ + Create an agent from a config object + + Parameters + ---------- + conversation_config : ConversationalConfig + Conversation configuration for an agent + + platform_settings : typing.Optional[AgentPlatformSettings] + Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. + + name : typing.Optional[str] + A name to make the agent easier to find + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CreateAgentResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ConversationalConfig, ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.create_agent( + conversation_config=ConversationalConfig(), + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v1/convai/agents/create", + method="POST", + json={ + "conversation_config": convert_and_respect_annotation_metadata( + object_=conversation_config, annotation=ConversationalConfig, direction="write" + ), + "platform_settings": convert_and_respect_annotation_metadata( + object_=platform_settings, annotation=AgentPlatformSettings, direction="write" + ), + "name": name, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CreateAgentResponseModel, + construct_type( + type_=CreateAgentResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_agent( + self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetAgentResponseModel: + """ + Retrieve config for an agent + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + GetAgentResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.get_agent( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetAgentResponseModel, + construct_type( + type_=GetAgentResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_agent( + self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Dict[str, str]: + """ + Delete an agent + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Dict[str, str] + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.delete_agent( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Dict[str, str], + construct_type( + type_=typing.Dict[str, str], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_agent( + self, + agent_id: str, + *, + conversation_config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + platform_settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + secrets: typing.Optional[ + typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem] + ] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetAgentResponseModel: + """ + Patches an Agent settings + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. 
+ + conversation_config : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Conversation configuration for an agent + + platform_settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. + + secrets : typing.Optional[typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem]] + A list of secrets for the agent. Can be used to add new secrets or update and delete the existing ones + + name : typing.Optional[str] + A name to make the agent easier to find + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GetAgentResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.update_agent( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}", + method="PATCH", + json={ + "conversation_config": conversation_config, + "platform_settings": platform_settings, + "secrets": convert_and_respect_annotation_metadata( + object_=secrets, + annotation=typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem], + direction="write", + ), + "name": name, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetAgentResponseModel, + construct_type( + type_=GetAgentResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, 
body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_widget( + self, + agent_id: str, + *, + conversation_signature: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetAgentEmbedResponseModel: + """ + Retrieve the widget configuration for an agent + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + conversation_signature : typing.Optional[str] + An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GetAgentEmbedResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.get_widget( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}/widget", + method="GET", + params={ + "conversation_signature": conversation_signature, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetAgentEmbedResponseModel, + construct_type( + type_=GetAgentEmbedResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_link( + self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> 
GetAgentLinkResponseModel: + """ + Get the current link used to share the agent with others + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GetAgentLinkResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.get_link( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}/link", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetAgentLinkResponseModel, + construct_type( + type_=GetAgentLinkResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_avatar( + self, agent_id: str, *, avatar_file: core.File, request_options: typing.Optional[RequestOptions] = None + ) -> PostAgentAvatarResponseModel: + """ + Sets the avatar for an agent displayed in the widget + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + avatar_file : core.File + See core.File for more documentation + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + PostAgentAvatarResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.post_avatar( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}/avatar", + method="POST", + data={}, + files={ + "avatar_file": avatar_file, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + PostAgentAvatarResponseModel, + construct_type( + type_=PostAgentAvatarResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_knowledge_base_document( + self, agent_id: str, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetKnowledgeBaseReponseModel: + """ + Get details about a specific documentation making up the agent's knowledge base + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + documentation_id : str + The id of a document from the agent's knowledge base. This is returned on document addition. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + GetKnowledgeBaseReponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.get_knowledge_base_document( + agent_id="21m00Tcm4TlvDq8ikWAM", + documentation_id="21m00Tcm4TlvDq8ikWAM", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}/knowledge-base/{jsonable_encoder(documentation_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetKnowledgeBaseReponseModel, + construct_type( + type_=GetKnowledgeBaseReponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def add_agent_secret( + self, agent_id: str, *, name: str, secret_value: str, request_options: typing.Optional[RequestOptions] = None + ) -> AddAgentSecretResponseModel: + """ + Uploads a file or reference a webpage for the agent to use as part of it's knowledge base + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + name : str + A name to help identify a particular agent secret + + secret_value : str + A value to be encrypted and used by the agent + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AddAgentSecretResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.add_agent_secret( + agent_id="21m00Tcm4TlvDq8ikWAM", + name="name", + secret_value="secret_value", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}/add-secret", + method="POST", + json={ + "name": name, + "secret_value": secret_value, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AddAgentSecretResponseModel, + construct_type( + type_=AddAgentSecretResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_knowledge_base_document( + self, + agent_id: str, + *, + url: typing.Optional[str] = OMIT, + file: typing.Optional[core.File] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AddKnowledgeBaseResponseModel: + """ + Uploads a file or reference a webpage for the agent to use as part of it's knowledge base + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + url : typing.Optional[str] + URL to a page of documentation that the agent will have access to in order to interact with users. + + file : typing.Optional[core.File] + See core.File for more documentation + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AddKnowledgeBaseResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.create_knowledge_base_document( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}/add-to-knowledge-base", + method="POST", + data={ + "url": url, + }, + files={ + "file": file, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AddKnowledgeBaseResponseModel, + construct_type( + type_=AddKnowledgeBaseResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_agents( + self, + *, + cursor: typing.Optional[str] = None, + page_size: typing.Optional[int] = None, + search: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetAgentsPageResponseModel: + """ + Returns a page of your agents and their metadata. + + Parameters + ---------- + cursor : typing.Optional[str] + Used for fetching next page. Cursor is returned in the response. + + page_size : typing.Optional[int] + How many Agents to return at maximum. Can not exceed 100, defaults to 30. + + search : typing.Optional[str] + Search by agents name. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + GetAgentsPageResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.get_agents() + """ + _response = self._client_wrapper.httpx_client.request( + "v1/convai/agents", + method="GET", + params={ + "cursor": cursor, + "page_size": page_size, + "search": search, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetAgentsPageResponseModel, + construct_type( + type_=GetAgentsPageResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_conversations( + self, + *, + cursor: typing.Optional[str] = None, + agent_id: typing.Optional[str] = None, + call_successful: typing.Optional[EvaluationSuccessResult] = None, + page_size: typing.Optional[int] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetConversationsPageResponseModel: + """ + Get all conversations of agents that user owns. With option to restrict to a specific agent. + + Parameters + ---------- + cursor : typing.Optional[str] + Used for fetching next page. Cursor is returned in the response. + + agent_id : typing.Optional[str] + The id of the agent you're taking the action on. + + call_successful : typing.Optional[EvaluationSuccessResult] + The result of the success evaluation + + page_size : typing.Optional[int] + How many conversations to return at maximum. Can not exceed 100, defaults to 30. 
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GetConversationsPageResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.get_conversations( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v1/convai/conversations", + method="GET", + params={ + "cursor": cursor, + "agent_id": agent_id, + "call_successful": call_successful, + "page_size": page_size, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetConversationsPageResponseModel, + construct_type( + type_=GetConversationsPageResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_conversation( + self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetConversationResponseModel: + """ + Get the details of a particular conversation + + Parameters + ---------- + conversation_id : str + The id of the conversation you're taking the action on. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + GetConversationResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.get_conversation( + conversation_id="21m00Tcm4TlvDq8ikWAM", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/conversations/{jsonable_encoder(conversation_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetConversationResponseModel, + construct_type( + type_=GetConversationResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_conversation_audio( + self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Get the audio recording of a particular conversation + + Parameters + ---------- + conversation_id : str + The id of the conversation you're taking the action on. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + None + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.get_conversation_audio( + conversation_id="21m00Tcm4TlvDq8ikWAM", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/conversations/{jsonable_encoder(conversation_id)}/audio", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncConversationalAiClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def get_signed_url( + self, *, agent_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> ConversationSignedUrlResponseModel: + """ + Get a signed url to start a conversation with an agent with an agent that requires authorization + + Parameters + ---------- + agent_id : str + The id of the agent you're taking the action on. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ConversationSignedUrlResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.get_signed_url( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v1/convai/conversation/get_signed_url", + method="GET", + params={ + "agent_id": agent_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConversationSignedUrlResponseModel, + construct_type( + type_=ConversationSignedUrlResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_agent( + self, + *, + conversation_config: ConversationalConfig, + platform_settings: typing.Optional[AgentPlatformSettings] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateAgentResponseModel: + """ + Create an agent from a config object + + Parameters + ---------- + conversation_config : ConversationalConfig + Conversation configuration for an agent + + platform_settings : typing.Optional[AgentPlatformSettings] + Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. 
+ + name : typing.Optional[str] + A name to make the agent easier to find + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CreateAgentResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs, ConversationalConfig + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.create_agent( + conversation_config=ConversationalConfig(), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v1/convai/agents/create", + method="POST", + json={ + "conversation_config": convert_and_respect_annotation_metadata( + object_=conversation_config, annotation=ConversationalConfig, direction="write" + ), + "platform_settings": convert_and_respect_annotation_metadata( + object_=platform_settings, annotation=AgentPlatformSettings, direction="write" + ), + "name": name, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CreateAgentResponseModel, + construct_type( + type_=CreateAgentResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_agent( + self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetAgentResponseModel: + """ + Retrieve config for an agent + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. 
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GetAgentResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.get_agent( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetAgentResponseModel, + construct_type( + type_=GetAgentResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_agent( + self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Dict[str, str]: + """ + Delete an agent + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Dict[str, str] + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.delete_agent( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Dict[str, str], + construct_type( + type_=typing.Dict[str, str], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_agent( + self, + agent_id: str, + *, + conversation_config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + platform_settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + secrets: typing.Optional[ + typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem] + ] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetAgentResponseModel: + """ + Patches an Agent settings + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. 
+ + conversation_config : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Conversation configuration for an agent + + platform_settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. + + secrets : typing.Optional[typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem]] + A list of secrets for the agent. Can be used to add new secrets or update and delete the existing ones + + name : typing.Optional[str] + A name to make the agent easier to find + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GetAgentResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.update_agent( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}", + method="PATCH", + json={ + "conversation_config": conversation_config, + "platform_settings": platform_settings, + "secrets": convert_and_respect_annotation_metadata( + object_=secrets, + annotation=typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem], + direction="write", + ), + "name": name, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetAgentResponseModel, + construct_type( + type_=GetAgentResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = 
_response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_widget( + self, + agent_id: str, + *, + conversation_signature: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetAgentEmbedResponseModel: + """ + Retrieve the widget configuration for an agent + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + conversation_signature : typing.Optional[str] + An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GetAgentEmbedResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.get_widget( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}/widget", + method="GET", + params={ + "conversation_signature": conversation_signature, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetAgentEmbedResponseModel, + construct_type( + type_=GetAgentEmbedResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, 
body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_link( + self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetAgentLinkResponseModel: + """ + Get the current link used to share the agent with others + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GetAgentLinkResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.get_link( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}/link", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetAgentLinkResponseModel, + construct_type( + type_=GetAgentLinkResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def post_avatar( + self, agent_id: str, *, avatar_file: core.File, request_options: typing.Optional[RequestOptions] = None + ) -> PostAgentAvatarResponseModel: + """ + Sets the avatar for an agent displayed in the widget + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. 
+ + avatar_file : core.File + See core.File for more documentation + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + PostAgentAvatarResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.post_avatar( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}/avatar", + method="POST", + data={}, + files={ + "avatar_file": avatar_file, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + PostAgentAvatarResponseModel, + construct_type( + type_=PostAgentAvatarResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_knowledge_base_document( + self, agent_id: str, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetKnowledgeBaseReponseModel: + """ + Get details about a specific documentation making up the agent's knowledge base + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + documentation_id : str + The id of a document from the agent's knowledge base. This is returned on document addition. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + GetKnowledgeBaseReponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.get_knowledge_base_document( + agent_id="21m00Tcm4TlvDq8ikWAM", + documentation_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}/knowledge-base/{jsonable_encoder(documentation_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetKnowledgeBaseReponseModel, + construct_type( + type_=GetKnowledgeBaseReponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def add_agent_secret( + self, agent_id: str, *, name: str, secret_value: str, request_options: typing.Optional[RequestOptions] = None + ) -> AddAgentSecretResponseModel: + """ + Uploads a file or reference a webpage for the agent to use as part of it's knowledge base + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + name : str + A name to help identify a particular agent secret + + secret_value : str + A value to be encrypted and used by the agent + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AddAgentSecretResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.add_agent_secret( + agent_id="21m00Tcm4TlvDq8ikWAM", + name="name", + secret_value="secret_value", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}/add-secret", + method="POST", + json={ + "name": name, + "secret_value": secret_value, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AddAgentSecretResponseModel, + construct_type( + type_=AddAgentSecretResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_knowledge_base_document( + self, + agent_id: str, + *, + url: typing.Optional[str] = OMIT, + file: typing.Optional[core.File] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AddKnowledgeBaseResponseModel: + """ + Uploads a file or reference a webpage for the agent to use as part of it's knowledge base + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + url : typing.Optional[str] + URL to a page of documentation that the agent will have access to in order to interact with users. 
+ + file : typing.Optional[core.File] + See core.File for more documentation + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AddKnowledgeBaseResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.create_knowledge_base_document( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}/add-to-knowledge-base", + method="POST", + data={ + "url": url, + }, + files={ + "file": file, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AddKnowledgeBaseResponseModel, + construct_type( + type_=AddKnowledgeBaseResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_agents( + self, + *, + cursor: typing.Optional[str] = None, + page_size: typing.Optional[int] = None, + search: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetAgentsPageResponseModel: + """ + Returns a page of your agents and their metadata. + + Parameters + ---------- + cursor : typing.Optional[str] + Used for fetching next page. Cursor is returned in the response. + + page_size : typing.Optional[int] + How many Agents to return at maximum. Can not exceed 100, defaults to 30. 
+ + search : typing.Optional[str] + Search by agents name. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GetAgentsPageResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.get_agents() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v1/convai/agents", + method="GET", + params={ + "cursor": cursor, + "page_size": page_size, + "search": search, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetAgentsPageResponseModel, + construct_type( + type_=GetAgentsPageResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_conversations( + self, + *, + cursor: typing.Optional[str] = None, + agent_id: typing.Optional[str] = None, + call_successful: typing.Optional[EvaluationSuccessResult] = None, + page_size: typing.Optional[int] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetConversationsPageResponseModel: + """ + Get all conversations of agents that user owns. With option to restrict to a specific agent. + + Parameters + ---------- + cursor : typing.Optional[str] + Used for fetching next page. Cursor is returned in the response. + + agent_id : typing.Optional[str] + The id of the agent you're taking the action on. 
+ + call_successful : typing.Optional[EvaluationSuccessResult] + The result of the success evaluation + + page_size : typing.Optional[int] + How many conversations to return at maximum. Can not exceed 100, defaults to 30. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GetConversationsPageResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.get_conversations( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v1/convai/conversations", + method="GET", + params={ + "cursor": cursor, + "agent_id": agent_id, + "call_successful": call_successful, + "page_size": page_size, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetConversationsPageResponseModel, + construct_type( + type_=GetConversationsPageResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_conversation( + self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetConversationResponseModel: + """ + Get the details of a particular conversation + + Parameters + ---------- + conversation_id : str + The id of the conversation you're taking the action on. 
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GetConversationResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.get_conversation( + conversation_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/conversations/{jsonable_encoder(conversation_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetConversationResponseModel, + construct_type( + type_=GetConversationResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_conversation_audio( + self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Get the audio recording of a particular conversation + + Parameters + ---------- + conversation_id : str + The id of the conversation you're taking the action on. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + None + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.get_conversation_audio( + conversation_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/conversations/{jsonable_encoder(conversation_id)}/audio", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/elevenlabs/conversational_ai/types/__init__.py b/src/elevenlabs/conversational_ai/types/__init__.py new file mode 100644 index 00000000..3d467b3a --- /dev/null +++ b/src/elevenlabs/conversational_ai/types/__init__.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item import ( + BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem, + BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New, + BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored, +) + +__all__ = [ + "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem", + "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New", + "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored", +] diff --git a/src/elevenlabs/conversational_ai/types/body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item.py b/src/elevenlabs/conversational_ai/types/body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item.py new file mode 100644 index 00000000..ffcbbd74 --- /dev/null +++ b/src/elevenlabs/conversational_ai/types/body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item.py @@ -0,0 +1,48 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +from ...core.unchecked_base_model import UncheckedBaseModel +import typing +from ...core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +import typing_extensions +from ...core.unchecked_base_model import UnionMetadata + + +class BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New(UncheckedBaseModel): + type: typing.Literal["new"] = "new" + name: str + value: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored(UncheckedBaseModel): + type: typing.Literal["stored"] = "stored" + secret_id: str + name: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem = typing_extensions.Annotated[ + typing.Union[ + BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New, + BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored, + ], + UnionMetadata(discriminant="type"), +] diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index 043753fc..6a14c535 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.13.0", + "X-Fern-SDK-Version": "1.13.1", } if self._api_key is not None: headers["xi-api-key"] = self._api_key diff --git a/src/elevenlabs/dubbing/client.py 
b/src/elevenlabs/dubbing/client.py index f9e3e4ab..5a429299 100644 --- a/src/elevenlabs/dubbing/client.py +++ b/src/elevenlabs/dubbing/client.py @@ -287,18 +287,6 @@ def get_dubbed_file( ------ typing.Iterator[bytes] Successful Response - - Examples - -------- - from elevenlabs import ElevenLabs - - client = ElevenLabs( - api_key="YOUR_API_KEY", - ) - client.dubbing.get_dubbed_file( - dubbing_id="string", - language_code="string", - ) """ with self._client_wrapper.httpx_client.stream( f"v1/dubbing/{jsonable_encoder(dubbing_id)}/audio/{jsonable_encoder(language_code)}", @@ -694,26 +682,6 @@ async def get_dubbed_file( ------ typing.AsyncIterator[bytes] Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.dubbing.get_dubbed_file( - dubbing_id="string", - language_code="string", - ) - - - asyncio.run(main()) """ async with self._client_wrapper.httpx_client.stream( f"v1/dubbing/{jsonable_encoder(dubbing_id)}/audio/{jsonable_encoder(language_code)}", diff --git a/src/elevenlabs/history/__init__.py b/src/elevenlabs/history/__init__.py index f3ea2659..5c94f169 100644 --- a/src/elevenlabs/history/__init__.py +++ b/src/elevenlabs/history/__init__.py @@ -1,2 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
+from .types import HistoryGetAllRequestSource + +__all__ = ["HistoryGetAllRequestSource"] diff --git a/src/elevenlabs/history/client.py b/src/elevenlabs/history/client.py index 6bb1f6f1..cd7367b4 100644 --- a/src/elevenlabs/history/client.py +++ b/src/elevenlabs/history/client.py @@ -2,6 +2,7 @@ import typing from ..core.client_wrapper import SyncClientWrapper +from .types.history_get_all_request_source import HistoryGetAllRequestSource from ..core.request_options import RequestOptions from ..types.get_speech_history_response import GetSpeechHistoryResponse from ..core.unchecked_base_model import construct_type @@ -27,6 +28,8 @@ def get_all( page_size: typing.Optional[int] = None, start_after_history_item_id: typing.Optional[str] = None, voice_id: typing.Optional[str] = None, + search: typing.Optional[str] = None, + source: typing.Optional[HistoryGetAllRequestSource] = None, request_options: typing.Optional[RequestOptions] = None, ) -> GetSpeechHistoryResponse: """ @@ -43,6 +46,12 @@ def get_all( voice_id : typing.Optional[str] Voice ID to be filtered for, you can use GET https://api.elevenlabs.io/v1/voices to receive a list of voices and their IDs. + search : typing.Optional[str] + search term used for filtering + + source : typing.Optional[HistoryGetAllRequestSource] + Source of the generated history item + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
@@ -70,6 +79,8 @@ def get_all( "page_size": page_size, "start_after_history_item_id": start_after_history_item_id, "voice_id": voice_id, + "search": search, + "source": source, }, request_options=request_options, ) @@ -347,6 +358,8 @@ async def get_all( page_size: typing.Optional[int] = None, start_after_history_item_id: typing.Optional[str] = None, voice_id: typing.Optional[str] = None, + search: typing.Optional[str] = None, + source: typing.Optional[HistoryGetAllRequestSource] = None, request_options: typing.Optional[RequestOptions] = None, ) -> GetSpeechHistoryResponse: """ @@ -363,6 +376,12 @@ async def get_all( voice_id : typing.Optional[str] Voice ID to be filtered for, you can use GET https://api.elevenlabs.io/v1/voices to receive a list of voices and their IDs. + search : typing.Optional[str] + search term used for filtering + + source : typing.Optional[HistoryGetAllRequestSource] + Source of the generated history item + request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -398,6 +417,8 @@ async def main() -> None: "page_size": page_size, "start_after_history_item_id": start_after_history_item_id, "voice_id": voice_id, + "search": search, + "source": source, }, request_options=request_options, ) diff --git a/src/elevenlabs/history/types/__init__.py b/src/elevenlabs/history/types/__init__.py new file mode 100644 index 00000000..c1e50696 --- /dev/null +++ b/src/elevenlabs/history/types/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .history_get_all_request_source import HistoryGetAllRequestSource + +__all__ = ["HistoryGetAllRequestSource"] diff --git a/src/elevenlabs/history/types/history_get_all_request_source.py b/src/elevenlabs/history/types/history_get_all_request_source.py new file mode 100644 index 00000000..fc4371db --- /dev/null +++ b/src/elevenlabs/history/types/history_get_all_request_source.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +HistoryGetAllRequestSource = typing.Union[typing.Literal["TTS", "STS"], typing.Any] diff --git a/src/elevenlabs/projects/__init__.py b/src/elevenlabs/projects/__init__.py index 7bdfec7e..749f44bc 100644 --- a/src/elevenlabs/projects/__init__.py +++ b/src/elevenlabs/projects/__init__.py @@ -1,5 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -from .types import ProjectsAddRequestTargetAudience +from .types import ProjectsAddRequestFiction, ProjectsAddRequestTargetAudience -__all__ = ["ProjectsAddRequestTargetAudience"] +__all__ = ["ProjectsAddRequestFiction", "ProjectsAddRequestTargetAudience"] diff --git a/src/elevenlabs/projects/client.py b/src/elevenlabs/projects/client.py index 5a944a87..9c249e19 100644 --- a/src/elevenlabs/projects/client.py +++ b/src/elevenlabs/projects/client.py @@ -11,6 +11,7 @@ from ..core.api_error import ApiError from .. 
import core from .types.projects_add_request_target_audience import ProjectsAddRequestTargetAudience +from .types.projects_add_request_fiction import ProjectsAddRequestFiction from ..types.add_project_response_model import AddProjectResponseModel from ..types.project_extended_response_model import ProjectExtendedResponseModel from ..core.jsonable_encoder import jsonable_encoder @@ -104,6 +105,8 @@ def add( acx_volume_normalization: typing.Optional[bool] = OMIT, volume_normalization: typing.Optional[bool] = OMIT, pronunciation_dictionary_locators: typing.Optional[typing.List[str]] = OMIT, + fiction: typing.Optional[ProjectsAddRequestFiction] = OMIT, + quality_check_on: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> AddProjectResponseModel: """ @@ -176,6 +179,12 @@ def add( pronunciation_dictionary_locators : typing.Optional[typing.List[str]] A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first. + fiction : typing.Optional[ProjectsAddRequestFiction] + An optional fiction of the project. + + quality_check_on : typing.Optional[bool] + Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion. 
+ request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -221,6 +230,8 @@ def add( "acx_volume_normalization": acx_volume_normalization, "volume_normalization": volume_normalization, "pronunciation_dictionary_locators": pronunciation_dictionary_locators, + "fiction": fiction, + "quality_check_on": quality_check_on, }, files={ "from_document": from_document, @@ -322,6 +333,7 @@ def edit_basic_project_info( author: typing.Optional[str] = OMIT, isbn_number: typing.Optional[str] = OMIT, volume_normalization: typing.Optional[bool] = OMIT, + quality_check_on: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> EditProjectResponseModel: """ @@ -353,6 +365,9 @@ def edit_basic_project_info( volume_normalization : typing.Optional[bool] When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements + quality_check_on : typing.Optional[bool] + Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
@@ -386,6 +401,7 @@ def edit_basic_project_info( "author": author, "isbn_number": isbn_number, "volume_normalization": volume_normalization, + "quality_check_on": quality_check_on, }, request_options=request_options, omit=OMIT, @@ -620,19 +636,6 @@ def stream_audio( ------ typing.Iterator[bytes] Successful Response - - Examples - -------- - from elevenlabs import ElevenLabs - - client = ElevenLabs( - api_key="YOUR_API_KEY", - ) - client.projects.stream_audio( - project_id="string", - project_snapshot_id="string", - convert_to_mpeg=True, - ) """ with self._client_wrapper.httpx_client.stream( f"v1/projects/{jsonable_encoder(project_id)}/snapshots/{jsonable_encoder(project_snapshot_id)}/stream", @@ -965,6 +968,8 @@ async def add( acx_volume_normalization: typing.Optional[bool] = OMIT, volume_normalization: typing.Optional[bool] = OMIT, pronunciation_dictionary_locators: typing.Optional[typing.List[str]] = OMIT, + fiction: typing.Optional[ProjectsAddRequestFiction] = OMIT, + quality_check_on: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> AddProjectResponseModel: """ @@ -1037,6 +1042,12 @@ async def add( pronunciation_dictionary_locators : typing.Optional[typing.List[str]] A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first. 
+ fiction : typing.Optional[ProjectsAddRequestFiction] + An optional fiction of the project. + + quality_check_on : typing.Optional[bool] + Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion. + request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -1090,6 +1101,8 @@ async def main() -> None: "acx_volume_normalization": acx_volume_normalization, "volume_normalization": volume_normalization, "pronunciation_dictionary_locators": pronunciation_dictionary_locators, + "fiction": fiction, + "quality_check_on": quality_check_on, }, files={ "from_document": from_document, @@ -1199,6 +1212,7 @@ async def edit_basic_project_info( author: typing.Optional[str] = OMIT, isbn_number: typing.Optional[str] = OMIT, volume_normalization: typing.Optional[bool] = OMIT, + quality_check_on: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> EditProjectResponseModel: """ @@ -1230,6 +1244,9 @@ async def edit_basic_project_info( volume_normalization : typing.Optional[bool] When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements + quality_check_on : typing.Optional[bool] + Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
@@ -1271,6 +1288,7 @@ async def main() -> None: "author": author, "isbn_number": isbn_number, "volume_normalization": volume_normalization, + "quality_check_on": quality_check_on, }, request_options=request_options, omit=OMIT, @@ -1529,27 +1547,6 @@ async def stream_audio( ------ typing.AsyncIterator[bytes] Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.projects.stream_audio( - project_id="string", - project_snapshot_id="string", - convert_to_mpeg=True, - ) - - - asyncio.run(main()) """ async with self._client_wrapper.httpx_client.stream( f"v1/projects/{jsonable_encoder(project_id)}/snapshots/{jsonable_encoder(project_snapshot_id)}/stream", diff --git a/src/elevenlabs/projects/types/__init__.py b/src/elevenlabs/projects/types/__init__.py index 42c21d40..e0531cef 100644 --- a/src/elevenlabs/projects/types/__init__.py +++ b/src/elevenlabs/projects/types/__init__.py @@ -1,5 +1,6 @@ # This file was auto-generated by Fern from our API Definition. +from .projects_add_request_fiction import ProjectsAddRequestFiction from .projects_add_request_target_audience import ProjectsAddRequestTargetAudience -__all__ = ["ProjectsAddRequestTargetAudience"] +__all__ = ["ProjectsAddRequestFiction", "ProjectsAddRequestTargetAudience"] diff --git a/src/elevenlabs/projects/types/projects_add_request_fiction.py b/src/elevenlabs/projects/types/projects_add_request_fiction.py new file mode 100644 index 00000000..a5232ff3 --- /dev/null +++ b/src/elevenlabs/projects/types/projects_add_request_fiction.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +ProjectsAddRequestFiction = typing.Union[typing.Literal["fiction", "non-fiction"], typing.Any] diff --git a/src/elevenlabs/speech_to_speech/client.py b/src/elevenlabs/speech_to_speech/client.py index 981ca920..d4e661b7 100644 --- a/src/elevenlabs/speech_to_speech/client.py +++ b/src/elevenlabs/speech_to_speech/client.py @@ -63,7 +63,7 @@ def convert( Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string. seed : typing.Optional[int] - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. remove_background_noise : typing.Optional[bool] If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. @@ -75,20 +75,6 @@ def convert( ------ typing.Iterator[bytes] Successful Response - - Examples - -------- - from elevenlabs import ElevenLabs - - client = ElevenLabs( - api_key="YOUR_API_KEY", - ) - client.speech_to_speech.convert( - voice_id="string", - enable_logging=True, - optimize_streaming_latency="0", - output_format="mp3_22050_32", - ) """ with self._client_wrapper.httpx_client.stream( f"v1/speech-to-speech/{jsonable_encoder(voice_id)}", @@ -184,7 +170,7 @@ def convert_as_stream( Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string. 
seed : typing.Optional[int] - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. remove_background_noise : typing.Optional[bool] If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. @@ -196,20 +182,6 @@ def convert_as_stream( ------ typing.Iterator[bytes] Successful Response - - Examples - -------- - from elevenlabs import ElevenLabs - - client = ElevenLabs( - api_key="YOUR_API_KEY", - ) - client.speech_to_speech.convert_as_stream( - voice_id="string", - enable_logging="0", - optimize_streaming_latency="mp3_22050_32", - output_format="string", - ) """ with self._client_wrapper.httpx_client.stream( f"v1/speech-to-speech/{jsonable_encoder(voice_id)}/stream", @@ -299,7 +271,7 @@ async def convert( Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string. seed : typing.Optional[int] - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. remove_background_noise : typing.Optional[bool] If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. 
@@ -311,28 +283,6 @@ async def convert( ------ typing.AsyncIterator[bytes] Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.speech_to_speech.convert( - voice_id="string", - enable_logging=True, - optimize_streaming_latency="0", - output_format="mp3_22050_32", - ) - - - asyncio.run(main()) """ async with self._client_wrapper.httpx_client.stream( f"v1/speech-to-speech/{jsonable_encoder(voice_id)}", @@ -428,7 +378,7 @@ async def convert_as_stream( Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string. seed : typing.Optional[int] - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. remove_background_noise : typing.Optional[bool] If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. 
@@ -440,28 +390,6 @@ async def convert_as_stream( ------ typing.AsyncIterator[bytes] Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.speech_to_speech.convert_as_stream( - voice_id="string", - enable_logging="0", - optimize_streaming_latency="mp3_22050_32", - output_format="string", - ) - - - asyncio.run(main()) """ async with self._client_wrapper.httpx_client.stream( f"v1/speech-to-speech/{jsonable_encoder(voice_id)}/stream", diff --git a/src/elevenlabs/text_to_sound_effects/client.py b/src/elevenlabs/text_to_sound_effects/client.py index af994638..cbf8c36c 100644 --- a/src/elevenlabs/text_to_sound_effects/client.py +++ b/src/elevenlabs/text_to_sound_effects/client.py @@ -47,19 +47,6 @@ def convert( ------ typing.Iterator[bytes] Successful Response - - Examples - -------- - from elevenlabs import ElevenLabs - - client = ElevenLabs( - api_key="YOUR_API_KEY", - ) - client.text_to_sound_effects.convert( - text="string", - duration_seconds=1.1, - prompt_influence=1.1, - ) """ with self._client_wrapper.httpx_client.stream( "v1/sound-generation", @@ -128,27 +115,6 @@ async def convert( ------ typing.AsyncIterator[bytes] Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.text_to_sound_effects.convert( - text="string", - duration_seconds=1.1, - prompt_influence=1.1, - ) - - - asyncio.run(main()) """ async with self._client_wrapper.httpx_client.stream( "v1/sound-generation", diff --git a/src/elevenlabs/text_to_speech/client.py b/src/elevenlabs/text_to_speech/client.py index 6056554e..d58c083d 100644 --- a/src/elevenlabs/text_to_speech/client.py +++ b/src/elevenlabs/text_to_speech/client.py @@ -94,7 +94,7 @@ def convert( A list of pronunciation dictionary 
locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request seed : typing.Optional[int] - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. previous_text : typing.Optional[str] The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation. @@ -252,7 +252,7 @@ def convert_with_timestamps( A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request seed : typing.Optional[int] - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. previous_text : typing.Optional[str] The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation. @@ -405,7 +405,7 @@ def convert_as_stream( A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. 
You may have up to 3 locators per request seed : typing.Optional[int] - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. previous_text : typing.Optional[str] The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation. @@ -563,7 +563,7 @@ def stream_with_timestamps( A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request seed : typing.Optional[int] - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. previous_text : typing.Optional[str] The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation. @@ -714,7 +714,7 @@ async def convert( A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. 
You may have up to 3 locators per request seed : typing.Optional[int] - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. previous_text : typing.Optional[str] The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation. @@ -880,7 +880,7 @@ async def convert_with_timestamps( A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request seed : typing.Optional[int] - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. previous_text : typing.Optional[str] The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation. @@ -1041,7 +1041,7 @@ async def convert_as_stream( A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. 
You may have up to 3 locators per request seed : typing.Optional[int] - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. previous_text : typing.Optional[str] The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation. @@ -1207,7 +1207,7 @@ async def stream_with_timestamps( A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request seed : typing.Optional[int] - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. previous_text : typing.Optional[str] The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation. 
diff --git a/src/elevenlabs/text_to_voice/__init__.py b/src/elevenlabs/text_to_voice/__init__.py index f3ea2659..1a606e5d 100644 --- a/src/elevenlabs/text_to_voice/__init__.py +++ b/src/elevenlabs/text_to_voice/__init__.py @@ -1,2 +1,5 @@ # This file was auto-generated by Fern from our API Definition. +from .types import TextToVoiceCreatePreviewsRequestOutputFormat + +__all__ = ["TextToVoiceCreatePreviewsRequestOutputFormat"] diff --git a/src/elevenlabs/text_to_voice/client.py b/src/elevenlabs/text_to_voice/client.py index 5af3934c..53a00fb5 100644 --- a/src/elevenlabs/text_to_voice/client.py +++ b/src/elevenlabs/text_to_voice/client.py @@ -2,6 +2,7 @@ import typing from ..core.client_wrapper import SyncClientWrapper +from .types.text_to_voice_create_previews_request_output_format import TextToVoiceCreatePreviewsRequestOutputFormat from ..core.request_options import RequestOptions from ..types.voice_previews_response_model import VoicePreviewsResponseModel from ..core.unchecked_base_model import construct_type @@ -21,7 +22,13 @@ def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper def create_previews( - self, *, voice_description: str, text: str, request_options: typing.Optional[RequestOptions] = None + self, + *, + voice_description: str, + text: str, + output_format: typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat] = None, + auto_generate_text: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> VoicePreviewsResponseModel: """ Generate a custom voice based on voice description. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. If you like the a voice previewand want to create the voice call /v1/text-to-voice/create-voice-from-preview with the generated_voice_id to create the voice. 
@@ -34,6 +41,23 @@ def create_previews( text : str Text to generate, text length has to be between 100 and 1000. + output_format : typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat] + Output format of the generated audio. Must be one of: + mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps. + mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps. + mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps. + mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps. + mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps. + mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above. + pcm_16000 - PCM format (S16LE) with 16kHz sample rate. + pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate. + pcm_24000 - PCM format (S16LE) with 24kHz sample rate. + pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above. + ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs. + + auto_generate_text : typing.Optional[bool] + Whether to automatically generate a text suitable for the voice description. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
@@ -50,16 +74,20 @@ def create_previews( api_key="YOUR_API_KEY", ) client.text_to_voice.create_previews( - voice_description="voice_description", - text="text", + voice_description="A sassy little squeaky mouse", + text="Every act of kindness, no matter how small, carries value and can make a difference, as no gesture of goodwill is ever wasted.", ) """ _response = self._client_wrapper.httpx_client.request( "v1/text-to-voice/create-previews", method="POST", + params={ + "output_format": output_format, + }, json={ "voice_description": voice_description, "text": text, + "auto_generate_text": auto_generate_text, }, request_options=request_options, omit=OMIT, @@ -134,9 +162,9 @@ def create_voice_from_preview( api_key="YOUR_API_KEY", ) client.text_to_voice.create_voice_from_preview( - voice_name="voice_name", - voice_description="voice_description", - generated_voice_id="generated_voice_id", + voice_name="Little squeaky mouse", + voice_description="A sassy little squeaky mouse", + generated_voice_id="37HceQefKmEi3bGovXjL", ) """ _response = self._client_wrapper.httpx_client.request( @@ -182,7 +210,13 @@ def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper async def create_previews( - self, *, voice_description: str, text: str, request_options: typing.Optional[RequestOptions] = None + self, + *, + voice_description: str, + text: str, + output_format: typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat] = None, + auto_generate_text: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> VoicePreviewsResponseModel: """ Generate a custom voice based on voice description. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. If you like the a voice previewand want to create the voice call /v1/text-to-voice/create-voice-from-preview with the generated_voice_id to create the voice. 
@@ -195,6 +229,23 @@ async def create_previews( text : str Text to generate, text length has to be between 100 and 1000. + output_format : typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat] + Output format of the generated audio. Must be one of: + mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps. + mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps. + mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps. + mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps. + mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps. + mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above. + pcm_16000 - PCM format (S16LE) with 16kHz sample rate. + pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate. + pcm_24000 - PCM format (S16LE) with 24kHz sample rate. + pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above. + ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs. + + auto_generate_text : typing.Optional[bool] + Whether to automatically generate a text suitable for the voice description. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
@@ -216,8 +267,8 @@ async def create_previews( async def main() -> None: await client.text_to_voice.create_previews( - voice_description="voice_description", - text="text", + voice_description="A sassy little squeaky mouse", + text="Every act of kindness, no matter how small, carries value and can make a difference, as no gesture of goodwill is ever wasted.", ) @@ -226,9 +277,13 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v1/text-to-voice/create-previews", method="POST", + params={ + "output_format": output_format, + }, json={ "voice_description": voice_description, "text": text, + "auto_generate_text": auto_generate_text, }, request_options=request_options, omit=OMIT, @@ -308,9 +363,9 @@ async def create_voice_from_preview( async def main() -> None: await client.text_to_voice.create_voice_from_preview( - voice_name="voice_name", - voice_description="voice_description", - generated_voice_id="generated_voice_id", + voice_name="Little squeaky mouse", + voice_description="A sassy little squeaky mouse", + generated_voice_id="37HceQefKmEi3bGovXjL", ) diff --git a/src/elevenlabs/text_to_voice/types/__init__.py b/src/elevenlabs/text_to_voice/types/__init__.py new file mode 100644 index 00000000..39c033b9 --- /dev/null +++ b/src/elevenlabs/text_to_voice/types/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .text_to_voice_create_previews_request_output_format import TextToVoiceCreatePreviewsRequestOutputFormat + +__all__ = ["TextToVoiceCreatePreviewsRequestOutputFormat"] diff --git a/src/elevenlabs/text_to_voice/types/text_to_voice_create_previews_request_output_format.py b/src/elevenlabs/text_to_voice/types/text_to_voice_create_previews_request_output_format.py new file mode 100644 index 00000000..6e6980fc --- /dev/null +++ b/src/elevenlabs/text_to_voice/types/text_to_voice_create_previews_request_output_format.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TextToVoiceCreatePreviewsRequestOutputFormat = typing.Union[ + typing.Literal[ + "mp3_22050_32", + "mp3_44100_32", + "mp3_44100_64", + "mp3_44100_96", + "mp3_44100_128", + "mp3_44100_192", + "pcm_16000", + "pcm_22050", + "pcm_24000", + "pcm_44100", + "ulaw_8000", + ], + typing.Any, +] diff --git a/src/elevenlabs/types/__init__.py b/src/elevenlabs/types/__init__.py index 2363ce41..389cd770 100644 --- a/src/elevenlabs/types/__init__.py +++ b/src/elevenlabs/types/__init__.py @@ -1,25 +1,80 @@ # This file was auto-generated by Fern from our API Definition. 
from .accent import Accent +from .add_agent_secret_response_model import AddAgentSecretResponseModel from .add_chapter_response_model import AddChapterResponseModel +from .add_knowledge_base_response_model import AddKnowledgeBaseResponseModel from .add_project_response_model import AddProjectResponseModel from .add_pronunciation_dictionary_response_model import AddPronunciationDictionaryResponseModel from .add_pronunciation_dictionary_rules_response_model import AddPronunciationDictionaryRulesResponseModel from .add_voice_ivc_response_model import AddVoiceIvcResponseModel from .add_voice_response_model import AddVoiceResponseModel from .age import Age +from .agent_ban import AgentBan +from .agent_config import AgentConfig +from .agent_config_override import AgentConfigOverride +from .agent_metadata_response_model import AgentMetadataResponseModel +from .agent_platform_settings import AgentPlatformSettings +from .agent_summary_response_model import AgentSummaryResponseModel +from .allowlist_item import AllowlistItem +from .array_json_schema_property import ArrayJsonSchemaProperty +from .array_json_schema_property_items import ArrayJsonSchemaPropertyItems +from .asr_conversational_config import AsrConversationalConfig +from .asr_input_format import AsrInputFormat +from .asr_provider import AsrProvider +from .asr_quality import AsrQuality from .audio_native_create_project_response_model import AudioNativeCreateProjectResponseModel from .audio_native_get_embed_code_response_model import AudioNativeGetEmbedCodeResponseModel +from .auth_settings import AuthSettings +from .authorization_method import AuthorizationMethod +from .ban_reason_type import BanReasonType from .breakdown_types import BreakdownTypes from .chapter_response import ChapterResponse from .chapter_snapshot_response import ChapterSnapshotResponse from .chapter_snapshots_response import ChapterSnapshotsResponse from .chapter_state import ChapterState from .chapter_statistics_response import 
ChapterStatisticsResponse +from .client_event import ClientEvent +from .client_tool_config import ClientToolConfig +from .conv_ai_new_secret_config import ConvAiNewSecretConfig +from .conv_ai_secret_locator import ConvAiSecretLocator +from .conv_ai_stored_secret_config import ConvAiStoredSecretConfig +from .conversation_charging_common_model import ConversationChargingCommonModel +from .conversation_config import ConversationConfig +from .conversation_config_client_override import ConversationConfigClientOverride +from .conversation_history_analysis_common_model import ConversationHistoryAnalysisCommonModel +from .conversation_history_evaluation_criteria_result_common_model import ( + ConversationHistoryEvaluationCriteriaResultCommonModel, +) +from .conversation_history_metadata_common_model import ConversationHistoryMetadataCommonModel +from .conversation_history_transcript_common_model import ConversationHistoryTranscriptCommonModel +from .conversation_history_transcript_common_model_role import ConversationHistoryTranscriptCommonModelRole +from .conversation_history_transcript_tool_call_common_model import ConversationHistoryTranscriptToolCallCommonModel +from .conversation_history_transcript_tool_result_common_model import ConversationHistoryTranscriptToolResultCommonModel +from .conversation_initiation_client_data import ConversationInitiationClientData +from .conversation_signed_url_response_model import ConversationSignedUrlResponseModel +from .conversation_summary_response_model import ConversationSummaryResponseModel +from .conversation_summary_response_model_status import ConversationSummaryResponseModelStatus +from .conversation_token_db_model import ConversationTokenDbModel +from .conversation_token_purpose import ConversationTokenPurpose +from .conversational_config import ConversationalConfig +from .create_agent_response_model import CreateAgentResponseModel from .currency import Currency +from .custom_llm import CustomLlm +from 
.data_collection_result_common_model import DataCollectionResultCommonModel from .do_dubbing_response import DoDubbingResponse from .dubbing_metadata_response import DubbingMetadataResponse from .edit_project_response_model import EditProjectResponseModel +from .embed_config import EmbedConfig +from .embed_config_avatar import ( + EmbedConfigAvatar, + EmbedConfigAvatar_Image, + EmbedConfigAvatar_Orb, + EmbedConfigAvatar_Url, +) +from .embed_variant import EmbedVariant +from .evaluation_settings import EvaluationSettings +from .evaluation_success_result import EvaluationSuccessResult from .extended_subscription_response_model_billing_period import ExtendedSubscriptionResponseModelBillingPeriod from .extended_subscription_response_model_character_refresh_period import ( ExtendedSubscriptionResponseModelCharacterRefreshPeriod, @@ -29,7 +84,16 @@ from .fine_tuning_response import FineTuningResponse from .fine_tuning_response_model_state_value import FineTuningResponseModelStateValue from .gender import Gender +from .get_agent_embed_response_model import GetAgentEmbedResponseModel +from .get_agent_link_response_model import GetAgentLinkResponseModel +from .get_agent_response_model import GetAgentResponseModel +from .get_agents_page_response_model import GetAgentsPageResponseModel from .get_chapters_response import GetChaptersResponse +from .get_conversation_response_model import GetConversationResponseModel +from .get_conversation_response_model_status import GetConversationResponseModelStatus +from .get_conversations_page_response_model import GetConversationsPageResponseModel +from .get_knowledge_base_reponse_model import GetKnowledgeBaseReponseModel +from .get_knowledge_base_reponse_model_type import GetKnowledgeBaseReponseModelType from .get_library_voices_response import GetLibraryVoicesResponse from .get_projects_response import GetProjectsResponse from .get_pronunciation_dictionaries_metadata_response_model import GetPronunciationDictionariesMetadataResponseModel 
@@ -41,42 +105,61 @@ from .history_alignments_response_model import HistoryAlignmentsResponseModel from .history_item import HistoryItem from .http_validation_error import HttpValidationError +from .image_avatar import ImageAvatar from .invoice import Invoice +from .knowledge_base_locator import KnowledgeBaseLocator +from .knowledge_base_locator_type import KnowledgeBaseLocatorType from .language_response import LanguageResponse from .library_voice_response import LibraryVoiceResponse from .library_voice_response_model_category import LibraryVoiceResponseModelCategory +from .literal_json_schema_property import LiteralJsonSchemaProperty +from .literal_json_schema_property_type import LiteralJsonSchemaPropertyType +from .llm import Llm from .manual_verification_file_response import ManualVerificationFileResponse from .manual_verification_response import ManualVerificationResponse from .model import Model from .model_rates_response_model import ModelRatesResponseModel from .model_response_model_concurrency_group import ModelResponseModelConcurrencyGroup +from .object_json_schema_property import ObjectJsonSchemaProperty +from .object_json_schema_property_properties_value import ObjectJsonSchemaPropertyPropertiesValue from .optimize_streaming_latency import OptimizeStreamingLatency +from .orb_avatar import OrbAvatar from .output_format import OutputFormat +from .post_agent_avatar_response_model import PostAgentAvatarResponseModel from .profile_page_response_model import ProfilePageResponseModel from .project_extended_response_model import ProjectExtendedResponseModel from .project_extended_response_model_access_level import ProjectExtendedResponseModelAccessLevel +from .project_extended_response_model_apply_text_normalization import ProjectExtendedResponseModelApplyTextNormalization +from .project_extended_response_model_fiction import ProjectExtendedResponseModelFiction from .project_extended_response_model_quality_preset import 
ProjectExtendedResponseModelQualityPreset from .project_extended_response_model_target_audience import ProjectExtendedResponseModelTargetAudience from .project_response import ProjectResponse from .project_response_model_access_level import ProjectResponseModelAccessLevel +from .project_response_model_fiction import ProjectResponseModelFiction from .project_response_model_target_audience import ProjectResponseModelTargetAudience from .project_snapshot_response import ProjectSnapshotResponse from .project_snapshot_upload_response_model import ProjectSnapshotUploadResponseModel from .project_snapshot_upload_response_model_status import ProjectSnapshotUploadResponseModelStatus from .project_snapshots_response import ProjectSnapshotsResponse from .project_state import ProjectState +from .prompt_agent import PromptAgent +from .prompt_agent_override import PromptAgentOverride +from .prompt_agent_tools_item import PromptAgentToolsItem, PromptAgentToolsItem_Client, PromptAgentToolsItem_Webhook +from .prompt_evaluation_criteria import PromptEvaluationCriteria from .pronunciation_dictionary_alias_rule_request_model import PronunciationDictionaryAliasRuleRequestModel from .pronunciation_dictionary_phoneme_rule_request_model import PronunciationDictionaryPhonemeRuleRequestModel from .pronunciation_dictionary_version_locator import PronunciationDictionaryVersionLocator from .pronunciation_dictionary_version_response_model import PronunciationDictionaryVersionResponseModel +from .pydantic_pronunciation_dictionary_version_locator import PydanticPronunciationDictionaryVersionLocator +from .query_params_json_schema import QueryParamsJsonSchema +from .reader_resource_response_model import ReaderResourceResponseModel +from .reader_resource_response_model_resource_type import ReaderResourceResponseModelResourceType from .recording_response import RecordingResponse from .remove_pronunciation_dictionary_rules_response_model import RemovePronunciationDictionaryRulesResponseModel from 
.review_status import ReviewStatus from .speech_history_item_response import SpeechHistoryItemResponse from .speech_history_item_response_model_source import SpeechHistoryItemResponseModelSource from .speech_history_item_response_model_voice_category import SpeechHistoryItemResponseModelVoiceCategory -from .sso_provider_response_model import SsoProviderResponseModel -from .sso_provider_response_model_provider_type import SsoProviderResponseModelProviderType from .subscription import Subscription from .subscription_response import SubscriptionResponse from .subscription_response_model_billing_period import SubscriptionResponseModelBillingPeriod @@ -84,6 +167,14 @@ from .subscription_response_model_currency import SubscriptionResponseModelCurrency from .subscription_status import SubscriptionStatus from .text_to_speech_as_stream_request import TextToSpeechAsStreamRequest +from .tts_conversational_config import TtsConversationalConfig +from .tts_conversational_config_override import TtsConversationalConfigOverride +from .tts_conversational_model import TtsConversationalModel +from .tts_optimize_streaming_latency import TtsOptimizeStreamingLatency +from .tts_output_format import TtsOutputFormat +from .turn_config import TurnConfig +from .turn_mode import TurnMode +from .url_avatar import UrlAvatar from .usage_characters_response_model import UsageCharactersResponseModel from .user import User from .validation_error import ValidationError @@ -103,28 +194,83 @@ from .voice_sharing_response_model_category import VoiceSharingResponseModelCategory from .voice_sharing_state import VoiceSharingState from .voice_verification_response import VoiceVerificationResponse +from .webhook_tool_api_schema_config import WebhookToolApiSchemaConfig +from .webhook_tool_api_schema_config_method import WebhookToolApiSchemaConfigMethod +from .webhook_tool_api_schema_config_request_headers_value import WebhookToolApiSchemaConfigRequestHeadersValue +from .webhook_tool_config import 
WebhookToolConfig __all__ = [ "Accent", + "AddAgentSecretResponseModel", "AddChapterResponseModel", + "AddKnowledgeBaseResponseModel", "AddProjectResponseModel", "AddPronunciationDictionaryResponseModel", "AddPronunciationDictionaryRulesResponseModel", "AddVoiceIvcResponseModel", "AddVoiceResponseModel", "Age", + "AgentBan", + "AgentConfig", + "AgentConfigOverride", + "AgentMetadataResponseModel", + "AgentPlatformSettings", + "AgentSummaryResponseModel", + "AllowlistItem", + "ArrayJsonSchemaProperty", + "ArrayJsonSchemaPropertyItems", + "AsrConversationalConfig", + "AsrInputFormat", + "AsrProvider", + "AsrQuality", "AudioNativeCreateProjectResponseModel", "AudioNativeGetEmbedCodeResponseModel", + "AuthSettings", + "AuthorizationMethod", + "BanReasonType", "BreakdownTypes", "ChapterResponse", "ChapterSnapshotResponse", "ChapterSnapshotsResponse", "ChapterState", "ChapterStatisticsResponse", + "ClientEvent", + "ClientToolConfig", + "ConvAiNewSecretConfig", + "ConvAiSecretLocator", + "ConvAiStoredSecretConfig", + "ConversationChargingCommonModel", + "ConversationConfig", + "ConversationConfigClientOverride", + "ConversationHistoryAnalysisCommonModel", + "ConversationHistoryEvaluationCriteriaResultCommonModel", + "ConversationHistoryMetadataCommonModel", + "ConversationHistoryTranscriptCommonModel", + "ConversationHistoryTranscriptCommonModelRole", + "ConversationHistoryTranscriptToolCallCommonModel", + "ConversationHistoryTranscriptToolResultCommonModel", + "ConversationInitiationClientData", + "ConversationSignedUrlResponseModel", + "ConversationSummaryResponseModel", + "ConversationSummaryResponseModelStatus", + "ConversationTokenDbModel", + "ConversationTokenPurpose", + "ConversationalConfig", + "CreateAgentResponseModel", "Currency", + "CustomLlm", + "DataCollectionResultCommonModel", "DoDubbingResponse", "DubbingMetadataResponse", "EditProjectResponseModel", + "EmbedConfig", + "EmbedConfigAvatar", + "EmbedConfigAvatar_Image", + "EmbedConfigAvatar_Orb", + 
"EmbedConfigAvatar_Url", + "EmbedVariant", + "EvaluationSettings", + "EvaluationSuccessResult", "ExtendedSubscriptionResponseModelBillingPeriod", "ExtendedSubscriptionResponseModelCharacterRefreshPeriod", "ExtendedSubscriptionResponseModelCurrency", @@ -132,7 +278,16 @@ "FineTuningResponse", "FineTuningResponseModelStateValue", "Gender", + "GetAgentEmbedResponseModel", + "GetAgentLinkResponseModel", + "GetAgentResponseModel", + "GetAgentsPageResponseModel", "GetChaptersResponse", + "GetConversationResponseModel", + "GetConversationResponseModelStatus", + "GetConversationsPageResponseModel", + "GetKnowledgeBaseReponseModel", + "GetKnowledgeBaseReponseModelType", "GetLibraryVoicesResponse", "GetProjectsResponse", "GetPronunciationDictionariesMetadataResponseModel", @@ -144,42 +299,63 @@ "HistoryAlignmentsResponseModel", "HistoryItem", "HttpValidationError", + "ImageAvatar", "Invoice", + "KnowledgeBaseLocator", + "KnowledgeBaseLocatorType", "LanguageResponse", "LibraryVoiceResponse", "LibraryVoiceResponseModelCategory", + "LiteralJsonSchemaProperty", + "LiteralJsonSchemaPropertyType", + "Llm", "ManualVerificationFileResponse", "ManualVerificationResponse", "Model", "ModelRatesResponseModel", "ModelResponseModelConcurrencyGroup", + "ObjectJsonSchemaProperty", + "ObjectJsonSchemaPropertyPropertiesValue", "OptimizeStreamingLatency", + "OrbAvatar", "OutputFormat", + "PostAgentAvatarResponseModel", "ProfilePageResponseModel", "ProjectExtendedResponseModel", "ProjectExtendedResponseModelAccessLevel", + "ProjectExtendedResponseModelApplyTextNormalization", + "ProjectExtendedResponseModelFiction", "ProjectExtendedResponseModelQualityPreset", "ProjectExtendedResponseModelTargetAudience", "ProjectResponse", "ProjectResponseModelAccessLevel", + "ProjectResponseModelFiction", "ProjectResponseModelTargetAudience", "ProjectSnapshotResponse", "ProjectSnapshotUploadResponseModel", "ProjectSnapshotUploadResponseModelStatus", "ProjectSnapshotsResponse", "ProjectState", + "PromptAgent", 
+ "PromptAgentOverride", + "PromptAgentToolsItem", + "PromptAgentToolsItem_Client", + "PromptAgentToolsItem_Webhook", + "PromptEvaluationCriteria", "PronunciationDictionaryAliasRuleRequestModel", "PronunciationDictionaryPhonemeRuleRequestModel", "PronunciationDictionaryVersionLocator", "PronunciationDictionaryVersionResponseModel", + "PydanticPronunciationDictionaryVersionLocator", + "QueryParamsJsonSchema", + "ReaderResourceResponseModel", + "ReaderResourceResponseModelResourceType", "RecordingResponse", "RemovePronunciationDictionaryRulesResponseModel", "ReviewStatus", "SpeechHistoryItemResponse", "SpeechHistoryItemResponseModelSource", "SpeechHistoryItemResponseModelVoiceCategory", - "SsoProviderResponseModel", - "SsoProviderResponseModelProviderType", "Subscription", "SubscriptionResponse", "SubscriptionResponseModelBillingPeriod", @@ -187,6 +363,14 @@ "SubscriptionResponseModelCurrency", "SubscriptionStatus", "TextToSpeechAsStreamRequest", + "TtsConversationalConfig", + "TtsConversationalConfigOverride", + "TtsConversationalModel", + "TtsOptimizeStreamingLatency", + "TtsOutputFormat", + "TurnConfig", + "TurnMode", + "UrlAvatar", "UsageCharactersResponseModel", "User", "ValidationError", @@ -206,4 +390,8 @@ "VoiceSharingResponseModelCategory", "VoiceSharingState", "VoiceVerificationResponse", + "WebhookToolApiSchemaConfig", + "WebhookToolApiSchemaConfigMethod", + "WebhookToolApiSchemaConfigRequestHeadersValue", + "WebhookToolConfig", ] diff --git a/src/elevenlabs/types/sso_provider_response_model.py b/src/elevenlabs/types/add_agent_secret_response_model.py similarity index 66% rename from src/elevenlabs/types/sso_provider_response_model.py rename to src/elevenlabs/types/add_agent_secret_response_model.py index ee158271..88687d8b 100644 --- a/src/elevenlabs/types/sso_provider_response_model.py +++ b/src/elevenlabs/types/add_agent_secret_response_model.py @@ -1,16 +1,14 @@ # This file was auto-generated by Fern from our API Definition. 
from ..core.unchecked_base_model import UncheckedBaseModel -from .sso_provider_response_model_provider_type import SsoProviderResponseModelProviderType -import typing from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing import pydantic -class SsoProviderResponseModel(UncheckedBaseModel): - provider_type: SsoProviderResponseModelProviderType - provider_id: str - domains: typing.List[str] +class AddAgentSecretResponseModel(UncheckedBaseModel): + id: str + name: str if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/add_knowledge_base_response_model.py b/src/elevenlabs/types/add_knowledge_base_response_model.py new file mode 100644 index 00000000..e9105cb4 --- /dev/null +++ b/src/elevenlabs/types/add_knowledge_base_response_model.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class AddKnowledgeBaseResponseModel(UncheckedBaseModel): + id: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/agent_ban.py b/src/elevenlabs/types/agent_ban.py new file mode 100644 index 00000000..ac7027b6 --- /dev/null +++ b/src/elevenlabs/types/agent_ban.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .ban_reason_type import BanReasonType +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class AgentBan(UncheckedBaseModel): + at_unix: int + reason: typing.Optional[str] = None + reason_type: BanReasonType + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/agent_config.py b/src/elevenlabs/types/agent_config.py new file mode 100644 index 00000000..e7ff782c --- /dev/null +++ b/src/elevenlabs/types/agent_config.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +from .array_json_schema_property import ArrayJsonSchemaProperty +from .object_json_schema_property import ObjectJsonSchemaProperty +import typing +from .prompt_agent import PromptAgent +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class AgentConfig(UncheckedBaseModel): + prompt: typing.Optional[PromptAgent] = None + first_message: typing.Optional[str] = None + language: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(ArrayJsonSchemaProperty, AgentConfig=AgentConfig) +update_forward_refs(ObjectJsonSchemaProperty, AgentConfig=AgentConfig) diff --git a/src/elevenlabs/types/agent_config_override.py b/src/elevenlabs/types/agent_config_override.py new file mode 100644 index 00000000..a6f959fd --- /dev/null +++ 
b/src/elevenlabs/types/agent_config_override.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .prompt_agent_override import PromptAgentOverride +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class AgentConfigOverride(UncheckedBaseModel): + prompt: typing.Optional[PromptAgentOverride] = None + first_message: typing.Optional[str] = None + language: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/agent_metadata_response_model.py b/src/elevenlabs/types/agent_metadata_response_model.py new file mode 100644 index 00000000..3609829e --- /dev/null +++ b/src/elevenlabs/types/agent_metadata_response_model.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class AgentMetadataResponseModel(UncheckedBaseModel): + created_at_unix_secs: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/agent_platform_settings.py b/src/elevenlabs/types/agent_platform_settings.py new file mode 100644 index 00000000..595bf41a --- /dev/null +++ b/src/elevenlabs/types/agent_platform_settings.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .auth_settings import AuthSettings +from .evaluation_settings import EvaluationSettings +from .embed_config import EmbedConfig +from .literal_json_schema_property import LiteralJsonSchemaProperty +from .agent_ban import AgentBan +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class AgentPlatformSettings(UncheckedBaseModel): + auth: typing.Optional[AuthSettings] = None + evaluation: typing.Optional[EvaluationSettings] = None + widget: typing.Optional[EmbedConfig] = None + data_collection: typing.Optional[typing.Dict[str, LiteralJsonSchemaProperty]] = None + ban: typing.Optional[AgentBan] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/agent_summary_response_model.py b/src/elevenlabs/types/agent_summary_response_model.py new file mode 100644 index 00000000..91ec68bb --- /dev/null +++ b/src/elevenlabs/types/agent_summary_response_model.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class AgentSummaryResponseModel(UncheckedBaseModel): + agent_id: str + name: str + created_at_unix_secs: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/allowlist_item.py b/src/elevenlabs/types/allowlist_item.py new file mode 100644 index 00000000..3e10d4b7 --- /dev/null +++ b/src/elevenlabs/types/allowlist_item.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class AllowlistItem(UncheckedBaseModel): + hostname: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/array_json_schema_property.py b/src/elevenlabs/types/array_json_schema_property.py new file mode 100644 index 00000000..bc694678 --- /dev/null +++ b/src/elevenlabs/types/array_json_schema_property.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class ArrayJsonSchemaProperty(UncheckedBaseModel): + type: typing.Optional[typing.Literal["array"]] = None + items: "ArrayJsonSchemaPropertyItems" + description: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .object_json_schema_property import ObjectJsonSchemaProperty # noqa: E402 +from .array_json_schema_property_items import ArrayJsonSchemaPropertyItems # noqa: E402 + +update_forward_refs(ObjectJsonSchemaProperty, ArrayJsonSchemaProperty=ArrayJsonSchemaProperty) +update_forward_refs(ArrayJsonSchemaProperty) diff --git a/src/elevenlabs/types/array_json_schema_property_items.py b/src/elevenlabs/types/array_json_schema_property_items.py new file mode 100644 index 00000000..ed27a106 --- /dev/null +++ b/src/elevenlabs/types/array_json_schema_property_items.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +import typing +from .literal_json_schema_property import LiteralJsonSchemaProperty +import typing + +if typing.TYPE_CHECKING: + from .object_json_schema_property import ObjectJsonSchemaProperty + from .array_json_schema_property import ArrayJsonSchemaProperty +ArrayJsonSchemaPropertyItems = typing.Union[ + LiteralJsonSchemaProperty, "ObjectJsonSchemaProperty", "ArrayJsonSchemaProperty" +] diff --git a/src/elevenlabs/types/asr_conversational_config.py b/src/elevenlabs/types/asr_conversational_config.py new file mode 100644 index 00000000..125c8335 --- /dev/null +++ b/src/elevenlabs/types/asr_conversational_config.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .asr_quality import AsrQuality +from .asr_provider import AsrProvider +from .asr_input_format import AsrInputFormat +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class AsrConversationalConfig(UncheckedBaseModel): + quality: typing.Optional[AsrQuality] = None + provider: typing.Optional[AsrProvider] = None + user_input_audio_format: typing.Optional[AsrInputFormat] = None + keywords: typing.Optional[typing.List[str]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/asr_input_format.py b/src/elevenlabs/types/asr_input_format.py new file mode 100644 index 00000000..5d0623d4 --- /dev/null +++ b/src/elevenlabs/types/asr_input_format.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +AsrInputFormat = typing.Union[ + typing.Literal["pcm_16000", "pcm_22050", "pcm_24000", "pcm_44100", "ulaw_8000"], typing.Any +] diff --git a/src/elevenlabs/types/asr_provider.py b/src/elevenlabs/types/asr_provider.py new file mode 100644 index 00000000..af99d4a7 --- /dev/null +++ b/src/elevenlabs/types/asr_provider.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsrProvider = typing.Literal["elevenlabs"] diff --git a/src/elevenlabs/types/asr_quality.py b/src/elevenlabs/types/asr_quality.py new file mode 100644 index 00000000..b0f39063 --- /dev/null +++ b/src/elevenlabs/types/asr_quality.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsrQuality = typing.Literal["high"] diff --git a/src/elevenlabs/types/auth_settings.py b/src/elevenlabs/types/auth_settings.py new file mode 100644 index 00000000..f673dd81 --- /dev/null +++ b/src/elevenlabs/types/auth_settings.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .allowlist_item import AllowlistItem +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class AuthSettings(UncheckedBaseModel): + enable_auth: typing.Optional[bool] = None + allowlist: typing.Optional[typing.List[AllowlistItem]] = None + shareable_token: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/authorization_method.py b/src/elevenlabs/types/authorization_method.py new file mode 100644 index 00000000..7605e0df --- /dev/null +++ b/src/elevenlabs/types/authorization_method.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AuthorizationMethod = typing.Union[ + typing.Literal["public", "authorization_header", "signed_url", "shareable_link"], typing.Any +] diff --git a/src/elevenlabs/types/ban_reason_type.py b/src/elevenlabs/types/ban_reason_type.py new file mode 100644 index 00000000..81accd21 --- /dev/null +++ b/src/elevenlabs/types/ban_reason_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +BanReasonType = typing.Union[typing.Literal["safety", "manual"], typing.Any] diff --git a/src/elevenlabs/types/breakdown_types.py b/src/elevenlabs/types/breakdown_types.py index addda63e..cc299406 100644 --- a/src/elevenlabs/types/breakdown_types.py +++ b/src/elevenlabs/types/breakdown_types.py @@ -3,5 +3,5 @@ import typing BreakdownTypes = typing.Union[ - typing.Literal["none", "voice", "user", "api_keys", "all_api_keys", "product_type", "model"], typing.Any + typing.Literal["none", "voice", "user", "api_keys", "all_api_keys", "product_type", "model", "resource"], typing.Any ] diff --git a/src/elevenlabs/types/chapter_response.py b/src/elevenlabs/types/chapter_response.py index 7193b118..192804d6 100644 --- a/src/elevenlabs/types/chapter_response.py +++ b/src/elevenlabs/types/chapter_response.py @@ -16,6 +16,7 @@ class ChapterResponse(UncheckedBaseModel): can_be_downloaded: bool state: ChapterState statistics: typing.Optional[ChapterStatisticsResponse] = None + last_conversion_error: typing.Optional[str] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/client_event.py b/src/elevenlabs/types/client_event.py new file mode 100644 index 00000000..5152c634 --- /dev/null +++ b/src/elevenlabs/types/client_event.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +ClientEvent = typing.Union[ + typing.Literal[ + "conversation_initiation_metadata", + "asr_initiation_metadata", + "ping", + "audio", + "interruption", + "user_transcript", + "agent_response", + "agent_response_correction", + "client_tool_call", + "internal_vad_score", + "internal_turn_probability", + "internal_tentative_agent_response", + ], + typing.Any, +] diff --git a/src/elevenlabs/types/client_tool_config.py b/src/elevenlabs/types/client_tool_config.py new file mode 100644 index 00000000..72762e08 --- /dev/null +++ b/src/elevenlabs/types/client_tool_config.py @@ -0,0 +1,35 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +from .array_json_schema_property import ArrayJsonSchemaProperty +from .object_json_schema_property import ObjectJsonSchemaProperty +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class ClientToolConfig(UncheckedBaseModel): + """ + A client tool is one that sends an event to the user's client to trigger something client side + """ + + name: str + description: str + parameters: typing.Optional[ObjectJsonSchemaProperty] = None + expects_response: typing.Optional[bool] = None + response_timeout_secs: typing.Optional[int] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(ArrayJsonSchemaProperty, ClientToolConfig=ClientToolConfig) +update_forward_refs(ObjectJsonSchemaProperty, ClientToolConfig=ClientToolConfig) diff --git a/src/elevenlabs/types/conv_ai_new_secret_config.py b/src/elevenlabs/types/conv_ai_new_secret_config.py new file mode 100644 index 00000000..4276a25e --- /dev/null +++ 
b/src/elevenlabs/types/conv_ai_new_secret_config.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class ConvAiNewSecretConfig(UncheckedBaseModel): + name: str + value: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conv_ai_secret_locator.py b/src/elevenlabs/types/conv_ai_secret_locator.py new file mode 100644 index 00000000..9aa49a80 --- /dev/null +++ b/src/elevenlabs/types/conv_ai_secret_locator.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class ConvAiSecretLocator(UncheckedBaseModel): + """ + Used to reference a secret from the agent's secret store. + """ + + secret_id: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conv_ai_stored_secret_config.py b/src/elevenlabs/types/conv_ai_stored_secret_config.py new file mode 100644 index 00000000..316c978d --- /dev/null +++ b/src/elevenlabs/types/conv_ai_stored_secret_config.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class ConvAiStoredSecretConfig(UncheckedBaseModel): + secret_id: str + name: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conversation_charging_common_model.py b/src/elevenlabs/types/conversation_charging_common_model.py new file mode 100644 index 00000000..cfbf5468 --- /dev/null +++ b/src/elevenlabs/types/conversation_charging_common_model.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConversationChargingCommonModel(UncheckedBaseModel): + dev_discount: typing.Optional[bool] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conversation_config.py b/src/elevenlabs/types/conversation_config.py new file mode 100644 index 00000000..d0e80ee1 --- /dev/null +++ b/src/elevenlabs/types/conversation_config.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .client_event import ClientEvent +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConversationConfig(UncheckedBaseModel): + max_duration_seconds: typing.Optional[int] = None + client_events: typing.Optional[typing.List[ClientEvent]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conversation_config_client_override.py b/src/elevenlabs/types/conversation_config_client_override.py new file mode 100644 index 00000000..d8a89f24 --- /dev/null +++ b/src/elevenlabs/types/conversation_config_client_override.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .agent_config_override import AgentConfigOverride +from .tts_conversational_config_override import TtsConversationalConfigOverride +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConversationConfigClientOverride(UncheckedBaseModel): + agent: typing.Optional[AgentConfigOverride] = None + tts: typing.Optional[TtsConversationalConfigOverride] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conversation_history_analysis_common_model.py b/src/elevenlabs/types/conversation_history_analysis_common_model.py new file mode 100644 index 00000000..cfbbe147 --- /dev/null +++ b/src/elevenlabs/types/conversation_history_analysis_common_model.py @@ -0,0 +1,29 @@ +# This file was auto-generated by 
Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .conversation_history_evaluation_criteria_result_common_model import ( + ConversationHistoryEvaluationCriteriaResultCommonModel, +) +from .data_collection_result_common_model import DataCollectionResultCommonModel +from .evaluation_success_result import EvaluationSuccessResult +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConversationHistoryAnalysisCommonModel(UncheckedBaseModel): + evaluation_criteria_results: typing.Optional[ + typing.Dict[str, ConversationHistoryEvaluationCriteriaResultCommonModel] + ] = None + data_collection_results: typing.Optional[typing.Dict[str, DataCollectionResultCommonModel]] = None + call_successful: EvaluationSuccessResult + transcript_summary: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conversation_history_evaluation_criteria_result_common_model.py b/src/elevenlabs/types/conversation_history_evaluation_criteria_result_common_model.py new file mode 100644 index 00000000..af659a80 --- /dev/null +++ b/src/elevenlabs/types/conversation_history_evaluation_criteria_result_common_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +from .evaluation_success_result import EvaluationSuccessResult +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class ConversationHistoryEvaluationCriteriaResultCommonModel(UncheckedBaseModel): + criteria_id: str + result: EvaluationSuccessResult + rationale: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conversation_history_metadata_common_model.py b/src/elevenlabs/types/conversation_history_metadata_common_model.py new file mode 100644 index 00000000..de108d43 --- /dev/null +++ b/src/elevenlabs/types/conversation_history_metadata_common_model.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .authorization_method import AuthorizationMethod +from .conversation_charging_common_model import ConversationChargingCommonModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConversationHistoryMetadataCommonModel(UncheckedBaseModel): + start_time_unix_secs: int + call_duration_secs: int + cost: typing.Optional[int] = None + authorization_method: typing.Optional[AuthorizationMethod] = None + charging: typing.Optional[ConversationChargingCommonModel] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conversation_history_transcript_common_model.py b/src/elevenlabs/types/conversation_history_transcript_common_model.py new file mode 100644 index 
00000000..3285d888 --- /dev/null +++ b/src/elevenlabs/types/conversation_history_transcript_common_model.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from .conversation_history_transcript_common_model_role import ConversationHistoryTranscriptCommonModelRole +import typing +from .conversation_history_transcript_tool_call_common_model import ConversationHistoryTranscriptToolCallCommonModel +from .conversation_history_transcript_tool_result_common_model import ConversationHistoryTranscriptToolResultCommonModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConversationHistoryTranscriptCommonModel(UncheckedBaseModel): + role: ConversationHistoryTranscriptCommonModelRole + message: typing.Optional[str] = None + tool_calls: typing.Optional[typing.List[ConversationHistoryTranscriptToolCallCommonModel]] = None + tool_results: typing.Optional[typing.List[ConversationHistoryTranscriptToolResultCommonModel]] = None + time_in_call_secs: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conversation_history_transcript_common_model_role.py b/src/elevenlabs/types/conversation_history_transcript_common_model_role.py new file mode 100644 index 00000000..1964c6f2 --- /dev/null +++ b/src/elevenlabs/types/conversation_history_transcript_common_model_role.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +ConversationHistoryTranscriptCommonModelRole = typing.Union[typing.Literal["user", "agent"], typing.Any] diff --git a/src/elevenlabs/types/conversation_history_transcript_tool_call_common_model.py b/src/elevenlabs/types/conversation_history_transcript_tool_call_common_model.py new file mode 100644 index 00000000..1afe0502 --- /dev/null +++ b/src/elevenlabs/types/conversation_history_transcript_tool_call_common_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class ConversationHistoryTranscriptToolCallCommonModel(UncheckedBaseModel): + request_id: str + tool_name: str + params_as_json: str + tool_has_been_called: bool + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conversation_history_transcript_tool_result_common_model.py b/src/elevenlabs/types/conversation_history_transcript_tool_result_common_model.py new file mode 100644 index 00000000..15ef7ff3 --- /dev/null +++ b/src/elevenlabs/types/conversation_history_transcript_tool_result_common_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class ConversationHistoryTranscriptToolResultCommonModel(UncheckedBaseModel): + request_id: str + tool_name: str + result_value: str + is_error: bool + tool_has_been_called: bool + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conversation_initiation_client_data.py b/src/elevenlabs/types/conversation_initiation_client_data.py new file mode 100644 index 00000000..f98379ec --- /dev/null +++ b/src/elevenlabs/types/conversation_initiation_client_data.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .conversation_config_client_override import ConversationConfigClientOverride +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConversationInitiationClientData(UncheckedBaseModel): + conversation_config_override: typing.Optional[ConversationConfigClientOverride] = None + custom_llm_extra_body: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conversation_signed_url_response_model.py b/src/elevenlabs/types/conversation_signed_url_response_model.py new file mode 100644 index 00000000..b38e5f87 --- /dev/null +++ b/src/elevenlabs/types/conversation_signed_url_response_model.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API 
Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class ConversationSignedUrlResponseModel(UncheckedBaseModel): + signed_url: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conversation_summary_response_model.py b/src/elevenlabs/types/conversation_summary_response_model.py new file mode 100644 index 00000000..19106299 --- /dev/null +++ b/src/elevenlabs/types/conversation_summary_response_model.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .conversation_summary_response_model_status import ConversationSummaryResponseModelStatus +from .evaluation_success_result import EvaluationSuccessResult +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConversationSummaryResponseModel(UncheckedBaseModel): + agent_id: str + agent_name: typing.Optional[str] = None + conversation_id: str + start_time_unix_secs: int + call_duration_secs: int + message_count: int + status: ConversationSummaryResponseModelStatus + call_successful: EvaluationSuccessResult + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conversation_summary_response_model_status.py b/src/elevenlabs/types/conversation_summary_response_model_status.py new file mode 100644 index 00000000..4baceca3 --- /dev/null +++ b/src/elevenlabs/types/conversation_summary_response_model_status.py @@ -0,0 
+1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ConversationSummaryResponseModelStatus = typing.Union[typing.Literal["processing", "done"], typing.Any] diff --git a/src/elevenlabs/types/conversation_token_db_model.py b/src/elevenlabs/types/conversation_token_db_model.py new file mode 100644 index 00000000..9107ab95 --- /dev/null +++ b/src/elevenlabs/types/conversation_token_db_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .conversation_token_purpose import ConversationTokenPurpose +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConversationTokenDbModel(UncheckedBaseModel): + agent_id: str + conversation_token: str + expiration_time_unix_secs: typing.Optional[int] = None + purpose: typing.Optional[ConversationTokenPurpose] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conversation_token_purpose.py b/src/elevenlabs/types/conversation_token_purpose.py new file mode 100644 index 00000000..bfaccef4 --- /dev/null +++ b/src/elevenlabs/types/conversation_token_purpose.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ConversationTokenPurpose = typing.Union[typing.Literal["signed_url", "shareable_link"], typing.Any] diff --git a/src/elevenlabs/types/conversational_config.py b/src/elevenlabs/types/conversational_config.py new file mode 100644 index 00000000..0fa91dc0 --- /dev/null +++ b/src/elevenlabs/types/conversational_config.py @@ -0,0 +1,36 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +from .array_json_schema_property import ArrayJsonSchemaProperty +from .object_json_schema_property import ObjectJsonSchemaProperty +import typing +from .agent_config import AgentConfig +from .asr_conversational_config import AsrConversationalConfig +from .turn_config import TurnConfig +from .tts_conversational_config import TtsConversationalConfig +from .conversation_config import ConversationConfig +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class ConversationalConfig(UncheckedBaseModel): + agent: typing.Optional[AgentConfig] = None + asr: typing.Optional[AsrConversationalConfig] = None + turn: typing.Optional[TurnConfig] = None + tts: typing.Optional[TtsConversationalConfig] = None + conversation: typing.Optional[ConversationConfig] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(ArrayJsonSchemaProperty, ConversationalConfig=ConversationalConfig) +update_forward_refs(ObjectJsonSchemaProperty, ConversationalConfig=ConversationalConfig) diff --git a/src/elevenlabs/types/create_agent_response_model.py b/src/elevenlabs/types/create_agent_response_model.py new file mode 100644 index 00000000..48aede97 --- /dev/null +++ b/src/elevenlabs/types/create_agent_response_model.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class CreateAgentResponseModel(UncheckedBaseModel): + agent_id: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/custom_llm.py b/src/elevenlabs/types/custom_llm.py new file mode 100644 index 00000000..5c4a570b --- /dev/null +++ b/src/elevenlabs/types/custom_llm.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .conv_ai_secret_locator import ConvAiSecretLocator +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class CustomLlm(UncheckedBaseModel): + url: str + model_id: typing.Optional[str] = None + api_key: typing.Optional[ConvAiSecretLocator] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/data_collection_result_common_model.py b/src/elevenlabs/types/data_collection_result_common_model.py new file mode 100644 index 00000000..1c4856ba --- /dev/null +++ b/src/elevenlabs/types/data_collection_result_common_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .literal_json_schema_property import LiteralJsonSchemaProperty +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class DataCollectionResultCommonModel(UncheckedBaseModel): + data_collection_id: str + value: typing.Optional[typing.Optional[typing.Any]] = None + json_schema: typing.Optional[LiteralJsonSchemaProperty] = None + rationale: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/embed_config.py b/src/elevenlabs/types/embed_config.py new file mode 100644 index 00000000..d67f2d24 --- /dev/null +++ b/src/elevenlabs/types/embed_config.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .embed_variant import EmbedVariant +from .embed_config_avatar import EmbedConfigAvatar +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EmbedConfig(UncheckedBaseModel): + variant: typing.Optional[EmbedVariant] = None + avatar: typing.Optional[EmbedConfigAvatar] = None + custom_avatar_path: typing.Optional[str] = None + bg_color: typing.Optional[str] = None + text_color: typing.Optional[str] = None + btn_color: typing.Optional[str] = None + btn_text_color: typing.Optional[str] = None + border_color: typing.Optional[str] = None + focus_color: typing.Optional[str] = None + border_radius: typing.Optional[int] = None + btn_radius: typing.Optional[int] = None + action_text: typing.Optional[str] = None + start_call_text: typing.Optional[str] = None + end_call_text: typing.Optional[str] = None + expand_text: typing.Optional[str] = None + listening_text: typing.Optional[str] = None + speaking_text: 
typing.Optional[str] = None + shareable_page_text: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/embed_config_avatar.py b/src/elevenlabs/types/embed_config_avatar.py new file mode 100644 index 00000000..13699ead --- /dev/null +++ b/src/elevenlabs/types/embed_config_avatar.py @@ -0,0 +1,58 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +import typing_extensions +from ..core.unchecked_base_model import UnionMetadata + + +class EmbedConfigAvatar_Orb(UncheckedBaseModel): + type: typing.Literal["orb"] = "orb" + color_1: typing.Optional[str] = None + color_2: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class EmbedConfigAvatar_Url(UncheckedBaseModel): + type: typing.Literal["url"] = "url" + custom_url: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class EmbedConfigAvatar_Image(UncheckedBaseModel): + type: typing.Literal["image"] = "image" + url: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class 
Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +EmbedConfigAvatar = typing_extensions.Annotated[ + typing.Union[EmbedConfigAvatar_Orb, EmbedConfigAvatar_Url, EmbedConfigAvatar_Image], + UnionMetadata(discriminant="type"), +] diff --git a/src/elevenlabs/types/embed_variant.py b/src/elevenlabs/types/embed_variant.py new file mode 100644 index 00000000..3ad72931 --- /dev/null +++ b/src/elevenlabs/types/embed_variant.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +EmbedVariant = typing.Union[typing.Literal["compact", "full", "expandable"], typing.Any] diff --git a/src/elevenlabs/types/evaluation_settings.py b/src/elevenlabs/types/evaluation_settings.py new file mode 100644 index 00000000..ed0dd534 --- /dev/null +++ b/src/elevenlabs/types/evaluation_settings.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .prompt_evaluation_criteria import PromptEvaluationCriteria +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EvaluationSettings(UncheckedBaseModel): + """ + Settings to evaluate an agent's performance. + Agents are evaluated against a set of criteria, with success being defined as meeting some combination of those criteria. 
+ """ + + criteria: typing.Optional[typing.List[PromptEvaluationCriteria]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/evaluation_success_result.py b/src/elevenlabs/types/evaluation_success_result.py new file mode 100644 index 00000000..3d18d896 --- /dev/null +++ b/src/elevenlabs/types/evaluation_success_result.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +EvaluationSuccessResult = typing.Union[typing.Literal["success", "failure", "unknown"], typing.Any] diff --git a/src/elevenlabs/types/get_agent_embed_response_model.py b/src/elevenlabs/types/get_agent_embed_response_model.py new file mode 100644 index 00000000..760be2fc --- /dev/null +++ b/src/elevenlabs/types/get_agent_embed_response_model.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from .embed_config import EmbedConfig +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class GetAgentEmbedResponseModel(UncheckedBaseModel): + agent_id: str + widget_config: EmbedConfig + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/get_agent_link_response_model.py b/src/elevenlabs/types/get_agent_link_response_model.py new file mode 100644 index 00000000..95789179 --- /dev/null +++ b/src/elevenlabs/types/get_agent_link_response_model.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .conversation_token_db_model import ConversationTokenDbModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class GetAgentLinkResponseModel(UncheckedBaseModel): + agent_id: str + token: typing.Optional[ConversationTokenDbModel] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/get_agent_response_model.py b/src/elevenlabs/types/get_agent_response_model.py new file mode 100644 index 00000000..b23ea801 --- /dev/null +++ b/src/elevenlabs/types/get_agent_response_model.py @@ -0,0 +1,36 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +from .array_json_schema_property import ArrayJsonSchemaProperty +from .object_json_schema_property import ObjectJsonSchemaProperty +from .conversational_config import ConversationalConfig +from .agent_metadata_response_model import AgentMetadataResponseModel +import typing +from .agent_platform_settings import AgentPlatformSettings +from .conv_ai_stored_secret_config import ConvAiStoredSecretConfig +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class GetAgentResponseModel(UncheckedBaseModel): + agent_id: str + name: str + conversation_config: ConversationalConfig + metadata: AgentMetadataResponseModel + platform_settings: typing.Optional[AgentPlatformSettings] = None + secrets: typing.List[ConvAiStoredSecretConfig] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: 
+ frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(ArrayJsonSchemaProperty, GetAgentResponseModel=GetAgentResponseModel) +update_forward_refs(ObjectJsonSchemaProperty, GetAgentResponseModel=GetAgentResponseModel) diff --git a/src/elevenlabs/types/get_agents_page_response_model.py b/src/elevenlabs/types/get_agents_page_response_model.py new file mode 100644 index 00000000..5170a9ec --- /dev/null +++ b/src/elevenlabs/types/get_agents_page_response_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .agent_summary_response_model import AgentSummaryResponseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class GetAgentsPageResponseModel(UncheckedBaseModel): + agents: typing.List[AgentSummaryResponseModel] + next_cursor: typing.Optional[str] = None + has_more: bool + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/get_conversation_response_model.py b/src/elevenlabs/types/get_conversation_response_model.py new file mode 100644 index 00000000..1dc49d50 --- /dev/null +++ b/src/elevenlabs/types/get_conversation_response_model.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +from .get_conversation_response_model_status import GetConversationResponseModelStatus +import typing +from .conversation_history_transcript_common_model import ConversationHistoryTranscriptCommonModel +from .conversation_history_metadata_common_model import ConversationHistoryMetadataCommonModel +from .conversation_history_analysis_common_model import ConversationHistoryAnalysisCommonModel +from .conversation_initiation_client_data import ConversationInitiationClientData +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class GetConversationResponseModel(UncheckedBaseModel): + agent_id: str + conversation_id: str + status: GetConversationResponseModelStatus + transcript: typing.List[ConversationHistoryTranscriptCommonModel] + metadata: ConversationHistoryMetadataCommonModel + analysis: typing.Optional[ConversationHistoryAnalysisCommonModel] = None + conversation_initiation_client_data: typing.Optional[ConversationInitiationClientData] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/get_conversation_response_model_status.py b/src/elevenlabs/types/get_conversation_response_model_status.py new file mode 100644 index 00000000..e104d5c0 --- /dev/null +++ b/src/elevenlabs/types/get_conversation_response_model_status.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +GetConversationResponseModelStatus = typing.Union[typing.Literal["processing", "done"], typing.Any] diff --git a/src/elevenlabs/types/get_conversations_page_response_model.py b/src/elevenlabs/types/get_conversations_page_response_model.py new file mode 100644 index 00000000..4deefb52 --- /dev/null +++ b/src/elevenlabs/types/get_conversations_page_response_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .conversation_summary_response_model import ConversationSummaryResponseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class GetConversationsPageResponseModel(UncheckedBaseModel): + conversations: typing.List[ConversationSummaryResponseModel] + next_cursor: typing.Optional[str] = None + has_more: bool + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/get_knowledge_base_reponse_model.py b/src/elevenlabs/types/get_knowledge_base_reponse_model.py new file mode 100644 index 00000000..2390d765 --- /dev/null +++ b/src/elevenlabs/types/get_knowledge_base_reponse_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +from .get_knowledge_base_reponse_model_type import GetKnowledgeBaseReponseModelType +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class GetKnowledgeBaseReponseModel(UncheckedBaseModel): + id: str + type: GetKnowledgeBaseReponseModelType + extracted_inner_html: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/get_knowledge_base_reponse_model_type.py b/src/elevenlabs/types/get_knowledge_base_reponse_model_type.py new file mode 100644 index 00000000..d8904ba3 --- /dev/null +++ b/src/elevenlabs/types/get_knowledge_base_reponse_model_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +GetKnowledgeBaseReponseModelType = typing.Union[typing.Literal["file", "url"], typing.Any] diff --git a/src/elevenlabs/types/image_avatar.py b/src/elevenlabs/types/image_avatar.py new file mode 100644 index 00000000..5b5fed9c --- /dev/null +++ b/src/elevenlabs/types/image_avatar.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ImageAvatar(UncheckedBaseModel): + url: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/knowledge_base_locator.py b/src/elevenlabs/types/knowledge_base_locator.py new file mode 100644 index 00000000..95aa389c --- /dev/null +++ b/src/elevenlabs/types/knowledge_base_locator.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from .knowledge_base_locator_type import KnowledgeBaseLocatorType +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class KnowledgeBaseLocator(UncheckedBaseModel): + type: KnowledgeBaseLocatorType + name: str + id: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/knowledge_base_locator_type.py b/src/elevenlabs/types/knowledge_base_locator_type.py new file mode 100644 index 00000000..074d02b7 --- /dev/null +++ b/src/elevenlabs/types/knowledge_base_locator_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +KnowledgeBaseLocatorType = typing.Union[typing.Literal["file", "url"], typing.Any] diff --git a/src/elevenlabs/types/literal_json_schema_property.py b/src/elevenlabs/types/literal_json_schema_property.py new file mode 100644 index 00000000..76fa90fa --- /dev/null +++ b/src/elevenlabs/types/literal_json_schema_property.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from .literal_json_schema_property_type import LiteralJsonSchemaPropertyType +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class LiteralJsonSchemaProperty(UncheckedBaseModel): + type: LiteralJsonSchemaPropertyType + description: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/literal_json_schema_property_type.py b/src/elevenlabs/types/literal_json_schema_property_type.py new file mode 100644 index 00000000..f3ddb1f4 --- /dev/null +++ b/src/elevenlabs/types/literal_json_schema_property_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LiteralJsonSchemaPropertyType = typing.Union[typing.Literal["boolean", "string", "integer", "number"], typing.Any] diff --git a/src/elevenlabs/types/llm.py b/src/elevenlabs/types/llm.py new file mode 100644 index 00000000..313f9d0a --- /dev/null +++ b/src/elevenlabs/types/llm.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +Llm = typing.Union[ + typing.Literal[ + "gpt-4o-mini", + "gpt-4o", + "gpt-4", + "gpt-4-turbo", + "gpt-3.5-turbo", + "gemini-1.5-pro", + "gemini-1.5-flash", + "gemini-1.0-pro", + "claude-3-5-sonnet", + "claude-3-haiku", + "grok-beta", + "custom-llm", + ], + typing.Any, +] diff --git a/src/elevenlabs/types/object_json_schema_property.py b/src/elevenlabs/types/object_json_schema_property.py new file mode 100644 index 00000000..4ec5fd8b --- /dev/null +++ b/src/elevenlabs/types/object_json_schema_property.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class ObjectJsonSchemaProperty(UncheckedBaseModel): + type: typing.Optional[typing.Literal["object"]] = None + properties: typing.Optional[typing.Dict[str, "ObjectJsonSchemaPropertyPropertiesValue"]] = None + required: typing.Optional[typing.List[str]] = None + description: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .object_json_schema_property_properties_value import ObjectJsonSchemaPropertyPropertiesValue # noqa: E402 + +update_forward_refs(ObjectJsonSchemaProperty) diff --git a/src/elevenlabs/types/object_json_schema_property_properties_value.py b/src/elevenlabs/types/object_json_schema_property_properties_value.py new file mode 100644 index 00000000..20b89511 --- /dev/null +++ b/src/elevenlabs/types/object_json_schema_property_properties_value.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +import typing +from .literal_json_schema_property import LiteralJsonSchemaProperty +import typing + +if typing.TYPE_CHECKING: + from .object_json_schema_property import ObjectJsonSchemaProperty + from .array_json_schema_property import ArrayJsonSchemaProperty +ObjectJsonSchemaPropertyPropertiesValue = typing.Union[ + LiteralJsonSchemaProperty, "ObjectJsonSchemaProperty", "ArrayJsonSchemaProperty" +] diff --git a/src/elevenlabs/types/orb_avatar.py b/src/elevenlabs/types/orb_avatar.py new file mode 100644 index 00000000..ff39f856 --- /dev/null +++ b/src/elevenlabs/types/orb_avatar.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class OrbAvatar(UncheckedBaseModel): + color_1: typing.Optional[str] = None + color_2: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/post_agent_avatar_response_model.py b/src/elevenlabs/types/post_agent_avatar_response_model.py new file mode 100644 index 00000000..3b56a774 --- /dev/null +++ b/src/elevenlabs/types/post_agent_avatar_response_model.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class PostAgentAvatarResponseModel(UncheckedBaseModel): + agent_id: str + avatar_url: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/project_extended_response_model.py b/src/elevenlabs/types/project_extended_response_model.py index 87b9dcab..9f9d4706 100644 --- a/src/elevenlabs/types/project_extended_response_model.py +++ b/src/elevenlabs/types/project_extended_response_model.py @@ -5,9 +5,11 @@ from .project_extended_response_model_target_audience import ProjectExtendedResponseModelTargetAudience from .project_state import ProjectState from .project_extended_response_model_access_level import ProjectExtendedResponseModelAccessLevel +from .project_extended_response_model_fiction import ProjectExtendedResponseModelFiction from .project_extended_response_model_quality_preset import ProjectExtendedResponseModelQualityPreset from .chapter_response import ChapterResponse from .pronunciation_dictionary_version_response_model import PronunciationDictionaryVersionResponseModel +from .project_extended_response_model_apply_text_normalization import ProjectExtendedResponseModelApplyTextNormalization from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic @@ -35,9 +37,13 @@ class ProjectExtendedResponseModel(UncheckedBaseModel): volume_normalization: bool state: ProjectState access_level: ProjectExtendedResponseModelAccessLevel + fiction: typing.Optional[ProjectExtendedResponseModelFiction] = None + quality_check_on: bool + quality_check_on_when_bulk_convert: bool quality_preset: ProjectExtendedResponseModelQualityPreset chapters: 
typing.List[ChapterResponse] pronunciation_dictionary_versions: typing.List[PronunciationDictionaryVersionResponseModel] + apply_text_normalization: ProjectExtendedResponseModelApplyTextNormalization experimental: typing.Dict[str, typing.Optional[typing.Any]] if IS_PYDANTIC_V2: diff --git a/src/elevenlabs/types/project_extended_response_model_apply_text_normalization.py b/src/elevenlabs/types/project_extended_response_model_apply_text_normalization.py new file mode 100644 index 00000000..490a9abf --- /dev/null +++ b/src/elevenlabs/types/project_extended_response_model_apply_text_normalization.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ProjectExtendedResponseModelApplyTextNormalization = typing.Union[ + typing.Literal["auto", "on", "off", "apply_english"], typing.Any +] diff --git a/src/elevenlabs/types/project_extended_response_model_fiction.py b/src/elevenlabs/types/project_extended_response_model_fiction.py new file mode 100644 index 00000000..0c54e149 --- /dev/null +++ b/src/elevenlabs/types/project_extended_response_model_fiction.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +ProjectExtendedResponseModelFiction = typing.Union[typing.Literal["fiction", "non-fiction"], typing.Any] diff --git a/src/elevenlabs/types/project_response.py b/src/elevenlabs/types/project_response.py index 5da3d836..dbc496b4 100644 --- a/src/elevenlabs/types/project_response.py +++ b/src/elevenlabs/types/project_response.py @@ -5,6 +5,7 @@ from .project_response_model_target_audience import ProjectResponseModelTargetAudience from .project_state import ProjectState from .project_response_model_access_level import ProjectResponseModelAccessLevel +from .project_response_model_fiction import ProjectResponseModelFiction from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic @@ -32,6 +33,9 @@ class ProjectResponse(UncheckedBaseModel): volume_normalization: bool state: ProjectState access_level: ProjectResponseModelAccessLevel + fiction: typing.Optional[ProjectResponseModelFiction] = None + quality_check_on: bool + quality_check_on_when_bulk_convert: bool if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/project_response_model_fiction.py b/src/elevenlabs/types/project_response_model_fiction.py new file mode 100644 index 00000000..04a90ca4 --- /dev/null +++ b/src/elevenlabs/types/project_response_model_fiction.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ProjectResponseModelFiction = typing.Union[typing.Literal["fiction", "non-fiction"], typing.Any] diff --git a/src/elevenlabs/types/prompt_agent.py b/src/elevenlabs/types/prompt_agent.py new file mode 100644 index 00000000..b3b60717 --- /dev/null +++ b/src/elevenlabs/types/prompt_agent.py @@ -0,0 +1,37 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +from .array_json_schema_property import ArrayJsonSchemaProperty +from .object_json_schema_property import ObjectJsonSchemaProperty +import typing +from .llm import Llm +from .prompt_agent_tools_item import PromptAgentToolsItem +from .knowledge_base_locator import KnowledgeBaseLocator +from .custom_llm import CustomLlm +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class PromptAgent(UncheckedBaseModel): + prompt: typing.Optional[str] = None + llm: typing.Optional[Llm] = None + temperature: typing.Optional[float] = None + max_tokens: typing.Optional[int] = None + tools: typing.Optional[typing.List[PromptAgentToolsItem]] = None + knowledge_base: typing.Optional[typing.List[KnowledgeBaseLocator]] = None + custom_llm: typing.Optional[CustomLlm] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(ArrayJsonSchemaProperty, PromptAgent=PromptAgent) +update_forward_refs(ObjectJsonSchemaProperty, PromptAgent=PromptAgent) diff --git a/src/elevenlabs/types/prompt_agent_override.py b/src/elevenlabs/types/prompt_agent_override.py new file mode 100644 index 00000000..2ca03954 --- /dev/null +++ b/src/elevenlabs/types/prompt_agent_override.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class PromptAgentOverride(UncheckedBaseModel): + prompt: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/prompt_agent_tools_item.py b/src/elevenlabs/types/prompt_agent_tools_item.py new file mode 100644 index 00000000..410df2c1 --- /dev/null +++ b/src/elevenlabs/types/prompt_agent_tools_item.py @@ -0,0 +1,56 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +from .array_json_schema_property import ArrayJsonSchemaProperty +from .object_json_schema_property import ObjectJsonSchemaProperty +import typing +from .webhook_tool_api_schema_config import WebhookToolApiSchemaConfig +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +import typing_extensions +from ..core.unchecked_base_model import UnionMetadata +from ..core.pydantic_utilities import update_forward_refs + + +class PromptAgentToolsItem_Webhook(UncheckedBaseModel): + type: typing.Literal["webhook"] = "webhook" + name: str + description: str + api_schema: WebhookToolApiSchemaConfig + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class PromptAgentToolsItem_Client(UncheckedBaseModel): + type: typing.Literal["client"] = "client" + name: str + description: str + parameters: typing.Optional[ObjectJsonSchemaProperty] = None + expects_response: typing.Optional[bool] = None + 
response_timeout_secs: typing.Optional[int] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +PromptAgentToolsItem = typing_extensions.Annotated[ + typing.Union[PromptAgentToolsItem_Webhook, PromptAgentToolsItem_Client], UnionMetadata(discriminant="type") +] +update_forward_refs(ArrayJsonSchemaProperty, PromptAgentToolsItem_Webhook=PromptAgentToolsItem_Webhook) +update_forward_refs(ObjectJsonSchemaProperty, PromptAgentToolsItem_Webhook=PromptAgentToolsItem_Webhook) +update_forward_refs(ArrayJsonSchemaProperty, PromptAgentToolsItem_Client=PromptAgentToolsItem_Client) +update_forward_refs(ObjectJsonSchemaProperty, PromptAgentToolsItem_Client=PromptAgentToolsItem_Client) diff --git a/src/elevenlabs/types/prompt_evaluation_criteria.py b/src/elevenlabs/types/prompt_evaluation_criteria.py new file mode 100644 index 00000000..23a56890 --- /dev/null +++ b/src/elevenlabs/types/prompt_evaluation_criteria.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class PromptEvaluationCriteria(UncheckedBaseModel): + """ + An evaluation using the transcript and a prompt for a yes/no achieved answer + """ + + id: str + name: typing.Optional[str] = None + type: typing.Optional[typing.Literal["prompt"]] = None + conversation_goal_prompt: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/pydantic_pronunciation_dictionary_version_locator.py b/src/elevenlabs/types/pydantic_pronunciation_dictionary_version_locator.py new file mode 100644 index 00000000..e967a2f7 --- /dev/null +++ b/src/elevenlabs/types/pydantic_pronunciation_dictionary_version_locator.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class PydanticPronunciationDictionaryVersionLocator(UncheckedBaseModel): + """ + A locator for other documents to be able to reference a specific dictionary and it's version. + This is a pydantic version of PronunciationDictionaryVersionLocatorDBModel. + Required to ensure compat with the rest of the agent data models. 
+ """ + + pronunciation_dictionary_id: str + version_id: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/query_params_json_schema.py b/src/elevenlabs/types/query_params_json_schema.py new file mode 100644 index 00000000..0de3881a --- /dev/null +++ b/src/elevenlabs/types/query_params_json_schema.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .literal_json_schema_property import LiteralJsonSchemaProperty +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class QueryParamsJsonSchema(UncheckedBaseModel): + properties: typing.Dict[str, LiteralJsonSchemaProperty] + required: typing.Optional[typing.List[str]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/reader_resource_response_model.py b/src/elevenlabs/types/reader_resource_response_model.py new file mode 100644 index 00000000..e98b7096 --- /dev/null +++ b/src/elevenlabs/types/reader_resource_response_model.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +from .reader_resource_response_model_resource_type import ReaderResourceResponseModelResourceType +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class ReaderResourceResponseModel(UncheckedBaseModel): + resource_type: ReaderResourceResponseModelResourceType + resource_id: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/reader_resource_response_model_resource_type.py b/src/elevenlabs/types/reader_resource_response_model_resource_type.py new file mode 100644 index 00000000..937d9174 --- /dev/null +++ b/src/elevenlabs/types/reader_resource_response_model_resource_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ReaderResourceResponseModelResourceType = typing.Union[typing.Literal["read", "collection"], typing.Any] diff --git a/src/elevenlabs/types/sso_provider_response_model_provider_type.py b/src/elevenlabs/types/sso_provider_response_model_provider_type.py deleted file mode 100644 index 52c8f957..00000000 --- a/src/elevenlabs/types/sso_provider_response_model_provider_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -SsoProviderResponseModelProviderType = typing.Union[typing.Literal["saml", "oidc"], typing.Any] diff --git a/src/elevenlabs/types/tts_conversational_config.py b/src/elevenlabs/types/tts_conversational_config.py new file mode 100644 index 00000000..3c219fb3 --- /dev/null +++ b/src/elevenlabs/types/tts_conversational_config.py @@ -0,0 +1,31 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .tts_conversational_model import TtsConversationalModel +from .tts_output_format import TtsOutputFormat +from .tts_optimize_streaming_latency import TtsOptimizeStreamingLatency +from .pydantic_pronunciation_dictionary_version_locator import PydanticPronunciationDictionaryVersionLocator +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class TtsConversationalConfig(UncheckedBaseModel): + model_id: typing.Optional[TtsConversationalModel] = None + voice_id: typing.Optional[str] = None + agent_output_audio_format: typing.Optional[TtsOutputFormat] = None + optimize_streaming_latency: typing.Optional[TtsOptimizeStreamingLatency] = None + stability: typing.Optional[float] = None + similarity_boost: typing.Optional[float] = None + pronunciation_dictionary_locators: typing.Optional[typing.List[PydanticPronunciationDictionaryVersionLocator]] = ( + None + ) + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/tts_conversational_config_override.py b/src/elevenlabs/types/tts_conversational_config_override.py new file mode 100644 index 00000000..db600b89 --- /dev/null +++ b/src/elevenlabs/types/tts_conversational_config_override.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class TtsConversationalConfigOverride(UncheckedBaseModel): + voice_id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/tts_conversational_model.py b/src/elevenlabs/types/tts_conversational_model.py new file mode 100644 index 00000000..3c9c0bc9 --- /dev/null +++ b/src/elevenlabs/types/tts_conversational_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TtsConversationalModel = typing.Union[typing.Literal["eleven_turbo_v2", "eleven_turbo_v2_5"], typing.Any] diff --git a/src/elevenlabs/types/tts_optimize_streaming_latency.py b/src/elevenlabs/types/tts_optimize_streaming_latency.py new file mode 100644 index 00000000..36429b8d --- /dev/null +++ b/src/elevenlabs/types/tts_optimize_streaming_latency.py @@ -0,0 +1,3 @@ +# This file was auto-generated by Fern from our API Definition. + +TtsOptimizeStreamingLatency = int diff --git a/src/elevenlabs/types/tts_output_format.py b/src/elevenlabs/types/tts_output_format.py new file mode 100644 index 00000000..aceaba22 --- /dev/null +++ b/src/elevenlabs/types/tts_output_format.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +TtsOutputFormat = typing.Union[ + typing.Literal["pcm_16000", "pcm_22050", "pcm_24000", "pcm_44100", "ulaw_8000"], typing.Any +] diff --git a/src/elevenlabs/types/turn_config.py b/src/elevenlabs/types/turn_config.py new file mode 100644 index 00000000..50347f60 --- /dev/null +++ b/src/elevenlabs/types/turn_config.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .turn_mode import TurnMode +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class TurnConfig(UncheckedBaseModel): + turn_timeout: typing.Optional[float] = None + mode: typing.Optional[TurnMode] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/turn_mode.py b/src/elevenlabs/types/turn_mode.py new file mode 100644 index 00000000..a82a3a3d --- /dev/null +++ b/src/elevenlabs/types/turn_mode.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TurnMode = typing.Union[typing.Literal["silence", "turn"], typing.Any] diff --git a/src/elevenlabs/types/url_avatar.py b/src/elevenlabs/types/url_avatar.py new file mode 100644 index 00000000..44069331 --- /dev/null +++ b/src/elevenlabs/types/url_avatar.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class UrlAvatar(UncheckedBaseModel): + custom_url: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/user.py b/src/elevenlabs/types/user.py index df7839d0..e90906c6 100644 --- a/src/elevenlabs/types/user.py +++ b/src/elevenlabs/types/user.py @@ -17,6 +17,8 @@ class User(UncheckedBaseModel): first_name: typing.Optional[str] = None is_api_key_hashed: typing.Optional[bool] = None xi_api_key_preview: typing.Optional[str] = None + referral_link_code: typing.Optional[str] = None + partnerstack_partner_default_link: typing.Optional[str] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/voice.py b/src/elevenlabs/types/voice.py index ea2a4222..08ee9932 100644 --- a/src/elevenlabs/types/voice.py +++ b/src/elevenlabs/types/voice.py @@ -32,6 +32,7 @@ class Voice(UncheckedBaseModel): is_owner: typing.Optional[bool] = None is_legacy: typing.Optional[bool] = None is_mixed: typing.Optional[bool] = None + created_at_unix: typing.Optional[int] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/voice_preview_response_model.py b/src/elevenlabs/types/voice_preview_response_model.py index c4384182..a43c34b4 100644 --- a/src/elevenlabs/types/voice_preview_response_model.py +++ b/src/elevenlabs/types/voice_preview_response_model.py @@ -1,15 +1,16 @@ # This file was auto-generated by Fern from our API 
Definition. from ..core.unchecked_base_model import UncheckedBaseModel -import typing from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing import pydantic class VoicePreviewResponseModel(UncheckedBaseModel): audio_base_64: str generated_voice_id: str - media_type: typing.Optional[typing.Literal["audio/mpeg"]] = None + media_type: str + duration_secs: float if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/voice_previews_response_model.py b/src/elevenlabs/types/voice_previews_response_model.py index 721505b4..d9b8b56d 100644 --- a/src/elevenlabs/types/voice_previews_response_model.py +++ b/src/elevenlabs/types/voice_previews_response_model.py @@ -9,6 +9,7 @@ class VoicePreviewsResponseModel(UncheckedBaseModel): previews: typing.List[VoicePreviewResponseModel] + text: str if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/voice_sharing_response.py b/src/elevenlabs/types/voice_sharing_response.py index 365a560d..9fb5062d 100644 --- a/src/elevenlabs/types/voice_sharing_response.py +++ b/src/elevenlabs/types/voice_sharing_response.py @@ -6,6 +6,7 @@ from .voice_sharing_response_model_category import VoiceSharingResponseModelCategory from .review_status import ReviewStatus from .voice_sharing_moderation_check_response_model import VoiceSharingModerationCheckResponseModel +from .reader_resource_response_model import ReaderResourceResponseModel from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic @@ -42,6 +43,7 @@ class VoiceSharingResponse(UncheckedBaseModel): youtube_username: typing.Optional[str] = None tiktok_username: typing.Optional[str] = None moderation_check: typing.Optional[VoiceSharingModerationCheckResponseModel] = None + reader_restricted_on: 
typing.Optional[typing.List[ReaderResourceResponseModel]] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/webhook_tool_api_schema_config.py b/src/elevenlabs/types/webhook_tool_api_schema_config.py new file mode 100644 index 00000000..ae3ad49f --- /dev/null +++ b/src/elevenlabs/types/webhook_tool_api_schema_config.py @@ -0,0 +1,40 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +from .array_json_schema_property import ArrayJsonSchemaProperty +from .object_json_schema_property import ObjectJsonSchemaProperty +import typing +from .webhook_tool_api_schema_config_method import WebhookToolApiSchemaConfigMethod +from .literal_json_schema_property import LiteralJsonSchemaProperty +from .query_params_json_schema import QueryParamsJsonSchema +from .webhook_tool_api_schema_config_request_headers_value import WebhookToolApiSchemaConfigRequestHeadersValue +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class WebhookToolApiSchemaConfig(UncheckedBaseModel): + """ + Configuration for a webhook that will be called by an LLM tool. 
+ """ + + url: str + method: typing.Optional[WebhookToolApiSchemaConfigMethod] = None + path_params_schema: typing.Optional[typing.Dict[str, LiteralJsonSchemaProperty]] = None + query_params_schema: typing.Optional[QueryParamsJsonSchema] = None + request_body_schema: typing.Optional[ObjectJsonSchemaProperty] = None + request_headers: typing.Optional[typing.Dict[str, WebhookToolApiSchemaConfigRequestHeadersValue]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(ArrayJsonSchemaProperty, WebhookToolApiSchemaConfig=WebhookToolApiSchemaConfig) +update_forward_refs(ObjectJsonSchemaProperty, WebhookToolApiSchemaConfig=WebhookToolApiSchemaConfig) diff --git a/src/elevenlabs/types/webhook_tool_api_schema_config_method.py b/src/elevenlabs/types/webhook_tool_api_schema_config_method.py new file mode 100644 index 00000000..02708dff --- /dev/null +++ b/src/elevenlabs/types/webhook_tool_api_schema_config_method.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +WebhookToolApiSchemaConfigMethod = typing.Union[typing.Literal["GET", "POST", "PATCH", "DELETE"], typing.Any] diff --git a/src/elevenlabs/types/webhook_tool_api_schema_config_request_headers_value.py b/src/elevenlabs/types/webhook_tool_api_schema_config_request_headers_value.py new file mode 100644 index 00000000..e4aae56c --- /dev/null +++ b/src/elevenlabs/types/webhook_tool_api_schema_config_request_headers_value.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from .conv_ai_secret_locator import ConvAiSecretLocator + +WebhookToolApiSchemaConfigRequestHeadersValue = typing.Union[str, ConvAiSecretLocator] diff --git a/src/elevenlabs/types/webhook_tool_config.py b/src/elevenlabs/types/webhook_tool_config.py new file mode 100644 index 00000000..9d76e0dd --- /dev/null +++ b/src/elevenlabs/types/webhook_tool_config.py @@ -0,0 +1,34 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +from .array_json_schema_property import ArrayJsonSchemaProperty +from .object_json_schema_property import ObjectJsonSchemaProperty +from .webhook_tool_api_schema_config import WebhookToolApiSchemaConfig +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class WebhookToolConfig(UncheckedBaseModel): + """ + A webhook tool is a tool that calls an external webhook from our server + """ + + name: str + description: str + api_schema: WebhookToolApiSchemaConfig + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(ArrayJsonSchemaProperty, WebhookToolConfig=WebhookToolConfig) +update_forward_refs(ObjectJsonSchemaProperty, WebhookToolConfig=WebhookToolConfig) diff --git a/src/elevenlabs/voice_generation/client.py b/src/elevenlabs/voice_generation/client.py index d79d9664..7503866a 100644 --- a/src/elevenlabs/voice_generation/client.py +++ b/src/elevenlabs/voice_generation/client.py @@ -85,7 +85,7 @@ def generate( Category code corresponding to the gender of the generated voice. Possible values: female, male. accent : str - Category code corresponding to the accent of the generated voice. 
Possible values: american, british, african, australian, indian. + Category code corresponding to the accent of the generated voice. Possible values: british, american, african, australian, indian. age : Age Category code corresponding to the age of the generated voice. Possible values: young, middle_aged, old. @@ -318,7 +318,7 @@ async def generate( Category code corresponding to the gender of the generated voice. Possible values: female, male. accent : str - Category code corresponding to the accent of the generated voice. Possible values: american, british, african, australian, indian. + Category code corresponding to the accent of the generated voice. Possible values: british, american, african, australian, indian. age : Age Category code corresponding to the age of the generated voice. Possible values: young, middle_aged, old. diff --git a/src/elevenlabs/workspace/client.py b/src/elevenlabs/workspace/client.py index b56b77b2..ff9411cc 100644 --- a/src/elevenlabs/workspace/client.py +++ b/src/elevenlabs/workspace/client.py @@ -3,7 +3,6 @@ import typing from ..core.client_wrapper import SyncClientWrapper from ..core.request_options import RequestOptions -from ..types.sso_provider_response_model import SsoProviderResponseModel from ..core.unchecked_base_model import construct_type from ..errors.unprocessable_entity_error import UnprocessableEntityError from ..types.http_validation_error import HttpValidationError @@ -22,65 +21,6 @@ class WorkspaceClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper - def get_sso_provider_admin( - self, *, workspace_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> SsoProviderResponseModel: - """ - Parameters - ---------- - workspace_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SsoProviderResponseModel - Successful Response - - Examples - -------- - from elevenlabs import ElevenLabs - - client = ElevenLabs( - api_key="YOUR_API_KEY", - ) - client.workspace.get_sso_provider_admin( - workspace_id="workspace_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "admin/n8enylacgd/sso-provider", - method="GET", - params={ - "workspace_id": workspace_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - SsoProviderResponseModel, - construct_type( - type_=SsoProviderResponseModel, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - def invite_user( self, *, email: str, request_options: typing.Optional[RequestOptions] = None ) -> typing.Optional[typing.Any]: @@ -288,73 +228,6 @@ class AsyncWorkspaceClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper - async def get_sso_provider_admin( - self, *, workspace_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> SsoProviderResponseModel: - """ - Parameters - ---------- - workspace_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SsoProviderResponseModel - Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.workspace.get_sso_provider_admin( - workspace_id="workspace_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "admin/n8enylacgd/sso-provider", - method="GET", - params={ - "workspace_id": workspace_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - SsoProviderResponseModel, - construct_type( - type_=SsoProviderResponseModel, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - async def invite_user( self, *, email: str, request_options: typing.Optional[RequestOptions] = None ) -> typing.Optional[typing.Any]: From c9401951de9848dd1c3a9f8baac72e32c15d49f6 Mon Sep 17 00:00:00 2001 From: "Louis J." 
<132601011+louisjoecodes@users.noreply.github.com> Date: Mon, 2 Dec 2024 05:57:10 +0000 Subject: [PATCH 21/45] feat: clean up convai direct api call (#406) --- src/elevenlabs/conversational_ai/conversation.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/elevenlabs/conversational_ai/conversation.py b/src/elevenlabs/conversational_ai/conversation.py index 6913f99a..316b3637 100644 --- a/src/elevenlabs/conversational_ai/conversation.py +++ b/src/elevenlabs/conversational_ai/conversation.py @@ -229,9 +229,5 @@ def _get_wss_url(https://github.com/self): return f"{base_ws_url}/v1/convai/conversation?agent_id={self.agent_id}" def _get_signed_url(https://github.com/self): - # TODO: Use generated SDK method once available. - response = self.client._client_wrapper.httpx_client.request( - f"v1/convai/conversation/get_signed_url?agent_id={self.agent_id}", - method="GET", - ) - return response.json()["signed_url"] \ No newline at end of file + response = self.client.get_signed_url(https://github.com/agent_id=self.agent_id) + return response.signed_url From bc167cb3f8bbfa98d4e99963020671dde13db15d Mon Sep 17 00:00:00 2001 From: "fern-api[bot]" <115122769+fern-api[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 01:21:05 -0500 Subject: [PATCH 22/45] chore: release 1.13.2 --- pyproject.toml | 2 +- src/elevenlabs/chapters/client.py | 6 ++++ src/elevenlabs/conversational_ai/client.py | 18 ++++++++++++ src/elevenlabs/core/client_wrapper.py | 2 +- src/elevenlabs/core/file.py | 21 ++++++++------ src/elevenlabs/core/http_client.py | 28 +++++++++++++------ src/elevenlabs/history/client.py | 6 ++++ src/elevenlabs/projects/client.py | 24 ++++++++++++++++ .../pronunciation_dictionary/client.py | 12 ++++++++ .../text_to_sound_effects/client.py | 6 ++++ src/elevenlabs/text_to_speech/client.py | 24 ++++++++++++++++ src/elevenlabs/text_to_voice/client.py | 12 ++++++++ src/elevenlabs/voice_generation/client.py | 12 ++++++++ 
src/elevenlabs/voices/client.py | 6 ++++ src/elevenlabs/workspace/client.py | 18 ++++++++++++ 15 files changed, 179 insertions(+), 18 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 3a07be86..1a84e6ec 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.13.1" +version = "1.13.2" description = "" readme = "README.md" authors = [] diff --git a/src/elevenlabs/chapters/client.py b/src/elevenlabs/chapters/client.py index af4de411..cd01796a 100644 --- a/src/elevenlabs/chapters/client.py +++ b/src/elevenlabs/chapters/client.py @@ -385,6 +385,9 @@ def stream_snapshot( json={ "convert_to_mpeg": convert_to_mpeg, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -822,6 +825,9 @@ async def main() -> None: json={ "convert_to_mpeg": convert_to_mpeg, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) diff --git a/src/elevenlabs/conversational_ai/client.py b/src/elevenlabs/conversational_ai/client.py index da92c7f9..1ef2a544 100644 --- a/src/elevenlabs/conversational_ai/client.py +++ b/src/elevenlabs/conversational_ai/client.py @@ -154,6 +154,9 @@ def create_agent( ), "name": name, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -363,6 +366,9 @@ def update_agent( ), "name": name, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -693,6 +699,9 @@ def add_agent_secret( "name": name, "secret_value": secret_value, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -1197,6 +1206,9 @@ async def main() -> None: ), "name": name, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -1430,6 +1442,9 @@ async def main() -> None: ), "name": name, }, + headers={ + "content-type": 
"application/json", + }, request_options=request_options, omit=OMIT, ) @@ -1800,6 +1815,9 @@ async def main() -> None: "name": name, "secret_value": secret_value, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index 6a14c535..f3d7b3a0 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.13.1", + "X-Fern-SDK-Version": "1.13.2", } if self._api_key is not None: headers["xi-api-key"] = self._api_key diff --git a/src/elevenlabs/core/file.py b/src/elevenlabs/core/file.py index b4cbba30..44b0d27c 100644 --- a/src/elevenlabs/core/file.py +++ b/src/elevenlabs/core/file.py @@ -43,20 +43,25 @@ def convert_file_dict_to_httpx_tuples( return httpx_tuples -def with_content_type(*, file: File, content_type: str) -> File: - """ """ +def with_content_type(*, file: File, default_content_type: str) -> File: + """ + This function resolves to the file's content type, if provided, and defaults + to the default_content_type value if not. 
+ """ if isinstance(file, tuple): if len(file) == 2: filename, content = cast(Tuple[Optional[str], FileContent], file) # type: ignore - return (filename, content, content_type) + return (filename, content, default_content_type) elif len(file) == 3: - filename, content, _ = cast(Tuple[Optional[str], FileContent, Optional[str]], file) # type: ignore - return (filename, content, content_type) + filename, content, file_content_type = cast(Tuple[Optional[str], FileContent, Optional[str]], file) # type: ignore + out_content_type = file_content_type or default_content_type + return (filename, content, out_content_type) elif len(file) == 4: - filename, content, _, headers = cast( # type: ignore + filename, content, file_content_type, headers = cast( # type: ignore Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]], file ) - return (filename, content, content_type, headers) + out_content_type = file_content_type or default_content_type + return (filename, content, out_content_type, headers) else: raise ValueError(f"Unexpected tuple length: {len(file)}") - return (None, file, content_type) + return (None, file, default_content_type) diff --git a/src/elevenlabs/core/http_client.py b/src/elevenlabs/core/http_client.py index eb4e8943..1a1a1311 100644 --- a/src/elevenlabs/core/http_client.py +++ b/src/elevenlabs/core/http_client.py @@ -227,9 +227,11 @@ def request( json=json_body, data=data_body, content=content, - files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) - if (files is not None and files is not omit) - else None, + files=( + convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit)) + if (files is not None and files is not omit) + else None + ), timeout=timeout, ) @@ -311,9 +313,11 @@ def stream( json=json_body, data=data_body, content=content, - files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) - if (files is not None and files is not omit) - else None, + files=( + 
convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit)) + if (files is not None and files is not omit) + else None + ), timeout=timeout, ) as stream: yield stream @@ -400,7 +404,11 @@ async def request( json=json_body, data=data_body, content=content, - files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None, + files=( + convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit)) + if files is not None + else None + ), timeout=timeout, ) @@ -481,7 +489,11 @@ async def stream( json=json_body, data=data_body, content=content, - files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None, + files=( + convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit)) + if files is not None + else None + ), timeout=timeout, ) as stream: yield stream diff --git a/src/elevenlabs/history/client.py b/src/elevenlabs/history/client.py index cd7367b4..c87c0fe1 100644 --- a/src/elevenlabs/history/client.py +++ b/src/elevenlabs/history/client.py @@ -326,6 +326,9 @@ def download( "history_item_ids": history_item_ids, "output_format": output_format, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -696,6 +699,9 @@ async def main() -> None: "history_item_ids": history_item_ids, "output_format": output_format, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) diff --git a/src/elevenlabs/projects/client.py b/src/elevenlabs/projects/client.py index 9c249e19..a286da04 100644 --- a/src/elevenlabs/projects/client.py +++ b/src/elevenlabs/projects/client.py @@ -403,6 +403,9 @@ def edit_basic_project_info( "volume_normalization": volume_normalization, "quality_check_on": quality_check_on, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -643,6 
+646,9 @@ def stream_audio( json={ "convert_to_mpeg": convert_to_mpeg, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) as _response: @@ -773,6 +779,9 @@ def add_chapter_to_a_project( "name": name, "from_url": from_url, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -853,6 +862,9 @@ def update_pronunciation_dictionaries( direction="write", ), }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -1290,6 +1302,9 @@ async def main() -> None: "volume_normalization": volume_normalization, "quality_check_on": quality_check_on, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -1554,6 +1569,9 @@ async def stream_audio( json={ "convert_to_mpeg": convert_to_mpeg, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) as _response: @@ -1700,6 +1718,9 @@ async def main() -> None: "name": name, "from_url": from_url, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -1788,6 +1809,9 @@ async def main() -> None: direction="write", ), }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) diff --git a/src/elevenlabs/pronunciation_dictionary/client.py b/src/elevenlabs/pronunciation_dictionary/client.py index 536cb817..40356617 100644 --- a/src/elevenlabs/pronunciation_dictionary/client.py +++ b/src/elevenlabs/pronunciation_dictionary/client.py @@ -172,6 +172,9 @@ def add_rules_to_the_pronunciation_dictionary( object_=rules, annotation=typing.Sequence[PronunciationDictionaryRule], direction="write" ), }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -243,6 +246,9 @@ def remove_rules_from_the_pronunciation_dictionary( json={ "rule_strings": 
rule_strings, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -617,6 +623,9 @@ async def main() -> None: object_=rules, annotation=typing.Sequence[PronunciationDictionaryRule], direction="write" ), }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -696,6 +705,9 @@ async def main() -> None: json={ "rule_strings": rule_strings, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) diff --git a/src/elevenlabs/text_to_sound_effects/client.py b/src/elevenlabs/text_to_sound_effects/client.py index cbf8c36c..4913029d 100644 --- a/src/elevenlabs/text_to_sound_effects/client.py +++ b/src/elevenlabs/text_to_sound_effects/client.py @@ -56,6 +56,9 @@ def convert( "duration_seconds": duration_seconds, "prompt_influence": prompt_influence, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) as _response: @@ -124,6 +127,9 @@ async def convert( "duration_seconds": duration_seconds, "prompt_influence": prompt_influence, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) as _response: diff --git a/src/elevenlabs/text_to_speech/client.py b/src/elevenlabs/text_to_speech/client.py index d58c083d..641a95f6 100644 --- a/src/elevenlabs/text_to_speech/client.py +++ b/src/elevenlabs/text_to_speech/client.py @@ -169,6 +169,9 @@ def convert( "use_pvc_as_ivc": use_pvc_as_ivc, "apply_text_normalization": apply_text_normalization, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) as _response: @@ -320,6 +323,9 @@ def convert_with_timestamps( "use_pvc_as_ivc": use_pvc_as_ivc, "apply_text_normalization": apply_text_normalization, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -480,6 +486,9 @@ def convert_as_stream( 
"use_pvc_as_ivc": use_pvc_as_ivc, "apply_text_normalization": apply_text_normalization, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) as _response: @@ -630,6 +639,9 @@ def stream_with_timestamps( "use_pvc_as_ivc": use_pvc_as_ivc, "apply_text_normalization": apply_text_normalization, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -797,6 +809,9 @@ async def main() -> None: "use_pvc_as_ivc": use_pvc_as_ivc, "apply_text_normalization": apply_text_normalization, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) as _response: @@ -956,6 +971,9 @@ async def main() -> None: "use_pvc_as_ivc": use_pvc_as_ivc, "apply_text_normalization": apply_text_normalization, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -1124,6 +1142,9 @@ async def main() -> None: "use_pvc_as_ivc": use_pvc_as_ivc, "apply_text_normalization": apply_text_normalization, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) as _response: @@ -1282,6 +1303,9 @@ async def main() -> None: "use_pvc_as_ivc": use_pvc_as_ivc, "apply_text_normalization": apply_text_normalization, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) diff --git a/src/elevenlabs/text_to_voice/client.py b/src/elevenlabs/text_to_voice/client.py index 53a00fb5..ce03c69f 100644 --- a/src/elevenlabs/text_to_voice/client.py +++ b/src/elevenlabs/text_to_voice/client.py @@ -89,6 +89,9 @@ def create_previews( "text": text, "auto_generate_text": auto_generate_text, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -177,6 +180,9 @@ def create_voice_from_preview( "labels": labels, "played_not_selected_voice_ids": played_not_selected_voice_ids, }, + headers={ + 
"content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -285,6 +291,9 @@ async def main() -> None: "text": text, "auto_generate_text": auto_generate_text, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -381,6 +390,9 @@ async def main() -> None: "labels": labels, "played_not_selected_voice_ids": played_not_selected_voice_ids, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) diff --git a/src/elevenlabs/voice_generation/client.py b/src/elevenlabs/voice_generation/client.py index 7503866a..05e5fb94 100644 --- a/src/elevenlabs/voice_generation/client.py +++ b/src/elevenlabs/voice_generation/client.py @@ -129,6 +129,9 @@ def generate( "accent_strength": accent_strength, "text": text, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) as _response: @@ -215,6 +218,9 @@ def create_a_previously_generated_voice( "played_not_selected_voice_ids": played_not_selected_voice_ids, "labels": labels, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -370,6 +376,9 @@ async def main() -> None: "accent_strength": accent_strength, "text": text, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) as _response: @@ -464,6 +473,9 @@ async def main() -> None: "played_not_selected_voice_ids": played_not_selected_voice_ids, "labels": labels, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) diff --git a/src/elevenlabs/voices/client.py b/src/elevenlabs/voices/client.py index e0e5a7e0..c3775165 100644 --- a/src/elevenlabs/voices/client.py +++ b/src/elevenlabs/voices/client.py @@ -613,6 +613,9 @@ def add_sharing_voice( json={ "new_name": new_name, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, 
omit=OMIT, ) @@ -1570,6 +1573,9 @@ async def main() -> None: json={ "new_name": new_name, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) diff --git a/src/elevenlabs/workspace/client.py b/src/elevenlabs/workspace/client.py index ff9411cc..dade871f 100644 --- a/src/elevenlabs/workspace/client.py +++ b/src/elevenlabs/workspace/client.py @@ -57,6 +57,9 @@ def invite_user( json={ "email": email, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -120,6 +123,9 @@ def delete_existing_invitation( json={ "email": email, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -196,6 +202,9 @@ def update_member( "is_locked": is_locked, "workspace_role": workspace_role, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -272,6 +281,9 @@ async def main() -> None: json={ "email": email, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -343,6 +355,9 @@ async def main() -> None: json={ "email": email, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) @@ -427,6 +442,9 @@ async def main() -> None: "is_locked": is_locked, "workspace_role": workspace_role, }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) From a65875a763131b2dbbdc5883397d94c636c0742c Mon Sep 17 00:00:00 2001 From: Oswin Kruger Ruiz <42069573+OsKrg@users.noreply.github.com> Date: Wed, 4 Dec 2024 10:28:19 +0100 Subject: [PATCH 23/45] Update get_signed_url for conversation.py (#414) Was running into an error when using the Python convAI example code -> AttributeError: 'ElevenLabs' object has no attribute 'get_signed_url' Bug reported by user: https://github.com/elevenlabs/elevenlabs-python/issues/413 This change seems to fix it. 
--- src/elevenlabs/conversational_ai/conversation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/elevenlabs/conversational_ai/conversation.py b/src/elevenlabs/conversational_ai/conversation.py index 316b3637..51475e67 100644 --- a/src/elevenlabs/conversational_ai/conversation.py +++ b/src/elevenlabs/conversational_ai/conversation.py @@ -229,5 +229,5 @@ def _get_wss_url(https://github.com/self): return f"{base_ws_url}/v1/convai/conversation?agent_id={self.agent_id}" def _get_signed_url(https://github.com/self): - response = self.client.get_signed_url(https://github.com/agent_id=self.agent_id) + response = self.client.conversational_ai.get_signed_url(https://github.com/agent_id=self.agent_id) return response.signed_url From 0a5a4e0767db958c69f64b381cdeb7446b9422c5 Mon Sep 17 00:00:00 2001 From: Laco Date: Wed, 4 Dec 2024 10:31:47 +0100 Subject: [PATCH 24/45] Bump to v1.13.3 (#415) --- pyproject.toml | 2 +- src/elevenlabs/core/client_wrapper.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 1a84e6ec..6c118601 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.13.2" +version = "1.13.3" description = "" readme = "README.md" authors = [] diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index f3d7b3a0..cb61254c 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.13.2", + "X-Fern-SDK-Version": "1.13.3", } if self._api_key is not None: headers["xi-api-key"] = self._api_key From c19ad6ea9f835db84d4f653db232aa4657e364ee Mon Sep 17 00:00:00 2001 From: Tudor Date: Tue, 10 Dec 2024 00:20:59 +0200 Subject: [PATCH 25/45] Move 
_should_stop initialization to __init__ (#426) Avoid using mutable default values for _should_stop by initializing it in the __init__ method to ensure instance-level independence. --- src/elevenlabs/conversational_ai/conversation.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/elevenlabs/conversational_ai/conversation.py b/src/elevenlabs/conversational_ai/conversation.py index 51475e67..b164674e 100644 --- a/src/elevenlabs/conversational_ai/conversation.py +++ b/src/elevenlabs/conversational_ai/conversation.py @@ -74,7 +74,7 @@ class Conversation: callback_latency_measurement: Optional[Callable[[int], None]] _thread: Optional[threading.Thread] = None - _should_stop: threading.Event = threading.Event() + _should_stop: threading.Event _conversation_id: Optional[str] = None _last_interrupt_id: int = 0 @@ -119,6 +119,7 @@ def __init__( self.callback_agent_response_correction = callback_agent_response_correction self.callback_user_transcript = callback_user_transcript self.callback_latency_measurement = callback_latency_measurement + self._should_stop = threading.Event() def start_session(self): """Starts the conversation session. 
From 85f1658121c328d75bacff6642f702afee9f3d01 Mon Sep 17 00:00:00 2001 From: Laco Date: Mon, 9 Dec 2024 22:26:43 +0000 Subject: [PATCH 26/45] Bump version to 1.13.4 (#428) --- pyproject.toml | 2 +- src/elevenlabs/core/client_wrapper.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6c118601..e9abd966 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.13.3" +version = "1.13.4" description = "" readme = "README.md" authors = [] diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index cb61254c..a581a62a 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.13.3", + "X-Fern-SDK-Version": "1.13.4", } if self._api_key is not None: headers["xi-api-key"] = self._api_key From ec0c1b3487c98bcda663e775553102a2b08dad45 Mon Sep 17 00:00:00 2001 From: Nickronomicon <60560304+Nickronomicon@users.noreply.github.com> Date: Mon, 9 Dec 2024 17:42:30 -0800 Subject: [PATCH 27/45] Removes the hardcoded websocket api path. (#427) Removes the hardcoded websocket api path. 
--- src/elevenlabs/realtime_tts.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/elevenlabs/realtime_tts.py b/src/elevenlabs/realtime_tts.py index 146431d7..f4d4bc58 100644 --- a/src/elevenlabs/realtime_tts.py +++ b/src/elevenlabs/realtime_tts.py @@ -9,6 +9,7 @@ from websockets.sync.client import connect from .core.api_error import ApiError +from .core.client_wrapper import SyncClientWrapper from .core.jsonable_encoder import jsonable_encoder from .core.remove_none_from_dict import remove_none_from_dict from .core.request_options import RequestOptions @@ -39,6 +40,9 @@ def text_chunker(chunks: typing.Iterator[str]) -> typing.Iterator[str]: class RealtimeTextToSpeechClient(TextToSpeechClient): + def __init__(self, *, client_wrapper: SyncClientWrapper): + super().__init__(client_wrapper=client_wrapper) + self._ws_base_url = urllib.parse.urlparse(self._client_wrapper.get_base_url())._replace(scheme="wss").geturl() def convert_realtime( self, @@ -88,7 +92,7 @@ def get_text() -> typing.Iterator[str]: """ with connect( urllib.parse.urljoin( - "wss://api.elevenlabs.io/", + self._ws_base_url, f"v1/text-to-speech/{jsonable_encoder(voice_id)}/stream-input?model_id={model_id}&output_format={output_format}" ), additional_headers=jsonable_encoder( From b5063c5dd0931a918b331e0c7588bd581da3e88e Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 11:16:43 +0000 Subject: [PATCH 28/45] SDK regeneration --- poetry.lock | 26 ++-- pyproject.toml | 2 +- reference.md | 4 +- src/elevenlabs/__init__.py | 6 + src/elevenlabs/core/client_wrapper.py | 2 +- src/elevenlabs/text_to_speech/__init__.py | 6 + src/elevenlabs/text_to_speech/client.py | 128 +++++++++++------- .../text_to_speech/types/__init__.py | 8 ++ ..._speech_stream_with_timestamps_response.py | 33 +++++ ...ream_with_timestamps_response_alignment.py | 32 +++++ ...imestamps_response_normalized_alignment.py | 32 +++++ 11 files changed, 217 
insertions(+), 62 deletions(-) create mode 100644 src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response.py create mode 100644 src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_alignment.py create mode 100644 src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_normalized_alignment.py diff --git a/poetry.lock b/poetry.lock index 31db4bac..475c1afc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -220,13 +220,13 @@ trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httpx" -version = "0.28.0" +version = "0.28.1" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpx-0.28.0-py3-none-any.whl", hash = "sha256:dc0b419a0cfeb6e8b34e85167c0da2671206f5095f1baa9663d23bcfd6b535fc"}, - {file = "httpx-0.28.0.tar.gz", hash = "sha256:0858d3bab51ba7e386637f22a61d8ccddaeec5f3fe4209da3a6168dbb91573e0"}, + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, ] [package.dependencies] @@ -377,13 +377,13 @@ test = ["numpy"] [[package]] name = "pydantic" -version = "2.10.2" +version = "2.10.3" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e"}, - {file = "pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa"}, + {file = "pydantic-2.10.3-py3-none-any.whl", hash = "sha256:be04d85bbc7b65651c5f8e6b9976ed9c6f41782a55524cef079a34a0bb82144d"}, + {file = "pydantic-2.10.3.tar.gz", hash = "sha256:cb5ac360ce894ceacd69c403187900a02c4b20b693a9dd1d643e1effab9eadf9"}, ] [package.dependencies] @@ -611,13 +611,13 @@ files = [ [[package]] 
name = "six" -version = "1.16.0" +version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] [[package]] @@ -685,13 +685,13 @@ files = [ [[package]] name = "types-python-dateutil" -version = "2.9.0.20241003" +version = "2.9.0.20241206" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" files = [ - {file = "types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"}, - {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, + {file = "types_python_dateutil-2.9.0.20241206-py3-none-any.whl", hash = "sha256:e248a4bc70a486d3e3ec84d0dc30eec3a5f979d6e7ee4123ae043eedbb987f53"}, + {file = "types_python_dateutil-2.9.0.20241206.tar.gz", hash = "sha256:18f493414c26ffba692a72369fea7a154c502646301ebfe3d56a04b3767284cb"}, ] [[package]] diff --git a/pyproject.toml b/pyproject.toml index e9abd966..86161b22 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.13.4" +version = "1.13.5" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index 5ab4d727..45231398 100644 --- a/reference.md +++ b/reference.md @@ -1176,10 +1176,12 @@ from elevenlabs import ElevenLabs 
client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.text_to_speech.stream_with_timestamps( +response = client.text_to_speech.stream_with_timestamps( voice_id="21m00Tcm4TlvDq8ikWAM", text="text", ) +for chunk in response: + yield chunk ``` diff --git a/src/elevenlabs/__init__.py b/src/elevenlabs/__init__.py index 98bee6ba..4b7a806b 100644 --- a/src/elevenlabs/__init__.py +++ b/src/elevenlabs/__init__.py @@ -240,6 +240,9 @@ BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization, BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization, BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization, + TextToSpeechStreamWithTimestampsResponse, + TextToSpeechStreamWithTimestampsResponseAlignment, + TextToSpeechStreamWithTimestampsResponseNormalizedAlignment, ) from .text_to_voice import TextToVoiceCreatePreviewsRequestOutputFormat from .version import __version__ @@ -428,6 +431,9 @@ "SubscriptionResponseModelCurrency", "SubscriptionStatus", "TextToSpeechAsStreamRequest", + "TextToSpeechStreamWithTimestampsResponse", + "TextToSpeechStreamWithTimestampsResponseAlignment", + "TextToSpeechStreamWithTimestampsResponseNormalizedAlignment", "TextToVoiceCreatePreviewsRequestOutputFormat", "TtsConversationalConfig", "TtsConversationalConfigOverride", diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index a581a62a..8c4081c2 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.13.4", + "X-Fern-SDK-Version": "1.13.5", } if self._api_key is not None: headers["xi-api-key"] = self._api_key diff --git a/src/elevenlabs/text_to_speech/__init__.py b/src/elevenlabs/text_to_speech/__init__.py index 3fadc417..2ec7be88 100644 --- 
a/src/elevenlabs/text_to_speech/__init__.py +++ b/src/elevenlabs/text_to_speech/__init__.py @@ -5,6 +5,9 @@ BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization, BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization, BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization, + TextToSpeechStreamWithTimestampsResponse, + TextToSpeechStreamWithTimestampsResponseAlignment, + TextToSpeechStreamWithTimestampsResponseNormalizedAlignment, ) __all__ = [ @@ -12,4 +15,7 @@ "BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization", "BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization", "BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization", + "TextToSpeechStreamWithTimestampsResponse", + "TextToSpeechStreamWithTimestampsResponseAlignment", + "TextToSpeechStreamWithTimestampsResponseNormalizedAlignment", ] diff --git a/src/elevenlabs/text_to_speech/client.py b/src/elevenlabs/text_to_speech/client.py index 641a95f6..865cef6b 100644 --- a/src/elevenlabs/text_to_speech/client.py +++ b/src/elevenlabs/text_to_speech/client.py @@ -26,6 +26,8 @@ from .types.body_text_to_speech_streaming_with_timestamps_v_1_text_to_speech_voice_id_stream_with_timestamps_post_apply_text_normalization import ( BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization, ) +from .types.text_to_speech_stream_with_timestamps_response import TextToSpeechStreamWithTimestampsResponse +import json from ..core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters @@ -538,7 +540,7 @@ def stream_with_timestamps( BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization ] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> None: + ) -> 
typing.Iterator[TextToSpeechStreamWithTimestampsResponse]: """ Converts text into speech using a voice of your choice and returns a stream of JSONs containing audio as a base64 encoded string together with information on when which character was spoken. @@ -595,9 +597,10 @@ def stream_with_timestamps( request_options : typing.Optional[RequestOptions] Request-specific configuration. - Returns - ------- - None + Yields + ------ + typing.Iterator[TextToSpeechStreamWithTimestampsResponse] + Stream of JSON objects containing audio chunks and character timing information Examples -------- @@ -606,12 +609,14 @@ def stream_with_timestamps( client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.text_to_speech.stream_with_timestamps( + response = client.text_to_speech.stream_with_timestamps( voice_id="21m00Tcm4TlvDq8ikWAM", text="text", ) + for chunk in response: + yield chunk """ - _response = self._client_wrapper.httpx_client.request( + with self._client_wrapper.httpx_client.stream( f"v1/text-to-speech/{jsonable_encoder(voice_id)}/stream/with-timestamps", method="POST", params={ @@ -644,24 +649,38 @@ def stream_with_timestamps( }, request_options=request_options, omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), + ) as _response: + try: + if 200 <= _response.status_code < 300: + for _text in _response.iter_lines(): + try: + if len(_text) == 0: + continue + yield typing.cast( + TextToSpeechStreamWithTimestampsResponse, + construct_type( + type_=TextToSpeechStreamWithTimestampsResponse, # type: ignore + object_=json.loads(_text), + ), + ) + except: + pass + return + _response.read() + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + 
object_=_response.json(), + ), + ) ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) class AsyncTextToSpeechClient: @@ -1194,7 +1213,7 @@ async def stream_with_timestamps( BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization ] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> None: + ) -> typing.AsyncIterator[TextToSpeechStreamWithTimestampsResponse]: """ Converts text into speech using a voice of your choice and returns a stream of JSONs containing audio as a base64 encoded string together with information on when which character was spoken. @@ -1251,9 +1270,10 @@ async def stream_with_timestamps( request_options : typing.Optional[RequestOptions] Request-specific configuration. 
- Returns - ------- - None + Yields + ------ + typing.AsyncIterator[TextToSpeechStreamWithTimestampsResponse] + Stream of JSON objects containing audio chunks and character timing information Examples -------- @@ -1267,15 +1287,17 @@ async def stream_with_timestamps( async def main() -> None: - await client.text_to_speech.stream_with_timestamps( + response = await client.text_to_speech.stream_with_timestamps( voice_id="21m00Tcm4TlvDq8ikWAM", text="text", ) + async for chunk in response: + yield chunk asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( + async with self._client_wrapper.httpx_client.stream( f"v1/text-to-speech/{jsonable_encoder(voice_id)}/stream/with-timestamps", method="POST", params={ @@ -1308,21 +1330,35 @@ async def main() -> None: }, request_options=request_options, omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), + ) as _response: + try: + if 200 <= _response.status_code < 300: + async for _text in _response.aiter_lines(): + try: + if len(_text) == 0: + continue + yield typing.cast( + TextToSpeechStreamWithTimestampsResponse, + construct_type( + type_=TextToSpeechStreamWithTimestampsResponse, # type: ignore + object_=json.loads(_text), + ), + ) + except: + pass + return + await _response.aread() + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + _response_json = _response.json() + except JSONDecodeError: + raise 
ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/elevenlabs/text_to_speech/types/__init__.py b/src/elevenlabs/text_to_speech/types/__init__.py index b05354ee..527c7242 100644 --- a/src/elevenlabs/text_to_speech/types/__init__.py +++ b/src/elevenlabs/text_to_speech/types/__init__.py @@ -12,10 +12,18 @@ from .body_text_to_speech_with_timestamps_v_1_text_to_speech_voice_id_with_timestamps_post_apply_text_normalization import ( BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization, ) +from .text_to_speech_stream_with_timestamps_response import TextToSpeechStreamWithTimestampsResponse +from .text_to_speech_stream_with_timestamps_response_alignment import TextToSpeechStreamWithTimestampsResponseAlignment +from .text_to_speech_stream_with_timestamps_response_normalized_alignment import ( + TextToSpeechStreamWithTimestampsResponseNormalizedAlignment, +) __all__ = [ "BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization", "BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization", "BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization", "BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization", + "TextToSpeechStreamWithTimestampsResponse", + "TextToSpeechStreamWithTimestampsResponseAlignment", + "TextToSpeechStreamWithTimestampsResponseNormalizedAlignment", ] diff --git a/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response.py b/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response.py new file mode 100644 index 00000000..07c25399 --- /dev/null +++ b/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response.py @@ -0,0 +1,33 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ...core.unchecked_base_model import UncheckedBaseModel +import typing_extensions +import typing +from ...core.serialization import FieldMetadata +import pydantic +from .text_to_speech_stream_with_timestamps_response_alignment import TextToSpeechStreamWithTimestampsResponseAlignment +from .text_to_speech_stream_with_timestamps_response_normalized_alignment import ( + TextToSpeechStreamWithTimestampsResponseNormalizedAlignment, +) +from ...core.pydantic_utilities import IS_PYDANTIC_V2 + + +class TextToSpeechStreamWithTimestampsResponse(UncheckedBaseModel): + audio_base_64: typing_extensions.Annotated[typing.Optional[str], FieldMetadata(alias="audio_base64")] = ( + pydantic.Field(default=None) + ) + """ + Base64 encoded audio chunk + """ + + alignment: typing.Optional[TextToSpeechStreamWithTimestampsResponseAlignment] = None + normalized_alignment: typing.Optional[TextToSpeechStreamWithTimestampsResponseNormalizedAlignment] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_alignment.py b/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_alignment.py new file mode 100644 index 00000000..f8230552 --- /dev/null +++ b/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_alignment.py @@ -0,0 +1,32 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ...core.unchecked_base_model import UncheckedBaseModel +import typing +import pydantic +from ...core.pydantic_utilities import IS_PYDANTIC_V2 + + +class TextToSpeechStreamWithTimestampsResponseAlignment(UncheckedBaseModel): + characters: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + Array of individual characters from the input text + """ + + character_start_times_seconds: typing.Optional[typing.List[float]] = pydantic.Field(default=None) + """ + Array of start times (in seconds) for each character + """ + + character_end_times_seconds: typing.Optional[typing.List[float]] = pydantic.Field(default=None) + """ + Array of end times (in seconds) for each character + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_normalized_alignment.py b/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_normalized_alignment.py new file mode 100644 index 00000000..2982e649 --- /dev/null +++ b/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_normalized_alignment.py @@ -0,0 +1,32 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ...core.unchecked_base_model import UncheckedBaseModel +import typing +import pydantic +from ...core.pydantic_utilities import IS_PYDANTIC_V2 + + +class TextToSpeechStreamWithTimestampsResponseNormalizedAlignment(UncheckedBaseModel): + characters: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + Array of individual characters from the normalized text + """ + + character_start_times_seconds: typing.Optional[typing.List[float]] = pydantic.Field(default=None) + """ + Array of start times (in seconds) for each normalized character + """ + + character_end_times_seconds: typing.Optional[typing.List[float]] = pydantic.Field(default=None) + """ + Array of end times (in seconds) for each normalized character + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow From 0a282eb5d90c02ba7165d39a1b35eb2088f40183 Mon Sep 17 00:00:00 2001 From: Louis Jordan Date: Thu, 12 Dec 2024 11:18:18 +0000 Subject: [PATCH 29/45] # This is a combination of 2 commits. 
# This is the 1st commit message: feat: add e2e tests # This is the commit message #2: feat: update tests --- tests/test_async_generation.py | 31 ------------- tests/test_client.py | 39 ---------------- tests/test_history.py | 32 +------------ tests/test_model.py | 10 ---- tests/test_models.py | 9 ++++ tests/test_tts.py | 83 ++++++++++++++++++++++++++++++++++ tests/test_voice.py | 78 -------------------------------- tests/test_voices.py | 24 ++++++++++ tests/utils.py | 10 ++-- 9 files changed, 122 insertions(+), 194 deletions(-) delete mode 100644 tests/test_async_generation.py delete mode 100644 tests/test_client.py delete mode 100644 tests/test_model.py create mode 100644 tests/test_models.py create mode 100644 tests/test_tts.py delete mode 100644 tests/test_voice.py create mode 100644 tests/test_voices.py diff --git a/tests/test_async_generation.py b/tests/test_async_generation.py deleted file mode 100644 index 1ed11ab6..00000000 --- a/tests/test_async_generation.py +++ /dev/null @@ -1,31 +0,0 @@ -import asyncio -import pytest - -from .utils import IN_GITHUB, async_client -from elevenlabs import play - -def test_async_generation(): - async def main(): - results = await async_client.generate( - voice='Rachel', - model='eleven_multilingual_v2', - text='This is an example sentence', - ) - out = b'' - async for value in results: - out += value - if not IN_GITHUB: - play(out) - - results = await async_client.generate( - voice='Rachel', - model='eleven_multilingual_v2', - text='This is an example sentence with streaming', - stream=True - ) - out = b'' - async for value in results: - out += value - if not IN_GITHUB: - play(out) - asyncio.run(main()) diff --git a/tests/test_client.py b/tests/test_client.py deleted file mode 100644 index 32be1f41..00000000 --- a/tests/test_client.py +++ /dev/null @@ -1,39 +0,0 @@ -from elevenlabs import play, \ - Voice, VoiceSettings, stream -from .utils import IN_GITHUB, client - - -def test_voices() -> None: - print("Voices are...", 
client.voices.get_all()) - - -def test_generate() -> None: - audio = client.generate( - text="Hello! My name is Bella.", - voice=Voice( - voice_id='EXAVITQu4vr4xnSDxMaL', - settings=VoiceSettings( - stability=0.71, - similarity_boost=0.5, - style=0.0, - use_speaker_boost=True - ) - )) - if not IN_GITHUB: - play(audio) # type: ignore - - -def test_generate_stream() -> None: - def text_stream(): - yield "Hi there, I'm Eleven " - yield "I'm a text to speech API " - - audio_stream = client.generate( - text=text_stream(), - voice="Nicole", - model="eleven_monolingual_v1", - stream=True - ) - - if not IN_GITHUB: - stream(audio_stream) # type: ignore diff --git a/tests/test_history.py b/tests/test_history.py index de1291c1..1dc83e45 100644 --- a/tests/test_history.py +++ b/tests/test_history.py @@ -1,36 +1,8 @@ -import time -from random import randint - -from elevenlabs import GetSpeechHistoryResponse, \ - play - -from .utils import IN_GITHUB, client +from elevenlabs import GetSpeechHistoryResponse, ElevenLabs def test_history(): + client = ElevenLabs() page_size = 5 history = client.history.get_all(page_size=page_size) assert isinstance(history, GetSpeechHistoryResponse) - - -def test_history_item_delete(): - text = f"Test {randint(0, 1000)}" - audio = client.generate(text=text) - if not IN_GITHUB: - play(audio) # type: ignore - - time.sleep(1) - - history = client.history.get_all().history - print(history) - history_item = history[0] - - assert history_item.text != None - - # Check that item matches - # assert history_item.text == text - # client.history.delete(history_item.history_item_id) - - # Test that the history item was deleted - # history = client.history.get_all(page_size=1).history - # assert len(history) == 0 or history[0].text != text diff --git a/tests/test_model.py b/tests/test_model.py deleted file mode 100644 index 9ea570ff..00000000 --- a/tests/test_model.py +++ /dev/null @@ -1,10 +0,0 @@ -from elevenlabs import Model -from .utils import client - - -def 
test_model(): - # Test that we can get all models - models = client.models.get_all() - print(models) - assert len(models) > 0 - assert isinstance(models[0], Model) \ No newline at end of file diff --git a/tests/test_models.py b/tests/test_models.py new file mode 100644 index 00000000..6bbb2dac --- /dev/null +++ b/tests/test_models.py @@ -0,0 +1,9 @@ +from elevenlabs import Model +from elevenlabs.client import ElevenLabs + + +def test_models(): + client = ElevenLabs() + models = client.models.get_all() + assert len(models) > 0 + assert isinstance(models[0], Model) diff --git a/tests/test_tts.py b/tests/test_tts.py new file mode 100644 index 00000000..df076cce --- /dev/null +++ b/tests/test_tts.py @@ -0,0 +1,83 @@ +import asyncio + +from elevenlabs import VoiceSettings, play +from elevenlabs.client import AsyncElevenLabs, ElevenLabs + +from .utils import IN_GITHUB, DEFAULT_TEXT, DEFAULT_VOICE, DEFAULT_MODEL +import base64 + + +def test_tts_convert() -> None: + """Test basic text-to-speech generation.""" + client = ElevenLabs() + audio_generator = client.text_to_speech.convert(text=DEFAULT_TEXT, voice_id=DEFAULT_VOICE, model_id=DEFAULT_MODEL) + audio = b"".join(audio_generator) + assert isinstance(audio, bytes), "TTS should return bytes" + if not IN_GITHUB: + play(audio) + + +def test_tts_convert_with_voice_settings() -> None: + """Test TTS with custom voice settings.""" + client = ElevenLabs() + audio_generator = client.text_to_speech.convert( + text=DEFAULT_TEXT, + voice_id=DEFAULT_VOICE, + model_id=DEFAULT_MODEL, + voice_settings=VoiceSettings(stability=0.71, similarity_boost=0.5, style=0.0, use_speaker_boost=True), + ) + audio = b"".join(audio_generator) + assert isinstance(audio, bytes), "TTS with voice settings should return bytes" + if not IN_GITHUB: + play(audio) + + +def test_tts_convert_as_stream(): + async def main(): + async_client = AsyncElevenLabs() + results = async_client.text_to_speech.convert_as_stream( + text=DEFAULT_TEXT, voice_id=DEFAULT_VOICE, 
model_id=DEFAULT_MODEL + ) + out = b"" + async for value in results: + assert isinstance(value, bytes), "Stream chunks should be bytes" + out += value + if not IN_GITHUB: + play(out) + + asyncio.run(main()) + + +def test_tts_convert_with_timestamps() -> None: + """Test TTS generation with timestamps.""" + client = ElevenLabs() + result = client.text_to_speech.convert_with_timestamps( + text=DEFAULT_TEXT, voice_id=DEFAULT_VOICE, model_id=DEFAULT_MODEL + ) + + assert "alignment" in result # type: ignore + assert "characters" in result["alignment"] # type: ignore + + if not IN_GITHUB: + audio_bytes = base64.b64decode(result["audio_base64"]) # type: ignore + play(audio_bytes) + + +def test_tts_stream_with_timestamps(): + async def main(): + async_client = AsyncElevenLabs() + audio_data = b"" + async_stream = async_client.text_to_speech.stream_with_timestamps( + voice_id=DEFAULT_VOICE, + text=DEFAULT_TEXT, + model_id=DEFAULT_MODEL, + ) + async for chunk in async_stream: + if hasattr(chunk, "audio_base_64"): + audio_bytes = base64.b64decode(chunk.audio_base_64) + audio_data += audio_bytes + + if not IN_GITHUB: + play(audio_data) + + asyncio.run(main()) diff --git a/tests/test_voice.py b/tests/test_voice.py deleted file mode 100644 index 2e9f6e1d..00000000 --- a/tests/test_voice.py +++ /dev/null @@ -1,78 +0,0 @@ -import pytest - -from elevenlabs import Voice, \ - VoiceSettings, play -from .utils import IN_GITHUB, as_local_files, client - - -def test_voice_from_id(): - - # Test that we can get a voice from id - voice_id = "21m00Tcm4TlvDq8ikWAM" - - voice = client.voices.get(voice_id) - assert isinstance(voice, Voice) - - assert voice.voice_id == voice_id - assert voice.name == "Rachel" - assert voice.category == "premade" - if voice.settings is not None: - assert isinstance(voice.settings, VoiceSettings) - -@pytest.mark.skip(reason="subscription limit reached") -def test_voice_clone(): - voice_file_urls = [ - 
"https://user-images.githubusercontent.com/12028621/235474694-584f7103-dab2-4c39-bb9a-8e5f00be85da.webm", - ] - - for file in as_local_files(voice_file_urls): - voice = client.clone( - name="Alex", - description=( - "An old American male voice with a slight hoarseness in his throat." - " Perfect for news" - ), - files=[file], - ) - - assert isinstance(voice, Voice) # type: ignore - assert voice.voice_id is not None - assert voice.name == "Alex" - assert voice.category == "cloned" - assert len(voice.samples or []) == len(voice_file_urls) - - audio = client.generate( - text="Voice clone test successful.", - voice=voice, - ) - - if not IN_GITHUB: - play(audio) - - client.voices.delete(voice.voice_id) - - -def test_voice_design(): - audio = client.voice_generation.generate( - text=( - "Hi! My name is Lexa, I'm a voice design test. I should have a middle aged" - " female voice with a british accent. " - ), - gender="female", - age="middle_aged", - accent="british", - accent_strength=1.5, - ) - - if not IN_GITHUB: - play(audio) - - -def test_voices(): - # Test that we can get voices from api - response = client.voices.get_all() - - assert len(response.voices) > 0 - - for voice in response.voices: - assert isinstance(voice, Voice) diff --git a/tests/test_voices.py b/tests/test_voices.py new file mode 100644 index 00000000..6690792e --- /dev/null +++ b/tests/test_voices.py @@ -0,0 +1,24 @@ +from elevenlabs import Voice, VoiceSettings, ElevenLabs +from .utils import DEFAULT_VOICE + + +def test_get_voice(): + client = ElevenLabs() + voice_id = DEFAULT_VOICE + + voice = client.voices.get(voice_id) + assert isinstance(voice, Voice) + + assert voice.voice_id == voice_id + if voice.settings is not None: + assert isinstance(voice.settings, VoiceSettings) + + +def test_get_voices(): + client = ElevenLabs() + response = client.voices.get_all() + + assert len(response.voices) > 0 + + for voice in response.voices: + assert isinstance(voice, Voice) diff --git a/tests/utils.py 
b/tests/utils.py index b51b44b6..9467e440 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -3,14 +3,12 @@ import httpx from typing import Sequence, Generator -from elevenlabs.client import ElevenLabs, \ - AsyncElevenLabs IN_GITHUB = "GITHUB_ACTIONS" in os.environ -client = ElevenLabs() - -async_client = AsyncElevenLabs() +DEFAULT_VOICE = "21m00Tcm4TlvDq8ikWAM" +DEFAULT_TEXT = "Hello" +DEFAULT_MODEL = "eleven_multilingual_v2" def as_local_files(urls: Sequence[str]) -> Generator[str, None, None]: @@ -25,4 +23,4 @@ def as_local_files(urls: Sequence[str]) -> Generator[str, None, None]: yield temp_file.name # Remove the files for temp_file in temp_files: - temp_file.close() \ No newline at end of file + temp_file.close() From 418c3ccd7e07987f19c3c02dd4c41428dab19795 Mon Sep 17 00:00:00 2001 From: Louis Jordan Date: Thu, 12 Dec 2024 17:18:15 +0000 Subject: [PATCH 30/45] feat: add e2e tests --- .gitignore | 1 + tests/fixtures/voice_sample.mp3 | Bin 0 -> 25040 bytes tests/test_audio_isolation.py | 32 ++++++++++++++++++++++++++++++++ tests/test_models.py | 2 +- tests/test_sts.py | 31 +++++++++++++++++++++++++++++++ tests/test_ttsfx.py | 17 +++++++++++++++++ tests/test_ttv.py | 17 +++++++++++++++++ tests/utils.py | 1 + 8 files changed, 100 insertions(+), 1 deletion(-) create mode 100644 tests/fixtures/voice_sample.mp3 create mode 100644 tests/test_audio_isolation.py create mode 100644 tests/test_sts.py create mode 100644 tests/test_ttsfx.py create mode 100644 tests/test_ttv.py diff --git a/.gitignore b/.gitignore index 0da665fe..83bacf16 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ dist/ __pycache__/ poetry.toml .ruff_cache/ +.DS_Store diff --git a/tests/fixtures/voice_sample.mp3 b/tests/fixtures/voice_sample.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..cbecc9ac9fb13540780e09d083159e4404230838 GIT binary patch literal 25040 zcmW(+RahL$65fS{;0}uhC%C)2LvVN3K!W???(XjH65O5O1ot38gX`tod6{qKsiu4C 
zm#S(206=Kr?BS@uEHWJOb^_jGg~`4*=-t0Db{J0m;C%Sgh0n zPp|9J3GzftF2hVh;_95*IIjSSBSN9Ct{VZfhU1$JVfY&$`sa9j`oQWn+QdDH(h|9+ zpAZ2Z16`vtE0LsO4ZA?4g~5ei?qsoL&Mu8+h*)I{}sQ*+lyQWb!}{C z-;-4|FaE_a*1Faqwnz5$u^yp9rkHjfGQ?@WO6^!)#xtzElf`bSxNx!!+t{&YqQFh} z%fIpvTg-}pE5hYD%b?hyx|4W?(}%sPra%$LT4^~ZahR`Iv8@u zXNWyMs(o;hbe!~h7n@JNrf~_Gf#Cx94jY2ec5V(A$5aSiS0~bnRCwxGH-fpO$t<>X zyGg+mrj%DDS9cyKd&>kOBljrVvEMz&bFP+d9^9O0`8Z#F8KQ6APv88S|NLaajB2q& zw_e9tL>xM}U{=3f@tJU%S*|{?zu|18HCZZTAleJ}&3)kQk=t9+S>mi&Mm}Bw`|N0JS|bVWYL5BE#pJo{ITXJmHoIBf zgN2n-_j9u0#HLi`%e%n%?6o?kp6{Z*f{Q+ZLBm<%+!vXf4$g8^kCxndSIkjDk9%L` z@I8a9#brjGp;DHpTcG#x@20;{Fx7|Xf_PtFym-fZ7~fURnP~Ch%c}LuA|(qIfwbWC zvIeDaC^F%2VMPEYT0hb>v~osgYHA$l(&4lwGoGajE9e41=E0NwZQn*jG3G2Ybl~FY zI)@faz1f&HjXXYNrRU*eI3ltZI*Dna^BkEUXsI=fL6WzrxR|w=Z?R1{R0DVQy1959ngBY$g^ZHvsM;hD zkJ4yyq+b~~B_t_iu+fT{v5-0({4JG;3~r0X#~d|Kyi~d=!+$;HNdNJd zXh>;tH9D_sz04&)1VJ;a#@z48)VLHi!GF(wQ%W@}rvMWWcfu}4D~o8;hOE`GSw~{) z;#4-J>IWZ*h2u&#eepM*5Eb`5T{>s!WXhBgS5Z{Q_&`rya7~jqCPL5vq%Mq%f+Pr( z2(xb0Ud^%PH|eV{TFy%CZy#C2Q_zqlx=6n-hrV~?ameBS2kLv>;IVl9&`X*Ik%Hn` z2W4D@593?3i3*1#6hOiVqny6raGXaOC%o#115(S|Q_y*Va+2sUa|jcXq@&D2yQ(C~ z^4@%6E?lO0Vn#WOrzt}p%`QfcKiRC%1>Te;WIKJG`^MgZC3mH5l0G_M2JZw!&_6jN zx8Wm_WIm^vv+nN|!&;*i9)9sNBG?0=i9O!tm=7!QUGjJJ?iFR5%oUhQ8p)qZoKZr5@=$bx zqt)`Q@5UPiYy$-~tK)PS5@1XUw2UOt;DQ51{HcZI4@R?fdpOOKo;Q8mkWWsl_7~9z zrN5Je!<3r)r;$O2V|?W@%0F*9nsDg-b8R65fsVJVTo+eD3eiNQ7sIeKbL=@=zoimqh36|Lb! 
z?Q2w5_Itr;ccG_ylgQCn$Oum!oO$>>^v)cc<0oxP1Wq1QWa%d=-L zeO`K!6+)nF?kXm`S#&)*f}P?m^7X$M2^x<0cyR@4p3SM_T| zPI154v>doZp7#Ah5hhD*Jijf>=rZ7pSMP0H3+3Z8-w@HMv+BuQ!%|+JrPz1h=&p`P zbpw{YIFaExpbxbVU3PJmc%l1P?6l#Mafe3QDQY37&33FT|e>u!G^v1MLseAbEpNVcQagj+dsIS7iknp$5xJ*D#_)VrP@` zv(Tpu%rgWNn0i~bq`SAR!__lu#iswg5DV^(kpRk(W@7yYc8t8d37Q1;XQV+T@J;dI zXokbS@s*Zy({E@bwV^X(d$zFqCFv&ZMjrG z!=^rYt5riOv7@Q>vn0Y2{0_GN?Y$Pe13f^GI3N8j5*5e~4pVcZM~{;gf$vq^n7eIG zLh8agK-Q{s1|kH1FTLtmZz}*n?c-AilkPq7_I-+dJb+9HXLX-%Kna8Iz@bHP6~znV zh!!r7o>8#5L~n)+giHEcC;AR4tb#vWzI!H{=w;h-{z3ngJ9SDR3bScSY_F7sU3~gii{s)Y*@( zH~*gRlIDxg@pU-E%Ecic{4EGBMIvJ^h9w1l?hU2505r7++EjD326$D}g_9 zLfC)NJTvCH5VD=*x;Ny?A=h<~t4$lfbC^{BJHv-Lltl&(Wz9y*$-oLrcmVCIyLA#tn7jS+(1!q zQB)xtnq(j={HQ1i`lj3L#`iFKaqC|mB;9NN7dsX%o3N+hm|vmZBL< zzB^g;?Qd0XtW6OqQrK`@nL{#eOR+ZzZ5Y!e9HC{LQxehNCs|qLYEqsDtSz)u=qy+uAfbm(jJ+2Ww|E9be)Ay|qSr`{Ys2hf=JDw=h`i|3Gyj#=hc6B{vsg&Vih%?G z&EMK!R=<^s*SgvI%dJ&3psDoIv(S<u=#Qc-w{inDJ%Xc^%xr zc;t-&Z3gPOOr70tIvT0XuGKVD>h{)jw$=?Lszo2h(Ii~AR{W+#lM18 zbGNRp^wR~A8YBiDGo>$uoDOksXC>asjMlS~xJMzHP4Ze-gXHYL+i(WOu)~^P*89Du ze%PZAPQc4Gq8}A;g!cB^q%?%KE)>CF`u9rh6Bm2r1=!NRJio6PUVUa5+m!i1-2~^B zKqBV<7?*LM24k4%1;aIn{nbv8K;6!*fK$eW!aZN!S*cmhL|vs001W}M=Ssp$76SSd z@sOa_RjFa&Wvc=8@<)5BbO&d9z89HjhE~JWv z`&_()gi9%BH)w!hPjwx4WqHG@eSt=$hR*}cYz7X(&YuwhOT(gEo#p>2hjCB?BH47# zrBSPw>PL#Rwo%QzIH-~`2K^s#?TMy+yfyzgMtUo4X77VUoY7FKP)jeU{g%*sf!xb+ zuK1$DvVG_PaT0VikT|rUS}onaa225|j46zXGb=ETLB0 zTpg=YFll!Er^{|8KTARx@!TG!WXEai945GhyZ$F#0Y;9MgX8*x&net_`xVaN=YoFL1IR6Q6_Pi z-cLbsJB#IxdDt9I!3BB3ZM-c9)O92%lJ3eQYJOp-WC z;n(lvG7cQ9kJK)VXYxS5= z!4u`y3*cEQ1&{#43t>P#$N&J`194KA3$Zt0QLBc$JnkCbb8`O>Nc&I`ZL301Q8PnP zYw;zKH~}J%e;3dgA`%{TccEeN8d`PV6!eKqxh=F5$LpxkTZ$%~t+xgg*91=_V;(I?JEe9k?feacD36mz{fQtk9`kRNQ9Xsc*p6Dm)iIHI2zWJ%N;1l;fNfYAf zZ@KN5;b3~xfBmHD@)|8$D~T2sEG`|MMMfH@41tAXFm`19TuTvJesxLrH}p?7Yv^JN zXB9dro4>kBj<96?!)wrS_h!R)@+0Ba^XA}IryuO7MMwxyRMl%#u2Ex&9Fc>!Yw7l% z*`p)8^<3vy<{o8GX1{ zQE-x@BrRG-5H@5cKAMqNyso!VhgZEHveEuFTLn+CWi<-5UuOR`^OP3LC6y0?4+Z>v 
ze&|J?+A`+lbEDPSx1J=_LBfuof;K%daHZ*3v6Qf;hN#I!kU&vk&Zh2>92+*^B6E>R z*z>u1p)vCD4eU2C}x$G04>7@mLSK91k9Rg8!{MiBl;# zYY5w?OpIIjh3*I|n!dQR;niB?HVO!iW?lW`IL$n^o5%6dTTk%p*TMq=eYJp{vWRNR zAn+Fcl6KC&w(xrOLnjM7n_K~4m2#apO80{>6wBMI?jMw~xaxczHu*`T{O&(M{^h)? zB&%#BO5kUr)jN(-Q7k4H)*q8CCH|5~q@3^fIox59yjbEuk+MM&x2ZG%hHsr=Et@0) z+!29isNYU)cLsHcu843;_Lvj+=*l!IhG-4FG`)Ket3skx_Ie1KQYpX1v$-&=S=fFd z{2ShoT0>%)L06-6yEF57#7*5Um$Ps)!Zu)aN+S|BKwIQ)c__`_IDDr+q^W*Fjg`{Q zS8?UXnwx1HYlyJ&;ZP(=t=i?vaM}mFh=Ze?wau~N`oVyx^I*~%1jUdEdG?{I6ciyC zCU|)(k^uPR&yGyQV(xu{d#_Vb;?|x*$1gnUKisYGTm)O20U%5EVOh`(!2DS*;8O<} z-F7c?U|U4a<`&n!M&|XFnnDaL+)Xk}vQ?&&{LJg>z9_Ih&^Rc)IoP1&*?6QgU@eqP zNcY!1B-2|^%layrTzB4t9Xj{=(s{pdY5uI!67fLeWM9Y5B&crdy5wuB-9~QL&H%p- z$)vNRGyrM%&ZVWtYZy-J<5PISVXokOyZ|Y5QV5iK0GD;=Wl*LXfbTM@^aLPB(xHoKvq&F-kdX;As8Z;~gXc#c!0 zwBwI;WHMO8%Q23-p;6YnhaXA8k)N2tyIowUuaO7%;;pxM8PI~;_^W!fo>a6>;A*8h(KVn za7qED({PihR4&g*KrPeJBY+O%<<=xiWJVkROYxbafj79=n@PD$n@DsV9j1z+MuOy$ zs2w+IG$rP(=HPliAv0^zEMbOD%t_vHA)PN5Of*{5<6b2R7p`(QK=~ zHCWWiOA*sYG8wOViwIaF$#itIX*kuUDCa-15)&MgRqo&+AGIdJTz@ZISPWN2UL9DB zD;N@7%H(J>ZKIa=&*wiUTC?{4byr=k)#MnS&g}T|*krqm+&IQ`IiZF+9i3h4##&z0 z7T@hVKasW`LkAH1JkI=ULDZE$5NSlHzO!UsfTv$T|LwnAE&0=-h(l$@kc1e>{c@Vp z{f1L53Qi|O+#|OT=ju~+o5@`Mc|jb*0SLnDs-_lJNe9U1LyH)u*XAqXBSK$*VZW_W zbjQZ6SU1;`EVNsKb>{iQtd)b^v@_E!?;QVPejaxa`G+g&r~M$4AT0-7X^Q@msj(oo zZ&@4CT{C)LgV?&B-C^|UwsoJ+*hrUIMbL(2>2`XWeD5BnD+>7YZt zZrCSSFTZ2oIPo{@IP>76K4$c^XfDoVkSjPptBLIPN>hGSeX#7t{z=ae!$(pCkm9tg zb+=6@j9MS*ZDk?K5VOnZ*~J&Kfj7Dp!-(iDlo6xrqY1o#yMl$f`Bh{}_{Brc=k}d} z$LY}|5(dHEHht{MdGre@aFn|gGg&bUOBjknGgDpgmed!34&aJ`V zqz=i4hUcM%4KLc(L8G}cS=U?YIrH^I>2UV@Ju+|kNin3t+C7I`#+PT5b7gisWgp|m zl-GH&WJPjPd0_5uEG_+M19(-FH!`wbabv(8+U6#MQ+dv8k{OtwH*ll%;Rdqix{kZjBV+Gd;nUzo zQ^8oJrajpB4L6>woF-1^%g-67pnvKf&PJ7zN|SM881OU``W**QXb9QXSyyvaaFR_!Vv=P$ zksBsJHzJf8kdk~HAYn==ppPwNkNoD9tx&F6j*i2riBo*VY2xTwjCj8n1Enj9)ZqZ` zkS(aYFb=t>4Jf~yLg5M*jX)2C9$Ost&*SIC0D}OQJnyw{m*m9!CXVgHfJrk`TsK!s zf!_ef5C{AFXE*9Kdg2_ndh*z*ob8)`c6Rd_ 
z%?e38v=2G@gjEvP>jxhLWt3^)0FLO}Kx%Nhgt8;tR|*;^e;lMBR!L#dGMO1|NP;ed z6&%)yO-}pW_*}h)kcOUpA2h(ZJVM#vi{?uJbp{65W&Yo@`j!q#Yz*Da|wC$u1|8THtu z^>yQaF!gl{T|V)5I3P2G+L2I)pv+CE$);mUK32DwGMPg!FDGcqp)lUv z)*dHTIJ}sPNjgZK;X0Z^UR_@bX7B{8RExeXCxj{CPM2MLiy8XdcVGU)hC=dGY%5lh zdQ>S|5uPF2HjH6VpLU)OStEGiX82w(NgXj?LrwK@sDO1un%oTpt0g)C_&|Fqa5|;3 zJ6yRHG7O}*xWA|$AiF`jNm_p+6`C3C*hpdt;s59J< zQlsnqHQN-+0w86#_&!1^mWw-ok zA)MyrTD#|%g*eVL5jfxC`Bi>7!yFr+|FV$vk}FNLH{hc9?Vv&Nmjv-kZ}IPXy0XoH zpBHj@idR`k)M)fsNgwM(4UQl3=Z2g3qyi$RFCu{o!+dQ_iIl)C^q{O#-OAyKfS=Al zwEL$UwAG)qYrm%mZU{#UTa0d3OT^Q+i^rz)v1Y?adQ-{#l}O+q9g6lfC~Qzr1q7*| z7acf_&P1D)Hm1q(Z!$sBqpVA=W0+AO8=$kjz?VZ_Zs%5L1oo5tV(L;)l`(&tXqKQw z54K>uNdy3U|BnG8jz~k-_Q<&nN=`KMcKUQdTg-U5gFmS#%<@?i-$1~-u#Tl#2~cUC0`vD6$MD9yq<4XO1T2&&EK3htCbL6zQVbBR-?Lii#G z>j_q~2qaFP=pD*ub>SVSi;wH8145mtSyCvYQqy!i|K!J{&CQwzIVu_d3#ORG3a%cR zo)m!8l@9<|TPo~+@+kTdH>r3n9r(jK;}wpRO&t}}lL+IWkL#{|cK5`4*YtQ}(P8u6 z=Vgv`&+uAiTPMXt2_wA2(D2Hbk1Ecw;~6j6Uv|SqBTe)rk&~*xd6uz~M?7Lw-&O~w zDluDf&zQPWx1`f7L&OE&GOJ)VECce9^HkvE(E_b9P<=}@-drzpRNTa*=1{&o#O+Hqn201$88t4U<3B-P8Mj(2$SHVXdJVHG;P2zCC5 z@!hV}^4=BkOTQq2->|rl%kp%|T(?3QClf#k&gP`;xad>Pb~YkB`R!%0XR{kv?fq@F zrUt%`+45026EgFUOl66?kR}H`DF5%e` z2R#Og_~Dh01%=8{t}F458J`8nm|8;}Wjosy-0iz$XIVGV(qD5Qa-1d5&Xyx5HB2sQ z@UUStG>Fo&rA_pFbGI_+OkyJC*!jeOUacOF4m#6Bmczqdu8Ca@+Z~Q&7EjOz%?Z7b zWG$0OQhZoqdT{*5rol^w!l8h00klAvzAPBtz_ zj~(?r_X`Hx5zn{X(1D^-ZGh%h+YPrR# z8|E4k=Jfg)nL626Ekj)Ba>DZ;kcvRuRsjnl(uSuDb+ehJT8#+_Mk{6Utkp&sB0Sg? zDHU-LqN7I0JFa{#x+w(6s+ktNqXqX#0VNHpFW{2C5@veqb)nYfT?l0c)JJuLq`5Yq z5dUvWf5jdy(c@MR6YN#wAdnsGN95yC8-v*|at+kXl*^#PF?Sm&U za_Tco@~kRkxskBLVf`!8%&3MS4Pt8pklV)-BG>Npkr(qV?V7U%!>3E#ZDJk2TW9_P z!}aLwvF+0hGo|DeW|M`N2C3azgdHXcmq8+|Fa*T&lP;(2I}@eJ&ic)UDdu;fA{X4O z+E?dpbq%<{mQ+czre>|zoMZlf`1A=O87D@KE>C58~h%>f|LUudg8r257`MZiG9V+Cocr+B^NjrB1DJ&$4 zApKq|5TN2SM-V4{pqXE*ml>2FjECV<6HWgcD=&(^-(2x$!%z*Hd9LgjD1G{yR%l$* z>ScO#aXzU%GP+QjJ@()7^u?MP~=SN{X*3Ma)ntRHd0VrjRe$!ZS@n>YWKZj4vVA3E1T z#{<7X!pGPXNx^pHc;?UvmrM8rdW;&B{OFPnO8|<+SXGw{a95c3?Mj4S&RhD9tUew? 
zFS_xV!Ak{zOGb2mvN6XNx@Q-tiTPGM4xoSh9iuPqhu7doCRg5nw7uAi&Mm=oopGNe zptqMfVZHQd98EbpJcNd3Oh7wJjV)+l*hbeDeSn`|i4#@wH-xXOPXD9+o!dPz)ZhDC zMFtZ)ptsvIW%E?G$Mos@nU|&SbV(qO!kBGp?1+BU-GzlELxGJ<^p)K(`AlWaTPiw> zD!K!`9UT1mm^S|O=J^IV2vOit96gI1QRg2hS-#0%uMgp1N3ZRRI+!}KZ~L+q;j$3F zF^t5YO}Pb?hAUY{s1;_Fy{k{hpp~a>y>LoexBG+a$xS0NbfH;f5-XfI2c7KK*Tpbo zq0~M-l_>$}NgAyHb=yGB+??@*qZI<{Z2}tEd@PvPx1&bA6YIUN0-;}!UpO}Z@a;og zE-_K4Bw}xGhq`{@6;*8AD3J|8U7v#|oBe^)>@l`R=OcJDg@X1puRzYr& z9BLohYH^o0UY&a+N=8&;xOoOK+Ulnv3rJMtPik>B$?xs9hOML|yBPvE^xC1T+-hTn zsbbM$OwIpP5VDiXXr+PbRF|XjA%m`|Z%hR1U&9v?&mV1qU*R0lzO->u`^U`%0l>uN zy+D=axH@*}xG)>haA8rRYz1yk$(U8nV1mIWhWaziHzE>#q=V6Qd1FEZec?kADH(!y zl9)I@f4v5g&#V6Xk7)uz=g-InxutErvu3}v*eMrRH_SNMX8lVUxD73ElTF}wIOLnS z=mt>hHPXV~Cs7TD|F-y!+s-qREoR}j;J~iZFBXY7HlS&`JJv9vlsAZ{H;xenGnZpC z2L(q)*5h=JPgh0&pdmnca9Er$5u5}NT^PcQDGM460Kx-|9xq>&-FNsL6MwMSdMp7P zBzEMjNB?T;2?KIw21ZhwNR{I%Lgt!&w|rQSgKxA{&H&G5XrCI4YeHb#Hd5S32m19? zK@i37ca~AT?f2g20O-;^Soc6eSfaL{)=ErKH-e;P$mW@44*(^@0(^^~xOFzKCMW>T z$;g)v3kxn84kAqgQ2_D&ZB2iw1Fz1|Hr#Pb4LC|1D34`im=de30;>Jg4vdX(-F%{Y zxpM;_MlcC`yA-zq8vz!B;1^sj7ewa`2$AH24;8~9`ABG|r{$@v8=&i%ai3o>p;;5Q zS0}G2-ufrgySiSh3*qTXFHmc!{r9jXa$9oqX@d|>49`oN=p---{2>m-!P#->SRe@~ zj=WxRQaCtCXuO=WSPQ0&`7WuH2=mU$H4?@`J{q0(`C$kwYnwrpUGLGt?z_>Bm}=ts zzU?JSk2qo=5;2|<* zu10I=T%s4@hgX0rokb08Im!KCGQcURd9cMtnL;zHj)eeT&6*4WU@`-|+T=Acl{u?k zqs#)Dx5!9?(wfW=R~Giq4f>%u8J2f8R-rL!D}Vos!%khizE}r<*TLNzx=(YW4GK`m4Op8_7{0sY}pEvvAb`ABP!=e&ZhvX2c-MU z#n+{=PRh3xgjWJ7RMWLU{poM1t$(JK}s)qW$@k}tYD67yt6a_9Ux8ka9G@N0R# zTOo~ICQCleB}g2oQz>pNi@Bu=*g1{jVz#}X5H@Gs#U}{W%ATpO-+{mI>Y!Fhu$YQI zQP(G(SnY4A;2OxZDAM-V8op1|@E$JK{vVHC&a7!>r;4zIRV>&YxaZK~g+EY(LxzyB57pur@OkFZK~>DvS+}f;6e(Fx z1kjMlC8j>}dE1#Ih1frb>-W6cGNtVN`2#b3jC7)lz4vMZL2}};1boxp_t?f`37fH*2!3Yv~I2CB9hrc#=V}P70b#%&=rsyw__ncV4loLwa@iD3r zp9l_(&Cw1KNlU1)fWIR;@Vh^YElp9*F6kwCQwotKOZkUY99M?l*IJdcjKhG8Ak%e6 zG5~}8J6h@#V6}Al#@Vt|T(M%&)630#e45d9V+zU``z>|!fwh|7g?T_dcK?b=dH__< zy^&ZR4!8 z1Ra27`m!CKbB}qnV7=M}2@?C_07q?ep!5@~gAJQrN#}>L!$8=txA{3fpDJ{oqrXMJ 
zMe3AQFT75neXW4KcoEv@=!7l>RIz~WJIF1CWH)w@oDU)0Sau4+)(8|;a{6+mDP&Q7 z5W<50Fh>-e3&lYKBWe!B4HHE}|LK1J*S97%LU&#)W`B=Ys^u@@Kyvcjwt$Ze0Ezbj zKzemk4~VKI#(aI;!|JfO;SCYsTHMQ^JG+{qdDdl;D#<_7{5z{H+gsj&!3qGNri~y@GH2+W=tfP8B<|Fbc8J zr+8dnCa`Jj^>j#ODRo#_n6PzAGA6Du1R7X7|K$F)><8{h&fV>&Q%`R<|RU( zZ7j@bIaf27dgl83Q@(YWrGrbtuEq>r+P?{c1*OtN)7%l3z}*j8lkNtm%6%(7MQPly$G$J!*)T@WegCT`sNvWPJmj^0A0Q3O0?T_|)I2I`p z91Ijoop=x@2An7m9U=~4L)r@cf||?iLB#9EhJUnp;6HES=ey;7BiP@$^K}=!oKda) z@+gR^B4TAh?rQLfF`3_~!x`Fe8gO_VQ$n#$6lvZIfM&zoahX2mdkEBf?(~X(+kxiJ) zcZO|Ht+Wq!muE$K5Wy|frqpw|w5)xo#PX5FgZJ#lgp&#mRiBOLaYqbVxK1dW`mYG{ z`?i{HFLB`RTidmkB^!S_Yd?pX&olfNHGZKx_MUPuqE-(wVa=)kIeT_+T#GOkGS(Ln zKoA8BI!F?V;KuW!G!U{ScIjMxSp|oexx0A5rgMxsXN~Y`cAA8~?&Ft1$5zmhOJ3;?aF7Oi21)NF~JufA|d1L86Mkx4^j zM0N4HeHEn7@4r9D3bvctkbieOOseaG0aBQ91}n3J5YIrH0>h zKR?T4D^4N$EI&VHCyilT?uFFi^T}P<5`kvu zEg{%XcnE*4#dF+U`2_NVAjCm_+dod9=tipKa&&x|c2^isq+km=M&nsX3gJw8LoEz( z&|GzD$7HY-{g+N}A`Ae?B*XB)(|)9*G&p%CPbqn zIy)f)l`A=h?-HSd*Frr4xHRwpaOv<|-;{m3Ek_MgL*{!*7%(hIIwdThvwj0JXPAfXU*5Wx z`|oq$r0EMWI^oPRdn4M-I#I=ylCp!lh}v<2B$ew7#m4LagI9UTZd)CnE>EGl?gV)E^UynhgAu3546)7 z1_!$|-npCYUy!$G)@)-Kf3at+G*o%ZUp8Y>U`ACPnaJK-q7~x%NeP?MRhg_Eb&ASR z2CDpWf0Y5=K!?Ni>JVXPC3*~o#~bTU&+F~!!+LCo!i!R=6kFTP53T3I*ojfIh-b|K zq|=gNR08NDhA@Sd0f6oQf(sWF&I+Iea@|1v4dMlu(qnQ!$c$rd&e@lEW^;%=X(8Rv z$^9wVo8SyNyr&5P$aNkhOd{gwivV=KBUxA|lSTG3b#z0ASNdX~yeS^;p#;C^o#~#M zHP2b`6EjPG!cEiTl#BsBL&=im*0drN>sF71Gs73`YZS2_@U)IZDw<-#`|PC`=RwJ< z#c;cHQqQrVLUd|_z+?WBeT81xM=6C7952zw4Zj)L98C%>tTF^w1}kX`JVp;9214** zy`NCZCT<(D6QIIwzd*;PpKr}Y^OZKVP5jn_YArc!I(xl?Lg-{pX_|El-4$taM^w;x zGghp74=ajJ^!(aqB?@Pt0px*p2CY)Ck*o1dug+=tSDC zIOk2CvS*6xIwaQ}C#1>RT$>0@_OVn9$JB{-c+_?Vo-oT0K(mX7-t zLI(=SVUdRtIo(e@W!;5;Ll<8G`aHVAbYsLSZ_0*nkIx$}IxfE!708;@NRDF+_}Ny` zqpA^xF~2Z>t7TZ0To&b$ysBt!U=MIkzaw{77oq&M>({yiA3_4srd`5vwy-M*)S)() zUz-bFQn0Ek%TTFUQXFeg5V02*{@7mXkG6$5I*}+XkXbz?#bm-rxK0Gfd%b7qdQv%Z zOYB4b{M?}-Kiya!p-(on_29bUkCZqec&V1jM|85siYk1tKhEy>I3TwIrHN3Tqp{+b 
z?yS(N#FEq}Dl2R0&)vkV$eMF((D2v)^Nr}=(fgIb81Wm``GE7TBj1xSZEl^`AZ2?u zdvtnT1sYx>x43m#+YMQgze@kF(I=Ya3&RI0AtS;V2a) zF&v=_g|S>xk+Q&PCErWti1V!fN-FOpr#VHmB7aD?IO@x6RmsQDh#A)AxNWagEE2Q| zbBT}nl&?dqHAY9=q)s|aTdx8;v^v4CG+km|KAWR}6>uqjV9J3-(iQe&`?TST4quYx ztCBxE*5A~)dn5F0#NS#8AN??Ta019#1WgSJiIM&n4ldNFIM9#qRp6xF!)uq$_*;lf zrM9U)N+t0e3$Jy;@$>*nzRD+_BsM(^78s#H*a|tU!^&}&ns20uh*JJda8t0~G=pjU zD5PA1kbL9xm-46O+yCwv^DXT^z(R|E%u&eOKlOEsBD*cUT& zi?5jQO8;j_^*8RQU-~mQOC&1Yi7t&d%=W$LK&XzAL#NLcwVE(#u?N~Yz2!hv+9ZLi zS9}CJN!N`phS(56ogqAwO0r59W-R~w)IoW`b$>KVe7u>J3O^j}7*BjqO3Mj(!*{Hk z1igi9;Z4b--*xg~z+ND;W;xAwE4U7MvK_|WsT6z>Cp5ILGm5Jfc7Og{`54aLb?V3X zwD-j3mfaIeD}O7-`R++0)eb;JQj*MK3BY(GeDJ9ct?6*uhGNw})$@2tIep!HjC>N( z3}Ge)B8qJ;{Sp=WMf5~^J^#jz{r*WVj8Y#C3th2i-kF;*C_#-wfQxoBQmmml*7w)^ z!?B|1-M2q#ej^V;>iZy&?L05MW~bNxjX!5j_moDUeY2z;a+-8nAq7bGqu*`AuJxAv zY_wp#_+Ilzs@f1W2AZit1JpA#Z$gYTQZ~MM)g|!^h*4Rq4S@6QX(-k|%M*buA(No$`v!o4$ zrW?S)vl`UpPpxzgU?SVhOj^<7Jc?eP|yiK7=Np5wY^ zmS@$X#TrS`FC7UjpUEzUSA)R*oV)R-Ha0!d8~h$C`~{Ce-WmNt1nb zQ+Yq?*|-pSbPVx%%zN`agNT24gPb>vACqcWvz^^SG`e>%B|Oyg zxxO{B8k|y!!t3&uD!w$xFR5d5#R%yOgMuZnJ2_>Se$KfSG}^0iY-oTy6W6%u#TpXb z?p&sNkYZcNepI&-@O9taCr-S5)}y111wUsxtcfsaOX(!T68m)dcz5_-DBS6`s^-e( zzvrb|{vPmuu+t(Q?B*Qzrpd<{qf=mGvy znZmhm;{{O?=r}Ns@0?DZcb|oog)QHbX~QZw(=mNXzOJ!VOBX94`BO`CwzpqLE1l>6}m=JOk9St7=AJ+UjR({y945<0M@ejk7_A$7bsMzR$QBoIT5*R`1BF26n7OK z36kDZAcOi6`i}1w-0b37-AH9;jWgdn=GD__=wJ}>tVa=J61gCdjH5R-UG=Tkf z>}bm|Zp;6UC7q9{_bY`rq(_L*3pbulIt*Mi?$uswJq~}dzxOKxvjiXE9a-m4_7xEq zlVE~sJe|Z6sx2}HfozuPsLfs`XmETtZqsO;=5f*xh7-v)vDBuY^Kr3J6;7b!h zVjI^T7EFr!f+VOu_fGQDAPSyFpiw}|NOJv}2UJugR)G-$%i9Igy@B1u2Xa$Tzj`Gmr;ZOE$7#h8UiW1q1|?frINb zv@j_2X_h5S>2ar4P~RrJRa{ENkj-~uykQ-&2%{DziG{qG$a&4mz&v>fq# zX<2Z92StKVDI8Vnqq~~yl!yu~ZtN$CVb(Gd&a`HpY-%oX;xD7SL`79#SB@lnIIhuR zv`IX#W~&GH+~+1-@0DadPa&jajQkD9-SrDwwI#h_))JO7vuP?WE&HSsVgI;-5B~fZ z+BjxqP6kP!L6S^`(e|}FnOW=AVUW=zdPe(5DPiIgPT}UCMEEa1P1ZUJ@dAQUp+j?H z;zI#S0K9uIj~*e^>DHbkkD-$dHwXD`#ej&@XSlFh 
zReHc=En-P*8@ypYLT;+GFc(Ry`Usj6T#H6V2oEQY2}q>$-J^PctcLsIy4_GAv&|Ot??F^HI+ns+a58fT zWoe$c5-pfMsEy0GVk~-e>X4+AT)L=N)KXFiRl6N6n%&gO?cvi8edE-=YjAWbNU!&A z2`kf|w@rDoKY|MZJ<~`b*E~YgwV@6osr|c_)3>17vldq6XvNykuW-@X1Frg)BntYS z!N22PiDiQKlmYnH9-oIe|AJAO1(v^uX(NLn_<%HP60j?%-0#%t>oc!Mnc)8O{=}z5 zxEj)!3Q2^0rr>?KAHGydm;nA@=7)&Rcm&9YGUNa7!XMi--lwc2KUe}64k8CK(Ji^6 z4TulO;I`9CsXJnqDt02hZi?t*$Uo{2)#^f>#`_u17XmcA{*I7(D(_V__H1ap&pdrQ zjY07<1qQa%VVv82-D$n;AoSw()e#g#Ziya}J&CXoF-ZM5p~N5c#Am7Uze>&ms*3Mh z{Btjt?hud?0qK?yq(PAGZlptyln^f60)ljRcOw!K(h`CQNOws|3U998@Av7=Yfs zM}Xwv^5ey+DT6f(D=A;={ICFR$E7->AmnMj)H|+nTKVr;Lh-8`Ro?v#Vt=m9W9vzB zm|!&GctO;(O@1?tWXw-uo%)awqDYi@d3; z5m3EYvTTDSu;%82)uoquzmv0zdYirE6K+hJgW_@>$49b!&mmSdqc^RJL%*}0#i)*E z$1)idsYNw#db5)#;I)-8_oAhuO|}*tc$+F$p;R;QbOJ=Az_(}|<2K|Oufw(Wq@8Ma z?>r*`Vq6${rxyE06UtdlBbr2Ew8F1_Z`D%CBwiLmA8EwXT5v6EA~hviuQ*S&O1A_x zEQA)qcj()kONnKU=2~l|6H65Ui~htKCuTajE5gK#<$AnFKf@1Sbo9b!JG^C&B75Tb zR5PW|$9}TE$Cub*yotAwD_FH2hU6e)d}G(WcGK}58=4zmL+u6?-2iwJBJ|-Ig=@?uHgNGzD$eao9c>n>RddaRR6I_Th=qLv@=II&cR-m+T-kk`Dd<$x}a2AAa%a?!$dp4*3xpnodir{j2X$gS_C6|PmTXnh&QaDJ?!q<|V6ro0 zBACK*4(5VG+Cx+JJipyW8~iDc@2;u4GG&)^ zieOQsU+$=`ZyYo&avR?4>hv|+aB$1_`si`?sVK}yXW=XwmN2L8BuC|J#Q0*hx~>e} z&7RT6A>n8yCFIwg>o=2J41A)R8uo0PnKtf9f!Z;V||+HPOt= z{xSmd@~yy=|1oWN?&e#BysQ@ap=9;zciSp#@GYz?-+>}0OnEyImx*SnhsEn}^myHd z(fF!DbHj&}zu)CX(p|LU{_JRw0(BXgE{nGaH+AV^ zlplo1`%5}oQfF7P{HluHvK{@^GDkMZTERm!Ag)m%*1G_4YW3O@RrY=v6ngIM-^Zu>`SirQ2Z4d6D?yxPd#_MV9i#ABd;?e5G{*BvFz_&E)5h(yKI8|tPCCEd7 zk}JNzuPg(3wXO2=%cy4D5$71puub)vtZ{zWU7JdeJjHz#w+7|W5+*dMw9Z-;Qe=!4A*-vV6jjlJh7K2o9F{) z254!VZEp!z^1sSq=+*fgYGJNip#fBFlrNFr7#RX1Tlf0{MR9K7<*O=>e;zK^80|N% z3lSJyMlJk+EW8RSzU(Ys`*GiR76KQ-IX~=TP{VE9ZB~09$$MAm*lr`t=mb;)=w&(Q zUkw3y0jX})q(RxOC^~e^NAe8hWhmFD57SkR!Iw9)uxQ1~VSQe1Z(4b_%bnpLrr$Xk zHL2*on0REOtr~iWX0_scK~tr?_WLwMNh8?cdHUn23HlT_PUKDoDy1-Zk!wiERugMy z77ighVaM%m2IegbIqQ;TB+d@~OY1?W-}}+e&6wG^)#aIkeQJ`7H|#FRHl}Bspgc!& z0mtLPidxvt2i3i5O>#K3xX)!_akYs!&U+bR1i~+C)#1IbmCkXG!N09o0*XF 
z<>Iezsu1$CW9zd|sW1#L+#6sCYLKO}?J0047Gn?yPX08~!cOe7y)NR|3%N5KdsYzx zTXQ4)s5I)Jmo3#oNvDB%*|(GkJkiiGFx(upGD{<}MU%9IC$4SCmy1?y64Pa%U@Qv; zOT8Ki#isSKm(4Aj9bLft+IE`BI7bfUhR!b{Qw6CuO;8ZUM%{PAfly}&uy9*;Hp(er zCPb-Gnq^RW%0T32&e({Tly9@%lCsMR?%owkm~w|}aaSudbCMm8S*v@O}4tGSl@{E7XUu{4tX zbz0I8G~8X7D2YA%ZEj?Lp3->G90l>WuY7w%fEl`%w$?7gJ<(4o`nxDYC(#eu=(~{p zJ0S{Lzc8G5!boL7HD|Fk!i2)$+hNQc5NCOFb#q9|#&aA#KQ2dnD?^ztdqZ=E zzW<`SxXNws9fMxvY8~p&i5hyqaED* z87NjCdcK~#-W*jw?d!vI!*2@eb_|L>-PUSuH!K9L{F>tpuhzVO=D0C2f8osXH>y`; zC}p+~dOp45uqlFdU|qzvA_^VZui1#%>3`cLSIA`^n|s85F^zw()M9 z!ai#eF=iEo?mt*d?k=|Yh%UCDJD`9#%T33rznA%$G zRlvtX*8pluQd@;3g4-z1*jJtT)UQeUTs&riF)LS8B^^cje2)V9F4%nSHwh>!ho>vNI~|AvzqecQj)@A?)iaxbfz&mBt@U+CRG5Z0aH6E>!t|mC5YHeSYil1 z7tSMafi;}A=3rbYUJ=qBn7g($1<8Dnc(X;daiJ|ct|(5bPC82#Z5_!5u-1%zec^5_ zBQ%URW??Nl%bUMo8O+Eb{-wCrBWG?iO?V~A$hsoOY_KKEkCJr!;A9X7s|)`vKen7B z<}(KjrKwL(hziUqU=7ljo5z>FuRS(?{?NiXNI6P3@nfwqqmBrQxEQGPmq(d+s?RS+N+^; za~|Jl0OlxAc9yB(AdfrT!fX~!6fXFK`$4!7mhS1_&bQ$Fx@9Ol0=pe=Nd=w`D2yIF zVnrrn!g#x~YSx?Dr7fVkKRxHh6dj6u-&8>On|U`g7K;xj6}=9pYcLUx7%CDLw$rUo z&B1;ml_YBwg=;ZNfH+jBbF$1tGskfXe1?@pjL*mLnN;d~0utptq~~cwFT^J3d+&eY z_<7^*F&)|QTl9VFd9={b-S*Ul;`HIz`@5}^O_YYp`**Tuj~;Km1%&1)$@|eDihD9| zYmJ_W94VHazGRY2dDu}2iIx%J-#I!-?3b~MU@uA&^Byd}Vt!ZpgAuP&itrIX{i_j4$tfAfYLzPS|Z_9C^(#==eQh z*BEa@_PoH}#O#P(dgg<0T!Vhu1B*ulr}TT{R>PxMyHCEM5FglUe^u`KVP`zf+D4z` zh?faL@ui+x^wSUP2Cu1Rf>gL8@S-3OF~rQ8J_^=&ag|X-l8u+@Wm88omzLVCt}ZWn zvyL(2n`z$Fh>Fs>v)-9=@wuZWHBEWjHAH8Cr5oqjIEPbDzcQI4Tb-g2Lt2^B=-tqMwYR{ZtW;1>6 zka*G{NlKZ7Jc9=~GmNvRQ%6VZ=Hd)fAjN>mL9r%Sai+HyYP%-bk?oKkK2^;NcEl)r zrFCkMYsmW*B5O&dd4=}v$HCg~FolIV>{VBWeEa$EX#KRPw+x$kjOJ2=@1Ku}2UIq{ zTEw+`#wS@(jWY;u*32l%*}Q64F~T0MY}eDzw*MM0F{JvwPA*hK{QhWgGCJ1J#qb%` zu5!}A)Ut1El<#V3g(V=?U9d=i3NNQ36z=zCC9FK51|@TlP#*=kD4Mel{ojTHy;tKCdV{oF8lRsOKo6WG@Y21h)fN51NLwk$0ZaX99 zHG8)|dlcy!J)GNPIys2;X}(UpW9Dqe;V&a5E8;^i6V?UlL?^78sbHY6-OOWma_QmG{`8)K_-)K5R6 zMqe((JQ^&Lvn~aaX=r)j0r!eWjO-M6BhsxF1DgRv8Ve zY$8ZmY8Z{qw=tuRS`Ux)q2Zdwr~8F7tWvyXk3UenH>p5dBS9~Eg*X}mS-J1%tp~!9 
z(Bhq<6dSi<$wyTED^sw9ua14rB9gNNSEHLZFc&>bK78cUymi=nT^VCEb6d81u*O+E z7wlg#!c9iBMu5zVW<{BD=wEkHw~{g|fhx~4T4d0;pn7Hc^lgI7iX_p1pIlit!>Y=} zeSfkZGM7aiAa2Trud=nQG;Z~lT<2L{zd-ZUcIi^n=I8Yj!HR+OR*5$1n+#qb3G`jF z!W^jDUW2kwkYf=bBJ8BodJ#Hy{d~FIBKRUMstaHsQ)+rQ_Q0I4C$PZKs;KSGigg82 z*~Su^yLc{&X(lUppeOxn1HhdZ(U0OMv?$iJR?~pI06#l$P}E-_2Thhcg^Iu-*9mN+ zR%^}s->7N$(VaH@f*U#Bc=;6TP|&=V7*F&vg&aMg6b7F-i)0-P5h+Y#)uyu$Bd?C7 zjH*ZNH1A$}{dR14w%I3EBoZw51!Aa?XYihXia@t)IlI|9vq3J3B755M61gJ$^Q3g? zBu^oG-jDRy5Yt3P+imwqnxv0U#vF|LU!M=RTYvmP#DcBy2x_@mC)V<1laOGuHA1+7 z7%0cAL1&BEm^}|T{rHXG-K26tZdPjx|JnN;Kn#Lsaq`*D33o&g$!>g>MM2{r#Sp`I zUvlrNUq*KPI&EPrXr_>@Tj_|eGp9%bvn0ZdT9LiJ<2vJOv+1&((N3dSW) zQaTYx7U(?XK{-g~15V-L)C_t8jddgZi=7m8A{`IvlqH{P$A+#V^#&>Am0GRDua6y0 z7keY=_U?g8cu8ND25rgZcH7*z5E)N2nOx?jTZ#!M^H+TueW^HEDg2m+=Y6Yta|tX{ zj(rGPg3i^e>!!yMx%%cH<0^$HMsfJF#8h#`mClHU#A_4e{SEiADSo%6(32ZXEgTeI zlC1}yk+v{hVG@eEmo=kiH)sum#Ue>XT4=^Z5Du+0w4!x8oSy_6EDxh{zOf85B%2=x zJWSZ5TX1lPG6W%MAZMmxnVB4DG72SEfj97GQhH)B&=UPk@xiy}E!LB0Yw$)^Oy=x! zI`L<2P85CTXV`Nl?a89JzppUo$yU#QJo`SpK92K5T%MGEA!fYiO}k$g^m~X~<{3HK z7e;*PYL!G{uAK)0F()f9L*Xe(*=~lwkJ_z$rRZ`2En$-n7i{z}(KL0_F?Hq5WQH_k z4+Wjd3sTy$_+O;c*V`TdH6k! 
z@gbiTjqz<>A%Ew5p6}iF1&CAHyuG9yio}irn@F}}_C#Sky{}P2F8$oZM-rQVbW$4M zUGWyH@s(ZLiD&y*u1OT=CleHwx)nFF7m-yp6~0FvEp>xL!Qf;sE#bM~igo+5&Jy^YqY`FIO5}+`ZXis8lWCdT4SN`Y9N9ZnE=yp<-((iXYs4)O0PbkkLenm2Big)e=CdMgut_kif**FMfY!RQzqc#5}K`%6}5+B_wW!)1EjKi zS6u0^lUak~q|GDn=u`5vw$6ULxa<@4dRt*L6p3=4Ot46x zgJBtW(DYon{s~hkDi*1FmwFqnQMeF&Pxb1MOKai_PMb*4!S$@=x?)oj`sBeiV9IES zf$q1=)yz63=9dBcyo<;1>_=!a^4(nNUzZYa_uEVEy>)|?T{ zuKRdB{DsObquWii`p3;0^)3T&4m~?WGAK(%gJL@gx1%RMD64RbXqhqomg#7CmZW!b zO?dD^43}lbYu||B+3whc?^kan`3|N11_gb?Hr-Nxd5LE<8x_#puxX_YBdPO!C(IlZ zKZ4Yn2gAGv4=5(bNjRfFNWU$6E-)_sp4OeLXJZG$SjCxAUDWjJ!P$=bGS%^kdY{Yp z_!6N){IF)`S-fCQ7OHip9yYWOBtbOyMIF-ex*W%oqq#&>3?%d`hB*gICXH}9nj+-O z1HNtXmiaeqVGd?^)~>ZqPW_O(?gQ7XNo7?|)DNrF5UM^g3sa!CP=ink^`h2Y`<|zV zvbBD^+uRV5K417IPEp0EDWV`=FXxkT9lLZ(*p+seDY!3mGa$}NC3r5T25aBL9)1>z z#f{0Uuv*MG<(u}{|Jfb}%G5yiOO;5&$yB@g-wboD#eC1{zn=={!0#F4> z>WvV2M()IIXi5Q3%S__~EmZS8TGvD_Ti^z2dtyruWUEK011ZOl-#mTEq4BWzm@u!> zSF^kXiPJpqiJV8EcFe^EzDw>a0}O@4y1hr={lM-oWO%I{gxylSF0N8rE`03ho*I6Arm z0ATOnZfy=qU==>lfB*orz`b{g1MWe@-@obq`vIE&1+}6D=!qI!32NgAnvgggY+hP|3Z9F@Uycz4!9-{Nmz~1g*#eP|NQh0w za&xw){!53RxVW0xgSwTAtIMCx{>u(%gP(avY%C)E+e|2chcJM<0g)bCxR|>j6eKtt zjxfPMNhC%Dkb?S~KOln^C}9{1C?H@z1Eemlu79e4I>N0#SP2vWBN+Y#LID$^93t=D z-x3v+NPp>2u)d2IsQ;6|`hOg3fz=p5z5H)xKo=Sy>EQ28kQER_?{2BK9F-46wSmzCcJsLj1q50sua^->C%wfUOY#1j_(G zP67b*biuqUH*;50P#!y2{E?sv(ERr?2aePKH~tWre^QT)y^AZTeP9CrcO@w7VI_}ZZ9odz&xUtrVbW}S_q4}q6!!pT+MF!CmIA{r{?@S{iQtN;r!bo X{*Uq}pG#VT`(ML None: + """Test basic audio isolation.""" + client = ElevenLabs() + audio_file = open(DEFAULT_VOICE_FILE, "rb") + try: + audio_stream = client.audio_isolation.audio_isolation(audio=audio_file) + audio = b"".join(chunk for chunk in audio_stream) + assert isinstance(audio, bytes), "Combined audio should be bytes" + if not IN_GITHUB: + play(audio) + finally: + audio_file.close() + + +def test_audio_isolation_as_stream(): + """Test audio isolation with streaming.""" + client = ElevenLabs() + audio_file = open(DEFAULT_VOICE_FILE, "rb") + try: 
+ audio_stream = client.audio_isolation.audio_isolation_stream(audio=audio_file) + audio = b"".join(chunk for chunk in audio_stream) + assert isinstance(audio, bytes), "Combined audio should be bytes" + if not IN_GITHUB: + play(audio) + finally: + audio_file.close() diff --git a/tests/test_models.py b/tests/test_models.py index 6bbb2dac..84f85d1c 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -2,7 +2,7 @@ from elevenlabs.client import ElevenLabs -def test_models(): +def test_models_get_all(): client = ElevenLabs() models = client.models.get_all() assert len(models) > 0 diff --git a/tests/test_sts.py b/tests/test_sts.py new file mode 100644 index 00000000..530e6fce --- /dev/null +++ b/tests/test_sts.py @@ -0,0 +1,31 @@ +from elevenlabs import play +from elevenlabs.client import ElevenLabs + +from .utils import IN_GITHUB, DEFAULT_VOICE, DEFAULT_VOICE_FILE + + +def test_sts() -> None: + """Test basic speech-to-speech generation.""" + client = ElevenLabs() + audio_file = open(DEFAULT_VOICE_FILE, "rb") + try: + audio_stream = client.speech_to_speech.convert(voice_id=DEFAULT_VOICE, audio=audio_file) + audio = b"".join(chunk for chunk in audio_stream) + assert isinstance(audio, bytes), "Combined audio should be bytes" + if not IN_GITHUB: + play(audio) + finally: + audio_file.close() + + +def test_sts_as_stream(): + client = ElevenLabs() + audio_file = open(DEFAULT_VOICE_FILE, "rb") + try: + audio_stream = client.speech_to_speech.convert_as_stream(voice_id=DEFAULT_VOICE, audio=audio_file) + audio = b"".join(chunk for chunk in audio_stream) + assert isinstance(audio, bytes), "Combined audio should be bytes" + if not IN_GITHUB: + play(audio) + finally: + audio_file.close() diff --git a/tests/test_ttsfx.py b/tests/test_ttsfx.py new file mode 100644 index 00000000..30de6fc1 --- /dev/null +++ b/tests/test_ttsfx.py @@ -0,0 +1,17 @@ +from elevenlabs import play +from elevenlabs.client import ElevenLabs + +from .utils import IN_GITHUB + + +def 
test_text_to_sound_effects_convert() -> None: + """Test basic sound-effect generation.""" + client = ElevenLabs() + audio_generator = client.text_to_sound_effects.convert( + text="Hypnotic throbbing sound effect. Increases in imtensity.", + duration_seconds=2, + ) + audio = b"".join(audio_generator) + assert isinstance(audio, bytes), "TTS should return bytes" + if not IN_GITHUB: + play(audio) diff --git a/tests/test_ttv.py b/tests/test_ttv.py new file mode 100644 index 00000000..5d08682b --- /dev/null +++ b/tests/test_ttv.py @@ -0,0 +1,17 @@ +from elevenlabs.client import ElevenLabs + + +def test_voice_preview_generation(): + """Test generating voice previews from description.""" + client = ElevenLabs() + + # Test parameters + description = "A warm and friendly female voice with a slight British accent, speaking clearly and professionally" + sample_text = "This is a test message that needs to be at least one hundred characters long to meet the API requirements. Here it is." + + previews = client.text_to_voice.create_previews(voice_description=description, text=sample_text) + + assert hasattr(previews, "previews"), "Response should have 'previews' attribute" + assert len(previews.previews) > 0, "Should receive at least one preview" + assert hasattr(previews.previews[0], "generated_voice_id"), "Preview should contain generated_voice_id" + assert hasattr(previews.previews[0], "audio_base_64"), "Preview should contain audio_base_64" diff --git a/tests/utils.py b/tests/utils.py index 9467e440..f2061ef0 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -9,6 +9,7 @@ DEFAULT_VOICE = "21m00Tcm4TlvDq8ikWAM" DEFAULT_TEXT = "Hello" DEFAULT_MODEL = "eleven_multilingual_v2" +DEFAULT_VOICE_FILE = "tests/fixtures/voice_sample.mp3" def as_local_files(urls: Sequence[str]) -> Generator[str, None, None]: From 05a457d7bb34c95b20c572e9dd0820f5c05756a1 Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Sat, 14 Dec 2024 12:35:26 +0000 
Subject: [PATCH 31/45] SDK regeneration --- .gitignore | 1 - pyproject.toml | 2 +- reference.md | 650 ++++++- src/elevenlabs/__init__.py | 50 +- src/elevenlabs/chapters/client.py | 167 ++ src/elevenlabs/conversational_ai/client.py | 1647 +++++++++++++---- src/elevenlabs/core/client_wrapper.py | 2 +- src/elevenlabs/dubbing/__init__.py | 4 +- src/elevenlabs/dubbing/client.py | 16 +- src/elevenlabs/dubbing/types/__init__.py | 6 +- ..._transcript_for_dub_request_format_type.py | 5 + ...t_language_code_get_request_format_type.py | 7 - src/elevenlabs/projects/client.py | 167 -- .../pronunciation_dictionary/client.py | 16 +- src/elevenlabs/types/__init__.py | 46 + .../types/agent_config_override_config.py | 22 + .../types/agent_platform_settings.py | 6 + ...versation_config_client_override_config.py | 22 + ...versation_history_feedback_common_model.py | 22 + ...versation_history_metadata_common_model.py | 2 + ...rsation_history_transcript_common_model.py | 3 + ...versation_initiation_client_data_config.py | 21 + .../create_phone_number_response_model.py | 22 + src/elevenlabs/types/embed_config.py | 2 + .../types/get_phone_number_response_model.py | 37 + .../types/moderation_status_response_model.py | 28 + ...ion_status_response_model_safety_status.py | 5 + ...on_status_response_model_warning_status.py | 5 + .../types/phone_number_agent_info.py | 20 + src/elevenlabs/types/privacy_config.py | 19 + .../project_creation_meta_response_model.py | 23 + ...ect_creation_meta_response_model_status.py | 7 + ...oject_creation_meta_response_model_type.py | 5 + .../types/project_extended_response_model.py | 2 + src/elevenlabs/types/project_response.py | 2 + src/elevenlabs/types/project_state.py | 2 +- .../types/prompt_agent_override_config.py | 19 + src/elevenlabs/types/safety.py | 25 + src/elevenlabs/types/safety_evaluation.py | 28 + src/elevenlabs/types/safety_rule.py | 18 + src/elevenlabs/types/telephony_provider.py | 5 + ...s_conversational_config_override_config.py | 19 + 
src/elevenlabs/types/user.py | 1 + src/elevenlabs/types/user_feedback.py | 21 + src/elevenlabs/types/user_feedback_score.py | 5 + src/elevenlabs/types/widget_feedback_mode.py | 5 + 46 files changed, 2611 insertions(+), 598 deletions(-) create mode 100644 src/elevenlabs/dubbing/types/dubbing_get_transcript_for_dub_request_format_type.py delete mode 100644 src/elevenlabs/dubbing/types/get_transcript_for_dub_v_1_dubbing_dubbing_id_transcript_language_code_get_request_format_type.py create mode 100644 src/elevenlabs/types/agent_config_override_config.py create mode 100644 src/elevenlabs/types/conversation_config_client_override_config.py create mode 100644 src/elevenlabs/types/conversation_history_feedback_common_model.py create mode 100644 src/elevenlabs/types/conversation_initiation_client_data_config.py create mode 100644 src/elevenlabs/types/create_phone_number_response_model.py create mode 100644 src/elevenlabs/types/get_phone_number_response_model.py create mode 100644 src/elevenlabs/types/moderation_status_response_model.py create mode 100644 src/elevenlabs/types/moderation_status_response_model_safety_status.py create mode 100644 src/elevenlabs/types/moderation_status_response_model_warning_status.py create mode 100644 src/elevenlabs/types/phone_number_agent_info.py create mode 100644 src/elevenlabs/types/privacy_config.py create mode 100644 src/elevenlabs/types/project_creation_meta_response_model.py create mode 100644 src/elevenlabs/types/project_creation_meta_response_model_status.py create mode 100644 src/elevenlabs/types/project_creation_meta_response_model_type.py create mode 100644 src/elevenlabs/types/prompt_agent_override_config.py create mode 100644 src/elevenlabs/types/safety.py create mode 100644 src/elevenlabs/types/safety_evaluation.py create mode 100644 src/elevenlabs/types/safety_rule.py create mode 100644 src/elevenlabs/types/telephony_provider.py create mode 100644 src/elevenlabs/types/tts_conversational_config_override_config.py create mode 
100644 src/elevenlabs/types/user_feedback.py create mode 100644 src/elevenlabs/types/user_feedback_score.py create mode 100644 src/elevenlabs/types/widget_feedback_mode.py diff --git a/.gitignore b/.gitignore index 83bacf16..0da665fe 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,3 @@ dist/ __pycache__/ poetry.toml .ruff_cache/ -.DS_Store diff --git a/pyproject.toml b/pyproject.toml index 86161b22..493e7fd8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.13.5" +version = "1.20.0" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index 45231398..6823081c 100644 --- a/reference.md +++ b/reference.md @@ -393,7 +393,7 @@ client.history.download( ## TextToSoundEffects ## AudioIsolation -## Samples +## samples
    client.samples.delete(...)
    @@ -3012,7 +3012,7 @@ client.voices.get_a_profile_page(
    -## Projects +## projects
    client.projects.get_all()
    @@ -3819,7 +3819,7 @@ client.projects.stream_archive(
    -
    client.projects.add_chapter_to_a_project(...) +
    client.projects.update_pronunciation_dictionaries(...)
    @@ -3831,7 +3831,7 @@ client.projects.stream_archive(
    -Creates a new chapter either as blank or from a URL. +Updates the set of pronunciation dictionaries acting on a project. This will automatically mark text within this project as requiring reconverting where the new dictionary would apply or the old one no longer does.
    @@ -3846,14 +3846,19 @@ Creates a new chapter either as blank or from a URL.
    ```python -from elevenlabs import ElevenLabs +from elevenlabs import ElevenLabs, PronunciationDictionaryVersionLocator client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.projects.add_chapter_to_a_project( +client.projects.update_pronunciation_dictionaries( project_id="21m00Tcm4TlvDq8ikWAM", - name="name", + pronunciation_dictionary_locators=[ + PronunciationDictionaryVersionLocator( + pronunciation_dictionary_id="pronunciation_dictionary_id", + version_id="version_id", + ) + ], ) ``` @@ -3878,15 +3883,7 @@ client.projects.add_chapter_to_a_project(
    -**name:** `str` — The name of the chapter, used for identification only. - -
    -
    - -
    -
    - -**from_url:** `typing.Optional[str]` — An optional URL from which we will extract content to initialize the project. If this is set, 'from_url' must be null. If neither 'from_url' or 'from_document' are provided we will initialize the project as blank. +**pronunciation_dictionary_locators:** `typing.Sequence[PronunciationDictionaryVersionLocator]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
    @@ -3906,7 +3903,8 @@ client.projects.add_chapter_to_a_project(
    -
    client.projects.update_pronunciation_dictionaries(...) +## Chapters +
    client.chapters.get_all(...)
    @@ -3918,7 +3916,7 @@ client.projects.add_chapter_to_a_project(
    -Updates the set of pronunciation dictionaries acting on a project. This will automatically mark text within this project as requiring reconverting where the new dictionary would apply or the old one no longer does. +Returns a list of your chapters for a project together and its metadata.
    @@ -3933,19 +3931,13 @@ Updates the set of pronunciation dictionaries acting on a project. This will aut
    ```python -from elevenlabs import ElevenLabs, PronunciationDictionaryVersionLocator +from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.projects.update_pronunciation_dictionaries( +client.chapters.get_all( project_id="21m00Tcm4TlvDq8ikWAM", - pronunciation_dictionary_locators=[ - PronunciationDictionaryVersionLocator( - pronunciation_dictionary_id="pronunciation_dictionary_id", - version_id="version_id", - ) - ], ) ``` @@ -3970,14 +3962,6 @@ client.projects.update_pronunciation_dictionaries(
    -**pronunciation_dictionary_locators:** `typing.Sequence[PronunciationDictionaryVersionLocator]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first. - -
    -
    - -
    -
    - **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -3990,8 +3974,7 @@ client.projects.update_pronunciation_dictionaries(
    -## Chapters -
    client.chapters.get_all(...) +
    client.chapters.get(...)
    @@ -4003,7 +3986,7 @@ client.projects.update_pronunciation_dictionaries(
    -Returns a list of your chapters for a project together and its metadata. +Returns information about a specific chapter.
    @@ -4023,8 +4006,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.chapters.get_all( +client.chapters.get( project_id="21m00Tcm4TlvDq8ikWAM", + chapter_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -4049,6 +4033,14 @@ client.chapters.get_all(
    +**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project. + +
    +
    + +
    +
    + **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
    @@ -4061,7 +4053,7 @@ client.chapters.get_all(
    -
    client.chapters.get(...) +
    client.chapters.delete(...)
    @@ -4073,7 +4065,7 @@ client.chapters.get_all(
    -Returns information about a specific chapter. +Delete a chapter by its chapter_id.
    @@ -4093,7 +4085,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.chapters.get( +client.chapters.delete( project_id="21m00Tcm4TlvDq8ikWAM", chapter_id="21m00Tcm4TlvDq8ikWAM", ) @@ -4140,7 +4132,7 @@ client.chapters.get(
    -
    client.chapters.delete(...) +
    client.chapters.create(...)
    @@ -4152,7 +4144,7 @@ client.chapters.get(
    -Delete a chapter by its chapter_id. +Creates a new chapter either as blank or from a URL.
    @@ -4172,9 +4164,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.chapters.delete( +client.chapters.create( project_id="21m00Tcm4TlvDq8ikWAM", - chapter_id="21m00Tcm4TlvDq8ikWAM", + name="name", ) ``` @@ -4199,7 +4191,15 @@ client.chapters.delete(
    -**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project. +**name:** `str` — The name of the chapter, used for identification only. + +
    +
    + +
    +
    + +**from_url:** `typing.Optional[str]` — An optional URL from which we will extract content to initialize the project. If this is set, 'from_url' must be null. If neither 'from_url' or 'from_document' are provided we will initialize the project as blank.
    @@ -4841,9 +4841,7 @@ client.dubbing.get_transcript_for_dub(
    -**format_type:** `typing.Optional[ - GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType -]` — Format to use for the subtitle file, either 'srt' or 'webvtt' +**format_type:** `typing.Optional[DubbingGetTranscriptForDubRequestFormatType]` — Format to use for the subtitle file, either 'srt' or 'webvtt'
    @@ -4863,7 +4861,7 @@ client.dubbing.get_transcript_for_dub(
    -## Models +## models
    client.models.get_all()
    @@ -5278,7 +5276,7 @@ typing.Optional[core.File]` — See core.File for more documentation
    -
    client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(...) +
    client.pronunciation_dictionary.add_rules(...)
    @@ -5313,7 +5311,7 @@ from elevenlabs.pronunciation_dictionary import ( client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary( +client.pronunciation_dictionary.add_rules( pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", rules=[ PronunciationDictionaryRule_Phoneme( @@ -5370,7 +5368,7 @@ List of pronunciation rules. Rule can be either:
    -
    client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(...) +
    client.pronunciation_dictionary.remove_rules(...)
    @@ -5402,7 +5400,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary( +client.pronunciation_dictionary.remove_rules( pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", rule_strings=["rule_strings"], ) @@ -6306,7 +6304,7 @@ client.conversational_ai.update_agent(
    -
    client.conversational_ai.get_widget(...) +
    client.conversational_ai.get_agent_widget(...)
    @@ -6338,7 +6336,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_widget( +client.conversational_ai.get_agent_widget( agent_id="21m00Tcm4TlvDq8ikWAM", ) @@ -6384,7 +6382,7 @@ client.conversational_ai.get_widget(
    -
    client.conversational_ai.get_link(...) +
    client.conversational_ai.get_agent_link(...)
    @@ -6416,7 +6414,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_link( +client.conversational_ai.get_agent_link( agent_id="21m00Tcm4TlvDq8ikWAM", ) @@ -6454,7 +6452,7 @@ client.conversational_ai.get_link(
    -
    client.conversational_ai.post_avatar(...) +
    client.conversational_ai.post_agent_avatar(...)
    @@ -6486,7 +6484,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.post_avatar( +client.conversational_ai.post_agent_avatar( agent_id="21m00Tcm4TlvDq8ikWAM", ) @@ -6534,7 +6532,7 @@ core.File` — See core.File for more documentation
    -
    client.conversational_ai.get_knowledge_base_document(...) +
    client.conversational_ai.get_agent_knowledge_base_document_by_id(...)
    @@ -6566,7 +6564,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_knowledge_base_document( +client.conversational_ai.get_agent_knowledge_base_document_by_id( agent_id="21m00Tcm4TlvDq8ikWAM", documentation_id="21m00Tcm4TlvDq8ikWAM", ) @@ -6701,7 +6699,7 @@ client.conversational_ai.add_agent_secret(
    -
    client.conversational_ai.create_knowledge_base_document(...) +
    client.conversational_ai.add_to_agent_knowledge_base(...)
    @@ -6733,7 +6731,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.create_knowledge_base_document( +client.conversational_ai.add_to_agent_knowledge_base( agent_id="21m00Tcm4TlvDq8ikWAM", ) @@ -7033,6 +7031,76 @@ client.conversational_ai.get_conversation(
    + +
    +
    + +
    client.conversational_ai.delete_conversation(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Delete a particular conversation +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.conversational_ai.delete_conversation( + conversation_id="21m00Tcm4TlvDq8ikWAM", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**conversation_id:** `str` — The id of the conversation you're taking the action on. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
    +
    +
    +
    + +
    @@ -7107,3 +7175,457 @@ client.conversational_ai.get_conversation_audio(
    +
    client.conversational_ai.post_conversation_feedback(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Send the feedback for the given conversation +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.conversational_ai.post_conversation_feedback( + conversation_id="21m00Tcm4TlvDq8ikWAM", + feedback="like", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**conversation_id:** `str` — The id of the conversation you're taking the action on. + +
    +
    + +
    +
    + +**feedback:** `UserFeedbackScore` — Either 'like' or 'dislike' to indicate the feedback for the conversation. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
    +
    +
    +
    + + +
    +
    +
    + +
    client.conversational_ai.create_phone_number(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Import Phone Number from Twilio configuration +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.conversational_ai.create_phone_number( + phone_number="phone_number", + label="label", + sid="sid", + token="token", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**phone_number:** `str` — Phone number + +
    +
    + +
    +
    + +**label:** `str` — Label for the phone number + +
    +
    + +
    +
    + +**sid:** `str` — Twilio Account SID + +
    +
    + +
    +
    + +**token:** `str` — Twilio Token + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
    +
    +
    +
    + + +
    +
    +
    + +
    client.conversational_ai.get_phone_number(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Retrieve Phone Number details by ID +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.conversational_ai.get_phone_number( + phone_number_id="TeaqRRdTcIfIu2i7BYfT", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**phone_number_id:** `str` — The id of an agent. This is returned on agent creation. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
    +
    +
    +
    + + +
    +
    +
    + +
    client.conversational_ai.delete_phone_number(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Delete Phone Number by ID +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.conversational_ai.delete_phone_number( + phone_number_id="TeaqRRdTcIfIu2i7BYfT", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**phone_number_id:** `str` — The id of an agent. This is returned on agent creation. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
    +
    +
    +
    + + +
    +
    +
    + +
    client.conversational_ai.update_phone_number(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Update Phone Number details by ID +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.conversational_ai.update_phone_number( + phone_number_id="TeaqRRdTcIfIu2i7BYfT", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**phone_number_id:** `str` — The id of an agent. This is returned on agent creation. + +
    +
    + +
    +
    + +**agent_id:** `typing.Optional[str]` + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
    +
    +
    +
    + + +
    +
    +
    + +
    client.conversational_ai.get_phone_numbers() +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Retrieve all Phone Numbers +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.conversational_ai.get_phone_numbers() + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
    +
    +
    +
    + + +
    +
    +
    + diff --git a/src/elevenlabs/__init__.py b/src/elevenlabs/__init__.py index 4b7a806b..1b45287f 100644 --- a/src/elevenlabs/__init__.py +++ b/src/elevenlabs/__init__.py @@ -14,6 +14,7 @@ AgentBan, AgentConfig, AgentConfigOverride, + AgentConfigOverrideConfig, AgentMetadataResponseModel, AgentPlatformSettings, AgentSummaryResponseModel, @@ -43,14 +44,17 @@ ConversationChargingCommonModel, ConversationConfig, ConversationConfigClientOverride, + ConversationConfigClientOverrideConfig, ConversationHistoryAnalysisCommonModel, ConversationHistoryEvaluationCriteriaResultCommonModel, + ConversationHistoryFeedbackCommonModel, ConversationHistoryMetadataCommonModel, ConversationHistoryTranscriptCommonModel, ConversationHistoryTranscriptCommonModelRole, ConversationHistoryTranscriptToolCallCommonModel, ConversationHistoryTranscriptToolResultCommonModel, ConversationInitiationClientData, + ConversationInitiationClientDataConfig, ConversationSignedUrlResponseModel, ConversationSummaryResponseModel, ConversationSummaryResponseModelStatus, @@ -58,6 +62,7 @@ ConversationTokenPurpose, ConversationalConfig, CreateAgentResponseModel, + CreatePhoneNumberResponseModel, Currency, CustomLlm, DataCollectionResultCommonModel, @@ -90,6 +95,7 @@ GetKnowledgeBaseReponseModel, GetKnowledgeBaseReponseModelType, GetLibraryVoicesResponse, + GetPhoneNumberResponseModel, GetProjectsResponse, GetPronunciationDictionariesMetadataResponseModel, GetPronunciationDictionaryMetadataResponse, @@ -115,13 +121,21 @@ Model, ModelRatesResponseModel, ModelResponseModelConcurrencyGroup, + ModerationStatusResponseModel, + ModerationStatusResponseModelSafetyStatus, + ModerationStatusResponseModelWarningStatus, ObjectJsonSchemaProperty, ObjectJsonSchemaPropertyPropertiesValue, OptimizeStreamingLatency, OrbAvatar, OutputFormat, + PhoneNumberAgentInfo, PostAgentAvatarResponseModel, + PrivacyConfig, ProfilePageResponseModel, + ProjectCreationMetaResponseModel, + ProjectCreationMetaResponseModelStatus, + 
ProjectCreationMetaResponseModelType, ProjectExtendedResponseModel, ProjectExtendedResponseModelAccessLevel, ProjectExtendedResponseModelApplyTextNormalization, @@ -139,6 +153,7 @@ ProjectState, PromptAgent, PromptAgentOverride, + PromptAgentOverrideConfig, PromptAgentToolsItem, PromptAgentToolsItem_Client, PromptAgentToolsItem_Webhook, @@ -154,6 +169,9 @@ RecordingResponse, RemovePronunciationDictionaryRulesResponseModel, ReviewStatus, + Safety, + SafetyEvaluation, + SafetyRule, SpeechHistoryItemResponse, SpeechHistoryItemResponseModelSource, SpeechHistoryItemResponseModelVoiceCategory, @@ -163,9 +181,11 @@ SubscriptionResponseModelCharacterRefreshPeriod, SubscriptionResponseModelCurrency, SubscriptionStatus, + TelephonyProvider, TextToSpeechAsStreamRequest, TtsConversationalConfig, TtsConversationalConfigOverride, + TtsConversationalConfigOverrideConfig, TtsConversationalModel, TtsOptimizeStreamingLatency, TtsOutputFormat, @@ -174,6 +194,8 @@ UrlAvatar, UsageCharactersResponseModel, User, + UserFeedback, + UserFeedbackScore, ValidationError, ValidationErrorLocItem, VerificationAttemptResponse, @@ -195,6 +217,7 @@ WebhookToolApiSchemaConfigMethod, WebhookToolApiSchemaConfigRequestHeadersValue, WebhookToolConfig, + WidgetFeedbackMode, ) from .errors import UnprocessableEntityError from . 
import ( @@ -224,7 +247,7 @@ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New, BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored, ) -from .dubbing import GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType +from .dubbing import DubbingGetTranscriptForDubRequestFormatType from .environment import ElevenLabsEnvironment from .history import HistoryGetAllRequestSource from .play import play, save, stream @@ -262,6 +285,7 @@ "AgentBan", "AgentConfig", "AgentConfigOverride", + "AgentConfigOverrideConfig", "AgentMetadataResponseModel", "AgentPlatformSettings", "AgentSummaryResponseModel", @@ -300,14 +324,17 @@ "ConversationChargingCommonModel", "ConversationConfig", "ConversationConfigClientOverride", + "ConversationConfigClientOverrideConfig", "ConversationHistoryAnalysisCommonModel", "ConversationHistoryEvaluationCriteriaResultCommonModel", + "ConversationHistoryFeedbackCommonModel", "ConversationHistoryMetadataCommonModel", "ConversationHistoryTranscriptCommonModel", "ConversationHistoryTranscriptCommonModelRole", "ConversationHistoryTranscriptToolCallCommonModel", "ConversationHistoryTranscriptToolResultCommonModel", "ConversationInitiationClientData", + "ConversationInitiationClientDataConfig", "ConversationSignedUrlResponseModel", "ConversationSummaryResponseModel", "ConversationSummaryResponseModelStatus", @@ -315,10 +342,12 @@ "ConversationTokenPurpose", "ConversationalConfig", "CreateAgentResponseModel", + "CreatePhoneNumberResponseModel", "Currency", "CustomLlm", "DataCollectionResultCommonModel", "DoDubbingResponse", + "DubbingGetTranscriptForDubRequestFormatType", "DubbingMetadataResponse", "EditProjectResponseModel", "ElevenLabs", @@ -349,11 +378,11 @@ "GetKnowledgeBaseReponseModel", "GetKnowledgeBaseReponseModelType", "GetLibraryVoicesResponse", + "GetPhoneNumberResponseModel", "GetProjectsResponse", "GetPronunciationDictionariesMetadataResponseModel", 
"GetPronunciationDictionaryMetadataResponse", "GetSpeechHistoryResponse", - "GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType", "GetVoicesResponse", "History", "HistoryAlignmentResponseModel", @@ -376,13 +405,21 @@ "Model", "ModelRatesResponseModel", "ModelResponseModelConcurrencyGroup", + "ModerationStatusResponseModel", + "ModerationStatusResponseModelSafetyStatus", + "ModerationStatusResponseModelWarningStatus", "ObjectJsonSchemaProperty", "ObjectJsonSchemaPropertyPropertiesValue", "OptimizeStreamingLatency", "OrbAvatar", "OutputFormat", + "PhoneNumberAgentInfo", "PostAgentAvatarResponseModel", + "PrivacyConfig", "ProfilePageResponseModel", + "ProjectCreationMetaResponseModel", + "ProjectCreationMetaResponseModelStatus", + "ProjectCreationMetaResponseModelType", "ProjectExtendedResponseModel", "ProjectExtendedResponseModelAccessLevel", "ProjectExtendedResponseModelApplyTextNormalization", @@ -402,6 +439,7 @@ "ProjectsAddRequestTargetAudience", "PromptAgent", "PromptAgentOverride", + "PromptAgentOverrideConfig", "PromptAgentToolsItem", "PromptAgentToolsItem_Client", "PromptAgentToolsItem_Webhook", @@ -421,6 +459,9 @@ "RecordingResponse", "RemovePronunciationDictionaryRulesResponseModel", "ReviewStatus", + "Safety", + "SafetyEvaluation", + "SafetyRule", "SpeechHistoryItemResponse", "SpeechHistoryItemResponseModelSource", "SpeechHistoryItemResponseModelVoiceCategory", @@ -430,6 +471,7 @@ "SubscriptionResponseModelCharacterRefreshPeriod", "SubscriptionResponseModelCurrency", "SubscriptionStatus", + "TelephonyProvider", "TextToSpeechAsStreamRequest", "TextToSpeechStreamWithTimestampsResponse", "TextToSpeechStreamWithTimestampsResponseAlignment", @@ -437,6 +479,7 @@ "TextToVoiceCreatePreviewsRequestOutputFormat", "TtsConversationalConfig", "TtsConversationalConfigOverride", + "TtsConversationalConfigOverrideConfig", "TtsConversationalModel", "TtsOptimizeStreamingLatency", "TtsOutputFormat", @@ -446,6 +489,8 @@ "UrlAvatar", 
"UsageCharactersResponseModel", "User", + "UserFeedback", + "UserFeedbackScore", "ValidationError", "ValidationErrorLocItem", "VerificationAttemptResponse", @@ -467,6 +512,7 @@ "WebhookToolApiSchemaConfigMethod", "WebhookToolApiSchemaConfigRequestHeadersValue", "WebhookToolConfig", + "WidgetFeedbackMode", "__version__", "audio_isolation", "audio_native", diff --git a/src/elevenlabs/chapters/client.py b/src/elevenlabs/chapters/client.py index cd01796a..9c064995 100644 --- a/src/elevenlabs/chapters/client.py +++ b/src/elevenlabs/chapters/client.py @@ -11,6 +11,7 @@ from json.decoder import JSONDecodeError from ..core.api_error import ApiError from ..types.chapter_response import ChapterResponse +from ..types.add_chapter_response_model import AddChapterResponseModel from ..types.chapter_snapshots_response import ChapterSnapshotsResponse from ..core.client_wrapper import AsyncClientWrapper @@ -207,6 +208,85 @@ def delete( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + def create( + self, + project_id: str, + *, + name: str, + from_url: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AddChapterResponseModel: + """ + Creates a new chapter either as blank or from a URL. + + Parameters + ---------- + project_id : str + The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. + + name : str + The name of the chapter, used for identification only. + + from_url : typing.Optional[str] + An optional URL from which we will extract content to initialize the project. If this is set, 'from_url' must be null. If neither 'from_url' or 'from_document' are provided we will initialize the project as blank. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AddChapterResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.chapters.create( + project_id="21m00Tcm4TlvDq8ikWAM", + name="name", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/chapters/add", + method="POST", + json={ + "name": name, + "from_url": from_url, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AddChapterResponseModel, + construct_type( + type_=AddChapterResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + def convert( self, project_id: str, chapter_id: str, *, request_options: typing.Optional[RequestOptions] = None ) -> typing.Optional[typing.Any]: @@ -623,6 +703,93 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + async def create( + self, + project_id: str, + *, + name: str, + from_url: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AddChapterResponseModel: + """ + Creates a new chapter either as blank or from a URL. + + Parameters + ---------- + project_id : str + The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. 
+ + name : str + The name of the chapter, used for identification only. + + from_url : typing.Optional[str] + An optional URL from which we will extract content to initialize the project. If this is set, 'from_url' must be null. If neither 'from_url' or 'from_document' are provided we will initialize the project as blank. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AddChapterResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.chapters.create( + project_id="21m00Tcm4TlvDq8ikWAM", + name="name", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/chapters/add", + method="POST", + json={ + "name": name, + "from_url": from_url, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AddChapterResponseModel, + construct_type( + type_=AddChapterResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + async def convert( self, project_id: str, chapter_id: str, *, request_options: typing.Optional[RequestOptions] = None ) -> typing.Optional[typing.Any]: diff --git a/src/elevenlabs/conversational_ai/client.py b/src/elevenlabs/conversational_ai/client.py index 1ef2a544..d3686ba5 100644 --- 
a/src/elevenlabs/conversational_ai/client.py +++ b/src/elevenlabs/conversational_ai/client.py @@ -29,6 +29,9 @@ from ..types.evaluation_success_result import EvaluationSuccessResult from ..types.get_conversations_page_response_model import GetConversationsPageResponseModel from ..types.get_conversation_response_model import GetConversationResponseModel +from ..types.user_feedback_score import UserFeedbackScore +from ..types.create_phone_number_response_model import CreatePhoneNumberResponseModel +from ..types.get_phone_number_response_model import GetPhoneNumberResponseModel from ..core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters @@ -396,7 +399,7 @@ def update_agent( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def get_widget( + def get_agent_widget( self, agent_id: str, *, @@ -429,7 +432,7 @@ def get_widget( client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.conversational_ai.get_widget( + client.conversational_ai.get_agent_widget( agent_id="21m00Tcm4TlvDq8ikWAM", ) """ @@ -465,7 +468,7 @@ def get_widget( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def get_link( + def get_agent_link( self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None ) -> GetAgentLinkResponseModel: """ @@ -491,7 +494,7 @@ def get_link( client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.conversational_ai.get_link( + client.conversational_ai.get_agent_link( agent_id="21m00Tcm4TlvDq8ikWAM", ) """ @@ -524,7 +527,7 @@ def get_link( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_avatar( + def post_agent_avatar( self, agent_id: str, *, avatar_file: core.File, request_options: typing.Optional[RequestOptions] = 
None ) -> PostAgentAvatarResponseModel: """ @@ -553,7 +556,7 @@ def post_avatar( client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.conversational_ai.post_avatar( + client.conversational_ai.post_agent_avatar( agent_id="21m00Tcm4TlvDq8ikWAM", ) """ @@ -591,7 +594,7 @@ def post_avatar( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def get_knowledge_base_document( + def get_agent_knowledge_base_document_by_id( self, agent_id: str, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None ) -> GetKnowledgeBaseReponseModel: """ @@ -620,7 +623,7 @@ def get_knowledge_base_document( client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.conversational_ai.get_knowledge_base_document( + client.conversational_ai.get_agent_knowledge_base_document_by_id( agent_id="21m00Tcm4TlvDq8ikWAM", documentation_id="21m00Tcm4TlvDq8ikWAM", ) @@ -729,7 +732,7 @@ def add_agent_secret( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def create_knowledge_base_document( + def add_to_agent_knowledge_base( self, agent_id: str, *, @@ -766,7 +769,7 @@ def create_knowledge_base_document( client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.conversational_ai.create_knowledge_base_document( + client.conversational_ai.add_to_agent_knowledge_base( agent_id="21m00Tcm4TlvDq8ikWAM", ) """ @@ -1018,6 +1021,65 @@ def get_conversation( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + def delete_conversation( + self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Delete a particular conversation + + Parameters + ---------- + conversation_id : str + The id of the conversation you're taking the action on. 
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.delete_conversation( + conversation_id="21m00Tcm4TlvDq8ikWAM", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/conversations/{jsonable_encoder(conversation_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + def get_conversation_audio( self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None ) -> None: @@ -1054,7 +1116,959 @@ def get_conversation_audio( ) try: if 200 <= _response.status_code < 300: - return + return + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_conversation_feedback( + self, + conversation_id: str, + *, + feedback: UserFeedbackScore, + request_options: 
typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Send the feedback for the given conversation + + Parameters + ---------- + conversation_id : str + The id of the conversation you're taking the action on. + + feedback : UserFeedbackScore + Either 'like' or 'dislike' to indicate the feedback for the conversation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.post_conversation_feedback( + conversation_id="21m00Tcm4TlvDq8ikWAM", + feedback="like", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/conversations/{jsonable_encoder(conversation_id)}/feedback", + method="POST", + json={ + "feedback": feedback, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_phone_number( + self, + *, + phone_number: str, + label: str, + sid: str, + token: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreatePhoneNumberResponseModel: + """ + Import Phone Number from Twilio configuration + + Parameters + ---------- + phone_number : str + Phone number + + label 
: str + Label for the phone number + + sid : str + Twilio Account SID + + token : str + Twilio Token + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CreatePhoneNumberResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.create_phone_number( + phone_number="phone_number", + label="label", + sid="sid", + token="token", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v1/convai/phone-numbers/create", + method="POST", + json={ + "phone_number": phone_number, + "label": label, + "sid": sid, + "token": token, + "provider": "twilio", + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CreatePhoneNumberResponseModel, + construct_type( + type_=CreatePhoneNumberResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_phone_number( + self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetPhoneNumberResponseModel: + """ + Retrieve Phone Number details by ID + + Parameters + ---------- + phone_number_id : str + The id of an agent. This is returned on agent creation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + GetPhoneNumberResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.get_phone_number( + phone_number_id="TeaqRRdTcIfIu2i7BYfT", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetPhoneNumberResponseModel, + construct_type( + type_=GetPhoneNumberResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_phone_number( + self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Delete Phone Number by ID + + Parameters + ---------- + phone_number_id : str + The id of an agent. This is returned on agent creation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.delete_phone_number( + phone_number_id="TeaqRRdTcIfIu2i7BYfT", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_phone_number( + self, + phone_number_id: str, + *, + agent_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetPhoneNumberResponseModel: + """ + Update Phone Number details by ID + + Parameters + ---------- + phone_number_id : str + The id of an agent. This is returned on agent creation. + + agent_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + GetPhoneNumberResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.update_phone_number( + phone_number_id="TeaqRRdTcIfIu2i7BYfT", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}", + method="PATCH", + json={ + "agent_id": agent_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetPhoneNumberResponseModel, + construct_type( + type_=GetPhoneNumberResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_phone_numbers( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[GetPhoneNumberResponseModel]: + """ + Retrieve all Phone Numbers + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[GetPhoneNumberResponseModel] + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.get_phone_numbers() + """ + _response = self._client_wrapper.httpx_client.request( + "v1/convai/phone-numbers/", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[GetPhoneNumberResponseModel], + construct_type( + type_=typing.List[GetPhoneNumberResponseModel], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncConversationalAiClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def get_signed_url( + self, *, agent_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> ConversationSignedUrlResponseModel: + """ + Get a signed url to start a conversation with an agent with an agent that requires authorization + + Parameters + ---------- + agent_id : str + The id of the agent you're taking the action on. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ConversationSignedUrlResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.get_signed_url( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v1/convai/conversation/get_signed_url", + method="GET", + params={ + "agent_id": agent_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConversationSignedUrlResponseModel, + construct_type( + type_=ConversationSignedUrlResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_agent( + self, + *, + conversation_config: ConversationalConfig, + platform_settings: typing.Optional[AgentPlatformSettings] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateAgentResponseModel: + """ + Create an agent from a config object + + Parameters + ---------- + conversation_config : ConversationalConfig + Conversation configuration for an agent + + platform_settings : typing.Optional[AgentPlatformSettings] + Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. 
+ + name : typing.Optional[str] + A name to make the agent easier to find + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CreateAgentResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs, ConversationalConfig + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.create_agent( + conversation_config=ConversationalConfig(), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v1/convai/agents/create", + method="POST", + json={ + "conversation_config": convert_and_respect_annotation_metadata( + object_=conversation_config, annotation=ConversationalConfig, direction="write" + ), + "platform_settings": convert_and_respect_annotation_metadata( + object_=platform_settings, annotation=AgentPlatformSettings, direction="write" + ), + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CreateAgentResponseModel, + construct_type( + type_=CreateAgentResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_agent( + self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetAgentResponseModel: + """ + Retrieve config for an agent + + Parameters + ---------- + agent_id : str + The id of an agent. 
This is returned on agent creation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GetAgentResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.get_agent( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetAgentResponseModel, + construct_type( + type_=GetAgentResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_agent( + self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Dict[str, str]: + """ + Delete an agent + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Dict[str, str] + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.delete_agent( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Dict[str, str], + construct_type( + type_=typing.Dict[str, str], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_agent( + self, + agent_id: str, + *, + conversation_config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + platform_settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + secrets: typing.Optional[ + typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem] + ] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetAgentResponseModel: + """ + Patches an Agent settings + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. 
+ + conversation_config : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Conversation configuration for an agent + + platform_settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. + + secrets : typing.Optional[typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem]] + A list of secrets for the agent. Can be used to add new secrets or update and delete the existing ones + + name : typing.Optional[str] + A name to make the agent easier to find + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GetAgentResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.update_agent( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}", + method="PATCH", + json={ + "conversation_config": conversation_config, + "platform_settings": platform_settings, + "secrets": convert_and_respect_annotation_metadata( + object_=secrets, + annotation=typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem], + direction="write", + ), + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetAgentResponseModel, + construct_type( + type_=GetAgentResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + 
object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_agent_widget( + self, + agent_id: str, + *, + conversation_signature: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetAgentEmbedResponseModel: + """ + Retrieve the widget configuration for an agent + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + conversation_signature : typing.Optional[str] + An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GetAgentEmbedResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.get_agent_widget( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}/widget", + method="GET", + params={ + "conversation_signature": conversation_signature, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetAgentEmbedResponseModel, + construct_type( + type_=GetAgentEmbedResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: 
+ raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_agent_link( + self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetAgentLinkResponseModel: + """ + Get the current link used to share the agent with others + + Parameters + ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GetAgentLinkResponseModel + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.get_agent_link( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}/link", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetAgentLinkResponseModel, + construct_type( + type_=GetAgentLinkResponseModel, # type: ignore + object_=_response.json(), + ), + ) if _response.status_code == 422: raise UnprocessableEntityError( typing.cast( @@ -1070,28 +2084,26 @@ def get_conversation_audio( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - -class AsyncConversationalAiClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def get_signed_url( - self, *, agent_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> ConversationSignedUrlResponseModel: + async def post_agent_avatar( + self, agent_id: str, *, avatar_file: core.File, request_options: typing.Optional[RequestOptions] = None + 
) -> PostAgentAvatarResponseModel: """ - Get a signed url to start a conversation with an agent with an agent that requires authorization + Sets the avatar for an agent displayed in the widget Parameters ---------- agent_id : str - The id of the agent you're taking the action on. + The id of an agent. This is returned on agent creation. + + avatar_file : core.File + See core.File for more documentation request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - ConversationSignedUrlResponseModel + PostAgentAvatarResponseModel Successful Response Examples @@ -1106,7 +2118,7 @@ async def get_signed_url( async def main() -> None: - await client.conversational_ai.get_signed_url( + await client.conversational_ai.post_agent_avatar( agent_id="21m00Tcm4TlvDq8ikWAM", ) @@ -1114,19 +2126,21 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v1/convai/conversation/get_signed_url", - method="GET", - params={ - "agent_id": agent_id, + f"v1/convai/agents/{jsonable_encoder(agent_id)}/avatar", + method="POST", + data={}, + files={ + "avatar_file": avatar_file, }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - ConversationSignedUrlResponseModel, + PostAgentAvatarResponseModel, construct_type( - type_=ConversationSignedUrlResponseModel, # type: ignore + type_=PostAgentAvatarResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1145,41 +2159,33 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def create_agent( - self, - *, - conversation_config: ConversationalConfig, - platform_settings: typing.Optional[AgentPlatformSettings] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> CreateAgentResponseModel: + async def 
get_agent_knowledge_base_document_by_id( + self, agent_id: str, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetKnowledgeBaseReponseModel: """ - Create an agent from a config object + Get details about a specific documentation making up the agent's knowledge base Parameters ---------- - conversation_config : ConversationalConfig - Conversation configuration for an agent - - platform_settings : typing.Optional[AgentPlatformSettings] - Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. + agent_id : str + The id of an agent. This is returned on agent creation. - name : typing.Optional[str] - A name to make the agent easier to find + documentation_id : str + The id of a document from the agent's knowledge base. This is returned on document addition. request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - CreateAgentResponseModel + GetKnowledgeBaseReponseModel Successful Response Examples -------- import asyncio - from elevenlabs import AsyncElevenLabs, ConversationalConfig + from elevenlabs import AsyncElevenLabs client = AsyncElevenLabs( api_key="YOUR_API_KEY", @@ -1187,37 +2193,25 @@ async def create_agent( async def main() -> None: - await client.conversational_ai.create_agent( - conversation_config=ConversationalConfig(), + await client.conversational_ai.get_agent_knowledge_base_document_by_id( + agent_id="21m00Tcm4TlvDq8ikWAM", + documentation_id="21m00Tcm4TlvDq8ikWAM", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v1/convai/agents/create", - method="POST", - json={ - "conversation_config": convert_and_respect_annotation_metadata( - object_=conversation_config, annotation=ConversationalConfig, direction="write" - ), - "platform_settings": convert_and_respect_annotation_metadata( - object_=platform_settings, annotation=AgentPlatformSettings, direction="write" - ), - 
"name": name, - }, - headers={ - "content-type": "application/json", - }, + f"v1/convai/agents/{jsonable_encoder(agent_id)}/knowledge-base/{jsonable_encoder(documentation_id)}", + method="GET", request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - CreateAgentResponseModel, + GetKnowledgeBaseReponseModel, construct_type( - type_=CreateAgentResponseModel, # type: ignore + type_=GetKnowledgeBaseReponseModel, # type: ignore object_=_response.json(), ), ) @@ -1236,23 +2230,29 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def get_agent( - self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> GetAgentResponseModel: + async def add_agent_secret( + self, agent_id: str, *, name: str, secret_value: str, request_options: typing.Optional[RequestOptions] = None + ) -> AddAgentSecretResponseModel: """ - Retrieve config for an agent + Uploads a file or reference a webpage for the agent to use as part of it's knowledge base Parameters ---------- agent_id : str The id of an agent. This is returned on agent creation. + name : str + A name to help identify a particular agent secret + + secret_value : str + A value to be encrypted and used by the agent + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - GetAgentResponseModel + AddAgentSecretResponseModel Successful Response Examples @@ -1267,24 +2267,34 @@ async def get_agent( async def main() -> None: - await client.conversational_ai.get_agent( + await client.conversational_ai.add_agent_secret( agent_id="21m00Tcm4TlvDq8ikWAM", + name="name", + secret_value="secret_value", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/agents/{jsonable_encoder(agent_id)}", - method="GET", + f"v1/convai/agents/{jsonable_encoder(agent_id)}/add-secret", + method="POST", + json={ + "name": name, + "secret_value": secret_value, + }, + headers={ + "content-type": "application/json", + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - GetAgentResponseModel, + AddAgentSecretResponseModel, construct_type( - type_=GetAgentResponseModel, # type: ignore + type_=AddAgentSecretResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1303,23 +2313,34 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def delete_agent( - self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Dict[str, str]: + async def add_to_agent_knowledge_base( + self, + agent_id: str, + *, + url: typing.Optional[str] = OMIT, + file: typing.Optional[core.File] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AddKnowledgeBaseResponseModel: """ - Delete an agent + Uploads a file or reference a webpage for the agent to use as part of it's knowledge base Parameters ---------- agent_id : str The id of an agent. This is returned on agent creation. + url : typing.Optional[str] + URL to a page of documentation that the agent will have access to in order to interact with users. 
+ + file : typing.Optional[core.File] + See core.File for more documentation + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Dict[str, str] + AddKnowledgeBaseResponseModel Successful Response Examples @@ -1334,7 +2355,7 @@ async def delete_agent( async def main() -> None: - await client.conversational_ai.delete_agent( + await client.conversational_ai.add_to_agent_knowledge_base( agent_id="21m00Tcm4TlvDq8ikWAM", ) @@ -1342,16 +2363,23 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/agents/{jsonable_encoder(agent_id)}", - method="DELETE", + f"v1/convai/agents/{jsonable_encoder(agent_id)}/add-to-knowledge-base", + method="POST", + data={ + "url": url, + }, + files={ + "file": file, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Dict[str, str], + AddKnowledgeBaseResponseModel, construct_type( - type_=typing.Dict[str, str], # type: ignore + type_=AddKnowledgeBaseResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1370,44 +2398,34 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def update_agent( + async def get_agents( self, - agent_id: str, *, - conversation_config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - platform_settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - secrets: typing.Optional[ - typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem] - ] = OMIT, - name: typing.Optional[str] = OMIT, + cursor: typing.Optional[str] = None, + page_size: typing.Optional[int] = None, + search: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> GetAgentResponseModel: + ) -> 
GetAgentsPageResponseModel: """ - Patches an Agent settings + Returns a page of your agents and their metadata. Parameters ---------- - agent_id : str - The id of an agent. This is returned on agent creation. - - conversation_config : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Conversation configuration for an agent - - platform_settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. + cursor : typing.Optional[str] + Used for fetching next page. Cursor is returned in the response. - secrets : typing.Optional[typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem]] - A list of secrets for the agent. Can be used to add new secrets or update and delete the existing ones + page_size : typing.Optional[int] + How many Agents to return at maximum. Can not exceed 100, defaults to 30. - name : typing.Optional[str] - A name to make the agent easier to find + search : typing.Optional[str] + Search by agents name. request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - GetAgentResponseModel + GetAgentsPageResponseModel Successful Response Examples @@ -1422,38 +2440,27 @@ async def update_agent( async def main() -> None: - await client.conversational_ai.update_agent( - agent_id="21m00Tcm4TlvDq8ikWAM", - ) + await client.conversational_ai.get_agents() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/agents/{jsonable_encoder(agent_id)}", - method="PATCH", - json={ - "conversation_config": conversation_config, - "platform_settings": platform_settings, - "secrets": convert_and_respect_annotation_metadata( - object_=secrets, - annotation=typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem], - direction="write", - ), - "name": name, - }, - headers={ - "content-type": "application/json", + "v1/convai/agents", + method="GET", + params={ + "cursor": cursor, + "page_size": page_size, + "search": search, }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - GetAgentResponseModel, + GetAgentsPageResponseModel, construct_type( - type_=GetAgentResponseModel, # type: ignore + type_=GetAgentsPageResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1472,30 +2479,38 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def get_widget( + async def get_conversations( self, - agent_id: str, *, - conversation_signature: typing.Optional[str] = None, + cursor: typing.Optional[str] = None, + agent_id: typing.Optional[str] = None, + call_successful: typing.Optional[EvaluationSuccessResult] = None, + page_size: typing.Optional[int] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> GetAgentEmbedResponseModel: + ) -> GetConversationsPageResponseModel: """ - Retrieve the widget configuration for an agent + Get all conversations of agents that user 
owns. With option to restrict to a specific agent. Parameters ---------- - agent_id : str - The id of an agent. This is returned on agent creation. + cursor : typing.Optional[str] + Used for fetching next page. Cursor is returned in the response. - conversation_signature : typing.Optional[str] - An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint + agent_id : typing.Optional[str] + The id of the agent you're taking the action on. + + call_successful : typing.Optional[EvaluationSuccessResult] + The result of the success evaluation + + page_size : typing.Optional[int] + How many conversations to return at maximum. Can not exceed 100, defaults to 30. request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - GetAgentEmbedResponseModel + GetConversationsPageResponseModel Successful Response Examples @@ -1510,7 +2525,7 @@ async def get_widget( async def main() -> None: - await client.conversational_ai.get_widget( + await client.conversational_ai.get_conversations( agent_id="21m00Tcm4TlvDq8ikWAM", ) @@ -1518,19 +2533,22 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/agents/{jsonable_encoder(agent_id)}/widget", + "v1/convai/conversations", method="GET", params={ - "conversation_signature": conversation_signature, + "cursor": cursor, + "agent_id": agent_id, + "call_successful": call_successful, + "page_size": page_size, }, request_options=request_options, ) try: if 200 <= _response.status_code < 300: return typing.cast( - GetAgentEmbedResponseModel, + GetConversationsPageResponseModel, construct_type( - type_=GetAgentEmbedResponseModel, # type: ignore + type_=GetConversationsPageResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1549,23 +2567,23 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) 
raise ApiError(status_code=_response.status_code, body=_response_json) - async def get_link( - self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> GetAgentLinkResponseModel: + async def get_conversation( + self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetConversationResponseModel: """ - Get the current link used to share the agent with others + Get the details of a particular conversation Parameters ---------- - agent_id : str - The id of an agent. This is returned on agent creation. + conversation_id : str + The id of the conversation you're taking the action on. request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - GetAgentLinkResponseModel + GetConversationResponseModel Successful Response Examples @@ -1580,24 +2598,24 @@ async def get_link( async def main() -> None: - await client.conversational_ai.get_link( - agent_id="21m00Tcm4TlvDq8ikWAM", + await client.conversational_ai.get_conversation( + conversation_id="21m00Tcm4TlvDq8ikWAM", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/agents/{jsonable_encoder(agent_id)}/link", + f"v1/convai/conversations/{jsonable_encoder(conversation_id)}", method="GET", request_options=request_options, ) try: if 200 <= _response.status_code < 300: return typing.cast( - GetAgentLinkResponseModel, + GetConversationResponseModel, construct_type( - type_=GetAgentLinkResponseModel, # type: ignore + type_=GetConversationResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1616,26 +2634,23 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_avatar( - self, agent_id: str, *, avatar_file: core.File, request_options: typing.Optional[RequestOptions] = None - ) -> PostAgentAvatarResponseModel: + async def 
delete_conversation( + self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: """ - Sets the avatar for an agent displayed in the widget + Delete a particular conversation Parameters ---------- - agent_id : str - The id of an agent. This is returned on agent creation. - - avatar_file : core.File - See core.File for more documentation + conversation_id : str + The id of the conversation you're taking the action on. request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - PostAgentAvatarResponseModel + typing.Optional[typing.Any] Successful Response Examples @@ -1650,29 +2665,24 @@ async def post_avatar( async def main() -> None: - await client.conversational_ai.post_avatar( - agent_id="21m00Tcm4TlvDq8ikWAM", + await client.conversational_ai.delete_conversation( + conversation_id="21m00Tcm4TlvDq8ikWAM", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/agents/{jsonable_encoder(agent_id)}/avatar", - method="POST", - data={}, - files={ - "avatar_file": avatar_file, - }, + f"v1/convai/conversations/{jsonable_encoder(conversation_id)}", + method="DELETE", request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - PostAgentAvatarResponseModel, + typing.Optional[typing.Any], construct_type( - type_=PostAgentAvatarResponseModel, # type: ignore + type_=typing.Optional[typing.Any], # type: ignore object_=_response.json(), ), ) @@ -1691,27 +2701,23 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def get_knowledge_base_document( - self, agent_id: str, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> GetKnowledgeBaseReponseModel: + async def get_conversation_audio( + self, conversation_id: str, *, 
request_options: typing.Optional[RequestOptions] = None + ) -> None: """ - Get details about a specific documentation making up the agent's knowledge base + Get the audio recording of a particular conversation Parameters ---------- - agent_id : str - The id of an agent. This is returned on agent creation. - - documentation_id : str - The id of a document from the agent's knowledge base. This is returned on document addition. + conversation_id : str + The id of the conversation you're taking the action on. request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - GetKnowledgeBaseReponseModel - Successful Response + None Examples -------- @@ -1725,28 +2731,21 @@ async def get_knowledge_base_document( async def main() -> None: - await client.conversational_ai.get_knowledge_base_document( - agent_id="21m00Tcm4TlvDq8ikWAM", - documentation_id="21m00Tcm4TlvDq8ikWAM", + await client.conversational_ai.get_conversation_audio( + conversation_id="21m00Tcm4TlvDq8ikWAM", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/agents/{jsonable_encoder(agent_id)}/knowledge-base/{jsonable_encoder(documentation_id)}", + f"v1/convai/conversations/{jsonable_encoder(conversation_id)}/audio", method="GET", request_options=request_options, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - GetKnowledgeBaseReponseModel, - construct_type( - type_=GetKnowledgeBaseReponseModel, # type: ignore - object_=_response.json(), - ), - ) + return if _response.status_code == 422: raise UnprocessableEntityError( typing.cast( @@ -1762,29 +2761,30 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def add_agent_secret( - self, agent_id: str, *, name: str, secret_value: str, request_options: typing.Optional[RequestOptions] = None - ) -> AddAgentSecretResponseModel: + async 
def post_conversation_feedback( + self, + conversation_id: str, + *, + feedback: UserFeedbackScore, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: """ - Uploads a file or reference a webpage for the agent to use as part of it's knowledge base + Send the feedback for the given conversation Parameters ---------- - agent_id : str - The id of an agent. This is returned on agent creation. - - name : str - A name to help identify a particular agent secret + conversation_id : str + The id of the conversation you're taking the action on. - secret_value : str - A value to be encrypted and used by the agent + feedback : UserFeedbackScore + Either 'like' or 'dislike' to indicate the feedback for the conversation. request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - AddAgentSecretResponseModel + typing.Optional[typing.Any] Successful Response Examples @@ -1799,21 +2799,19 @@ async def add_agent_secret( async def main() -> None: - await client.conversational_ai.add_agent_secret( - agent_id="21m00Tcm4TlvDq8ikWAM", - name="name", - secret_value="secret_value", + await client.conversational_ai.post_conversation_feedback( + conversation_id="21m00Tcm4TlvDq8ikWAM", + feedback="like", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/agents/{jsonable_encoder(agent_id)}/add-secret", + f"v1/convai/conversations/{jsonable_encoder(conversation_id)}/feedback", method="POST", json={ - "name": name, - "secret_value": secret_value, + "feedback": feedback, }, headers={ "content-type": "application/json", @@ -1824,9 +2822,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - AddAgentSecretResponseModel, + typing.Optional[typing.Any], construct_type( - type_=AddAgentSecretResponseModel, # type: ignore + type_=typing.Optional[typing.Any], # type: ignore object_=_response.json(), ), ) @@ -1845,34 +2843,38 @@ 
async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def create_knowledge_base_document( + async def create_phone_number( self, - agent_id: str, *, - url: typing.Optional[str] = OMIT, - file: typing.Optional[core.File] = OMIT, + phone_number: str, + label: str, + sid: str, + token: str, request_options: typing.Optional[RequestOptions] = None, - ) -> AddKnowledgeBaseResponseModel: + ) -> CreatePhoneNumberResponseModel: """ - Uploads a file or reference a webpage for the agent to use as part of it's knowledge base + Import Phone Number from Twilio configuration Parameters ---------- - agent_id : str - The id of an agent. This is returned on agent creation. + phone_number : str + Phone number - url : typing.Optional[str] - URL to a page of documentation that the agent will have access to in order to interact with users. + label : str + Label for the phone number - file : typing.Optional[core.File] - See core.File for more documentation + sid : str + Twilio Account SID + + token : str + Twilio Token request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - AddKnowledgeBaseResponseModel + CreatePhoneNumberResponseModel Successful Response Examples @@ -1887,21 +2889,28 @@ async def create_knowledge_base_document( async def main() -> None: - await client.conversational_ai.create_knowledge_base_document( - agent_id="21m00Tcm4TlvDq8ikWAM", + await client.conversational_ai.create_phone_number( + phone_number="phone_number", + label="label", + sid="sid", + token="token", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/agents/{jsonable_encoder(agent_id)}/add-to-knowledge-base", + "v1/convai/phone-numbers/create", method="POST", - data={ - "url": url, + json={ + "phone_number": phone_number, + "label": label, + "sid": sid, + "token": token, + "provider": "twilio", }, - files={ - "file": file, + headers={ + "content-type": "application/json", }, request_options=request_options, omit=OMIT, @@ -1909,9 +2918,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - AddKnowledgeBaseResponseModel, + CreatePhoneNumberResponseModel, construct_type( - type_=AddKnowledgeBaseResponseModel, # type: ignore + type_=CreatePhoneNumberResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1930,34 +2939,23 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def get_agents( - self, - *, - cursor: typing.Optional[str] = None, - page_size: typing.Optional[int] = None, - search: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> GetAgentsPageResponseModel: + async def get_phone_number( + self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetPhoneNumberResponseModel: """ - Returns a page of your agents and their metadata. 
+ Retrieve Phone Number details by ID Parameters ---------- - cursor : typing.Optional[str] - Used for fetching next page. Cursor is returned in the response. - - page_size : typing.Optional[int] - How many Agents to return at maximum. Can not exceed 100, defaults to 30. - - search : typing.Optional[str] - Search by agents name. + phone_number_id : str + The id of an agent. This is returned on agent creation. request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - GetAgentsPageResponseModel + GetPhoneNumberResponseModel Successful Response Examples @@ -1972,27 +2970,24 @@ async def get_agents( async def main() -> None: - await client.conversational_ai.get_agents() + await client.conversational_ai.get_phone_number( + phone_number_id="TeaqRRdTcIfIu2i7BYfT", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v1/convai/agents", + f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}", method="GET", - params={ - "cursor": cursor, - "page_size": page_size, - "search": search, - }, request_options=request_options, ) try: if 200 <= _response.status_code < 300: return typing.cast( - GetAgentsPageResponseModel, + GetPhoneNumberResponseModel, construct_type( - type_=GetAgentsPageResponseModel, # type: ignore + type_=GetPhoneNumberResponseModel, # type: ignore object_=_response.json(), ), ) @@ -2011,38 +3006,23 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def get_conversations( - self, - *, - cursor: typing.Optional[str] = None, - agent_id: typing.Optional[str] = None, - call_successful: typing.Optional[EvaluationSuccessResult] = None, - page_size: typing.Optional[int] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> GetConversationsPageResponseModel: + async def delete_phone_number( + self, phone_number_id: str, *, 
request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: """ - Get all conversations of agents that user owns. With option to restrict to a specific agent. + Delete Phone Number by ID Parameters ---------- - cursor : typing.Optional[str] - Used for fetching next page. Cursor is returned in the response. - - agent_id : typing.Optional[str] - The id of the agent you're taking the action on. - - call_successful : typing.Optional[EvaluationSuccessResult] - The result of the success evaluation - - page_size : typing.Optional[int] - How many conversations to return at maximum. Can not exceed 100, defaults to 30. + phone_number_id : str + The id of an agent. This is returned on agent creation. request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - GetConversationsPageResponseModel + typing.Optional[typing.Any] Successful Response Examples @@ -2057,30 +3037,24 @@ async def get_conversations( async def main() -> None: - await client.conversational_ai.get_conversations( - agent_id="21m00Tcm4TlvDq8ikWAM", + await client.conversational_ai.delete_phone_number( + phone_number_id="TeaqRRdTcIfIu2i7BYfT", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v1/convai/conversations", - method="GET", - params={ - "cursor": cursor, - "agent_id": agent_id, - "call_successful": call_successful, - "page_size": page_size, - }, + f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}", + method="DELETE", request_options=request_options, ) try: if 200 <= _response.status_code < 300: return typing.cast( - GetConversationsPageResponseModel, + typing.Optional[typing.Any], construct_type( - type_=GetConversationsPageResponseModel, # type: ignore + type_=typing.Optional[typing.Any], # type: ignore object_=_response.json(), ), ) @@ -2099,23 +3073,29 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise 
ApiError(status_code=_response.status_code, body=_response_json) - async def get_conversation( - self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> GetConversationResponseModel: + async def update_phone_number( + self, + phone_number_id: str, + *, + agent_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetPhoneNumberResponseModel: """ - Get the details of a particular conversation + Update Phone Number details by ID Parameters ---------- - conversation_id : str - The id of the conversation you're taking the action on. + phone_number_id : str + The id of an agent. This is returned on agent creation. + + agent_id : typing.Optional[str] request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - GetConversationResponseModel + GetPhoneNumberResponseModel Successful Response Examples @@ -2130,24 +3110,31 @@ async def get_conversation( async def main() -> None: - await client.conversational_ai.get_conversation( - conversation_id="21m00Tcm4TlvDq8ikWAM", + await client.conversational_ai.update_phone_number( + phone_number_id="TeaqRRdTcIfIu2i7BYfT", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/conversations/{jsonable_encoder(conversation_id)}", - method="GET", + f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}", + method="PATCH", + json={ + "agent_id": agent_id, + }, + headers={ + "content-type": "application/json", + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - GetConversationResponseModel, + GetPhoneNumberResponseModel, construct_type( - type_=GetConversationResponseModel, # type: ignore + type_=GetPhoneNumberResponseModel, # type: ignore object_=_response.json(), ), ) @@ -2166,23 +3153,21 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise 
ApiError(status_code=_response.status_code, body=_response_json) - async def get_conversation_audio( - self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: + async def get_phone_numbers( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[GetPhoneNumberResponseModel]: """ - Get the audio recording of a particular conversation + Retrieve all Phone Numbers Parameters ---------- - conversation_id : str - The id of the conversation you're taking the action on. - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - None + typing.List[GetPhoneNumberResponseModel] + Successful Response Examples -------- @@ -2196,21 +3181,25 @@ async def get_conversation_audio( async def main() -> None: - await client.conversational_ai.get_conversation_audio( - conversation_id="21m00Tcm4TlvDq8ikWAM", - ) + await client.conversational_ai.get_phone_numbers() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/conversations/{jsonable_encoder(conversation_id)}/audio", + "v1/convai/phone-numbers/", method="GET", request_options=request_options, ) try: if 200 <= _response.status_code < 300: - return + return typing.cast( + typing.List[GetPhoneNumberResponseModel], + construct_type( + type_=typing.List[GetPhoneNumberResponseModel], # type: ignore + object_=_response.json(), + ), + ) if _response.status_code == 422: raise UnprocessableEntityError( typing.cast( diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index 8c4081c2..7f749879 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.13.5", + "X-Fern-SDK-Version": "1.20.0", } if self._api_key is not None: 
headers["xi-api-key"] = self._api_key diff --git a/src/elevenlabs/dubbing/__init__.py b/src/elevenlabs/dubbing/__init__.py index 6e3590aa..54967ecf 100644 --- a/src/elevenlabs/dubbing/__init__.py +++ b/src/elevenlabs/dubbing/__init__.py @@ -1,5 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -from .types import GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType +from .types import DubbingGetTranscriptForDubRequestFormatType -__all__ = ["GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType"] +__all__ = ["DubbingGetTranscriptForDubRequestFormatType"] diff --git a/src/elevenlabs/dubbing/client.py b/src/elevenlabs/dubbing/client.py index 5a429299..97be0722 100644 --- a/src/elevenlabs/dubbing/client.py +++ b/src/elevenlabs/dubbing/client.py @@ -12,9 +12,7 @@ from ..core.api_error import ApiError from ..types.dubbing_metadata_response import DubbingMetadataResponse from ..core.jsonable_encoder import jsonable_encoder -from .types.get_transcript_for_dub_v_1_dubbing_dubbing_id_transcript_language_code_get_request_format_type import ( - GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType, -) +from .types.dubbing_get_transcript_for_dub_request_format_type import DubbingGetTranscriptForDubRequestFormatType from ..core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters @@ -320,9 +318,7 @@ def get_transcript_for_dub( dubbing_id: str, language_code: str, *, - format_type: typing.Optional[ - GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType - ] = None, + format_type: typing.Optional[DubbingGetTranscriptForDubRequestFormatType] = None, request_options: typing.Optional[RequestOptions] = None, ) -> typing.Optional[typing.Any]: """ @@ -336,7 +332,7 @@ def get_transcript_for_dub( language_code : str ID of the language. 
- format_type : typing.Optional[GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType] + format_type : typing.Optional[DubbingGetTranscriptForDubRequestFormatType] Format to use for the subtitle file, either 'srt' or 'webvtt' request_options : typing.Optional[RequestOptions] @@ -715,9 +711,7 @@ async def get_transcript_for_dub( dubbing_id: str, language_code: str, *, - format_type: typing.Optional[ - GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType - ] = None, + format_type: typing.Optional[DubbingGetTranscriptForDubRequestFormatType] = None, request_options: typing.Optional[RequestOptions] = None, ) -> typing.Optional[typing.Any]: """ @@ -731,7 +725,7 @@ async def get_transcript_for_dub( language_code : str ID of the language. - format_type : typing.Optional[GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType] + format_type : typing.Optional[DubbingGetTranscriptForDubRequestFormatType] Format to use for the subtitle file, either 'srt' or 'webvtt' request_options : typing.Optional[RequestOptions] diff --git a/src/elevenlabs/dubbing/types/__init__.py b/src/elevenlabs/dubbing/types/__init__.py index 6692b1f7..0c667fb2 100644 --- a/src/elevenlabs/dubbing/types/__init__.py +++ b/src/elevenlabs/dubbing/types/__init__.py @@ -1,7 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
-from .get_transcript_for_dub_v_1_dubbing_dubbing_id_transcript_language_code_get_request_format_type import ( - GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType, -) +from .dubbing_get_transcript_for_dub_request_format_type import DubbingGetTranscriptForDubRequestFormatType -__all__ = ["GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType"] +__all__ = ["DubbingGetTranscriptForDubRequestFormatType"] diff --git a/src/elevenlabs/dubbing/types/dubbing_get_transcript_for_dub_request_format_type.py b/src/elevenlabs/dubbing/types/dubbing_get_transcript_for_dub_request_format_type.py new file mode 100644 index 00000000..ed4d2ea2 --- /dev/null +++ b/src/elevenlabs/dubbing/types/dubbing_get_transcript_for_dub_request_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DubbingGetTranscriptForDubRequestFormatType = typing.Union[typing.Literal["srt", "webvtt"], typing.Any] diff --git a/src/elevenlabs/dubbing/types/get_transcript_for_dub_v_1_dubbing_dubbing_id_transcript_language_code_get_request_format_type.py b/src/elevenlabs/dubbing/types/get_transcript_for_dub_v_1_dubbing_dubbing_id_transcript_language_code_get_request_format_type.py deleted file mode 100644 index 7c651c60..00000000 --- a/src/elevenlabs/dubbing/types/get_transcript_for_dub_v_1_dubbing_dubbing_id_transcript_language_code_get_request_format_type.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType = typing.Union[ - typing.Literal["srt", "webvtt"], typing.Any -] diff --git a/src/elevenlabs/projects/client.py b/src/elevenlabs/projects/client.py index a286da04..e0917f60 100644 --- a/src/elevenlabs/projects/client.py +++ b/src/elevenlabs/projects/client.py @@ -17,7 +17,6 @@ from ..core.jsonable_encoder import jsonable_encoder from ..types.edit_project_response_model import EditProjectResponseModel from ..types.project_snapshots_response import ProjectSnapshotsResponse -from ..types.add_chapter_response_model import AddChapterResponseModel from ..types.pronunciation_dictionary_version_locator import PronunciationDictionaryVersionLocator from ..core.serialization import convert_and_respect_annotation_metadata from ..core.client_wrapper import AsyncClientWrapper @@ -730,85 +729,6 @@ def stream_archive( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def add_chapter_to_a_project( - self, - project_id: str, - *, - name: str, - from_url: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AddChapterResponseModel: - """ - Creates a new chapter either as blank or from a URL. - - Parameters - ---------- - project_id : str - The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. - - name : str - The name of the chapter, used for identification only. - - from_url : typing.Optional[str] - An optional URL from which we will extract content to initialize the project. If this is set, 'from_url' must be null. If neither 'from_url' or 'from_document' are provided we will initialize the project as blank. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AddChapterResponseModel - Successful Response - - Examples - -------- - from elevenlabs import ElevenLabs - - client = ElevenLabs( - api_key="YOUR_API_KEY", - ) - client.projects.add_chapter_to_a_project( - project_id="21m00Tcm4TlvDq8ikWAM", - name="name", - ) - """ - _response = self._client_wrapper.httpx_client.request( - f"v1/projects/{jsonable_encoder(project_id)}/chapters/add", - method="POST", - json={ - "name": name, - "from_url": from_url, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - AddChapterResponseModel, - construct_type( - type_=AddChapterResponseModel, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - def update_pronunciation_dictionaries( self, project_id: str, @@ -1661,93 +1581,6 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def add_chapter_to_a_project( - self, - project_id: str, - *, - name: str, - from_url: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AddChapterResponseModel: - """ - Creates a new chapter either as blank or from a URL. - - Parameters - ---------- - project_id : str - The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects. - - name : str - The name of the chapter, used for identification only. 
- - from_url : typing.Optional[str] - An optional URL from which we will extract content to initialize the project. If this is set, 'from_url' must be null. If neither 'from_url' or 'from_document' are provided we will initialize the project as blank. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AddChapterResponseModel - Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.projects.add_chapter_to_a_project( - project_id="21m00Tcm4TlvDq8ikWAM", - name="name", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - f"v1/projects/{jsonable_encoder(project_id)}/chapters/add", - method="POST", - json={ - "name": name, - "from_url": from_url, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - AddChapterResponseModel, - construct_type( - type_=AddChapterResponseModel, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - async def update_pronunciation_dictionaries( self, project_id: str, diff --git a/src/elevenlabs/pronunciation_dictionary/client.py b/src/elevenlabs/pronunciation_dictionary/client.py index 40356617..8efb5601 100644 --- a/src/elevenlabs/pronunciation_dictionary/client.py +++ b/src/elevenlabs/pronunciation_dictionary/client.py @@ -115,7 +115,7 @@ def 
add_from_file( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def add_rules_to_the_pronunciation_dictionary( + def add_rules( self, pronunciation_dictionary_id: str, *, @@ -153,7 +153,7 @@ def add_rules_to_the_pronunciation_dictionary( client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary( + client.pronunciation_dictionary.add_rules( pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", rules=[ PronunciationDictionaryRule_Phoneme( @@ -202,7 +202,7 @@ def add_rules_to_the_pronunciation_dictionary( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def remove_rules_from_the_pronunciation_dictionary( + def remove_rules( self, pronunciation_dictionary_id: str, *, @@ -235,7 +235,7 @@ def remove_rules_from_the_pronunciation_dictionary( client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary( + client.pronunciation_dictionary.remove_rules( pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", rule_strings=["rule_strings"], ) @@ -558,7 +558,7 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def add_rules_to_the_pronunciation_dictionary( + async def add_rules( self, pronunciation_dictionary_id: str, *, @@ -601,7 +601,7 @@ async def add_rules_to_the_pronunciation_dictionary( async def main() -> None: - await client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary( + await client.pronunciation_dictionary.add_rules( pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", rules=[ PronunciationDictionaryRule_Phoneme( @@ -653,7 +653,7 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, 
body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def remove_rules_from_the_pronunciation_dictionary( + async def remove_rules( self, pronunciation_dictionary_id: str, *, @@ -691,7 +691,7 @@ async def remove_rules_from_the_pronunciation_dictionary( async def main() -> None: - await client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary( + await client.pronunciation_dictionary.remove_rules( pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", rule_strings=["rule_strings"], ) diff --git a/src/elevenlabs/types/__init__.py b/src/elevenlabs/types/__init__.py index 389cd770..86ca5244 100644 --- a/src/elevenlabs/types/__init__.py +++ b/src/elevenlabs/types/__init__.py @@ -13,6 +13,7 @@ from .agent_ban import AgentBan from .agent_config import AgentConfig from .agent_config_override import AgentConfigOverride +from .agent_config_override_config import AgentConfigOverrideConfig from .agent_metadata_response_model import AgentMetadataResponseModel from .agent_platform_settings import AgentPlatformSettings from .agent_summary_response_model import AgentSummaryResponseModel @@ -42,16 +43,19 @@ from .conversation_charging_common_model import ConversationChargingCommonModel from .conversation_config import ConversationConfig from .conversation_config_client_override import ConversationConfigClientOverride +from .conversation_config_client_override_config import ConversationConfigClientOverrideConfig from .conversation_history_analysis_common_model import ConversationHistoryAnalysisCommonModel from .conversation_history_evaluation_criteria_result_common_model import ( ConversationHistoryEvaluationCriteriaResultCommonModel, ) +from .conversation_history_feedback_common_model import ConversationHistoryFeedbackCommonModel from .conversation_history_metadata_common_model import ConversationHistoryMetadataCommonModel from .conversation_history_transcript_common_model import 
ConversationHistoryTranscriptCommonModel from .conversation_history_transcript_common_model_role import ConversationHistoryTranscriptCommonModelRole from .conversation_history_transcript_tool_call_common_model import ConversationHistoryTranscriptToolCallCommonModel from .conversation_history_transcript_tool_result_common_model import ConversationHistoryTranscriptToolResultCommonModel from .conversation_initiation_client_data import ConversationInitiationClientData +from .conversation_initiation_client_data_config import ConversationInitiationClientDataConfig from .conversation_signed_url_response_model import ConversationSignedUrlResponseModel from .conversation_summary_response_model import ConversationSummaryResponseModel from .conversation_summary_response_model_status import ConversationSummaryResponseModelStatus @@ -59,6 +63,7 @@ from .conversation_token_purpose import ConversationTokenPurpose from .conversational_config import ConversationalConfig from .create_agent_response_model import CreateAgentResponseModel +from .create_phone_number_response_model import CreatePhoneNumberResponseModel from .currency import Currency from .custom_llm import CustomLlm from .data_collection_result_common_model import DataCollectionResultCommonModel @@ -95,6 +100,7 @@ from .get_knowledge_base_reponse_model import GetKnowledgeBaseReponseModel from .get_knowledge_base_reponse_model_type import GetKnowledgeBaseReponseModelType from .get_library_voices_response import GetLibraryVoicesResponse +from .get_phone_number_response_model import GetPhoneNumberResponseModel from .get_projects_response import GetProjectsResponse from .get_pronunciation_dictionaries_metadata_response_model import GetPronunciationDictionariesMetadataResponseModel from .get_pronunciation_dictionary_metadata_response import GetPronunciationDictionaryMetadataResponse @@ -120,13 +126,21 @@ from .model import Model from .model_rates_response_model import ModelRatesResponseModel from 
.model_response_model_concurrency_group import ModelResponseModelConcurrencyGroup +from .moderation_status_response_model import ModerationStatusResponseModel +from .moderation_status_response_model_safety_status import ModerationStatusResponseModelSafetyStatus +from .moderation_status_response_model_warning_status import ModerationStatusResponseModelWarningStatus from .object_json_schema_property import ObjectJsonSchemaProperty from .object_json_schema_property_properties_value import ObjectJsonSchemaPropertyPropertiesValue from .optimize_streaming_latency import OptimizeStreamingLatency from .orb_avatar import OrbAvatar from .output_format import OutputFormat +from .phone_number_agent_info import PhoneNumberAgentInfo from .post_agent_avatar_response_model import PostAgentAvatarResponseModel +from .privacy_config import PrivacyConfig from .profile_page_response_model import ProfilePageResponseModel +from .project_creation_meta_response_model import ProjectCreationMetaResponseModel +from .project_creation_meta_response_model_status import ProjectCreationMetaResponseModelStatus +from .project_creation_meta_response_model_type import ProjectCreationMetaResponseModelType from .project_extended_response_model import ProjectExtendedResponseModel from .project_extended_response_model_access_level import ProjectExtendedResponseModelAccessLevel from .project_extended_response_model_apply_text_normalization import ProjectExtendedResponseModelApplyTextNormalization @@ -144,6 +158,7 @@ from .project_state import ProjectState from .prompt_agent import PromptAgent from .prompt_agent_override import PromptAgentOverride +from .prompt_agent_override_config import PromptAgentOverrideConfig from .prompt_agent_tools_item import PromptAgentToolsItem, PromptAgentToolsItem_Client, PromptAgentToolsItem_Webhook from .prompt_evaluation_criteria import PromptEvaluationCriteria from .pronunciation_dictionary_alias_rule_request_model import PronunciationDictionaryAliasRuleRequestModel @@ 
-157,6 +172,9 @@ from .recording_response import RecordingResponse from .remove_pronunciation_dictionary_rules_response_model import RemovePronunciationDictionaryRulesResponseModel from .review_status import ReviewStatus +from .safety import Safety +from .safety_evaluation import SafetyEvaluation +from .safety_rule import SafetyRule from .speech_history_item_response import SpeechHistoryItemResponse from .speech_history_item_response_model_source import SpeechHistoryItemResponseModelSource from .speech_history_item_response_model_voice_category import SpeechHistoryItemResponseModelVoiceCategory @@ -166,9 +184,11 @@ from .subscription_response_model_character_refresh_period import SubscriptionResponseModelCharacterRefreshPeriod from .subscription_response_model_currency import SubscriptionResponseModelCurrency from .subscription_status import SubscriptionStatus +from .telephony_provider import TelephonyProvider from .text_to_speech_as_stream_request import TextToSpeechAsStreamRequest from .tts_conversational_config import TtsConversationalConfig from .tts_conversational_config_override import TtsConversationalConfigOverride +from .tts_conversational_config_override_config import TtsConversationalConfigOverrideConfig from .tts_conversational_model import TtsConversationalModel from .tts_optimize_streaming_latency import TtsOptimizeStreamingLatency from .tts_output_format import TtsOutputFormat @@ -177,6 +197,8 @@ from .url_avatar import UrlAvatar from .usage_characters_response_model import UsageCharactersResponseModel from .user import User +from .user_feedback import UserFeedback +from .user_feedback_score import UserFeedbackScore from .validation_error import ValidationError from .validation_error_loc_item import ValidationErrorLocItem from .verification_attempt_response import VerificationAttemptResponse @@ -198,6 +220,7 @@ from .webhook_tool_api_schema_config_method import WebhookToolApiSchemaConfigMethod from 
.webhook_tool_api_schema_config_request_headers_value import WebhookToolApiSchemaConfigRequestHeadersValue from .webhook_tool_config import WebhookToolConfig +from .widget_feedback_mode import WidgetFeedbackMode __all__ = [ "Accent", @@ -213,6 +236,7 @@ "AgentBan", "AgentConfig", "AgentConfigOverride", + "AgentConfigOverrideConfig", "AgentMetadataResponseModel", "AgentPlatformSettings", "AgentSummaryResponseModel", @@ -242,14 +266,17 @@ "ConversationChargingCommonModel", "ConversationConfig", "ConversationConfigClientOverride", + "ConversationConfigClientOverrideConfig", "ConversationHistoryAnalysisCommonModel", "ConversationHistoryEvaluationCriteriaResultCommonModel", + "ConversationHistoryFeedbackCommonModel", "ConversationHistoryMetadataCommonModel", "ConversationHistoryTranscriptCommonModel", "ConversationHistoryTranscriptCommonModelRole", "ConversationHistoryTranscriptToolCallCommonModel", "ConversationHistoryTranscriptToolResultCommonModel", "ConversationInitiationClientData", + "ConversationInitiationClientDataConfig", "ConversationSignedUrlResponseModel", "ConversationSummaryResponseModel", "ConversationSummaryResponseModelStatus", @@ -257,6 +284,7 @@ "ConversationTokenPurpose", "ConversationalConfig", "CreateAgentResponseModel", + "CreatePhoneNumberResponseModel", "Currency", "CustomLlm", "DataCollectionResultCommonModel", @@ -289,6 +317,7 @@ "GetKnowledgeBaseReponseModel", "GetKnowledgeBaseReponseModelType", "GetLibraryVoicesResponse", + "GetPhoneNumberResponseModel", "GetProjectsResponse", "GetPronunciationDictionariesMetadataResponseModel", "GetPronunciationDictionaryMetadataResponse", @@ -314,13 +343,21 @@ "Model", "ModelRatesResponseModel", "ModelResponseModelConcurrencyGroup", + "ModerationStatusResponseModel", + "ModerationStatusResponseModelSafetyStatus", + "ModerationStatusResponseModelWarningStatus", "ObjectJsonSchemaProperty", "ObjectJsonSchemaPropertyPropertiesValue", "OptimizeStreamingLatency", "OrbAvatar", "OutputFormat", + 
"PhoneNumberAgentInfo", "PostAgentAvatarResponseModel", + "PrivacyConfig", "ProfilePageResponseModel", + "ProjectCreationMetaResponseModel", + "ProjectCreationMetaResponseModelStatus", + "ProjectCreationMetaResponseModelType", "ProjectExtendedResponseModel", "ProjectExtendedResponseModelAccessLevel", "ProjectExtendedResponseModelApplyTextNormalization", @@ -338,6 +375,7 @@ "ProjectState", "PromptAgent", "PromptAgentOverride", + "PromptAgentOverrideConfig", "PromptAgentToolsItem", "PromptAgentToolsItem_Client", "PromptAgentToolsItem_Webhook", @@ -353,6 +391,9 @@ "RecordingResponse", "RemovePronunciationDictionaryRulesResponseModel", "ReviewStatus", + "Safety", + "SafetyEvaluation", + "SafetyRule", "SpeechHistoryItemResponse", "SpeechHistoryItemResponseModelSource", "SpeechHistoryItemResponseModelVoiceCategory", @@ -362,9 +403,11 @@ "SubscriptionResponseModelCharacterRefreshPeriod", "SubscriptionResponseModelCurrency", "SubscriptionStatus", + "TelephonyProvider", "TextToSpeechAsStreamRequest", "TtsConversationalConfig", "TtsConversationalConfigOverride", + "TtsConversationalConfigOverrideConfig", "TtsConversationalModel", "TtsOptimizeStreamingLatency", "TtsOutputFormat", @@ -373,6 +416,8 @@ "UrlAvatar", "UsageCharactersResponseModel", "User", + "UserFeedback", + "UserFeedbackScore", "ValidationError", "ValidationErrorLocItem", "VerificationAttemptResponse", @@ -394,4 +439,5 @@ "WebhookToolApiSchemaConfigMethod", "WebhookToolApiSchemaConfigRequestHeadersValue", "WebhookToolConfig", + "WidgetFeedbackMode", ] diff --git a/src/elevenlabs/types/agent_config_override_config.py b/src/elevenlabs/types/agent_config_override_config.py new file mode 100644 index 00000000..bdf830b3 --- /dev/null +++ b/src/elevenlabs/types/agent_config_override_config.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .prompt_agent_override_config import PromptAgentOverrideConfig +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class AgentConfigOverrideConfig(UncheckedBaseModel): + prompt: typing.Optional[PromptAgentOverrideConfig] = None + first_message: typing.Optional[bool] = None + language: typing.Optional[bool] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/agent_platform_settings.py b/src/elevenlabs/types/agent_platform_settings.py index 595bf41a..ac146a58 100644 --- a/src/elevenlabs/types/agent_platform_settings.py +++ b/src/elevenlabs/types/agent_platform_settings.py @@ -6,7 +6,10 @@ from .evaluation_settings import EvaluationSettings from .embed_config import EmbedConfig from .literal_json_schema_property import LiteralJsonSchemaProperty +from .conversation_initiation_client_data_config import ConversationInitiationClientDataConfig from .agent_ban import AgentBan +from .safety import Safety +from .privacy_config import PrivacyConfig from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic @@ -16,7 +19,10 @@ class AgentPlatformSettings(UncheckedBaseModel): evaluation: typing.Optional[EvaluationSettings] = None widget: typing.Optional[EmbedConfig] = None data_collection: typing.Optional[typing.Dict[str, LiteralJsonSchemaProperty]] = None + overrides: typing.Optional[ConversationInitiationClientDataConfig] = None ban: typing.Optional[AgentBan] = None + safety: typing.Optional[Safety] = None + privacy: typing.Optional[PrivacyConfig] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git 
a/src/elevenlabs/types/conversation_config_client_override_config.py b/src/elevenlabs/types/conversation_config_client_override_config.py new file mode 100644 index 00000000..69071e68 --- /dev/null +++ b/src/elevenlabs/types/conversation_config_client_override_config.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .agent_config_override_config import AgentConfigOverrideConfig +from .tts_conversational_config_override_config import TtsConversationalConfigOverrideConfig +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConversationConfigClientOverrideConfig(UncheckedBaseModel): + agent: typing.Optional[AgentConfigOverrideConfig] = None + tts: typing.Optional[TtsConversationalConfigOverrideConfig] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conversation_history_feedback_common_model.py b/src/elevenlabs/types/conversation_history_feedback_common_model.py new file mode 100644 index 00000000..7b265e3c --- /dev/null +++ b/src/elevenlabs/types/conversation_history_feedback_common_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .user_feedback_score import UserFeedbackScore +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConversationHistoryFeedbackCommonModel(UncheckedBaseModel): + overall_score: typing.Optional[UserFeedbackScore] = None + likes: typing.Optional[int] = None + dislikes: typing.Optional[int] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/conversation_history_metadata_common_model.py b/src/elevenlabs/types/conversation_history_metadata_common_model.py index de108d43..7db13c3c 100644 --- a/src/elevenlabs/types/conversation_history_metadata_common_model.py +++ b/src/elevenlabs/types/conversation_history_metadata_common_model.py @@ -2,6 +2,7 @@ from ..core.unchecked_base_model import UncheckedBaseModel import typing +from .conversation_history_feedback_common_model import ConversationHistoryFeedbackCommonModel from .authorization_method import AuthorizationMethod from .conversation_charging_common_model import ConversationChargingCommonModel from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -12,6 +13,7 @@ class ConversationHistoryMetadataCommonModel(UncheckedBaseModel): start_time_unix_secs: int call_duration_secs: int cost: typing.Optional[int] = None + feedback: typing.Optional[ConversationHistoryFeedbackCommonModel] = None authorization_method: typing.Optional[AuthorizationMethod] = None charging: typing.Optional[ConversationChargingCommonModel] = None diff --git a/src/elevenlabs/types/conversation_history_transcript_common_model.py b/src/elevenlabs/types/conversation_history_transcript_common_model.py index 3285d888..b391ceb8 100644 --- a/src/elevenlabs/types/conversation_history_transcript_common_model.py +++ 
b/src/elevenlabs/types/conversation_history_transcript_common_model.py @@ -5,6 +5,7 @@ import typing from .conversation_history_transcript_tool_call_common_model import ConversationHistoryTranscriptToolCallCommonModel from .conversation_history_transcript_tool_result_common_model import ConversationHistoryTranscriptToolResultCommonModel +from .user_feedback import UserFeedback from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic @@ -14,7 +15,9 @@ class ConversationHistoryTranscriptCommonModel(UncheckedBaseModel): message: typing.Optional[str] = None tool_calls: typing.Optional[typing.List[ConversationHistoryTranscriptToolCallCommonModel]] = None tool_results: typing.Optional[typing.List[ConversationHistoryTranscriptToolResultCommonModel]] = None + feedback: typing.Optional[UserFeedback] = None time_in_call_secs: int + conversation_turn_metrics: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/conversation_initiation_client_data_config.py b/src/elevenlabs/types/conversation_initiation_client_data_config.py new file mode 100644 index 00000000..a92f1e28 --- /dev/null +++ b/src/elevenlabs/types/conversation_initiation_client_data_config.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .conversation_config_client_override_config import ConversationConfigClientOverrideConfig +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConversationInitiationClientDataConfig(UncheckedBaseModel): + conversation_config_override: typing.Optional[ConversationConfigClientOverrideConfig] = None + custom_llm_extra_body: typing.Optional[bool] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/create_phone_number_response_model.py b/src/elevenlabs/types/create_phone_number_response_model.py new file mode 100644 index 00000000..86642902 --- /dev/null +++ b/src/elevenlabs/types/create_phone_number_response_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class CreatePhoneNumberResponseModel(UncheckedBaseModel): + phone_number_id: str = pydantic.Field() + """ + Phone entity id + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/embed_config.py b/src/elevenlabs/types/embed_config.py index d67f2d24..32fa8bd5 100644 --- a/src/elevenlabs/types/embed_config.py +++ b/src/elevenlabs/types/embed_config.py @@ -4,6 +4,7 @@ import typing from .embed_variant import EmbedVariant from .embed_config_avatar import EmbedConfigAvatar +from .widget_feedback_mode import WidgetFeedbackMode from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic @@ -11,6 +12,7 @@ class EmbedConfig(UncheckedBaseModel): variant: typing.Optional[EmbedVariant] = None avatar: typing.Optional[EmbedConfigAvatar] = None + feedback_mode: typing.Optional[WidgetFeedbackMode] = None custom_avatar_path: typing.Optional[str] = None bg_color: typing.Optional[str] = None text_color: typing.Optional[str] = None diff --git a/src/elevenlabs/types/get_phone_number_response_model.py b/src/elevenlabs/types/get_phone_number_response_model.py new file mode 100644 index 00000000..3917b8b3 --- /dev/null +++ b/src/elevenlabs/types/get_phone_number_response_model.py @@ -0,0 +1,37 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +from .telephony_provider import TelephonyProvider +import typing +from .phone_number_agent_info import PhoneNumberAgentInfo +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class GetPhoneNumberResponseModel(UncheckedBaseModel): + phone_number: str = pydantic.Field() + """ + Phone number + """ + + provider: TelephonyProvider = pydantic.Field(default="twilio") + """ + Phone provider + """ + + label: str = pydantic.Field() + """ + Label for the phone number + """ + + phone_number_id: str + assigned_agent: typing.Optional[PhoneNumberAgentInfo] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/moderation_status_response_model.py b/src/elevenlabs/types/moderation_status_response_model.py new file mode 100644 index 00000000..c1055274 --- /dev/null +++ b/src/elevenlabs/types/moderation_status_response_model.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +from .moderation_status_response_model_safety_status import ModerationStatusResponseModelSafetyStatus +from .moderation_status_response_model_warning_status import ModerationStatusResponseModelWarningStatus +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class ModerationStatusResponseModel(UncheckedBaseModel): + is_in_probation: bool + enterprise_check_nogo_voice: bool + enterprise_check_block_nogo_voice: bool + never_live_moderate: bool + nogo_voice_similar_voice_upload_count: int + enterprise_background_moderation_enabled: bool + safety_status: ModerationStatusResponseModelSafetyStatus + warning_status: ModerationStatusResponseModelWarningStatus + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/moderation_status_response_model_safety_status.py b/src/elevenlabs/types/moderation_status_response_model_safety_status.py new file mode 100644 index 00000000..23c22d37 --- /dev/null +++ b/src/elevenlabs/types/moderation_status_response_model_safety_status.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ModerationStatusResponseModelSafetyStatus = typing.Union[typing.Literal["appeal_approved", "appeal_denied"], typing.Any] diff --git a/src/elevenlabs/types/moderation_status_response_model_warning_status.py b/src/elevenlabs/types/moderation_status_response_model_warning_status.py new file mode 100644 index 00000000..3869962f --- /dev/null +++ b/src/elevenlabs/types/moderation_status_response_model_warning_status.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +ModerationStatusResponseModelWarningStatus = typing.Union[typing.Literal["warning", "warning_cleared"], typing.Any] diff --git a/src/elevenlabs/types/phone_number_agent_info.py b/src/elevenlabs/types/phone_number_agent_info.py new file mode 100644 index 00000000..f20c291a --- /dev/null +++ b/src/elevenlabs/types/phone_number_agent_info.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class PhoneNumberAgentInfo(UncheckedBaseModel): + agent_id: str + agent_name: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/privacy_config.py b/src/elevenlabs/types/privacy_config.py new file mode 100644 index 00000000..928860b0 --- /dev/null +++ b/src/elevenlabs/types/privacy_config.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class PrivacyConfig(UncheckedBaseModel): + record_voice: typing.Optional[bool] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/project_creation_meta_response_model.py b/src/elevenlabs/types/project_creation_meta_response_model.py new file mode 100644 index 00000000..fb16cd92 --- /dev/null +++ b/src/elevenlabs/types/project_creation_meta_response_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from .project_creation_meta_response_model_status import ProjectCreationMetaResponseModelStatus +from .project_creation_meta_response_model_type import ProjectCreationMetaResponseModelType +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class ProjectCreationMetaResponseModel(UncheckedBaseModel): + creation_progress: float + status: ProjectCreationMetaResponseModelStatus + type: ProjectCreationMetaResponseModelType + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/project_creation_meta_response_model_status.py b/src/elevenlabs/types/project_creation_meta_response_model_status.py new file mode 100644 index 00000000..806a9a0f --- /dev/null +++ b/src/elevenlabs/types/project_creation_meta_response_model_status.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +ProjectCreationMetaResponseModelStatus = typing.Union[ + typing.Literal["pending", "creating", "finished", "failed"], typing.Any +] diff --git a/src/elevenlabs/types/project_creation_meta_response_model_type.py b/src/elevenlabs/types/project_creation_meta_response_model_type.py new file mode 100644 index 00000000..e4563898 --- /dev/null +++ b/src/elevenlabs/types/project_creation_meta_response_model_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ProjectCreationMetaResponseModelType = typing.Union[typing.Literal["blank", "generate_podcast"], typing.Any] diff --git a/src/elevenlabs/types/project_extended_response_model.py b/src/elevenlabs/types/project_extended_response_model.py index 9f9d4706..b40a4039 100644 --- a/src/elevenlabs/types/project_extended_response_model.py +++ b/src/elevenlabs/types/project_extended_response_model.py @@ -6,6 +6,7 @@ from .project_state import ProjectState from .project_extended_response_model_access_level import ProjectExtendedResponseModelAccessLevel from .project_extended_response_model_fiction import ProjectExtendedResponseModelFiction +from .project_creation_meta_response_model import ProjectCreationMetaResponseModel from .project_extended_response_model_quality_preset import ProjectExtendedResponseModelQualityPreset from .chapter_response import ChapterResponse from .pronunciation_dictionary_version_response_model import PronunciationDictionaryVersionResponseModel @@ -40,6 +41,7 @@ class ProjectExtendedResponseModel(UncheckedBaseModel): fiction: typing.Optional[ProjectExtendedResponseModelFiction] = None quality_check_on: bool quality_check_on_when_bulk_convert: bool + creation_meta: typing.Optional[ProjectCreationMetaResponseModel] = None quality_preset: ProjectExtendedResponseModelQualityPreset chapters: typing.List[ChapterResponse] pronunciation_dictionary_versions: typing.List[PronunciationDictionaryVersionResponseModel] diff --git 
a/src/elevenlabs/types/project_response.py b/src/elevenlabs/types/project_response.py index dbc496b4..23936047 100644 --- a/src/elevenlabs/types/project_response.py +++ b/src/elevenlabs/types/project_response.py @@ -6,6 +6,7 @@ from .project_state import ProjectState from .project_response_model_access_level import ProjectResponseModelAccessLevel from .project_response_model_fiction import ProjectResponseModelFiction +from .project_creation_meta_response_model import ProjectCreationMetaResponseModel from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic @@ -36,6 +37,7 @@ class ProjectResponse(UncheckedBaseModel): fiction: typing.Optional[ProjectResponseModelFiction] = None quality_check_on: bool quality_check_on_when_bulk_convert: bool + creation_meta: typing.Optional[ProjectCreationMetaResponseModel] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/project_state.py b/src/elevenlabs/types/project_state.py index 6e1398a5..d96580c0 100644 --- a/src/elevenlabs/types/project_state.py +++ b/src/elevenlabs/types/project_state.py @@ -2,4 +2,4 @@ import typing -ProjectState = typing.Union[typing.Literal["default", "converting", "in_queue"], typing.Any] +ProjectState = typing.Union[typing.Literal["creating", "default", "converting", "in_queue"], typing.Any] diff --git a/src/elevenlabs/types/prompt_agent_override_config.py b/src/elevenlabs/types/prompt_agent_override_config.py new file mode 100644 index 00000000..108ee790 --- /dev/null +++ b/src/elevenlabs/types/prompt_agent_override_config.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class PromptAgentOverrideConfig(UncheckedBaseModel): + prompt: typing.Optional[bool] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/safety.py b/src/elevenlabs/types/safety.py new file mode 100644 index 00000000..d5980e39 --- /dev/null +++ b/src/elevenlabs/types/safety.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .safety_evaluation import SafetyEvaluation +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class Safety(UncheckedBaseModel): + """ + Safety object that has the information of safety evaluations based on used voice. + """ + + ivc: typing.Optional[SafetyEvaluation] = None + non_ivc: typing.Optional[SafetyEvaluation] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/safety_evaluation.py b/src/elevenlabs/types/safety_evaluation.py new file mode 100644 index 00000000..0ac8cfc7 --- /dev/null +++ b/src/elevenlabs/types/safety_evaluation.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .safety_rule import SafetyRule +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class SafetyEvaluation(UncheckedBaseModel): + """ + Safety evaluation of the agent. 
Prompt and first message is taken into account. + The unsafe reason is provided from the evaluation + """ + + is_unsafe: typing.Optional[bool] = None + llm_reason: typing.Optional[str] = None + safety_prompt_version: typing.Optional[int] = None + matched_rule_id: typing.Optional[typing.List[SafetyRule]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/safety_rule.py b/src/elevenlabs/types/safety_rule.py new file mode 100644 index 00000000..982a0945 --- /dev/null +++ b/src/elevenlabs/types/safety_rule.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SafetyRule = typing.Union[ + typing.Literal[ + "sexual_minors", + "forget_moderation", + "extremism", + "scam_fraud", + "political", + "self_harm", + "illegal_distribution_medical", + "sexual_adults", + "unknown", + ], + typing.Any, +] diff --git a/src/elevenlabs/types/telephony_provider.py b/src/elevenlabs/types/telephony_provider.py new file mode 100644 index 00000000..a678a2ca --- /dev/null +++ b/src/elevenlabs/types/telephony_provider.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TelephonyProvider = typing.Literal["twilio"] diff --git a/src/elevenlabs/types/tts_conversational_config_override_config.py b/src/elevenlabs/types/tts_conversational_config_override_config.py new file mode 100644 index 00000000..13b1d756 --- /dev/null +++ b/src/elevenlabs/types/tts_conversational_config_override_config.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class TtsConversationalConfigOverrideConfig(UncheckedBaseModel): + voice_id: typing.Optional[bool] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/user.py b/src/elevenlabs/types/user.py index e90906c6..0dc3d1f4 100644 --- a/src/elevenlabs/types/user.py +++ b/src/elevenlabs/types/user.py @@ -9,6 +9,7 @@ class User(UncheckedBaseModel): subscription: SubscriptionResponse + subscription_extras: typing.Optional[typing.Any] = None is_new_user: bool xi_api_key: str can_use_delayed_payment_methods: bool diff --git a/src/elevenlabs/types/user_feedback.py b/src/elevenlabs/types/user_feedback.py new file mode 100644 index 00000000..47abecae --- /dev/null +++ b/src/elevenlabs/types/user_feedback.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +from .user_feedback_score import UserFeedbackScore +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class UserFeedback(UncheckedBaseModel): + score: UserFeedbackScore + time_in_call_secs: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/user_feedback_score.py b/src/elevenlabs/types/user_feedback_score.py new file mode 100644 index 00000000..5b8c3ec2 --- /dev/null +++ b/src/elevenlabs/types/user_feedback_score.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +UserFeedbackScore = typing.Union[typing.Literal["like", "dislike"], typing.Any] diff --git a/src/elevenlabs/types/widget_feedback_mode.py b/src/elevenlabs/types/widget_feedback_mode.py new file mode 100644 index 00000000..8dc295de --- /dev/null +++ b/src/elevenlabs/types/widget_feedback_mode.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +WidgetFeedbackMode = typing.Union[typing.Literal["none", "during", "end"], typing.Any] From f88124cb3a6b78cbaff7f66364932d7862f33f85 Mon Sep 17 00:00:00 2001 From: "fern-api[bot]" <115122769+fern-api[bot]@users.noreply.github.com> Date: Sat, 14 Dec 2024 12:43:19 +0000 Subject: [PATCH 32/45] SDK regeneration (#436) Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Co-authored-by: Louis J. 
<132601011+louisjoecodes@users.noreply.github.com> --- pyproject.toml | 2 +- src/elevenlabs/core/client_wrapper.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 493e7fd8..3202ff68 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.20.0" +version = "1.50.0" description = "" readme = "README.md" authors = [] diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index 7f749879..5027805d 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.20.0", + "X-Fern-SDK-Version": "1.50.0", } if self._api_key is not None: headers["xi-api-key"] = self._api_key From 6d4f625649564be6bac3d3dc571c5b726d12494c Mon Sep 17 00:00:00 2001 From: Louis Jordan Date: Sat, 14 Dec 2024 12:55:39 +0000 Subject: [PATCH 33/45] chore: add generate test, bump to 1.50.0 & change default model --- src/elevenlabs/client.py | 4 ++-- tests/test_tts.py | 29 ++++++++++++++++++++++++++++- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/src/elevenlabs/client.py b/src/elevenlabs/client.py index 348e2884..f4f66bce 100644 --- a/src/elevenlabs/client.py +++ b/src/elevenlabs/client.py @@ -122,7 +122,7 @@ def generate( text: Union[str, Iterator[str]], voice: Union[VoiceId, VoiceName, Voice] = DEFAULT_VOICE, voice_settings: typing.Optional[VoiceSettings] = DEFAULT_VOICE.settings, - model: Union[ModelId, Model] = "eleven_monolingual_v1", + model: Union[ModelId, Model] = "eleven_multilingual_v2", optimize_streaming_latency: typing.Optional[int] = 0, stream: bool = False, output_format: Optional[OutputFormat] = "mp3_44100_128", @@ -302,7 +302,7 @@ async def generate( text: str, voice: Union[VoiceId, VoiceName, Voice] = 
DEFAULT_VOICE, voice_settings: typing.Optional[VoiceSettings] = DEFAULT_VOICE.settings, - model: Union[ModelId, Model] = "eleven_monolingual_v1", + model: Union[ModelId, Model] = "eleven_multilingual_v2", optimize_streaming_latency: typing.Optional[int] = 0, stream: bool = False, output_format: Optional[OutputFormat] = "mp3_44100_128", diff --git a/tests/test_tts.py b/tests/test_tts.py index df076cce..1a2dbbd4 100644 --- a/tests/test_tts.py +++ b/tests/test_tts.py @@ -1,6 +1,6 @@ import asyncio -from elevenlabs import VoiceSettings, play +from elevenlabs import VoiceSettings, play, Voice from elevenlabs.client import AsyncElevenLabs, ElevenLabs from .utils import IN_GITHUB, DEFAULT_TEXT, DEFAULT_VOICE, DEFAULT_MODEL @@ -17,6 +17,33 @@ def test_tts_convert() -> None: play(audio) +def test_tts_generate() -> None: + """Test basic text-to-speech generation w/ custom generate.""" + client = ElevenLabs() + audio_generator = client.generate(text=DEFAULT_TEXT, voice="Brian", model=DEFAULT_MODEL) + audio = b"".join(audio_generator) + assert isinstance(audio, bytes), "TTS should return bytes" + if not IN_GITHUB: + play(audio) + + +def test_tts_generate_with_voice_settings() -> None: + """Test basic text-to-speech generation.""" + client = ElevenLabs() + audio_generator = client.generate( + text=DEFAULT_TEXT, + model=DEFAULT_MODEL, + voice=Voice( + voice_id="nPczCjzI2devNBz1zQrb", + settings=VoiceSettings(stability=0.71, similarity_boost=0.5, style=0.0, use_speaker_boost=True), + ), + ) + audio = b"".join(audio_generator) + assert isinstance(audio, bytes), "TTS should return bytes" + if not IN_GITHUB: + play(audio) + + def test_tts_convert_with_voice_settings() -> None: """Test TTS with custom voice settings.""" client = ElevenLabs() From e576ed9c1972aedfddcdee09a68f7169e87a4dc2 Mon Sep 17 00:00:00 2001 From: Louis Jordan Date: Sat, 14 Dec 2024 13:46:07 +0000 Subject: [PATCH 34/45] chore: add generate stream test --- tests/test_tts.py | 34 ++++++++++++++++++++++++---------- 1 
file changed, 24 insertions(+), 10 deletions(-) diff --git a/tests/test_tts.py b/tests/test_tts.py index 1a2dbbd4..0baf8312 100644 --- a/tests/test_tts.py +++ b/tests/test_tts.py @@ -7,16 +7,6 @@ import base64 -def test_tts_convert() -> None: - """Test basic text-to-speech generation.""" - client = ElevenLabs() - audio_generator = client.text_to_speech.convert(text=DEFAULT_TEXT, voice_id=DEFAULT_VOICE, model_id=DEFAULT_MODEL) - audio = b"".join(audio_generator) - assert isinstance(audio, bytes), "TTS should return bytes" - if not IN_GITHUB: - play(audio) - - def test_tts_generate() -> None: """Test basic text-to-speech generation w/ custom generate.""" client = ElevenLabs() @@ -44,6 +34,30 @@ def test_tts_generate_with_voice_settings() -> None: play(audio) +def test_tts_generate_stream() -> None: + """Test streaming text-to-speech generation.""" + client = ElevenLabs() + audio_generator = client.generate( + stream=True, + text=DEFAULT_TEXT, + model=DEFAULT_MODEL, + ) + audio = b"".join(audio_generator) + assert isinstance(audio, bytes), "TTS should return bytes" + if not IN_GITHUB: + play(audio) + + +def test_tts_convert() -> None: + """Test basic text-to-speech generation.""" + client = ElevenLabs() + audio_generator = client.text_to_speech.convert(text=DEFAULT_TEXT, voice_id=DEFAULT_VOICE, model_id=DEFAULT_MODEL) + audio = b"".join(audio_generator) + assert isinstance(audio, bytes), "TTS should return bytes" + if not IN_GITHUB: + play(audio) + + def test_tts_convert_with_voice_settings() -> None: """Test TTS with custom voice settings.""" client = ElevenLabs() From 8b3be4a77439b3ff27d009ae47803df7772b827d Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Sat, 14 Dec 2024 14:17:52 +0000 Subject: [PATCH 35/45] SDK regeneration --- poetry.lock | 6 +++--- pyproject.toml | 2 +- src/elevenlabs/core/client_wrapper.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index 
475c1afc..5dc74a47 100644 --- a/poetry.lock +++ b/poetry.lock @@ -38,13 +38,13 @@ trio = ["trio (>=0.26.1)"] [[package]] name = "certifi" -version = "2024.8.30" +version = "2024.12.14" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, - {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, + {file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"}, + {file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"}, ] [[package]] diff --git a/pyproject.toml b/pyproject.toml index 3202ff68..d458074e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.50.0" +version = "1.50.1" description = "" readme = "README.md" authors = [] diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index 5027805d..81697d1e 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.50.0", + "X-Fern-SDK-Version": "1.50.1", } if self._api_key is not None: headers["xi-api-key"] = self._api_key From 9e38ea6f3d273bc02e0e2953bad65257782aefc8 Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Sat, 14 Dec 2024 23:34:21 +0000 Subject: [PATCH 36/45] SDK regeneration --- pyproject.toml | 2 +- reference.md | 497 ++++++++++++++++-- src/elevenlabs/__init__.py | 6 - src/elevenlabs/core/client_wrapper.py | 2 +- src/elevenlabs/history/client.py | 26 +- 
src/elevenlabs/samples/client.py | 16 +- src/elevenlabs/speech_to_speech/client.py | 167 ++++-- .../text_to_sound_effects/client.py | 30 ++ src/elevenlabs/text_to_speech/client.py | 189 ++++--- src/elevenlabs/types/__init__.py | 6 - ...io_native_get_embed_code_response_model.py | 5 - src/elevenlabs/types/history.py | 5 - .../types/optimize_streaming_latency.py | 5 - src/elevenlabs/voices/client.py | 20 +- 14 files changed, 748 insertions(+), 228 deletions(-) delete mode 100644 src/elevenlabs/types/audio_native_get_embed_code_response_model.py delete mode 100644 src/elevenlabs/types/history.py delete mode 100644 src/elevenlabs/types/optimize_streaming_latency.py diff --git a/pyproject.toml b/pyproject.toml index d458074e..9d0acfa9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.50.1" +version = "1.50.2" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index 6823081c..3b867cd7 100644 --- a/reference.md +++ b/reference.md @@ -32,10 +32,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.history.get_all( - page_size=1, - voice_id="pMsXgVXv3BLzUgSXRplE", -) +client.history.get_all() ``` @@ -136,7 +133,7 @@ client = ElevenLabs( api_key="YOUR_API_KEY", ) client.history.get( - history_item_id="ja9xsmfGhxYcymxGcOGB", + history_item_id="HISTORY_ITEM_ID", ) ``` @@ -206,7 +203,7 @@ client = ElevenLabs( api_key="YOUR_API_KEY", ) client.history.delete( - history_item_id="ja9xsmfGhxYcymxGcOGB", + history_item_id="HISTORY_ITEM_ID", ) ``` @@ -276,7 +273,7 @@ client = ElevenLabs( api_key="YOUR_API_KEY", ) client.history.get_audio( - history_item_id="ja9xsmfGhxYcymxGcOGB", + history_item_id="HISTORY_ITEM_ID", ) ``` @@ -346,7 +343,7 @@ client = ElevenLabs( api_key="YOUR_API_KEY", ) client.history.download( - history_item_ids=["ja9xsmfGhxYcymxGcOGB"], + history_item_ids=["HISTORY_ITEM_ID"], ) ``` @@ -392,6 +389,92 @@ 
client.history.download(
    ## TextToSoundEffects +
    client.text_to_sound_effects.convert(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Converts a text of your choice into sound +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.text_to_sound_effects.convert( + text="Spacious braam suitable for high-impact movie trailer moments", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**text:** `str` — The text that will get converted into a sound effect. + +
    +
    + +
    +
    + +**duration_seconds:** `typing.Optional[float]` — The duration of the sound which will be generated in seconds. Must be at least 0.5 and at most 22. If set to None we will guess the optimal duration using the prompt. Defaults to None. + +
    +
    + +
    +
    + +**prompt_influence:** `typing.Optional[float]` — A higher prompt influence makes your generation follow the prompt more closely while also making generations less variable. Must be a value between 0 and 1. Defaults to 0.3. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. + +
    +
    +
    +
    + + +
    +
    +
    + ## AudioIsolation ## samples
    client.samples.delete(...) @@ -427,8 +510,8 @@ client = ElevenLabs( api_key="YOUR_API_KEY", ) client.samples.delete( - voice_id="ja9xsmfGhxYcymxGcOGB", - sample_id="pMsXgVXv3BLzUgSXRplE", + voice_id="VOICE_ID", + sample_id="SAMPLE_ID", ) ``` @@ -506,8 +589,8 @@ client = ElevenLabs( api_key="YOUR_API_KEY", ) client.samples.get_audio( - voice_id="ja9xsmfGhxYcymxGcOGB", - sample_id="pMsXgVXv3BLzUgSXRplE", + voice_id="VOICE_ID", + sample_id="SAMPLE_ID", ) ``` @@ -580,21 +663,16 @@ Converts text into speech using a voice of your choice and returns audio.
    ```python -from elevenlabs import ElevenLabs, VoiceSettings +from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) client.text_to_speech.convert( - voice_id="pMsXgVXv3BLzUgSXRplE", - optimize_streaming_latency="0", - output_format="mp3_22050_32", - text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”", - voice_settings=VoiceSettings( - stability=0.5, - similarity_boost=0.75, - style=0.0, - ), + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + text="The first move is what sets everything in motion.", + model_id="eleven_multilingual_v2", ) ``` @@ -635,7 +713,16 @@ client.text_to_speech.convert(
    -**optimize_streaming_latency:** `typing.Optional[OptimizeStreamingLatency]` — You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. +**optimize_streaming_latency:** `typing.Optional[int]` + +You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values: +0 - default mode (no latency optimizations) +1 - normal latency optimizations (about 50% of possible latency improvement of option 3) +2 - strong latency optimizations (about 75% of possible latency improvement of option 3) +3 - max latency optimizations +4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). + +Defaults to None.
    @@ -784,8 +871,10 @@ client = ElevenLabs( api_key="YOUR_API_KEY", ) client.text_to_speech.convert_with_timestamps( - voice_id="21m00Tcm4TlvDq8ikWAM", - text="text", + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + text="The first move is what sets everything in motion.", + model_id="eleven_multilingual_v2", ) ``` @@ -826,7 +915,16 @@ client.text_to_speech.convert_with_timestamps(
    -**optimize_streaming_latency:** `typing.Optional[OptimizeStreamingLatency]` — You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. +**optimize_streaming_latency:** `typing.Optional[int]` + +You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values: +0 - default mode (no latency optimizations) +1 - normal latency optimizations (about 50% of possible latency improvement of option 3) +2 - strong latency optimizations (about 75% of possible latency improvement of option 3) +3 - max latency optimizations +4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). + +Defaults to None.
    @@ -971,21 +1069,16 @@ Converts text into speech using a voice of your choice and returns audio as an a
    ```python -from elevenlabs import ElevenLabs, VoiceSettings +from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) client.text_to_speech.convert_as_stream( - voice_id="pMsXgVXv3BLzUgSXRplE", - optimize_streaming_latency="0", - output_format="mp3_22050_32", - text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”", - voice_settings=VoiceSettings( - stability=0.1, - similarity_boost=0.3, - style=0.2, - ), + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + text="The first move is what sets everything in motion.", + model_id="eleven_multilingual_v2", ) ``` @@ -1026,7 +1119,16 @@ client.text_to_speech.convert_as_stream(
    -**optimize_streaming_latency:** `typing.Optional[OptimizeStreamingLatency]` — You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. +**optimize_streaming_latency:** `typing.Optional[int]` + +You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values: +0 - default mode (no latency optimizations) +1 - normal latency optimizations (about 50% of possible latency improvement of option 3) +2 - strong latency optimizations (about 75% of possible latency improvement of option 3) +3 - max latency optimizations +4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). + +Defaults to None.
    @@ -1177,8 +1279,10 @@ client = ElevenLabs( api_key="YOUR_API_KEY", ) response = client.text_to_speech.stream_with_timestamps( - voice_id="21m00Tcm4TlvDq8ikWAM", - text="text", + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + text="The first move is what sets everything in motion.", + model_id="eleven_multilingual_v2", ) for chunk in response: yield chunk @@ -1221,7 +1325,16 @@ for chunk in response:
    -**optimize_streaming_latency:** `typing.Optional[OptimizeStreamingLatency]` — You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. +**optimize_streaming_latency:** `typing.Optional[int]` + +You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values: +0 - default mode (no latency optimizations) +1 - normal latency optimizations (about 50% of possible latency improvement of option 3) +2 - strong latency optimizations (about 75% of possible latency improvement of option 3) +3 - max latency optimizations +4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). + +Defaults to None.
    @@ -1340,6 +1453,300 @@ for chunk in response:
    ## SpeechToSpeech +
    client.speech_to_speech.convert(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Create speech by combining the content and emotion of the uploaded audio with a voice of your choice. +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.speech_to_speech.convert( + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + model_id="eleven_multilingual_sts_v2", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. + +
    +
    + +
    +
    + +**audio:** `from __future__ import annotations + +core.File` — See core.File for more documentation + +
    +
    + +
    +
    + +**enable_logging:** `typing.Optional[bool]` — When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers. + +
    +
    + +
    +
    + +**optimize_streaming_latency:** `typing.Optional[int]` + +You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values: +0 - default mode (no latency optimizations) +1 - normal latency optimizations (about 50% of possible latency improvement of option 3) +2 - strong latency optimizations (about 75% of possible latency improvement of option 3) +3 - max latency optimizations +4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). + +Defaults to None. + +
    +
    + +
    +
    + +**output_format:** `typing.Optional[OutputFormat]` — The output format of the generated audio. + +
    +
    + +
    +
    + +**model_id:** `typing.Optional[str]` — Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property. + +
    +
    + +
    +
    + +**voice_settings:** `typing.Optional[str]` — Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string. + +
    +
    + +
    +
    + +**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. + +
    +
    + +
    +
    + +**remove_background_noise:** `typing.Optional[bool]` — If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. + +
    +
    +
    +
    + + +
    +
    +
    + +
    client.speech_to_speech.convert_as_stream(...) +
    +
    + +#### 📝 Description + +
    +
    + +
    +
    + +Create speech by combining the content and emotion of the uploaded audio with a voice of your choice and returns an audio stream. +
    +
    +
    +
    + +#### 🔌 Usage + +
    +
    + +
    +
    + +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.speech_to_speech.convert_as_stream( + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + model_id="eleven_multilingual_sts_v2", +) + +``` +
    +
    +
    +
    + +#### ⚙️ Parameters + +
    +
    + +
    +
    + +**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. + +
    +
    + +
    +
    + +**audio:** `from __future__ import annotations + +core.File` — See core.File for more documentation + +
    +
    + +
    +
    + +**enable_logging:** `typing.Optional[bool]` — When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers. + +
    +
    + +
    +
    + +**optimize_streaming_latency:** `typing.Optional[int]` + +You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values: +0 - default mode (no latency optimizations) +1 - normal latency optimizations (about 50% of possible latency improvement of option 3) +2 - strong latency optimizations (about 75% of possible latency improvement of option 3) +3 - max latency optimizations +4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). + +Defaults to None. + +
    +
    + +
    +
    + +**output_format:** `typing.Optional[OutputFormat]` — The output format of the generated audio. + +
    +
    + +
    +
    + +**model_id:** `typing.Optional[str]` — Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property. + +
    +
    + +
    +
    + +**voice_settings:** `typing.Optional[str]` — Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string. + +
    +
    + +
    +
    + +**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. + +
    +
    + +
    +
    + +**remove_background_noise:** `typing.Optional[bool]` — If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. + +
    +
    + +
    +
    + +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. + +
    +
    +
    +
    + + +
    +
    +
    + ## VoiceGeneration
    client.voice_generation.generate_parameters()
    @@ -2107,7 +2514,7 @@ client = ElevenLabs( api_key="YOUR_API_KEY", ) client.voices.get_settings( - voice_id="2EiwWnXFnvU5JabPnv8n", + voice_id="JBFqnCBsd6RMkjVDRZzb", ) ``` @@ -2177,7 +2584,7 @@ client = ElevenLabs( api_key="YOUR_API_KEY", ) client.voices.get( - voice_id="29vD33N1CtxCmqQRPOHJ", + voice_id="JBFqnCBsd6RMkjVDRZzb", ) ``` @@ -2255,7 +2662,7 @@ client = ElevenLabs( api_key="YOUR_API_KEY", ) client.voices.delete( - voice_id="29vD33N1CtxCmqQRPOHJ", + voice_id="VOICE_ID", ) ``` @@ -2325,7 +2732,7 @@ client = ElevenLabs( api_key="YOUR_API_KEY", ) client.voices.edit_settings( - voice_id="29vD33N1CtxCmqQRPOHJ", + voice_id="VOICE_ID", request=VoiceSettings( stability=0.1, similarity_boost=0.3, @@ -2512,7 +2919,7 @@ client = ElevenLabs( api_key="YOUR_API_KEY", ) client.voices.edit( - voice_id="JBFqnCBsd6RMkjVDRZzb", + voice_id="VOICE_ID", name="George", ) diff --git a/src/elevenlabs/__init__.py b/src/elevenlabs/__init__.py index 1b45287f..a25daaea 100644 --- a/src/elevenlabs/__init__.py +++ b/src/elevenlabs/__init__.py @@ -26,7 +26,6 @@ AsrProvider, AsrQuality, AudioNativeCreateProjectResponseModel, - AudioNativeGetEmbedCodeResponseModel, AuthSettings, AuthorizationMethod, BanReasonType, @@ -101,7 +100,6 @@ GetPronunciationDictionaryMetadataResponse, GetSpeechHistoryResponse, GetVoicesResponse, - History, HistoryAlignmentResponseModel, HistoryAlignmentsResponseModel, HistoryItem, @@ -126,7 +124,6 @@ ModerationStatusResponseModelWarningStatus, ObjectJsonSchemaProperty, ObjectJsonSchemaPropertyPropertiesValue, - OptimizeStreamingLatency, OrbAvatar, OutputFormat, PhoneNumberAgentInfo, @@ -298,7 +295,6 @@ "AsrQuality", "AsyncElevenLabs", "AudioNativeCreateProjectResponseModel", - "AudioNativeGetEmbedCodeResponseModel", "AuthSettings", "AuthorizationMethod", "BanReasonType", @@ -384,7 +380,6 @@ "GetPronunciationDictionaryMetadataResponse", "GetSpeechHistoryResponse", "GetVoicesResponse", - "History", "HistoryAlignmentResponseModel", 
"HistoryAlignmentsResponseModel", "HistoryGetAllRequestSource", @@ -410,7 +405,6 @@ "ModerationStatusResponseModelWarningStatus", "ObjectJsonSchemaProperty", "ObjectJsonSchemaPropertyPropertiesValue", - "OptimizeStreamingLatency", "OrbAvatar", "OutputFormat", "PhoneNumberAgentInfo", diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index 81697d1e..dfbcbfcc 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.50.1", + "X-Fern-SDK-Version": "1.50.2", } if self._api_key is not None: headers["xi-api-key"] = self._api_key diff --git a/src/elevenlabs/history/client.py b/src/elevenlabs/history/client.py index c87c0fe1..fdc84c72 100644 --- a/src/elevenlabs/history/client.py +++ b/src/elevenlabs/history/client.py @@ -67,10 +67,7 @@ def get_all( client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.history.get_all( - page_size=1, - voice_id="pMsXgVXv3BLzUgSXRplE", - ) + client.history.get_all() """ _response = self._client_wrapper.httpx_client.request( "v1/history", @@ -135,7 +132,7 @@ def get( api_key="YOUR_API_KEY", ) client.history.get( - history_item_id="ja9xsmfGhxYcymxGcOGB", + history_item_id="HISTORY_ITEM_ID", ) """ _response = self._client_wrapper.httpx_client.request( @@ -194,7 +191,7 @@ def delete( api_key="YOUR_API_KEY", ) client.history.delete( - history_item_id="ja9xsmfGhxYcymxGcOGB", + history_item_id="HISTORY_ITEM_ID", ) """ _response = self._client_wrapper.httpx_client.request( @@ -253,7 +250,7 @@ def get_audio( api_key="YOUR_API_KEY", ) client.history.get_audio( - history_item_id="ja9xsmfGhxYcymxGcOGB", + history_item_id="HISTORY_ITEM_ID", ) """ with self._client_wrapper.httpx_client.stream( @@ -316,7 +313,7 @@ def download( api_key="YOUR_API_KEY", ) client.history.download( - 
history_item_ids=["ja9xsmfGhxYcymxGcOGB"], + history_item_ids=["HISTORY_ITEM_ID"], ) """ _response = self._client_wrapper.httpx_client.request( @@ -405,10 +402,7 @@ async def get_all( async def main() -> None: - await client.history.get_all( - page_size=1, - voice_id="pMsXgVXv3BLzUgSXRplE", - ) + await client.history.get_all() asyncio.run(main()) @@ -481,7 +475,7 @@ async def get( async def main() -> None: await client.history.get( - history_item_id="ja9xsmfGhxYcymxGcOGB", + history_item_id="HISTORY_ITEM_ID", ) @@ -548,7 +542,7 @@ async def delete( async def main() -> None: await client.history.delete( - history_item_id="ja9xsmfGhxYcymxGcOGB", + history_item_id="HISTORY_ITEM_ID", ) @@ -615,7 +609,7 @@ async def get_audio( async def main() -> None: await client.history.get_audio( - history_item_id="ja9xsmfGhxYcymxGcOGB", + history_item_id="HISTORY_ITEM_ID", ) @@ -686,7 +680,7 @@ async def download( async def main() -> None: await client.history.download( - history_item_ids=["ja9xsmfGhxYcymxGcOGB"], + history_item_ids=["HISTORY_ITEM_ID"], ) diff --git a/src/elevenlabs/samples/client.py b/src/elevenlabs/samples/client.py index 6014a8f8..96b8df90 100644 --- a/src/elevenlabs/samples/client.py +++ b/src/elevenlabs/samples/client.py @@ -46,8 +46,8 @@ def delete( api_key="YOUR_API_KEY", ) client.samples.delete( - voice_id="ja9xsmfGhxYcymxGcOGB", - sample_id="pMsXgVXv3BLzUgSXRplE", + voice_id="VOICE_ID", + sample_id="SAMPLE_ID", ) """ _response = self._client_wrapper.httpx_client.request( @@ -109,8 +109,8 @@ def get_audio( api_key="YOUR_API_KEY", ) client.samples.get_audio( - voice_id="ja9xsmfGhxYcymxGcOGB", - sample_id="pMsXgVXv3BLzUgSXRplE", + voice_id="VOICE_ID", + sample_id="SAMPLE_ID", ) """ with self._client_wrapper.httpx_client.stream( @@ -180,8 +180,8 @@ async def delete( async def main() -> None: await client.samples.delete( - voice_id="ja9xsmfGhxYcymxGcOGB", - sample_id="pMsXgVXv3BLzUgSXRplE", + voice_id="VOICE_ID", + sample_id="SAMPLE_ID", ) @@ -251,8 +251,8 @@ 
async def get_audio( async def main() -> None: await client.samples.get_audio( - voice_id="ja9xsmfGhxYcymxGcOGB", - sample_id="pMsXgVXv3BLzUgSXRplE", + voice_id="VOICE_ID", + sample_id="SAMPLE_ID", ) diff --git a/src/elevenlabs/speech_to_speech/client.py b/src/elevenlabs/speech_to_speech/client.py index d4e661b7..baa0c9fe 100644 --- a/src/elevenlabs/speech_to_speech/client.py +++ b/src/elevenlabs/speech_to_speech/client.py @@ -3,7 +3,6 @@ import typing from ..core.client_wrapper import SyncClientWrapper from .. import core -from ..types.optimize_streaming_latency import OptimizeStreamingLatency from ..types.output_format import OutputFormat from ..core.request_options import RequestOptions from ..core.jsonable_encoder import jsonable_encoder @@ -28,7 +27,7 @@ def convert( *, audio: core.File, enable_logging: typing.Optional[bool] = None, - optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None, + optimize_streaming_latency: typing.Optional[int] = None, output_format: typing.Optional[OutputFormat] = None, model_id: typing.Optional[str] = OMIT, voice_settings: typing.Optional[str] = OMIT, @@ -50,8 +49,15 @@ def convert( enable_logging : typing.Optional[bool] When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers. - optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency] - You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. + optimize_streaming_latency : typing.Optional[int] + You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. 
Possible values: + 0 - default mode (no latency optimizations) + 1 - normal latency optimizations (about 50% of possible latency improvement of option 3) + 2 - strong latency optimizations (about 75% of possible latency improvement of option 3) + 3 - max latency optimizations + 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). + + Defaults to None. output_format : typing.Optional[OutputFormat] The output format of the generated audio. @@ -75,6 +81,19 @@ def convert( ------ typing.Iterator[bytes] Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.speech_to_speech.convert( + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + model_id="eleven_multilingual_sts_v2", + ) """ with self._client_wrapper.httpx_client.stream( f"v1/speech-to-speech/{jsonable_encoder(voice_id)}", @@ -123,9 +142,9 @@ def convert_as_stream( voice_id: str, *, audio: core.File, - enable_logging: typing.Optional[OptimizeStreamingLatency] = None, - optimize_streaming_latency: typing.Optional[OutputFormat] = None, - output_format: typing.Optional[str] = None, + enable_logging: typing.Optional[bool] = None, + optimize_streaming_latency: typing.Optional[int] = None, + output_format: typing.Optional[OutputFormat] = None, model_id: typing.Optional[str] = OMIT, voice_settings: typing.Optional[str] = OMIT, seed: typing.Optional[int] = OMIT, @@ -143,25 +162,21 @@ def convert_as_stream( audio : core.File See core.File for more documentation - enable_logging : typing.Optional[OptimizeStreamingLatency] - You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. + enable_logging : typing.Optional[bool] + When enable_logging is set to false full privacy mode will be used for the request. 
This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers. - optimize_streaming_latency : typing.Optional[OutputFormat] - The output format of the generated audio. + optimize_streaming_latency : typing.Optional[int] + You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values: + 0 - default mode (no latency optimizations) + 1 - normal latency optimizations (about 50% of possible latency improvement of option 3) + 2 - strong latency optimizations (about 75% of possible latency improvement of option 3) + 3 - max latency optimizations + 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). - output_format : typing.Optional[str] - Output format of the generated audio. Must be one of: - mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps. - mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps. - mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps. - mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps. - mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps. - mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above. - pcm_16000 - PCM format (S16LE) with 16kHz sample rate. - pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate. - pcm_24000 - PCM format (S16LE) with 24kHz sample rate. - pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above. - ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs. + Defaults to None. 
+ + output_format : typing.Optional[OutputFormat] + The output format of the generated audio. model_id : typing.Optional[str] Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property. @@ -182,6 +197,19 @@ def convert_as_stream( ------ typing.Iterator[bytes] Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.speech_to_speech.convert_as_stream( + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + model_id="eleven_multilingual_sts_v2", + ) """ with self._client_wrapper.httpx_client.stream( f"v1/speech-to-speech/{jsonable_encoder(voice_id)}/stream", @@ -236,7 +264,7 @@ async def convert( *, audio: core.File, enable_logging: typing.Optional[bool] = None, - optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None, + optimize_streaming_latency: typing.Optional[int] = None, output_format: typing.Optional[OutputFormat] = None, model_id: typing.Optional[str] = OMIT, voice_settings: typing.Optional[str] = OMIT, @@ -258,8 +286,15 @@ async def convert( enable_logging : typing.Optional[bool] When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers. - optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency] - You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. + optimize_streaming_latency : typing.Optional[int] + You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. 
Possible values: + 0 - default mode (no latency optimizations) + 1 - normal latency optimizations (about 50% of possible latency improvement of option 3) + 2 - strong latency optimizations (about 75% of possible latency improvement of option 3) + 3 - max latency optimizations + 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). + + Defaults to None. output_format : typing.Optional[OutputFormat] The output format of the generated audio. @@ -283,6 +318,27 @@ async def convert( ------ typing.AsyncIterator[bytes] Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.speech_to_speech.convert( + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + model_id="eleven_multilingual_sts_v2", + ) + + + asyncio.run(main()) """ async with self._client_wrapper.httpx_client.stream( f"v1/speech-to-speech/{jsonable_encoder(voice_id)}", @@ -331,9 +387,9 @@ async def convert_as_stream( voice_id: str, *, audio: core.File, - enable_logging: typing.Optional[OptimizeStreamingLatency] = None, - optimize_streaming_latency: typing.Optional[OutputFormat] = None, - output_format: typing.Optional[str] = None, + enable_logging: typing.Optional[bool] = None, + optimize_streaming_latency: typing.Optional[int] = None, + output_format: typing.Optional[OutputFormat] = None, model_id: typing.Optional[str] = OMIT, voice_settings: typing.Optional[str] = OMIT, seed: typing.Optional[int] = OMIT, @@ -351,25 +407,21 @@ async def convert_as_stream( audio : core.File See core.File for more documentation - enable_logging : typing.Optional[OptimizeStreamingLatency] - You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. 
+ enable_logging : typing.Optional[bool] + When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers. - optimize_streaming_latency : typing.Optional[OutputFormat] - The output format of the generated audio. + optimize_streaming_latency : typing.Optional[int] + You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values: + 0 - default mode (no latency optimizations) + 1 - normal latency optimizations (about 50% of possible latency improvement of option 3) + 2 - strong latency optimizations (about 75% of possible latency improvement of option 3) + 3 - max latency optimizations + 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). - output_format : typing.Optional[str] - Output format of the generated audio. Must be one of: - mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps. - mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps. - mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps. - mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps. - mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps. - mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above. - pcm_16000 - PCM format (S16LE) with 16kHz sample rate. - pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate. - pcm_24000 - PCM format (S16LE) with 24kHz sample rate. - pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above. - ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. 
Note that this format is commonly used for Twilio audio inputs. + Defaults to None. + + output_format : typing.Optional[OutputFormat] + The output format of the generated audio. model_id : typing.Optional[str] Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property. @@ -390,6 +442,27 @@ async def convert_as_stream( ------ typing.AsyncIterator[bytes] Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.speech_to_speech.convert_as_stream( + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + model_id="eleven_multilingual_sts_v2", + ) + + + asyncio.run(main()) """ async with self._client_wrapper.httpx_client.stream( f"v1/speech-to-speech/{jsonable_encoder(voice_id)}/stream", diff --git a/src/elevenlabs/text_to_sound_effects/client.py b/src/elevenlabs/text_to_sound_effects/client.py index 4913029d..f562fef6 100644 --- a/src/elevenlabs/text_to_sound_effects/client.py +++ b/src/elevenlabs/text_to_sound_effects/client.py @@ -47,6 +47,17 @@ def convert( ------ typing.Iterator[bytes] Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.text_to_sound_effects.convert( + text="Spacious braam suitable for high-impact movie trailer moments", + ) """ with self._client_wrapper.httpx_client.stream( "v1/sound-generation", @@ -118,6 +129,25 @@ async def convert( ------ typing.AsyncIterator[bytes] Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.text_to_sound_effects.convert( + text="Spacious braam suitable for high-impact 
movie trailer moments", + ) + + + asyncio.run(main()) """ async with self._client_wrapper.httpx_client.stream( "v1/sound-generation", diff --git a/src/elevenlabs/text_to_speech/client.py b/src/elevenlabs/text_to_speech/client.py index 865cef6b..e35188d0 100644 --- a/src/elevenlabs/text_to_speech/client.py +++ b/src/elevenlabs/text_to_speech/client.py @@ -2,7 +2,6 @@ import typing from ..core.client_wrapper import SyncClientWrapper -from ..types.optimize_streaming_latency import OptimizeStreamingLatency from ..types.output_format import OutputFormat from ..types.voice_settings import VoiceSettings from ..types.pronunciation_dictionary_version_locator import PronunciationDictionaryVersionLocator @@ -44,7 +43,7 @@ def convert( *, text: str, enable_logging: typing.Optional[bool] = None, - optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None, + optimize_streaming_latency: typing.Optional[int] = None, output_format: typing.Optional[OutputFormat] = None, model_id: typing.Optional[str] = OMIT, language_code: typing.Optional[str] = OMIT, @@ -77,8 +76,15 @@ def convert( enable_logging : typing.Optional[bool] When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers. - optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency] - You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. + optimize_streaming_latency : typing.Optional[int] + You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. 
Possible values: + 0 - default mode (no latency optimizations) + 1 - normal latency optimizations (about 50% of possible latency improvement of option 3) + 2 - strong latency optimizations (about 75% of possible latency improvement of option 3) + 3 - max latency optimizations + 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). + + Defaults to None. output_format : typing.Optional[OutputFormat] The output format of the generated audio. @@ -126,21 +132,16 @@ def convert( Examples -------- - from elevenlabs import ElevenLabs, VoiceSettings + from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) client.text_to_speech.convert( - voice_id="pMsXgVXv3BLzUgSXRplE", - optimize_streaming_latency="0", - output_format="mp3_22050_32", - text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”", - voice_settings=VoiceSettings( - stability=0.5, - similarity_boost=0.75, - style=0.0, - ), + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + text="The first move is what sets everything in motion.", + model_id="eleven_multilingual_v2", ) """ with self._client_wrapper.httpx_client.stream( @@ -205,7 +206,7 @@ def convert_with_timestamps( *, text: str, enable_logging: typing.Optional[bool] = None, - optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None, + optimize_streaming_latency: typing.Optional[int] = None, output_format: typing.Optional[OutputFormat] = None, model_id: typing.Optional[str] = OMIT, language_code: typing.Optional[str] = OMIT, @@ -238,8 +239,15 @@ def convert_with_timestamps( enable_logging : typing.Optional[bool] When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers. 
- optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency] - You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. + optimize_streaming_latency : typing.Optional[int] + You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values: + 0 - default mode (no latency optimizations) + 1 - normal latency optimizations (about 50% of possible latency improvement of option 3) + 2 - strong latency optimizations (about 75% of possible latency improvement of option 3) + 3 - max latency optimizations + 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). + + Defaults to None. output_format : typing.Optional[OutputFormat] The output format of the generated audio. @@ -293,8 +301,10 @@ def convert_with_timestamps( api_key="YOUR_API_KEY", ) client.text_to_speech.convert_with_timestamps( - voice_id="21m00Tcm4TlvDq8ikWAM", - text="text", + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + text="The first move is what sets everything in motion.", + model_id="eleven_multilingual_v2", ) """ _response = self._client_wrapper.httpx_client.request( @@ -361,7 +371,7 @@ def convert_as_stream( *, text: str, enable_logging: typing.Optional[bool] = None, - optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None, + optimize_streaming_latency: typing.Optional[int] = None, output_format: typing.Optional[OutputFormat] = None, model_id: typing.Optional[str] = OMIT, language_code: typing.Optional[str] = OMIT, @@ -394,8 +404,15 @@ def convert_as_stream( enable_logging : typing.Optional[bool] When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. 
Full privacy mode may only be used by enterprise customers. - optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency] - You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. + optimize_streaming_latency : typing.Optional[int] + You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values: + 0 - default mode (no latency optimizations) + 1 - normal latency optimizations (about 50% of possible latency improvement of option 3) + 2 - strong latency optimizations (about 75% of possible latency improvement of option 3) + 3 - max latency optimizations + 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). + + Defaults to None. output_format : typing.Optional[OutputFormat] The output format of the generated audio. @@ -443,21 +460,16 @@ def convert_as_stream( Examples -------- - from elevenlabs import ElevenLabs, VoiceSettings + from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) client.text_to_speech.convert_as_stream( - voice_id="pMsXgVXv3BLzUgSXRplE", - optimize_streaming_latency="0", - output_format="mp3_22050_32", - text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”", - voice_settings=VoiceSettings( - stability=0.1, - similarity_boost=0.3, - style=0.2, - ), + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + text="The first move is what sets everything in motion.", + model_id="eleven_multilingual_v2", ) """ with self._client_wrapper.httpx_client.stream( @@ -522,7 +534,7 @@ def stream_with_timestamps( *, text: str, enable_logging: typing.Optional[bool] = None, - optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None, + optimize_streaming_latency: typing.Optional[int] = None, output_format: 
typing.Optional[OutputFormat] = None, model_id: typing.Optional[str] = OMIT, language_code: typing.Optional[str] = OMIT, @@ -555,8 +567,15 @@ def stream_with_timestamps( enable_logging : typing.Optional[bool] When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers. - optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency] - You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. + optimize_streaming_latency : typing.Optional[int] + You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values: + 0 - default mode (no latency optimizations) + 1 - normal latency optimizations (about 50% of possible latency improvement of option 3) + 2 - strong latency optimizations (about 75% of possible latency improvement of option 3) + 3 - max latency optimizations + 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). + + Defaults to None. output_format : typing.Optional[OutputFormat] The output format of the generated audio. 
@@ -610,8 +629,10 @@ def stream_with_timestamps( api_key="YOUR_API_KEY", ) response = client.text_to_speech.stream_with_timestamps( - voice_id="21m00Tcm4TlvDq8ikWAM", - text="text", + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + text="The first move is what sets everything in motion.", + model_id="eleven_multilingual_v2", ) for chunk in response: yield chunk @@ -693,7 +714,7 @@ async def convert( *, text: str, enable_logging: typing.Optional[bool] = None, - optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None, + optimize_streaming_latency: typing.Optional[int] = None, output_format: typing.Optional[OutputFormat] = None, model_id: typing.Optional[str] = OMIT, language_code: typing.Optional[str] = OMIT, @@ -726,8 +747,15 @@ async def convert( enable_logging : typing.Optional[bool] When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers. - optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency] - You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. + optimize_streaming_latency : typing.Optional[int] + You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values: + 0 - default mode (no latency optimizations) + 1 - normal latency optimizations (about 50% of possible latency improvement of option 3) + 2 - strong latency optimizations (about 75% of possible latency improvement of option 3) + 3 - max latency optimizations + 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). + + Defaults to None. output_format : typing.Optional[OutputFormat] The output format of the generated audio. 
@@ -777,7 +805,7 @@ async def convert( -------- import asyncio - from elevenlabs import AsyncElevenLabs, VoiceSettings + from elevenlabs import AsyncElevenLabs client = AsyncElevenLabs( api_key="YOUR_API_KEY", @@ -786,15 +814,10 @@ async def convert( async def main() -> None: await client.text_to_speech.convert( - voice_id="pMsXgVXv3BLzUgSXRplE", - optimize_streaming_latency="0", - output_format="mp3_22050_32", - text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”", - voice_settings=VoiceSettings( - stability=0.5, - similarity_boost=0.75, - style=0.0, - ), + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + text="The first move is what sets everything in motion.", + model_id="eleven_multilingual_v2", ) @@ -862,7 +885,7 @@ async def convert_with_timestamps( *, text: str, enable_logging: typing.Optional[bool] = None, - optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None, + optimize_streaming_latency: typing.Optional[int] = None, output_format: typing.Optional[OutputFormat] = None, model_id: typing.Optional[str] = OMIT, language_code: typing.Optional[str] = OMIT, @@ -895,8 +918,15 @@ async def convert_with_timestamps( enable_logging : typing.Optional[bool] When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers. - optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency] - You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. + optimize_streaming_latency : typing.Optional[int] + You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. 
Possible values: + 0 - default mode (no latency optimizations) + 1 - normal latency optimizations (about 50% of possible latency improvement of option 3) + 2 - strong latency optimizations (about 75% of possible latency improvement of option 3) + 3 - max latency optimizations + 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). + + Defaults to None. output_format : typing.Optional[OutputFormat] The output format of the generated audio. @@ -955,8 +985,10 @@ async def convert_with_timestamps( async def main() -> None: await client.text_to_speech.convert_with_timestamps( - voice_id="21m00Tcm4TlvDq8ikWAM", - text="text", + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + text="The first move is what sets everything in motion.", + model_id="eleven_multilingual_v2", ) @@ -1026,7 +1058,7 @@ async def convert_as_stream( *, text: str, enable_logging: typing.Optional[bool] = None, - optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None, + optimize_streaming_latency: typing.Optional[int] = None, output_format: typing.Optional[OutputFormat] = None, model_id: typing.Optional[str] = OMIT, language_code: typing.Optional[str] = OMIT, @@ -1059,8 +1091,15 @@ async def convert_as_stream( enable_logging : typing.Optional[bool] When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers. - optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency] - You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. + optimize_streaming_latency : typing.Optional[int] + You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. 
Possible values: + 0 - default mode (no latency optimizations) + 1 - normal latency optimizations (about 50% of possible latency improvement of option 3) + 2 - strong latency optimizations (about 75% of possible latency improvement of option 3) + 3 - max latency optimizations + 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). + + Defaults to None. output_format : typing.Optional[OutputFormat] The output format of the generated audio. @@ -1110,7 +1149,7 @@ async def convert_as_stream( -------- import asyncio - from elevenlabs import AsyncElevenLabs, VoiceSettings + from elevenlabs import AsyncElevenLabs client = AsyncElevenLabs( api_key="YOUR_API_KEY", @@ -1119,15 +1158,10 @@ async def convert_as_stream( async def main() -> None: await client.text_to_speech.convert_as_stream( - voice_id="pMsXgVXv3BLzUgSXRplE", - optimize_streaming_latency="0", - output_format="mp3_22050_32", - text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”", - voice_settings=VoiceSettings( - stability=0.1, - similarity_boost=0.3, - style=0.2, - ), + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + text="The first move is what sets everything in motion.", + model_id="eleven_multilingual_v2", ) @@ -1195,7 +1229,7 @@ async def stream_with_timestamps( *, text: str, enable_logging: typing.Optional[bool] = None, - optimize_streaming_latency: typing.Optional[OptimizeStreamingLatency] = None, + optimize_streaming_latency: typing.Optional[int] = None, output_format: typing.Optional[OutputFormat] = None, model_id: typing.Optional[str] = OMIT, language_code: typing.Optional[str] = OMIT, @@ -1228,8 +1262,15 @@ async def stream_with_timestamps( enable_logging : typing.Optional[bool] When enable_logging is set to false full privacy mode will be used for the request. 
This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers. - optimize_streaming_latency : typing.Optional[OptimizeStreamingLatency] - You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. + optimize_streaming_latency : typing.Optional[int] + You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values: + 0 - default mode (no latency optimizations) + 1 - normal latency optimizations (about 50% of possible latency improvement of option 3) + 2 - strong latency optimizations (about 75% of possible latency improvement of option 3) + 3 - max latency optimizations + 4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). + + Defaults to None. output_format : typing.Optional[OutputFormat] The output format of the generated audio. 
@@ -1288,8 +1329,10 @@ async def stream_with_timestamps( async def main() -> None: response = await client.text_to_speech.stream_with_timestamps( - voice_id="21m00Tcm4TlvDq8ikWAM", - text="text", + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + text="The first move is what sets everything in motion.", + model_id="eleven_multilingual_v2", ) async for chunk in response: yield chunk diff --git a/src/elevenlabs/types/__init__.py b/src/elevenlabs/types/__init__.py index 86ca5244..d2284c1d 100644 --- a/src/elevenlabs/types/__init__.py +++ b/src/elevenlabs/types/__init__.py @@ -25,7 +25,6 @@ from .asr_provider import AsrProvider from .asr_quality import AsrQuality from .audio_native_create_project_response_model import AudioNativeCreateProjectResponseModel -from .audio_native_get_embed_code_response_model import AudioNativeGetEmbedCodeResponseModel from .auth_settings import AuthSettings from .authorization_method import AuthorizationMethod from .ban_reason_type import BanReasonType @@ -106,7 +105,6 @@ from .get_pronunciation_dictionary_metadata_response import GetPronunciationDictionaryMetadataResponse from .get_speech_history_response import GetSpeechHistoryResponse from .get_voices_response import GetVoicesResponse -from .history import History from .history_alignment_response_model import HistoryAlignmentResponseModel from .history_alignments_response_model import HistoryAlignmentsResponseModel from .history_item import HistoryItem @@ -131,7 +129,6 @@ from .moderation_status_response_model_warning_status import ModerationStatusResponseModelWarningStatus from .object_json_schema_property import ObjectJsonSchemaProperty from .object_json_schema_property_properties_value import ObjectJsonSchemaPropertyPropertiesValue -from .optimize_streaming_latency import OptimizeStreamingLatency from .orb_avatar import OrbAvatar from .output_format import OutputFormat from .phone_number_agent_info import PhoneNumberAgentInfo @@ -248,7 +245,6 @@ "AsrProvider", 
"AsrQuality", "AudioNativeCreateProjectResponseModel", - "AudioNativeGetEmbedCodeResponseModel", "AuthSettings", "AuthorizationMethod", "BanReasonType", @@ -323,7 +319,6 @@ "GetPronunciationDictionaryMetadataResponse", "GetSpeechHistoryResponse", "GetVoicesResponse", - "History", "HistoryAlignmentResponseModel", "HistoryAlignmentsResponseModel", "HistoryItem", @@ -348,7 +343,6 @@ "ModerationStatusResponseModelWarningStatus", "ObjectJsonSchemaProperty", "ObjectJsonSchemaPropertyPropertiesValue", - "OptimizeStreamingLatency", "OrbAvatar", "OutputFormat", "PhoneNumberAgentInfo", diff --git a/src/elevenlabs/types/audio_native_get_embed_code_response_model.py b/src/elevenlabs/types/audio_native_get_embed_code_response_model.py deleted file mode 100644 index 12c70385..00000000 --- a/src/elevenlabs/types/audio_native_get_embed_code_response_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AudioNativeGetEmbedCodeResponseModel = typing.Optional[typing.Any] diff --git a/src/elevenlabs/types/history.py b/src/elevenlabs/types/history.py deleted file mode 100644 index b9532340..00000000 --- a/src/elevenlabs/types/history.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -History = typing.Optional[typing.Any] diff --git a/src/elevenlabs/types/optimize_streaming_latency.py b/src/elevenlabs/types/optimize_streaming_latency.py deleted file mode 100644 index 1b9a4dec..00000000 --- a/src/elevenlabs/types/optimize_streaming_latency.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -OptimizeStreamingLatency = typing.Union[typing.Literal["0", "1", "2", "3", "4"], typing.Any] diff --git a/src/elevenlabs/voices/client.py b/src/elevenlabs/voices/client.py index c3775165..e10992a0 100644 --- a/src/elevenlabs/voices/client.py +++ b/src/elevenlabs/voices/client.py @@ -155,7 +155,7 @@ def get_settings(self, voice_id: str, *, request_options: typing.Optional[Reques api_key="YOUR_API_KEY", ) client.voices.get_settings( - voice_id="2EiwWnXFnvU5JabPnv8n", + voice_id="JBFqnCBsd6RMkjVDRZzb", ) """ _response = self._client_wrapper.httpx_client.request( @@ -221,7 +221,7 @@ def get( api_key="YOUR_API_KEY", ) client.voices.get( - voice_id="29vD33N1CtxCmqQRPOHJ", + voice_id="JBFqnCBsd6RMkjVDRZzb", ) """ _response = self._client_wrapper.httpx_client.request( @@ -283,7 +283,7 @@ def delete( api_key="YOUR_API_KEY", ) client.voices.delete( - voice_id="29vD33N1CtxCmqQRPOHJ", + voice_id="VOICE_ID", ) """ _response = self._client_wrapper.httpx_client.request( @@ -344,7 +344,7 @@ def edit_settings( api_key="YOUR_API_KEY", ) client.voices.edit_settings( - voice_id="29vD33N1CtxCmqQRPOHJ", + voice_id="VOICE_ID", request=VoiceSettings( stability=0.1, similarity_boost=0.3, @@ -521,7 +521,7 @@ def edit( api_key="YOUR_API_KEY", ) client.voices.edit( - voice_id="JBFqnCBsd6RMkjVDRZzb", + voice_id="VOICE_ID", name="George", ) """ @@ -1064,7 +1064,7 @@ async def get_settings( async def main() -> None: await client.voices.get_settings( - voice_id="2EiwWnXFnvU5JabPnv8n", + voice_id="JBFqnCBsd6RMkjVDRZzb", ) @@ -1138,7 +1138,7 @@ async def get( async def main() -> None: await client.voices.get( - voice_id="29vD33N1CtxCmqQRPOHJ", + voice_id="JBFqnCBsd6RMkjVDRZzb", ) @@ -1208,7 +1208,7 @@ async def delete( async def main() -> None: await client.voices.delete( - voice_id="29vD33N1CtxCmqQRPOHJ", + voice_id="VOICE_ID", ) @@ -1277,7 +1277,7 @@ async def edit_settings( async def main() -> None: await client.voices.edit_settings( - voice_id="29vD33N1CtxCmqQRPOHJ", + 
voice_id="VOICE_ID", request=VoiceSettings( stability=0.1, similarity_boost=0.3, @@ -1470,7 +1470,7 @@ async def edit( async def main() -> None: await client.voices.edit( - voice_id="JBFqnCBsd6RMkjVDRZzb", + voice_id="VOICE_ID", name="George", ) From 8b7d1abf48ca50bc6f70eb852cacb6860d23d40f Mon Sep 17 00:00:00 2001 From: Louis Jordan Date: Sun, 15 Dec 2024 21:47:28 +0000 Subject: [PATCH 37/45] feat: align default env keys with js --- README.md | 14 +++++++------- src/elevenlabs/base_client.py | 4 ++-- src/elevenlabs/client.py | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index e00eb6a5..32d915ad 100644 --- a/README.md +++ b/README.md @@ -45,7 +45,7 @@ from elevenlabs import play from elevenlabs.client import ElevenLabs client = ElevenLabs( - api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY + api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY or ELEVENLABS_API_KEY ) audio = client.generate( @@ -70,7 +70,7 @@ List all your available voices with `voices()`. 
from elevenlabs.client import ElevenLabs client = ElevenLabs( - api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY + api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY or ELEVENLABS_API_KEY ) response = client.voices.get_all() @@ -88,7 +88,7 @@ from elevenlabs import Voice, VoiceSettings, play from elevenlabs.client import ElevenLabs client = ElevenLabs( - api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY + api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY or ELEVENLABS_API_KEY ) audio = client.generate( @@ -113,7 +113,7 @@ from elevenlabs.client import ElevenLabs from elevenlabs import play client = ElevenLabs( - api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY + api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY or ELEVENLABS_API_KEY ) voice = client.clone( @@ -136,7 +136,7 @@ from elevenlabs.client import ElevenLabs from elevenlabs import stream client = ElevenLabs( - api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY + api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY or ELEVENLABS_API_KEY ) audio_stream = client.generate( @@ -159,7 +159,7 @@ from elevenlabs.client import ElevenLabs from elevenlabs import stream client = ElevenLabs( - api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY + api_key="YOUR_API_KEY", # Defaults to ELEVEN_API_KEY or ELEVENLABS_API_KEY ) def text_stream(): @@ -189,7 +189,7 @@ import asyncio from elevenlabs.client import AsyncElevenLabs eleven = AsyncElevenLabs( - api_key="MY_API_KEY" # Defaults to ELEVEN_API_KEY + api_key="MY_API_KEY" # Defaults to ELEVEN_API_KEY or ELEVENLABS_API_KEY ) async def print_models() -> None: diff --git a/src/elevenlabs/base_client.py b/src/elevenlabs/base_client.py index a67c7e0e..dd5a5dbf 100644 --- a/src/elevenlabs/base_client.py +++ b/src/elevenlabs/base_client.py @@ -88,7 +88,7 @@ def __init__( *, base_url: typing.Optional[str] = None, environment: ElevenLabsEnvironment = ElevenLabsEnvironment.PRODUCTION, - api_key: typing.Optional[str] = os.getenv("ELEVEN_API_KEY"), + api_key: 
typing.Optional[str] = os.getenv("ELEVENLABS_API_KEY") or os.getenv("ELEVEN_API_KEY"), timeout: typing.Optional[float] = None, follow_redirects: typing.Optional[bool] = True, httpx_client: typing.Optional[httpx.Client] = None, @@ -167,7 +167,7 @@ def __init__( *, base_url: typing.Optional[str] = None, environment: ElevenLabsEnvironment = ElevenLabsEnvironment.PRODUCTION, - api_key: typing.Optional[str] = os.getenv("ELEVEN_API_KEY"), + api_key: typing.Optional[str] = os.getenv("ELEVENLABS_API_KEY") or os.getenv("ELEVEN_API_KEY"), timeout: typing.Optional[float] = None, follow_redirects: typing.Optional[bool] = True, httpx_client: typing.Optional[httpx.AsyncClient] = None, diff --git a/src/elevenlabs/client.py b/src/elevenlabs/client.py index f4f66bce..75fd3abc 100644 --- a/src/elevenlabs/client.py +++ b/src/elevenlabs/client.py @@ -68,7 +68,7 @@ def __init__( *, base_url: typing.Optional[str] = None, environment: ElevenLabsEnvironment = ElevenLabsEnvironment.PRODUCTION, - api_key: typing.Optional[str] = os.getenv("ELEVEN_API_KEY"), + api_key: typing.Optional[str] = os.getenv("ELEVENLABS_API_KEY") or os.getenv("ELEVEN_API_KEY"), timeout: typing.Optional[float] = 60, httpx_client: typing.Optional[httpx.Client] = None ): From 95bd48ea9119dffadb2f5d35683d11ff3143436b Mon Sep 17 00:00:00 2001 From: Ediz Ferit Kula <146680748+acse-efk23@users.noreply.github.com> Date: Tue, 17 Dec 2024 13:50:41 +0000 Subject: [PATCH 38/45] fix: move class-level attributes to instance-level in Conversation class (#418) --- src/elevenlabs/conversational_ai/conversation.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/elevenlabs/conversational_ai/conversation.py b/src/elevenlabs/conversational_ai/conversation.py index b164674e..b3196b5d 100644 --- a/src/elevenlabs/conversational_ai/conversation.py +++ b/src/elevenlabs/conversational_ai/conversation.py @@ -73,10 +73,10 @@ class Conversation: callback_user_transcript: Optional[Callable[[str], None]] 
callback_latency_measurement: Optional[Callable[[int], None]] - _thread: Optional[threading.Thread] = None + _thread: Optional[threading.Thread] _should_stop: threading.Event - _conversation_id: Optional[str] = None - _last_interrupt_id: int = 0 + _conversation_id: Optional[str] + _last_interrupt_id: int def __init__( self, @@ -119,7 +119,11 @@ def __init__( self.callback_agent_response_correction = callback_agent_response_correction self.callback_user_transcript = callback_user_transcript self.callback_latency_measurement = callback_latency_measurement + + self._thread = None self._should_stop = threading.Event() + self._conversation_id = None + self._last_interrupt_id = 0 def start_session(self): """Starts the conversation session. From 97d77ed98adb686f8a20e69536fd527d3c469f58 Mon Sep 17 00:00:00 2001 From: Louis Jordan Date: Tue, 17 Dec 2024 13:20:00 +0000 Subject: [PATCH 39/45] chore: add basic mock --- .gitignore | 1 + tests/e2e_test_convai.py | 0 tests/test_convai.py | 118 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 119 insertions(+) create mode 100644 tests/e2e_test_convai.py create mode 100644 tests/test_convai.py diff --git a/.gitignore b/.gitignore index 0da665fe..83bacf16 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ dist/ __pycache__/ poetry.toml .ruff_cache/ +.DS_Store diff --git a/tests/e2e_test_convai.py b/tests/e2e_test_convai.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/test_convai.py b/tests/test_convai.py new file mode 100644 index 00000000..076daf9a --- /dev/null +++ b/tests/test_convai.py @@ -0,0 +1,118 @@ +from unittest.mock import MagicMock, patch +from elevenlabs.conversational_ai.conversation import Conversation, AudioInterface +import json +import time + + +class MockAudioInterface(AudioInterface): + def start(self, input_callback): + print("Audio interface started") + self.input_callback = input_callback + + def stop(self): + print("Audio interface stopped") + + def output(self, audio): + 
print(f"Would play audio of length: {len(audio)} bytes") + + def interrupt(self): + print("Audio interrupted") + + +# Add test constants and helpers at module level +TEST_CONVERSATION_ID = "test123" +TEST_AGENT_ID = "test_agent" + + +def create_mock_websocket(messages=None): + """Helper to create a mock websocket with predefined responses""" + mock_ws = MagicMock() + + if messages is None: + messages = [ + { + "type": "conversation_initiation_metadata", + "conversation_initiation_metadata_event": {"conversation_id": TEST_CONVERSATION_ID}, + }, + {"type": "agent_response", "agent_response_event": {"agent_response": "Hello there!"}}, + ] + + def response_generator(): + for msg in messages: + yield json.dumps(msg) + while True: + yield '{"type": "keep_alive"}' + + mock_ws.recv = MagicMock(side_effect=response_generator()) + return mock_ws + + +def test_conversation_basic_flow(): + # Mock setup + mock_ws = create_mock_websocket() + mock_client = MagicMock() + agent_response_callback = MagicMock() + + # Setup the conversation + conversation = Conversation( + client=mock_client, + agent_id=TEST_AGENT_ID, + requires_auth=False, + audio_interface=MockAudioInterface(), + callback_agent_response=agent_response_callback, + ) + + # Run the test + with patch("elevenlabs.conversational_ai.conversation.connect") as mock_connect: + mock_connect.return_value.__enter__.return_value = mock_ws + conversation.start_session() + + # Add a wait for the callback to be called + timeout = 5 # 5 seconds timeout + start_time = time.time() + while not agent_response_callback.called and time.time() - start_time < timeout: + time.sleep(0.1) + + conversation.end_session() + conversation.wait_for_session_end() + + # Assertions + expected_init_message = { + "type": "conversation_initiation_client_data", + "custom_llm_extra_body": {}, + "conversation_config_override": {}, + } + mock_ws.send.assert_any_call(json.dumps(expected_init_message)) + agent_response_callback.assert_called_once_with("Hello 
there!") + assert conversation._conversation_id == TEST_CONVERSATION_ID + + +def test_conversation_with_auth(): + # Mock setup + mock_client = MagicMock() + mock_client.conversational_ai.get_signed_url.return_value.signed_url = "wss://signed.url" + mock_ws = create_mock_websocket( + [ + { + "type": "conversation_initiation_metadata", + "conversation_initiation_metadata_event": {"conversation_id": TEST_CONVERSATION_ID}, + } + ] + ) + + conversation = Conversation( + client=mock_client, + agent_id=TEST_AGENT_ID, + requires_auth=True, + audio_interface=MockAudioInterface(), + ) + + # Run the test + with patch("elevenlabs.conversational_ai.conversation.connect") as mock_connect: + mock_connect.return_value.__enter__.return_value = mock_ws + conversation.start_session() + conversation.end_session() + conversation.wait_for_session_end() + + # Assertions + mock_client.conversational_ai.get_signed_url.assert_called_once_with(agent_id=TEST_AGENT_ID) From 57f85b6f0840823a55cbc7ab041b0fd60517c3dd Mon Sep 17 00:00:00 2001 From: Louis Jordan Date: Tue, 17 Dec 2024 14:58:31 +0000 Subject: [PATCH 40/45] feat: add support for client-tools (Python SDK) --- .../conversational_ai/conversation.py | 147 ++++++++++++++++-- tests/e2e_test_convai.py | 79 ++++++++++ 2 files changed, 216 insertions(+), 10 deletions(-) diff --git a/src/elevenlabs/conversational_ai/conversation.py b/src/elevenlabs/conversational_ai/conversation.py index b3196b5d..5678e634 100644 --- a/src/elevenlabs/conversational_ai/conversation.py +++ b/src/elevenlabs/conversational_ai/conversation.py @@ -2,7 +2,9 @@ import base64 import json import threading -from typing import Callable, Optional +from typing import Callable, Optional, Awaitable, Union, Any +import asyncio +from concurrent.futures import ThreadPoolExecutor from websockets.sync.client import connect @@ -52,8 +54,117 @@ def interrupt(self): """ pass + +class ClientTools: + """Handles registration and execution of client-side tools that can be called by the 
agent. + + Supports both synchronous and asynchronous tools running in a dedicated event loop, + ensuring non-blocking operation of the main conversation thread. + """ + + def __init__(self): + self.tools: dict[str, tuple[Union[Callable[[dict], Any], Callable[[dict], Awaitable[Any]]], bool]] = {} + self.lock = threading.Lock() + self._loop = None + self._thread = None + self._running = threading.Event() + self.thread_pool = ThreadPoolExecutor() + + def start(self): + """Start the event loop in a separate thread for handling async operations.""" + if self._running.is_set(): + return + + def run_event_loop(): + self._loop = asyncio.new_event_loop() + asyncio.set_event_loop(self._loop) + self._running.set() + try: + self._loop.run_forever() + finally: + self._running.clear() + self._loop.close() + self._loop = None + + self._thread = threading.Thread(target=run_event_loop, daemon=True, name="ClientTools-EventLoop") + self._thread.start() + # Wait for loop to be ready + self._running.wait() + + def stop(self): + """Gracefully stop the event loop and clean up resources.""" + if self._loop and self._running.is_set(): + self._loop.call_soon_threadsafe(self._loop.stop) + self._thread.join() + self.thread_pool.shutdown(wait=False) + + def register( + self, + tool_name: str, + handler: Union[Callable[[dict], Any], Callable[[dict], Awaitable[Any]]], + is_async: bool = False, + ) -> None: + """Register a new tool that can be called by the AI agent. + + Args: + tool_name: Unique identifier for the tool + handler: Function that implements the tool's logic + is_async: Whether the handler is an async function + """ + with self.lock: + if not callable(handler): + raise ValueError("Handler must be callable") + if tool_name in self.tools: + raise ValueError(f"Tool '{tool_name}' is already registered") + self.tools[tool_name] = (handler, is_async) + + async def handle(self, tool_name: str, parameters: dict) -> Any: + """Execute a registered tool with the given parameters. 
+ + Returns the result of the tool execution. + """ + with self.lock: + if tool_name not in self.tools: + raise ValueError(f"Tool '{tool_name}' is not registered") + handler, is_async = self.tools[tool_name] + + if is_async: + return await handler(parameters) + else: + return await asyncio.get_event_loop().run_in_executor(self.thread_pool, handler, parameters) + + def execute_tool(self, tool_name: str, parameters: dict, callback: Callable[[dict], None]): + """Execute a tool and send its result via the provided callback. + + This method is non-blocking and handles both sync and async tools. + """ + if not self._running.is_set(): + raise RuntimeError("ClientTools event loop is not running") + + async def _execute_and_callback(): + try: + result = await self.handle(tool_name, parameters) + response = { + "type": "client_tool_result", + "tool_call_id": parameters.get("tool_call_id"), + "result": result or f"Client tool: {tool_name} called successfully.", + "is_error": False, + } + except Exception as e: + response = { + "type": "client_tool_result", + "tool_call_id": parameters.get("tool_call_id"), + "result": str(e), + "is_error": True, + } + callback(response) + + asyncio.run_coroutine_threadsafe(_execute_and_callback(), self._loop) + + class ConversationConfig: """Configuration options for the Conversation.""" + def __init__( self, extra_body: Optional[dict] = None, @@ -61,13 +172,15 @@ def __init__( ): self.extra_body = extra_body or {} self.conversation_config_override = conversation_config_override or {} - + + class Conversation: client: BaseElevenLabs agent_id: str requires_auth: bool config: ConversationConfig audio_interface: AudioInterface + client_tools: Optional[ClientTools] callback_agent_response: Optional[Callable[[str], None]] callback_agent_response_correction: Optional[Callable[[str, str], None]] callback_user_transcript: Optional[Callable[[str], None]] @@ -86,7 +199,7 @@ def __init__( requires_auth: bool, audio_interface: AudioInterface, config: 
Optional[ConversationConfig] = None, - + client_tools: Optional[ClientTools] = None, callback_agent_response: Optional[Callable[[str], None]] = None, callback_agent_response_correction: Optional[Callable[[str, str], None]] = None, callback_user_transcript: Optional[Callable[[str], None]] = None, @@ -101,6 +214,7 @@ def __init__( agent_id: The ID of the agent to converse with. requires_auth: Whether the agent requires authentication. audio_interface: The audio interface to use for input and output. + client_tools: The client tools to use for the conversation. callback_agent_response: Callback for agent responses. callback_agent_response_correction: Callback for agent response corrections. First argument is the original response (previously given to @@ -112,14 +226,16 @@ def __init__( self.client = client self.agent_id = agent_id self.requires_auth = requires_auth - self.audio_interface = audio_interface self.callback_agent_response = callback_agent_response self.config = config or ConversationConfig() + self.client_tools = client_tools or ClientTools() self.callback_agent_response_correction = callback_agent_response_correction self.callback_user_transcript = callback_user_transcript self.callback_latency_measurement = callback_latency_measurement + self.client_tools.start() + self._thread = None self._should_stop = threading.Event() self._conversation_id = None @@ -135,8 +251,9 @@ def start_session(self): self._thread.start() def end_session(self): - """Ends the conversation session.""" + """Ends the conversation session and cleans up resources.""" self.audio_interface.stop() + self.client_tools.stop() self._should_stop.set() def wait_for_session_end(self) -> Optional[str]: @@ -155,10 +272,10 @@ def _run(self, ws_url: str): with connect(ws_url) as ws: ws.send( json.dumps( - { - "type": "conversation_initiation_client_data", - "custom_llm_extra_body": self.config.extra_body, - "conversation_config_override": self.config.conversation_config_override, + { + "type": 
"conversation_initiation_client_data", + "custom_llm_extra_body": self.config.extra_body, + "conversation_config_override": self.config.conversation_config_override, } ) ) @@ -210,7 +327,7 @@ def _handle_message(self, message, ws): self.callback_user_transcript(event["user_transcript"].strip()) elif message["type"] == "interruption": event = message["interruption_event"] - self.last_interrupt_id = int(event["event_id"]) + self._last_interrupt_id = int(event["event_id"]) self.audio_interface.interrupt() elif message["type"] == "ping": event = message["ping_event"] @@ -224,6 +341,16 @@ def _handle_message(self, message, ws): ) if self.callback_latency_measurement and event["ping_ms"]: self.callback_latency_measurement(int(event["ping_ms"])) + elif message["type"] == "client_tool_call": + tool_call = message.get("client_tool_call", {}) + tool_name = tool_call.get("tool_name") + parameters = {"tool_call_id": tool_call["tool_call_id"], **tool_call.get("parameters", {})} + + def send_response(response): + if not self._should_stop.is_set(): + ws.send(json.dumps(response)) + + self.client_tools.execute_tool(tool_name, parameters, send_response) else: pass # Ignore all other message types. 
diff --git a/tests/e2e_test_convai.py b/tests/e2e_test_convai.py index e69de29b..a0e62641 100644 --- a/tests/e2e_test_convai.py +++ b/tests/e2e_test_convai.py @@ -0,0 +1,79 @@ +import os +import time +import asyncio + +import pytest +from elevenlabs import ElevenLabs +from elevenlabs.conversational_ai.conversation import Conversation, ClientTools +from elevenlabs.conversational_ai.default_audio_interface import DefaultAudioInterface + + +@pytest.mark.skipif(os.getenv("CI") == "true", reason="Skip live conversation test in CI environment") +def test_live_conversation(): + """Test a live conversation with actual audio I/O""" + + api_key = os.getenv("ELEVENLABS_API_KEY") + if not api_key: + raise ValueError("ELEVENLABS_API_KEY environment variable missing.") + + agent_id = os.getenv("AGENT_ID") + if not api_key or not agent_id: + raise ValueError("AGENT_ID environment variable missing.") + + client = ElevenLabs(api_key=api_key) + + # Create conversation handlers + def on_agent_response(text: str): + print(f"Agent: {text}") + + def on_user_transcript(text: str): + print(f"You: {text}") + + def on_latency(ms: int): + print(f"Latency: {ms}ms") + + # Initialize client tools + client_tools = ClientTools() + + def test(parameters): + print("Sync tool called with parameters:", parameters) + return "Tool called successfully" + + async def test_async(parameters): + # Simulate some async work + await asyncio.sleep(10) + print("Async tool called with parameters:", parameters) + return "Tool called successfully" + + client_tools.register("test", test) + client_tools.register("test_async", test_async, is_async=True) + + # Initialize conversation + conversation = Conversation( + client=client, + agent_id=agent_id, + requires_auth=False, + audio_interface=DefaultAudioInterface(), + callback_agent_response=on_agent_response, + callback_user_transcript=on_user_transcript, + callback_latency_measurement=on_latency, + client_tools=client_tools, + ) + + # Start the conversation + 
conversation.start_session() + + # Let it run for 100 seconds + time.sleep(100) + + # End the conversation + conversation.end_session() + conversation.wait_for_session_end() + + # Get the conversation ID for reference + conversation_id = conversation._conversation_id + print(f"Conversation ID: {conversation_id}") + + +if __name__ == "__main__": + test_live_conversation() From 228f8f62070b38ee22a54bda4c20cd5ee0e953b0 Mon Sep 17 00:00:00 2001 From: Louis Jordan Date: Wed, 18 Dec 2024 09:47:28 +0000 Subject: [PATCH 41/45] chore: manual bump to 1.50.3 --- pyproject.toml | 2 +- src/elevenlabs/core/client_wrapper.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 9d0acfa9..8fd4caa7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.50.2" +version = "1.50.3" description = "" readme = "README.md" authors = [] diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index dfbcbfcc..2a164bdb 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.50.2", + "X-Fern-SDK-Version": "1.50.3", } if self._api_key is not None: headers["xi-api-key"] = self._api_key From 7406b724f505008718dd11322b945d100b916026 Mon Sep 17 00:00:00 2001 From: Louis Jordan Date: Wed, 18 Dec 2024 09:53:54 +0000 Subject: [PATCH 42/45] chore: update .fernignore w/ manual wrappers --- .fernignore | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.fernignore b/.fernignore index 21514ae5..810ae01d 100644 --- a/.fernignore +++ b/.fernignore @@ -12,7 +12,15 @@ src/elevenlabs/realtime_tts.py README.md assets/ +# Ignore custom tests tests/ .github/ISSUE_TEMPLATE/ .github/SECURITY.md + +# Ignore manually created Conversational AI 
wrappers +src/elevenlabs/conversational_ai/conversation.py +src/elevenlabs/conversational_ai/default_audio_interface.py + + + From b50fec7b2ff487df523ac640a6560c41d500ba51 Mon Sep 17 00:00:00 2001 From: Louis Jordan Date: Wed, 18 Dec 2024 09:57:47 +0000 Subject: [PATCH 43/45] chore: simplify .fernignore --- .fernignore | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/.fernignore b/.fernignore index 810ae01d..a222f8ab 100644 --- a/.fernignore +++ b/.fernignore @@ -1,13 +1,14 @@ # Specify files that shouldn't be modified by Fern +# Ignore manually created SDK wrappers src/elevenlabs/client.py src/elevenlabs/conversational_ai/conversation.py src/elevenlabs/conversational_ai/default_audio_interface.py src/elevenlabs/play.py src/elevenlabs/realtime_tts.py -.github/workflows/ci.yml -.github/workflows/tests.yml +# Ignore CI files +.github/ README.md assets/ @@ -15,12 +16,8 @@ assets/ # Ignore custom tests tests/ -.github/ISSUE_TEMPLATE/ -.github/SECURITY.md -# Ignore manually created Conversational AI wrappers -src/elevenlabs/conversational_ai/conversation.py -src/elevenlabs/conversational_ai/default_audio_interface.py + From 9f05f9dfe972ec9afa15da638f8fff9d35ea5d1a Mon Sep 17 00:00:00 2001 From: Satendra Rai Date: Fri, 10 Jan 2025 14:51:42 +0530 Subject: [PATCH 44/45] Update pyproject.toml update tag version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index c9d0bb26..0898d49b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "elevenlabs" -version = "1.9.0" +version = "1.0.1-beta" description = "" readme = "README.md" authors = [] From e6f307ab667d39adc2346eb6077dbda601bd54c6 Mon Sep 17 00:00:00 2001 From: Satendra Rai Date: Fri, 10 Jan 2025 18:15:14 +0530 Subject: [PATCH 45/45] Update pyproject.toml project name added --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 
8fd4caa7..a4d95ad1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,6 @@ +[project] +name = "elevenlabs" + [tool.poetry] name = "elevenlabs" version = "1.50.3"