From c2fcc4f5ac8d02f28e50e29077f7410e59ff2606 Mon Sep 17 00:00:00 2001
From: "fern-api[bot]" <115122769+fern-api[bot]@users.noreply.github.com>
Date: Thu, 26 Mar 2026 13:41:17 +0000
Subject: [PATCH] SDK regeneration

---
 .fern/metadata.json                           |   7 +-
 poetry.lock                                   | 430 +++++-----
 pyproject.toml                                |   2 +-
 reference.md                                  | 802 +++++++++++------
 src/cohere/__init__.py                        |   4 +-
 src/cohere/audio/__init__.py                  |  38 +
 src/cohere/audio/client.py                    |  63 ++
 src/cohere/audio/raw_client.py                |  13 +
 src/cohere/audio/transcriptions/__init__.py   |  34 +
 src/cohere/audio/transcriptions/client.py     | 156 ++++
 src/cohere/audio/transcriptions/raw_client.py | 439 ++++++++++
 .../audio/transcriptions/types/__init__.py    |  34 +
 .../audio_transcriptions_create_response.py   |  22 +
 src/cohere/base_client.py                     |  30 +
 src/cohere/batches/raw_client.py              |  34 +
 src/cohere/connectors/raw_client.py           |  50 ++
 src/cohere/core/__init__.py                   |  22 +-
 src/cohere/core/client_wrapper.py             |  19 +-
 src/cohere/core/datetime_utils.py             |  42 +
 src/cohere/core/http_client.py                | 195 ++++-
 src/cohere/core/logging.py                    | 107 +++
 src/cohere/core/parse_error.py                |  36 +
 src/cohere/datasets/raw_client.py             |  42 +
 src/cohere/embed_jobs/raw_client.py           |  34 +
 src/cohere/finetuning/raw_client.py           |  58 ++
 src/cohere/models/raw_client.py               |  18 +
 src/cohere/raw_base_client.py                 | 102 +++
 src/cohere/v2/raw_client.py                   |  40 +
 28 files changed, 2317 insertions(+), 556 deletions(-)
 create mode 100644 src/cohere/audio/__init__.py
 create mode 100644 src/cohere/audio/client.py
 create mode 100644 src/cohere/audio/raw_client.py
 create mode 100644 src/cohere/audio/transcriptions/__init__.py
 create mode 100644 src/cohere/audio/transcriptions/client.py
 create mode 100644 src/cohere/audio/transcriptions/raw_client.py
 create mode 100644 src/cohere/audio/transcriptions/types/__init__.py
 create mode 100644 src/cohere/audio/transcriptions/types/audio_transcriptions_create_response.py
 create mode 100644 src/cohere/core/logging.py
 create mode 100644 src/cohere/core/parse_error.py

diff --git a/.fern/metadata.json b/.fern/metadata.json
index 030591d3c..848b691f3 100644
--- a/.fern/metadata.json
+++ b/.fern/metadata.json
@@ -1,7 +1,7 @@
 {
-  "cliVersion": "3.86.0",
+  "cliVersion": "4.23.0",
   "generatorName": "fernapi/fern-python-sdk",
-  "generatorVersion": "4.55.4",
+  "generatorVersion": "4.63.4",
   "generatorConfig": {
     "pyproject_python_version": "^3.9",
     "inline_request_params": false,
@@ -79,5 +79,6 @@
       }
     ]
   },
-  "sdkVersion": "5.20.7"
+  "originGitCommit": "7ffcb80a5edaf952524b88ad8960151e148ff39b",
+  "sdkVersion": "5.21.0"
 }
\ No newline at end of file
diff --git a/poetry.lock b/poetry.lock
index b77572c90..b4f355168 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -43,135 +43,151 @@ trio = ["trio (>=0.31.0)", "trio (>=0.32.0)"]

 [[package]]
 name = "certifi"
-version = "2026.1.4"
+version = "2026.2.25"
 description = "Python package for providing Mozilla's CA Bundle."
=3.7" files = [">
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c"},
-    {file = "certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"},
+    {file = "certifi-2026.2.25-py3-none-any.whl", hash = "sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa"},
+    {file = "certifi-2026.2.25.tar.gz", hash = "sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7"},
 ]

 [[package]]
 name = "charset-normalizer"
-version = "3.4.4"
+version = "3.4.6"
 description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d"},
-    {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8"},
-    {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad"},
-    {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8"},
-    {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d"},
-    {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313"},
-    {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e"},
-    {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93"},
-    {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0"},
-    {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84"},
-    {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e"},
-    {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db"},
-    {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6"},
-    {file = "charset_normalizer-3.4.4-cp310-cp310-win32.whl", hash = "sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f"},
-    {file = "charset_normalizer-3.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d"},
-    {file = "charset_normalizer-3.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69"},
-    {file = "charset_normalizer-3.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8"},
-    {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0"},
-    {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3"},
-    {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc"},
-    {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897"},
-    {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381"},
-    {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815"},
-    {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0"},
-    {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161"},
-    {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4"},
-    {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89"},
-    {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569"},
-    {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224"},
-    {file = "charset_normalizer-3.4.4-cp311-cp311-win32.whl", hash = "sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a"},
-    {file = "charset_normalizer-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016"},
-    {file = "charset_normalizer-3.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1"},
-    {file = "charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394"},
-    {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25"},
-    {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef"},
-    {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d"},
-    {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8"},
-    {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86"},
-    {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a"},
-    {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f"},
-    {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc"},
-    {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf"},
-    {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15"},
-    {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9"},
-    {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0"},
-    {file = "charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26"},
-    {file = "charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525"},
-    {file = "charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3"},
-    {file = "charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794"},
-    {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed"},
-    {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72"},
-    {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328"},
-    {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede"},
-    {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894"},
-    {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1"},
-    {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490"},
-    {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44"},
-    {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133"},
-    {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3"},
-    {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e"},
-    {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc"},
-    {file = "charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac"},
-    {file = "charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14"},
-    {file = "charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2"},
-    {file = "charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd"},
-    {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb"},
-    {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e"},
-    {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14"},
-    {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191"},
-    {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838"},
-    {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6"},
-    {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e"},
-    {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c"},
-    {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090"},
-    {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152"},
-    {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828"},
-    {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec"},
-    {file = "charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9"},
-    {file = "charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c"},
-    {file = "charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2"},
-    {file = "charset_normalizer-3.4.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ce8a0633f41a967713a59c4139d29110c07e826d131a316b50ce11b1d79b4f84"},
-    {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eaabd426fe94daf8fd157c32e571c85cb12e66692f15516a83a03264b08d06c3"},
-    {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c4ef880e27901b6cc782f1b95f82da9313c0eb95c3af699103088fa0ac3ce9ac"},
-    {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2aaba3b0819274cc41757a1da876f810a3e4d7b6eb25699253a4effef9e8e4af"},
-    {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:778d2e08eda00f4256d7f672ca9fef386071c9202f5e4607920b86d7803387f2"},
-    {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f155a433c2ec037d4e8df17d18922c3a0d9b3232a396690f17175d2946f0218d"},
-    {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a8bf8d0f749c5757af2142fe7903a9df1d2e8aa3841559b2bad34b08d0e2bcf3"},
-    {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:194f08cbb32dc406d6e1aea671a68be0823673db2832b38405deba2fb0d88f63"},
-    {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:6aee717dcfead04c6eb1ce3bd29ac1e22663cdea57f943c87d1eab9a025438d7"},
-    {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:cd4b7ca9984e5e7985c12bc60a6f173f3c958eae74f3ef6624bb6b26e2abbae4"},
-    {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_riscv64.whl", hash = "sha256:b7cf1017d601aa35e6bb650b6ad28652c9cd78ee6caff19f3c28d03e1c80acbf"},
-    {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e912091979546adf63357d7e2ccff9b44f026c075aeaf25a52d0e95ad2281074"},
-    {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5cb4d72eea50c8868f5288b7f7f33ed276118325c1dfd3957089f6b519e1382a"},
-    {file = "charset_normalizer-3.4.4-cp38-cp38-win32.whl", hash = "sha256:837c2ce8c5a65a2035be9b3569c684358dfbf109fd3b6969630a87535495ceaa"},
-    {file = "charset_normalizer-3.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:44c2a8734b333e0578090c4cd6b16f275e07aa6614ca8715e6c038e865e70576"},
-    {file = "charset_normalizer-3.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a9768c477b9d7bd54bc0c86dbaebdec6f03306675526c9927c0e8a04e8f94af9"},
-    {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bee1e43c28aa63cb16e5c14e582580546b08e535299b8b6158a7c9c768a1f3d"},
-    {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fd44c878ea55ba351104cb93cc85e74916eb8fa440ca7903e57575e97394f608"},
-    {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0f04b14ffe5fdc8c4933862d8306109a2c51e0704acfa35d51598eb45a1e89fc"},
-    {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:cd09d08005f958f370f539f186d10aec3377d55b9eeb0d796025d4886119d76e"},
-    {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4fe7859a4e3e8457458e2ff592f15ccb02f3da787fcd31e0183879c3ad4692a1"},
-    {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fa09f53c465e532f4d3db095e0c55b615f010ad81803d383195b6b5ca6cbf5f3"},
-    {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7fa17817dc5625de8a027cb8b26d9fefa3ea28c8253929b8d6649e705d2835b6"},
-    {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:5947809c8a2417be3267efc979c47d76a079758166f7d43ef5ae8e9f92751f88"},
-    {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4902828217069c3c5c71094537a8e623f5d097858ac6ca8252f7b4d10b7560f1"},
-    {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:7c308f7e26e4363d79df40ca5b2be1c6ba9f02bdbccfed5abddb7859a6ce72cf"},
-    {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2c9d3c380143a1fedbff95a312aa798578371eb29da42106a29019368a475318"},
-    {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb01158d8b88ee68f15949894ccc6712278243d95f344770fa7593fa2d94410c"},
-    {file = "charset_normalizer-3.4.4-cp39-cp39-win32.whl", hash = "sha256:2677acec1a2f8ef614c6888b5b4ae4060cc184174a938ed4e8ef690e15d3e505"},
-    {file = "charset_normalizer-3.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:f8e160feb2aed042cd657a72acc0b481212ed28b1b9a95c0cee1621b524e1966"},
-    {file = "charset_normalizer-3.4.4-cp39-cp39-win_arm64.whl", hash = "sha256:b5d84d37db046c5ca74ee7bb47dd6cbc13f80665fdde3e8040bdd3fb015ecb50"},
-    {file = "charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f"},
-    {file = "charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a"},
+    {file = "charset_normalizer-3.4.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2e1d8ca8611099001949d1cdfaefc510cf0f212484fe7c565f735b68c78c3c95"},
+    {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e25369dc110d58ddf29b949377a93e0716d72a24f62bad72b2b39f155949c1fd"},
+    {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:259695e2ccc253feb2a016303543d691825e920917e31f894ca1a687982b1de4"},
+    {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:dda86aba335c902b6149a02a55b38e96287157e609200811837678214ba2b1db"},
+    {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:51fb3c322c81d20567019778cb5a4a6f2dc1c200b886bc0d636238e364848c89"},
+    {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux_2_31_armv7l.whl", hash = "sha256:4482481cb0572180b6fd976a4d5c72a30263e98564da68b86ec91f0fe35e8565"},
+    {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:39f5068d35621da2881271e5c3205125cc456f54e9030d3f723288c873a71bf9"},
+    {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8bea55c4eef25b0b19a0337dc4e3f9a15b00d569c77211fa8cde38684f234fb7"},
+    {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:f0cdaecd4c953bfae0b6bb64910aaaca5a424ad9c72d85cb88417bb9814f7550"},
+    {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:150b8ce8e830eb7ccb029ec9ca36022f756986aaaa7956aad6d9ec90089338c0"},
+    {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:e68c14b04827dd76dcbd1aeea9e604e3e4b78322d8faf2f8132c7138efa340a8"},
+    {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:3778fd7d7cd04ae8f54651f4a7a0bd6e39a0cf20f801720a4c21d80e9b7ad6b0"},
+    {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:dad6e0f2e481fffdcf776d10ebee25e0ef89f16d691f1e5dee4b586375fdc64b"},
+    {file = "charset_normalizer-3.4.6-cp310-cp310-win32.whl", hash = "sha256:74a2e659c7ecbc73562e2a15e05039f1e22c75b7c7618b4b574a3ea9118d1557"},
+    {file = "charset_normalizer-3.4.6-cp310-cp310-win_amd64.whl", hash = "sha256:aa9cccf4a44b9b62d8ba8b4dd06c649ba683e4bf04eea606d2e94cfc2d6ff4d6"},
+    {file = "charset_normalizer-3.4.6-cp310-cp310-win_arm64.whl", hash = "sha256:e985a16ff513596f217cee86c21371b8cd011c0f6f056d0920aa2d926c544058"},
+    {file = "charset_normalizer-3.4.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:82060f995ab5003a2d6e0f4ad29065b7672b6593c8c63559beefe5b443242c3e"},
+    {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:60c74963d8350241a79cb8feea80e54d518f72c26db618862a8f53e5023deaf9"},
+    {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f6e4333fb15c83f7d1482a76d45a0818897b3d33f00efd215528ff7c51b8e35d"},
+    {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:bc72863f4d9aba2e8fd9085e63548a324ba706d2ea2c83b260da08a59b9482de"},
+    {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9cc4fc6c196d6a8b76629a70ddfcd4635a6898756e2d9cac5565cf0654605d73"},
+    {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux_2_31_armv7l.whl", hash = "sha256:0c173ce3a681f309f31b87125fecec7a5d1347261ea11ebbb856fa6006b23c8c"},
+    {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c907cdc8109f6c619e6254212e794d6548373cc40e1ec75e6e3823d9135d29cc"},
+    {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:404a1e552cf5b675a87f0651f8b79f5f1e6fd100ee88dc612f89aa16abd4486f"},
+    {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:e3c701e954abf6fc03a49f7c579cc80c2c6cc52525340ca3186c41d3f33482ef"},
+    {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7a6967aaf043bceabab5412ed6bd6bd26603dae84d5cb75bf8d9a74a4959d398"},
+    {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:5feb91325bbceade6afab43eb3b508c63ee53579fe896c77137ded51c6b6958e"},
+    {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f820f24b09e3e779fe84c3c456cb4108a7aa639b0d1f02c28046e11bfcd088ed"},
+    {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b35b200d6a71b9839a46b9b7fff66b6638bb52fc9658aa58796b0326595d3021"},
+    {file = "charset_normalizer-3.4.6-cp311-cp311-win32.whl", hash = "sha256:9ca4c0b502ab399ef89248a2c84c54954f77a070f28e546a85e91da627d1301e"},
+    {file = "charset_normalizer-3.4.6-cp311-cp311-win_amd64.whl", hash = "sha256:a9e68c9d88823b274cf1e72f28cb5dc89c990edf430b0bfd3e2fb0785bfeabf4"},
+    {file = "charset_normalizer-3.4.6-cp311-cp311-win_arm64.whl", hash = "sha256:97d0235baafca5f2b09cf332cc275f021e694e8362c6bb9c96fc9a0eb74fc316"},
+    {file = "charset_normalizer-3.4.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2ef7fedc7a6ecbe99969cd09632516738a97eeb8bd7258bf8a0f23114c057dab"},
+    {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a4ea868bc28109052790eb2b52a9ab33f3aa7adc02f96673526ff47419490e21"},
+    {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:836ab36280f21fc1a03c99cd05c6b7af70d2697e374c7af0b61ed271401a72a2"},
+    {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f1ce721c8a7dfec21fcbdfe04e8f68174183cf4e8188e0645e92aa23985c57ff"},
+    {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e28d62a8fc7a1fa411c43bd65e346f3bce9716dc51b897fbe930c5987b402d5"},
+    {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:530d548084c4a9f7a16ed4a294d459b4f229db50df689bfe92027452452943a0"},
+    {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:30f445ae60aad5e1f8bdbb3108e39f6fbc09f4ea16c815c66578878325f8f15a"},
+    {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ac2393c73378fea4e52aa56285a3d64be50f1a12395afef9cce47772f60334c2"},
+    {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:90ca27cd8da8118b18a52d5f547859cc1f8354a00cd1e8e5120df3e30d6279e5"},
+    {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e5a94886bedca0f9b78fecd6afb6629142fd2605aa70a125d49f4edc6037ee6"},
+    {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:695f5c2823691a25f17bc5d5ffe79fa90972cc34b002ac6c843bb8a1720e950d"},
+    {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:231d4da14bcd9301310faf492051bee27df11f2bc7549bc0bb41fef11b82daa2"},
+    {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a056d1ad2633548ca18ffa2f85c202cfb48b68615129143915b8dc72a806a923"},
+    {file = "charset_normalizer-3.4.6-cp312-cp312-win32.whl", hash = "sha256:c2274ca724536f173122f36c98ce188fd24ce3dad886ec2b7af859518ce008a4"},
+    {file = "charset_normalizer-3.4.6-cp312-cp312-win_amd64.whl", hash = "sha256:c8ae56368f8cc97c7e40a7ee18e1cedaf8e780cd8bc5ed5ac8b81f238614facb"},
+    {file = "charset_normalizer-3.4.6-cp312-cp312-win_arm64.whl", hash = "sha256:899d28f422116b08be5118ef350c292b36fc15ec2daeb9ea987c89281c7bb5c4"},
+    {file = "charset_normalizer-3.4.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:11afb56037cbc4b1555a34dd69151e8e069bee82e613a73bef6e714ce733585f"},
+    {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:423fb7e748a08f854a08a222b983f4df1912b1daedce51a72bd24fe8f26a1843"},
+    {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d73beaac5e90173ac3deb9928a74763a6d230f494e4bfb422c217a0ad8e629bf"},
+    {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d60377dce4511655582e300dc1e5a5f24ba0cb229005a1d5c8d0cb72bb758ab8"},
+    {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:530e8cebeea0d76bdcf93357aa5e41336f48c3dc709ac52da2bb167c5b8271d9"},
+    {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:a26611d9987b230566f24a0a125f17fe0de6a6aff9f25c9f564aaa2721a5fb88"},
+    {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:34315ff4fc374b285ad7f4a0bf7dcbfe769e1b104230d40f49f700d4ab6bbd84"},
+    {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ddd609f9e1af8c7bd6e2aca279c931aefecd148a14402d4e368f3171769fd"},
+    {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:80d0a5615143c0b3225e5e3ef22c8d5d51f3f72ce0ea6fb84c943546c7b25b6c"},
+    {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:92734d4d8d187a354a556626c221cd1a892a4e0802ccb2af432a1d85ec012194"},
+    {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:613f19aa6e082cf96e17e3ffd89383343d0d589abda756b7764cf78361fd41dc"},
+    {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:2b1a63e8224e401cafe7739f77efd3f9e7f5f2026bda4aead8e59afab537784f"},
+    {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6cceb5473417d28edd20c6c984ab6fee6c6267d38d906823ebfe20b03d607dc2"},
+    {file = "charset_normalizer-3.4.6-cp313-cp313-win32.whl", hash = "sha256:d7de2637729c67d67cf87614b566626057e95c303bc0a55ffe391f5205e7003d"},
+    {file = "charset_normalizer-3.4.6-cp313-cp313-win_amd64.whl", hash = "sha256:572d7c822caf521f0525ba1bce1a622a0b85cf47ffbdae6c9c19e3b5ac3c4389"},
+    {file = "charset_normalizer-3.4.6-cp313-cp313-win_arm64.whl", hash = "sha256:a4474d924a47185a06411e0064b803c68be044be2d60e50e8bddcc2649957c1f"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:9cc6e6d9e571d2f863fa77700701dae73ed5f78881efc8b3f9a4398772ff53e8"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef5960d965e67165d75b7c7ffc60a83ec5abfc5c11b764ec13ea54fbef8b4421"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b3694e3f87f8ac7ce279d4355645b3c878d24d1424581b46282f24b92f5a4ae2"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5d11595abf8dd942a77883a39d81433739b287b6aa71620f15164f8096221b30"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7bda6eebafd42133efdca535b04ccb338ab29467b3f7bf79569883676fc628db"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:bbc8c8650c6e51041ad1be191742b8b421d05bbd3410f43fa2a00c8db87678e8"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:22c6f0c2fbc31e76c3b8a86fba1a56eda6166e238c29cdd3d14befdb4a4e4815"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7edbed096e4a4798710ed6bc75dcaa2a21b68b6c356553ac4823c3658d53743a"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:7f9019c9cb613f084481bd6a100b12e1547cf2efe362d873c2e31e4035a6fa43"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:58c948d0d086229efc484fe2f30c2d382c86720f55cd9bc33591774348ad44e0"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:419a9d91bd238052642a51938af8ac05da5b3343becde08d5cdeab9046df9ee1"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5273b9f0b5835ff0350c0828faea623c68bfa65b792720c453e22b25cc72930f"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:0e901eb1049fdb80f5bd11ed5ea1e498ec423102f7a9b9e4645d5b8204ff2815"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314-win32.whl", hash = "sha256:b4ff1d35e8c5bd078be89349b6f3a845128e685e751b6ea1169cf2160b344c4d"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314-win_amd64.whl", hash = "sha256:74119174722c4349af9708993118581686f343adc1c8c9c007d59be90d077f3f"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314-win_arm64.whl", hash = "sha256:e5bcc1a1ae744e0bb59641171ae53743760130600da8db48cbb6e4918e186e4e"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:ad8faf8df23f0378c6d527d8b0b15ea4a2e23c89376877c598c4870d1b2c7866"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f5ea69428fa1b49573eef0cc44a1d43bebd45ad0c611eb7d7eac760c7ae771bc"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:06a7e86163334edfc5d20fe104db92fcd666e5a5df0977cb5680a506fe26cc8e"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e1f6e2f00a6b8edb562826e4632e26d063ac10307e80f7461f7de3ad8ef3f077"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95b52c68d64c1878818687a473a10547b3292e82b6f6fe483808fb1468e2f52f"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:7504e9b7dc05f99a9bbb4525c67a2c155073b44d720470a148b34166a69c054e"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:172985e4ff804a7ad08eebec0a1640ece87ba5041d565fff23c8f99c1f389484"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:4be9f4830ba8741527693848403e2c457c16e499100963ec711b1c6f2049b7c7"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:79090741d842f564b1b2827c0b82d846405b744d31e84f18d7a7b41c20e473ff"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:87725cfb1a4f1f8c2fc9890ae2f42094120f4b44db9360be5d99a4c6b0e03a9e"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:fcce033e4021347d80ed9c66dcf1e7b1546319834b74445f561d2e2221de5659"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:ca0276464d148c72defa8bb4390cce01b4a0e425f3b50d1435aa6d7a18107602"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:197c1a244a274bb016dd8b79204850144ef77fe81c5b797dc389327adb552407"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314t-win32.whl", hash = "sha256:2a24157fa36980478dd1770b585c0f30d19e18f4fb0c47c13aa568f871718579"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314t-win_amd64.whl", hash = "sha256:cd5e2801c89992ed8c0a3f0293ae83c159a60d9a5d685005383ef4caca77f2c4"},
+    {file = "charset_normalizer-3.4.6-cp314-cp314t-win_arm64.whl", hash = "sha256:47955475ac79cc504ef2704b192364e51d0d473ad452caedd0002605f780101c"},
+    {file = "charset_normalizer-3.4.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:659a1e1b500fac8f2779dd9e1570464e012f43e580371470b45277a27baa7532"},
+    {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f61aa92e4aad0be58eb6eb4e0c21acf32cf8065f4b2cae5665da756c4ceef982"},
+    {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f50498891691e0864dc3da965f340fada0771f6142a378083dc4608f4ea513e2"},
+    {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:bf625105bb9eef28a56a943fec8c8a98aeb80e7d7db99bd3c388137e6eb2d237"},
+    {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2bd9d128ef93637a5d7a6af25363cf5dec3fa21cf80e68055aad627f280e8afa"},
+    {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux_2_31_armv7l.whl", hash = "sha256:d08ec48f0a1c48d75d0356cea971921848fb620fdeba805b28f937e90691209f"},
+    {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1ed80ff870ca6de33f4d953fda4d55654b9a2b340ff39ab32fa3adbcd718f264"},
+    {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f98059e4fcd3e3e4e2d632b7cf81c2faae96c43c60b569e9c621468082f1d104"},
+    {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:ab30e5e3e706e3063bc6de96b118688cb10396b70bb9864a430f67df98c61ecc"},
+    {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:d5f5d1e9def3405f60e3ca8232d56f35c98fb7bf581efcc60051ebf53cb8b611"},
+    {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_riscv64.whl", hash = "sha256:461598cd852bfa5a61b09cae2b1c02e2efcd166ee5516e243d540ac24bfa68a7"},
+    {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:71be7e0e01753a89cf024abf7ecb6bca2c81738ead80d43004d9b5e3f1244e64"},
+    {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:df01808ee470038c3f8dc4f48620df7225c49c2d6639e38f96e6d6ac6e6f7b0e"},
+    {file = "charset_normalizer-3.4.6-cp38-cp38-win32.whl", hash = "sha256:69dd852c2f0ad631b8b60cfbe25a28c0058a894de5abb566619c205ce0550eae"},
+    {file = "charset_normalizer-3.4.6-cp38-cp38-win_amd64.whl", hash = "sha256:517ad0e93394ac532745129ceabdf2696b609ec9f87863d337140317ebce1c14"},
+    {file = "charset_normalizer-3.4.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:31215157227939b4fb3d740cd23fe27be0439afef67b785a1eb78a3ae69cba9e"},
+    {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ecbbd45615a6885fe3240eb9db73b9e62518b611850fdf8ab08bd56de7ad2b17"},
+    {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c45a03a4c69820a399f1dda9e1d8fbf3562eda46e7720458180302021b08f778"},
+    {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e8aeb10fcbe92767f0fa69ad5a72deca50d0dca07fbde97848997d778a50c9fe"},
+    {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:54fae94be3d75f3e573c9a1b5402dc593de19377013c9a0e4285e3d402dd3a2a"},
+    {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux_2_31_armv7l.whl", hash = "sha256:2f7fdd9b6e6c529d6a2501a2d36b240109e78a8ceaef5687cfcfa2bbe671d297"},
+    {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:4d1d02209e06550bdaef34af58e041ad71b88e624f5d825519da3a3308e22687"},
+    {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8bc5f0687d796c05b1e28ab0d38a50e6309906ee09375dd3aff6a9c09dd6e8f4"},
+    {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:ee4ec14bc1680d6b0afab9aea2ef27e26d2024f18b24a2d7155a52b60da7e833"},
+    {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d1a2ee9c1499fc8f86f4521f27a973c914b211ffa87322f4ee33bb35392da2c5"},
+    {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:48696db7f18afb80a068821504296eb0787d9ce239b91ca15059d1d3eaacf13b"},
+    {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4f41da960b196ea355357285ad1316a00099f22d0929fe168343b99b254729c9"},
+    {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:802168e03fba8bbc5ce0d866d589e4b1ca751d06edee69f7f3a19c5a9fe6b597"},
+    {file = "charset_normalizer-3.4.6-cp39-cp39-win32.whl", hash = "sha256:8761ac29b6c81574724322a554605608a9960769ea83d2c73e396f3df896ad54"},
+    {file = "charset_normalizer-3.4.6-cp39-cp39-win_amd64.whl", hash = "sha256:1cf0a70018692f85172348fe06d3a4b63f94ecb055e13a00c644d368eb82e5b8"},
+    {file = "charset_normalizer-3.4.6-cp39-cp39-win_arm64.whl", hash = "sha256:3516bbb8d42169de9e61b8520cbeeeb716f12f4ecfe3fd30a9919aa16c806ca8"},
+    {file = "charset_normalizer-3.4.6-py3-none-any.whl", hash = "sha256:947cf925bc916d90adba35a64c82aace04fa39b46b52d4630ece166655905a69"},
+    {file = "charset_normalizer-3.4.6.tar.gz", hash = "sha256:1ae6b62897110aa7c79ea2f5dd38d1abca6db663687c0b1ad9aed6f6bae3d9d6"},
 ]

 [[package]]
@@ -368,33 +384,36 @@ files = [

 [[package]]
 name = "hf-xet"
-version = "1.3.0"
+version = "1.4.2"
 description = "Fast transfer of large files with the Hugging Face Hub."
=3.8" files = [">
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "hf_xet-1.3.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:95bdeab4747cb45f855601e39b9e86ae92b4a114978ada6e0401961fcc5d2958"},
-    {file = "hf_xet-1.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f99992583f27b139392601fe99e88df155dc4de7feba98ed27ce2d3e6b4a65bb"},
-    {file = "hf_xet-1.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:687a71fc6d2eaa79d864da3aa13e5d887e124d357f5f306bfff6c385eea9d990"},
-    {file = "hf_xet-1.3.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:75d19813ed0e24525409bc22566282ae9bc93e5d764b185565e863dc28280a45"},
-    {file = "hf_xet-1.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:078af43569c2e05233137a93a33d2293f95c272745eaf030a9bb5f27bb0c9e9c"},
-    {file = "hf_xet-1.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:be8731e1620cc8549025c39ed3917c8fd125efaeae54ae679214a3d573e6c109"},
-    {file = "hf_xet-1.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:1552616c0e0fa728a4ffdffa106e91faa0fd4edb44868e79b464fad00b2758ee"},
-    {file = "hf_xet-1.3.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:a61496eccf412d7c51a5613c31a2051d357ddea6be53a0672c7644cf39bfefe9"},
-    {file = "hf_xet-1.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:aba35218871cc438826076778958f7ab2a1f4f8d654e91c307073a815360558f"},
-    {file = "hf_xet-1.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c444d8f657dedd7a72aa0ef0178fe01fe92b04b58014ee49e2b3b4985aea1529"},
-    {file = "hf_xet-1.3.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:6d1bbda7900d72bc591cd39a64e35ad07f89a24f90e3d7b7c692cb93a1926cde"},
-    {file = "hf_xet-1.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:588f5df302e7dba5c3b60d4e5c683f95678526c29b9f64cbeb23e9f1889c6b83"},
-    {file = "hf_xet-1.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:944ae454b296c42b18219c37f245c78d0e64a734057423e9309f4938faa85d7f"},
-    {file = "hf_xet-1.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:34cdd5f10e61b7a1a7542672d20887c85debcfeb70a471ff1506f5a4c9441e42"},
-    {file = "hf_xet-1.3.0-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:df4447f69086dcc6418583315eda6ed09033ac1fbbc784fedcbbbdf67bea1680"},
-    {file = "hf_xet-1.3.0-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:39f4fe714628adc2214ab4a67391182ee751bc4db581868cb3204900817758a8"},
-    {file = "hf_xet-1.3.0-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9b16e53ed6b5c8197cefb3fd12047a430b7034428effed463c03cec68de7e9a3"},
-    {file = "hf_xet-1.3.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:92051a1f73019489be77f6837671024ec785a3d1b888466b09d3a9ea15c4a1b5"},
-    {file = "hf_xet-1.3.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:943046b160e7804a85e68a659d2eee1a83ce3661f72d1294d3cc5ece0f45a355"},
-    {file = "hf_xet-1.3.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:9b798a95d41b4f33b0b455c8aa76ff1fd26a587a4dd3bdec29f0a37c60b78a2f"},
-    {file = "hf_xet-1.3.0-cp37-abi3-win_amd64.whl", hash = "sha256:227eee5b99d19b9f20c31d901a0c2373af610a24a34e6c2701072c9de48d6d95"},
-    {file = "hf_xet-1.3.0.tar.gz", hash = "sha256:9c154ad63e17aca970987b2cf17dbd8a0c09bb18aeb246f637647a8058e4522b"},
+    {file = "hf_xet-1.4.2-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:ac8202ae1e664b2c15cdfc7298cbb25e80301ae596d602ef7870099a126fcad4"},
+    {file = "hf_xet-1.4.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:6d2f8ee39fa9fba9af929f8c0d0482f8ee6e209179ad14a909b6ad78ffcb7c81"},
+ {file = "hf_xet-1.4.2-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4642a6cf249c09da8c1f87fe50b24b2a3450b235bf8adb55700b52f0ea6e2eb6"}, + {file = "hf_xet-1.4.2-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:769431385e746c92dc05492dde6f687d304584b89c33d79def8367ace06cb555"}, + {file = "hf_xet-1.4.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c9dd1c1bc4cc56168f81939b0e05b4c36dd2d28c13dc1364b17af89aa0082496"}, + {file = "hf_xet-1.4.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:fca58a2ae4e6f6755cc971ac6fcdf777ea9284d7e540e350bb000813b9a3008d"}, + {file = "hf_xet-1.4.2-cp313-cp313t-win_amd64.whl", hash = "sha256:163aab46854ccae0ab6a786f8edecbbfbaa38fcaa0184db6feceebf7000c93c0"}, + {file = "hf_xet-1.4.2-cp313-cp313t-win_arm64.whl", hash = "sha256:09b138422ecbe50fd0c84d4da5ff537d27d487d3607183cd10e3e53f05188e82"}, + {file = "hf_xet-1.4.2-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:949dcf88b484bb9d9276ca83f6599e4aa03d493c08fc168c124ad10b2e6f75d7"}, + {file = "hf_xet-1.4.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:41659966020d59eb9559c57de2cde8128b706a26a64c60f0531fa2318f409418"}, + {file = "hf_xet-1.4.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5c588e21d80010119458dd5d02a69093f0d115d84e3467efe71ffb2c67c19146"}, + {file = "hf_xet-1.4.2-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:a296744d771a8621ad1d50c098d7ab975d599800dae6d48528ba3944e5001ba0"}, + {file = "hf_xet-1.4.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f563f7efe49588b7d0629d18d36f46d1658fe7e08dce3fa3d6526e1c98315e2d"}, + {file = "hf_xet-1.4.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5b2e0132c56d7ee1bf55bdb638c4b62e7106f6ac74f0b786fed499d5548c5570"}, + {file = "hf_xet-1.4.2-cp314-cp314t-win_amd64.whl", hash = "sha256:2f45c712c2fa1215713db10df6ac84b49d0e1c393465440e9cb1de73ecf7bbf6"}, + {file = "hf_xet-1.4.2-cp314-cp314t-win_arm64.whl", hash = "sha256:6d53df40616f7168abfccff100d232e9d460583b9d86fa4912c24845f192f2b8"}, + {file = "hf_xet-1.4.2-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:71f02d6e4cdd07f344f6844845d78518cc7186bd2bc52d37c3b73dc26a3b0bc5"}, + {file = "hf_xet-1.4.2-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:e9b38d876e94d4bdcf650778d6ebbaa791dd28de08db9736c43faff06ede1b5a"}, + {file = "hf_xet-1.4.2-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:77e8c180b7ef12d8a96739a4e1e558847002afe9ea63b6f6358b2271a8bdda1c"}, + {file = "hf_xet-1.4.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:c3b3c6a882016b94b6c210957502ff7877802d0dbda8ad142c8595db8b944271"}, + {file = "hf_xet-1.4.2-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9d9a634cc929cfbaf2e1a50c0e532ae8c78fa98618426769480c58501e8c8ac2"}, + {file = "hf_xet-1.4.2-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6b0932eb8b10317ea78b7da6bab172b17be03bbcd7809383d8d5abd6a2233e04"}, + {file = "hf_xet-1.4.2-cp37-abi3-win_amd64.whl", hash = "sha256:ad185719fb2e8ac26f88c8100562dbf9dbdcc3d9d2add00faa94b5f106aea53f"}, + {file = "hf_xet-1.4.2-cp37-abi3-win_arm64.whl", hash = "sha256:32c012286b581f783653e718c1862aea5b9eb140631685bb0c5e7012c8719a87"}, + {file = "hf_xet-1.4.2.tar.gz", hash = "sha256:b7457b6b482d9e0743bd116363239b1fa904a5e65deede350fbc0c4ea67c71ea"}, ] [package.extras] @@ -447,36 +466,35 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "huggingface-hub" -version = "1.4.1" +version = "1.8.0" description = "Client library to download and publish models, datasets and 
other repos on the huggingface.co hub" optional = false python-versions = ">=3.9.0" files = [ - {file = "huggingface_hub-1.4.1-py3-none-any.whl", hash = "sha256:9931d075fb7a79af5abc487106414ec5fba2c0ae86104c0c62fd6cae38873d18"}, - {file = "huggingface_hub-1.4.1.tar.gz", hash = "sha256:b41131ec35e631e7383ab26d6146b8d8972abc8b6309b963b306fbcca87f5ed5"}, + {file = "huggingface_hub-1.8.0-py3-none-any.whl", hash = "sha256:d3eb5047bd4e33c987429de6020d4810d38a5bef95b3b40df9b17346b7f353f2"}, + {file = "huggingface_hub-1.8.0.tar.gz", hash = "sha256:c5627b2fd521e00caf8eff4ac965ba988ea75167fad7ee72e17f9b7183ec63f3"}, ] [package.dependencies] -filelock = "*" +filelock = ">=3.10.0" fsspec = ">=2023.5.0" -hf-xet = {version = ">=1.2.0,<2.0.0", markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\""} +hf-xet = {version = ">=1.4.2,<2.0.0", markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\""} httpx = ">=0.23.0,<1" packaging = ">=20.9" pyyaml = ">=5.1" -shellingham = "*" tqdm = ">=4.42.1" -typer-slim = "*" +typer = "*" typing-extensions = ">=4.1.0" [package.extras] -all = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] -dev = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +all = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "duckdb", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +dev = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "duckdb", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] -hf-xet = ["hf-xet (>=1.2.0,<2.0.0)"] +hf-xet = ["hf-xet (>=1.4.2,<2.0.0)"] mcp = ["mcp (>=1.8.0)"] oauth = ["authlib (>=1.3.2)", "fastapi", "httpx", "itsdangerous"] quality = ["libcst (>=1.4.0)", "mypy (==1.15.0)", "ruff (>=0.9.0)", "ty"] -testing = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "fastapi", "fastapi", 
"httpx", "itsdangerous", "jedi", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +testing = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "duckdb", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] torch = ["safetensors[torch]", "torch"] typing = ["types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] @@ -1085,58 +1103,58 @@ testing = ["datasets", "numpy", "pytest", "pytest-asyncio", "requests", "ruff", [[package]] name = "tomli" -version = "2.4.0" +version = "2.4.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" files = [ - {file = "tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867"}, - {file = "tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9"}, - {file = "tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95"}, - {file = "tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76"}, - {file = "tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d"}, - {file = "tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576"}, - {file = "tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a"}, - {file = "tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa"}, - {file = "tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614"}, - {file = "tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1"}, - {file = "tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8"}, - {file = "tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a"}, - {file = "tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1"}, - {file = "tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b"}, - {file = "tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51"}, - {file = "tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729"}, - {file = "tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da"}, - {file = 
"tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3"}, - {file = "tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0"}, - {file = "tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e"}, - {file = "tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4"}, - {file = "tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e"}, - {file = "tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c"}, - {file = "tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f"}, - {file = "tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86"}, - {file = "tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87"}, - {file = "tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132"}, - {file = "tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6"}, - {file = "tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc"}, - {file = "tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66"}, - {file = "tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d"}, - {file = "tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702"}, - {file = "tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8"}, - {file = "tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776"}, - {file = "tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475"}, - {file = "tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2"}, - {file = "tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9"}, - {file = "tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0"}, - {file = "tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df"}, - {file = "tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d"}, - {file = 
"tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f"}, - {file = "tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b"}, - {file = "tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087"}, - {file = "tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd"}, - {file = "tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4"}, - {file = "tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a"}, - {file = "tomli-2.4.0.tar.gz", hash = "sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c"}, + {file = "tomli-2.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f8f0fc26ec2cc2b965b7a3b87cd19c5c6b8c5e5f436b984e85f486d652285c30"}, + {file = "tomli-2.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4ab97e64ccda8756376892c53a72bd1f964e519c77236368527f758fbc36a53a"}, + {file = "tomli-2.4.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:96481a5786729fd470164b47cdb3e0e58062a496f455ee41b4403be77cb5a076"}, + {file = "tomli-2.4.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a881ab208c0baf688221f8cecc5401bd291d67e38a1ac884d6736cbcd8247e9"}, + {file = "tomli-2.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47149d5bd38761ac8be13a84864bf0b7b70bc051806bc3669ab1cbc56216b23c"}, + {file = "tomli-2.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ec9bfaf3ad2df51ace80688143a6a4ebc09a248f6ff781a9945e51937008fcbc"}, + {file = "tomli-2.4.1-cp311-cp311-win32.whl", hash = "sha256:ff2983983d34813c1aeb0fa89091e76c3a22889ee83ab27c5eeb45100560c049"}, + {file = "tomli-2.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:5ee18d9ebdb417e384b58fe414e8d6af9f4e7a0ae761519fb50f721de398dd4e"}, + {file = "tomli-2.4.1-cp311-cp311-win_arm64.whl", hash = "sha256:c2541745709bad0264b7d4705ad453b76ccd191e64aa6f0fc66b69a293a45ece"}, + {file = "tomli-2.4.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c742f741d58a28940ce01d58f0ab2ea3ced8b12402f162f4d534dfe18ba1cd6a"}, + {file = "tomli-2.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7f86fd587c4ed9dd76f318225e7d9b29cfc5a9d43de44e5754db8d1128487085"}, + {file = "tomli-2.4.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ff18e6a727ee0ab0388507b89d1bc6a22b138d1e2fa56d1ad494586d61d2eae9"}, + {file = "tomli-2.4.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:136443dbd7e1dee43c68ac2694fde36b2849865fa258d39bf822c10e8068eac5"}, + {file = "tomli-2.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5e262d41726bc187e69af7825504c933b6794dc3fbd5945e41a79bb14c31f585"}, + {file = "tomli-2.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5cb41aa38891e073ee49d55fbc7839cfdb2bc0e600add13874d048c94aadddd1"}, + {file = "tomli-2.4.1-cp312-cp312-win32.whl", hash = "sha256:da25dc3563bff5965356133435b757a795a17b17d01dbc0f42fb32447ddfd917"}, + {file = "tomli-2.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:52c8ef851d9a240f11a88c003eacb03c31fc1c9c4ec64a99a0f922b93874fda9"}, + {file = "tomli-2.4.1-cp312-cp312-win_arm64.whl", 
hash = "sha256:f758f1b9299d059cc3f6546ae2af89670cb1c4d48ea29c3cacc4fe7de3058257"}, + {file = "tomli-2.4.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:36d2bd2ad5fb9eaddba5226aa02c8ec3fa4f192631e347b3ed28186d43be6b54"}, + {file = "tomli-2.4.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:eb0dc4e38e6a1fd579e5d50369aa2e10acfc9cace504579b2faabb478e76941a"}, + {file = "tomli-2.4.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c7f2c7f2b9ca6bdeef8f0fa897f8e05085923eb091721675170254cbc5b02897"}, + {file = "tomli-2.4.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f3c6818a1a86dd6dca7ddcaaf76947d5ba31aecc28cb1b67009a5877c9a64f3f"}, + {file = "tomli-2.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d312ef37c91508b0ab2cee7da26ec0b3ed2f03ce12bd87a588d771ae15dcf82d"}, + {file = "tomli-2.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:51529d40e3ca50046d7606fa99ce3956a617f9b36380da3b7f0dd3dd28e68cb5"}, + {file = "tomli-2.4.1-cp313-cp313-win32.whl", hash = "sha256:2190f2e9dd7508d2a90ded5ed369255980a1bcdd58e52f7fe24b8162bf9fedbd"}, + {file = "tomli-2.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:8d65a2fbf9d2f8352685bc1364177ee3923d6baf5e7f43ea4959d7d8bc326a36"}, + {file = "tomli-2.4.1-cp313-cp313-win_arm64.whl", hash = "sha256:4b605484e43cdc43f0954ddae319fb75f04cc10dd80d830540060ee7cd0243cd"}, + {file = "tomli-2.4.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:fd0409a3653af6c147209d267a0e4243f0ae46b011aa978b1080359fddc9b6cf"}, + {file = "tomli-2.4.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a120733b01c45e9a0c34aeef92bf0cf1d56cfe81ed9d47d562f9ed591a9828ac"}, + {file = "tomli-2.4.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:559db847dc486944896521f68d8190be1c9e719fced785720d2216fe7022b662"}, + {file = "tomli-2.4.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01f520d4f53ef97964a240a035ec2a869fe1a37dde002b57ebc4417a27ccd853"}, + {file = "tomli-2.4.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7f94b27a62cfad8496c8d2513e1a222dd446f095fca8987fceef261225538a15"}, + {file = "tomli-2.4.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:ede3e6487c5ef5d28634ba3f31f989030ad6af71edfb0055cbbd14189ff240ba"}, + {file = "tomli-2.4.1-cp314-cp314-win32.whl", hash = "sha256:3d48a93ee1c9b79c04bb38772ee1b64dcf18ff43085896ea460ca8dec96f35f6"}, + {file = "tomli-2.4.1-cp314-cp314-win_amd64.whl", hash = "sha256:88dceee75c2c63af144e456745e10101eb67361050196b0b6af5d717254dddf7"}, + {file = "tomli-2.4.1-cp314-cp314-win_arm64.whl", hash = "sha256:b8c198f8c1805dc42708689ed6864951fd2494f924149d3e4bce7710f8eb5232"}, + {file = "tomli-2.4.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:d4d8fe59808a54658fcc0160ecfb1b30f9089906c50b23bcb4c69eddc19ec2b4"}, + {file = "tomli-2.4.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7008df2e7655c495dd12d2a4ad038ff878d4ca4b81fccaf82b714e07eae4402c"}, + {file = "tomli-2.4.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1d8591993e228b0c930c4bb0db464bdad97b3289fb981255d6c9a41aedc84b2d"}, + {file = "tomli-2.4.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:734e20b57ba95624ecf1841e72b53f6e186355e216e5412de414e3c51e5e3c41"}, + {file = "tomli-2.4.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = 
"sha256:8a650c2dbafa08d42e51ba0b62740dae4ecb9338eefa093aa5c78ceb546fcd5c"}, + {file = "tomli-2.4.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:504aa796fe0569bb43171066009ead363de03675276d2d121ac1a4572397870f"}, + {file = "tomli-2.4.1-cp314-cp314t-win32.whl", hash = "sha256:b1d22e6e9387bf4739fbe23bfa80e93f6b0373a7f1b96c6227c32bef95a4d7a8"}, + {file = "tomli-2.4.1-cp314-cp314t-win_amd64.whl", hash = "sha256:2c1c351919aca02858f740c6d33adea0c5deea37f9ecca1cc1ef9e884a619d26"}, + {file = "tomli-2.4.1-cp314-cp314t-win_arm64.whl", hash = "sha256:eab21f45c7f66c13f2a9e0e1535309cee140182a9cdae1e041d02e47291e8396"}, + {file = "tomli-2.4.1-py3-none-any.whl", hash = "sha256:0d85819802132122da43cb86656f8d1f8c6587d54ae7dcaf30e90533028b49fe"}, + {file = "tomli-2.4.1.tar.gz", hash = "sha256:7c7e1a961a0b2f2472c1ac5b69affa0ae1132c39adcb67aba98568702b9cc23f"}, ] [[package]] @@ -1180,20 +1198,6 @@ click = [ rich = ">=12.3.0" shellingham = ">=1.3.0" -[[package]] -name = "typer-slim" -version = "0.23.2" -description = "Typer, build great CLIs. Easy to code. Based on Python type hints." -optional = false -python-versions = ">=3.9" -files = [ - {file = "typer_slim-0.23.2-py3-none-any.whl", hash = "sha256:2bc3f67ac58fe40763b414c3c65f6fcc92c6b81393d0d89339663aa69252c688"}, - {file = "typer_slim-0.23.2.tar.gz", hash = "sha256:19714179f4717a891650d8d5d062e990a0a1ed23e8d6e0f502f5a800802b3cdf"}, -] - -[package.dependencies] -typer = ">=0.23.2" - [[package]] name = "types-python-dateutil" version = "2.9.0.20260124" diff --git a/pyproject.toml b/pyproject.toml index b991fc2ef..7c7461dae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ dynamic = ["version"] [tool.poetry] name = "cohere" -version = "5.20.7" +version = "5.21.0" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index 2a2ec15dd..56ae66ede 100644 --- a/reference.md +++ b/reference.md @@ -1,7 +1,5 @@ # Reference -
client.chat_stream(...) -> typing.AsyncIterator[
-    AsyncHttpResponse[typing.AsyncIterator[StreamedChatResponse]]
-]
+client.chat_stream(...) -> typing.Iterator[bytes]
@@ -31,17 +29,17 @@ To learn how to use the Chat API and RAG follow our [Text Generation guides](htt ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) -response = client.chat_stream( + +client.chat_stream( model="command-a-03-2025", message="hello!", ) -for chunk in response.data: - yield chunk ```
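Note that the regenerated snippet above constructs the stream but never consumes it; with the new `typing.Iterator[bytes]` signature, nothing is received until the iterator is drained. A minimal consumption sketch, mirroring the client setup from the example (the empty `token` is a placeholder, not a real key):

```python
from cohere import Client
from cohere.environment import ClientEnvironment

client = Client(
    token="",  # placeholder only
    environment=ClientEnvironment.PRODUCTION,
)

# Drain the stream; each chunk arrives as it is generated.
for chunk in client.chat_stream(
    model="command-a-03-2025",
    message="hello!",
):
    print(chunk)
```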
@@ -69,7 +67,23 @@ Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private D
-**accepts:** `typing.Optional[typing.Literal["text/event-stream"]]` — Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events. +**stream:** `typing.Literal` + +Defaults to `false`. + +When `true`, the response will be a JSON stream of events. The final event will contain the complete response, and will have an `event_type` of `"stream-end"`. + +Streaming is beneficial for user interfaces that render the contents of the response piece by piece, as it gets generated. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+
+**accepts:** `typing.Optional[typing.Literal["text/event-stream"]]` — Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.
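The `stream-end` contract described above can be handled explicitly. A sketch, assuming the stream yields parsed event objects exposing `event_type`, `text`, and `response` attributes (those attribute names, and the `"text-generation"` event name, are assumptions rather than something this reference confirms):

```python
from cohere import Client
from cohere.environment import ClientEnvironment

client = Client(token="", environment=ClientEnvironment.PRODUCTION)  # placeholder token

for event in client.chat_stream(model="command-a-03-2025", message="hello!"):
    if event.event_type == "text-generation":
        print(event.text, end="")        # partial text, rendered as it arrives
    elif event.event_type == "stream-end":
        final_response = event.response  # complete response, per the description above
```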
@@ -103,7 +117,7 @@ Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private D
-**chat_history:** `typing.Optional[typing.Sequence[Message]]` +**chat_history:** `typing.Optional[typing.List[Message]]` A list of previous messages between the user and the model, giving the model conversational context for responding to the user's `message`. @@ -155,7 +169,7 @@ Compatible Deployments:
-**connectors:** `typing.Optional[typing.Sequence[ChatConnector]]` +**connectors:** `typing.Optional[typing.List[ChatConnector]]` Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom [connector](https://docs.cohere.com/docs/connectors), if you've [created](https://docs.cohere.com/v1/docs/creating-and-deploying-a-connector) one. @@ -183,7 +197,7 @@ Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private D
-**documents:** `typing.Optional[typing.Sequence[ChatDocument]]` +**documents:** `typing.Optional[typing.List[ChatDocument]]` A list of relevant documents that the model can cite to generate a more accurate reply. Each document is a string-string dictionary. @@ -309,7 +323,7 @@ Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private D
-**stop_sequences:** `typing.Optional[typing.Sequence[str]]` +**stop_sequences:** `typing.Optional[typing.List[str]]` A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence. @@ -362,7 +376,7 @@ Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private D
-**tools:** `typing.Optional[typing.Sequence[Tool]]` +**tools:** `typing.Optional[typing.List[Tool]]` A list of available tools (functions) that the model may suggest invoking before producing a text response. @@ -376,7 +390,7 @@ Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private D
-**tool_results:** `typing.Optional[typing.Sequence[ToolResult]]` +**tool_results:** `typing.Optional[typing.List[ToolResult]]` A list of results from invoking tools recommended by the model in the previous chat turn. Results are used to produce a text response and will be referenced in citations. When using `tool_results`, `tools` must be passed as well. Each tool_result contains information about how it was invoked, as well as a list of outputs in the form of dictionaries. @@ -455,7 +469,7 @@ Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private D
-
client.chat(...) -> AsyncHttpResponse[NonStreamedChatResponse]
+client.chat(...) -> NonStreamedChatResponse
@@ -483,39 +497,17 @@ To learn how to use the Chat API and RAG follow our [Text Generation guides](htt
```python
-from cohere import Client, Tool, ToolParameterDefinitionsValue
+from cohere import Client
+from cohere.environment import ClientEnvironment

 client = Client(
-    client_name="YOUR_CLIENT_NAME",
-    token="YOUR_TOKEN",
+    token="",
+    environment=ClientEnvironment.PRODUCTION,
 )
-client.chat(
+
+client.chat(
     model="command-a-03-2025",
-    message="Can you provide a sales summary for 29th September 2023, and also give me some details about the products in the 'Electronics' category, for example their prices and stock levels?",
-    tools=[
-        Tool(
-            name="query_daily_sales_report",
-            description="Connects to a database to retrieve overall sales volumes and sales information for a given day.",
-            parameter_definitions={
-                "day": ToolParameterDefinitionsValue(
-                    description="Retrieves sales data for this day, formatted as YYYY-MM-DD.",
-                    type="str",
-                    required=True,
-                )
-            },
-        ),
-        Tool(
-            name="query_product_catalog",
-            description="Connects to a a product catalog with information about all the products being sold, including categories, prices, and stock levels.",
-            parameter_definitions={
-                "category": ToolParameterDefinitionsValue(
-                    description="Retrieves product information data for all products in this category.",
-                    type="str",
-                    required=True,
-                )
-            },
-        ),
-    ],
+    message="Tell me about LLMs",
 )

```
@@ -544,7 +536,23 @@ Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private D
-**accepts:** `typing.Optional[typing.Literal["text/event-stream"]]` — Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events. +**stream:** `typing.Literal` + +Defaults to `false`. + +When `true`, the response will be a JSON stream of events. The final event will contain the complete response, and will have an `event_type` of `"stream-end"`. + +Streaming is beneficial for user interfaces that render the contents of the response piece by piece, as it gets generated. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+
+**accepts:** `typing.Optional[typing.Literal["text/event-stream"]]` — Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.
@@ -578,7 +586,7 @@ Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private D
-**chat_history:** `typing.Optional[typing.Sequence[Message]]` +**chat_history:** `typing.Optional[typing.List[Message]]` A list of previous messages between the user and the model, giving the model conversational context for responding to the user's `message`. @@ -630,7 +638,7 @@ Compatible Deployments:
-**connectors:** `typing.Optional[typing.Sequence[ChatConnector]]` +**connectors:** `typing.Optional[typing.List[ChatConnector]]` Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom [connector](https://docs.cohere.com/docs/connectors), if you've [created](https://docs.cohere.com/v1/docs/creating-and-deploying-a-connector) one. @@ -658,7 +666,7 @@ Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private D
-**documents:** `typing.Optional[typing.Sequence[ChatDocument]]` +**documents:** `typing.Optional[typing.List[ChatDocument]]` A list of relevant documents that the model can cite to generate a more accurate reply. Each document is a string-string dictionary. @@ -784,7 +792,7 @@ Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private D
-**stop_sequences:** `typing.Optional[typing.Sequence[str]]` +**stop_sequences:** `typing.Optional[typing.List[str]]` A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence. @@ -837,7 +845,7 @@ Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private D
-**tools:** `typing.Optional[typing.Sequence[Tool]]` +**tools:** `typing.Optional[typing.List[Tool]]` A list of available tools (functions) that the model may suggest invoking before producing a text response. @@ -851,7 +859,7 @@ Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private D
-**tool_results:** `typing.Optional[typing.Sequence[ToolResult]]` +**tool_results:** `typing.Optional[typing.List[ToolResult]]` A list of results from invoking tools recommended by the model in the previous chat turn. Results are used to produce a text response and will be referenced in citations. When using `tool_results`, `tools` must be passed as well. Each tool_result contains information about how it was invoked, as well as a list of outputs in the form of dictionaries. @@ -930,9 +938,7 @@ Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private D
-
client.generate_stream(...) -> typing.AsyncIterator[
-    AsyncHttpResponse[typing.AsyncIterator[GenerateStreamedResponse]]
-]
+client.generate_stream(...) -> typing.Iterator[bytes]
@@ -963,16 +969,16 @@ Generates realistic text conditioned on a given input. ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) -response = client.generate_stream( + +client.generate_stream( prompt="Please explain to me how LLMs work", ) -for chunk in response.data: - yield chunk ```
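As with `chat_stream`, the regenerated snippet builds the stream without draining it. A sketch that also inspects the `is_finished`/`finish_reason` fields documented in this section (treating each streamed item as a parsed event object is an assumption; the raw signature above says `bytes`):

```python
from cohere import Client
from cohere.environment import ClientEnvironment

client = Client(token="", environment=ClientEnvironment.PRODUCTION)  # placeholder token

for event in client.generate_stream(prompt="Please explain to me how LLMs work"):
    if getattr(event, "is_finished", False):
        # COMPLETE, MAX_TOKENS, ERROR or ERROR_TOXIC, per the list below.
        print(event.finish_reason)
```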
@@ -999,6 +1005,22 @@ Note: The prompt will be pre-processed and modified before reaching the model.
+**stream:** `typing.Literal` + +When `true`, the response will be a JSON stream of events. Streaming is beneficial for user interfaces that render the contents of the response piece by piece, as it gets generated. + +The final event will contain the complete response, and will contain an `is_finished` field set to `true`. The event will also contain a `finish_reason`, which can be one of the following: +- `COMPLETE` - the model sent back a finished reply +- `MAX_TOKENS` - the reply was cut off because the model reached the maximum number of tokens for its context length +- `ERROR` - something went wrong when generating the reply +- `ERROR_TOXIC` - the model generated a reply that was deemed toxic + +
+
+ +
+
+ **model:** `typing.Optional[str]` The identifier of the model to generate with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). @@ -1082,7 +1104,7 @@ When a preset is specified, the `prompt` parameter becomes optional, and any inc
-**end_sequences:** `typing.Optional[typing.Sequence[str]]` — The generated text will be cut at the beginning of the earliest occurrence of an end sequence. The sequence will be excluded from the text. +**end_sequences:** `typing.Optional[typing.List[str]]` — The generated text will be cut at the beginning of the earliest occurrence of an end sequence. The sequence will be excluded from the text.
@@ -1090,7 +1112,7 @@ When a preset is specified, the `prompt` parameter becomes optional, and any inc
-**stop_sequences:** `typing.Optional[typing.Sequence[str]]` — The generated text will be cut at the end of the earliest occurrence of a stop sequence. The sequence will be included the text.
+**stop_sequences:** `typing.Optional[typing.List[str]]` — The generated text will be cut at the end of the earliest occurrence of a stop sequence. The sequence will be included in the text.
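The asymmetry between the two parameters is easy to miss: an `end_sequences` match is cut out of the returned text, while a `stop_sequences` match is kept as its tail. A hedged sketch (prompt and sequences are illustrative only):

```python
from cohere import Client
from cohere.environment import ClientEnvironment

client = Client(token="", environment=ClientEnvironment.PRODUCTION)  # placeholder token

# "STOP" is cut out of the returned text entirely...
client.generate(prompt="Count to ten, then say STOP.", end_sequences=["STOP"])

# ...while here the returned text ends with "STOP" included.
client.generate(prompt="Count to ten, then say STOP.", stop_sequences=["STOP"])
```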
@@ -1180,7 +1202,7 @@ WARNING: `ALL` is deprecated, and will be removed in a future release.
-
client.generate(...) -> AsyncHttpResponse[Generation]
+client.generate(...) -> Generation
@@ -1211,12 +1233,14 @@ Generates realistic text conditioned on a given input.

 ```python
 from cohere import Client
+from cohere.environment import ClientEnvironment

 client = Client(
-    client_name="YOUR_CLIENT_NAME",
-    token="YOUR_TOKEN",
+    token="",
+    environment=ClientEnvironment.PRODUCTION,
 )
-client.generate(
+
+client.generate(
     prompt="Please explain to me how LLMs work",
 )

@@ -1245,6 +1269,22 @@ Note: The prompt will be pre-processed and modified before reaching the model.
+**stream:** `typing.Literal` + +When `true`, the response will be a JSON stream of events. Streaming is beneficial for user interfaces that render the contents of the response piece by piece, as it gets generated. + +The final event will contain the complete response, and will contain an `is_finished` field set to `true`. The event will also contain a `finish_reason`, which can be one of the following: +- `COMPLETE` - the model sent back a finished reply +- `MAX_TOKENS` - the reply was cut off because the model reached the maximum number of tokens for its context length +- `ERROR` - something went wrong when generating the reply +- `ERROR_TOXIC` - the model generated a reply that was deemed toxic + +
+
+ +
+
+ **model:** `typing.Optional[str]` The identifier of the model to generate with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). @@ -1328,7 +1368,7 @@ When a preset is specified, the `prompt` parameter becomes optional, and any inc
-**end_sequences:** `typing.Optional[typing.Sequence[str]]` — The generated text will be cut at the beginning of the earliest occurrence of an end sequence. The sequence will be excluded from the text. +**end_sequences:** `typing.Optional[typing.List[str]]` — The generated text will be cut at the beginning of the earliest occurrence of an end sequence. The sequence will be excluded from the text.
@@ -1336,7 +1376,7 @@ When a preset is specified, the `prompt` parameter becomes optional, and any inc
-**stop_sequences:** `typing.Optional[typing.Sequence[str]]` — The generated text will be cut at the end of the earliest occurrence of a stop sequence. The sequence will be included the text.
+**stop_sequences:** `typing.Optional[typing.List[str]]` — The generated text will be cut at the end of the earliest occurrence of a stop sequence. The sequence will be included in the text.
@@ -1426,7 +1466,7 @@ WARNING: `ALL` is deprecated, and will be removed in a future release.
-
client.embed(...) -> AsyncHttpResponse[EmbedResponse]
+client.embed(...) -> EmbedResponse
@@ -1458,18 +1498,20 @@ If you want to learn more how to use the embedding model, have a look at the [Se ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.embed( - model="embed-v4.0", - input_type="image", - embedding_types=["float"], - images=[ - "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD//gAfQ29tcHJlc3NlZCBieSBqcGVnLXJlY29tcHJlc3P/2wCEAAQEBAQEBAQEBAQGBgUGBggHBwcHCAwJCQkJCQwTDA4MDA4MExEUEA8QFBEeFxUVFx4iHRsdIiolJSo0MjRERFwBBAQEBAQEBAQEBAYGBQYGCAcHBwcIDAkJCQkJDBMMDgwMDgwTERQQDxAUER4XFRUXHiIdGx0iKiUlKjQyNEREXP/CABEIAZABkAMBIgACEQEDEQH/xAAdAAEAAQQDAQAAAAAAAAAAAAAABwEFBggCAwQJ/9oACAEBAAAAAN/gAAAAAAAAAAAAAAAAAAAAAAAAAAHTg9j6agAAp23/ADjsAAAPFrlAUYeagAAArdZ12uzcAAKax6jWUAAAAO/bna+oAC1aBxAAAAAAbM7rVABYvnRgYAAAAAbwbIABw+cMYAAAAAAvH1CuwA091RAAAAAAbpbPAGJfMXzAAAAAAJk+hdQGlmsQAAAAABk31JqBx+V1iAAAAAALp9W6gRp826AAAAAAGS/UqoGuGjwAAAAAAl76I1A1K1EAAAAAAG5G1ADUHU0AAAAAAu/1Cu4DVbTgAAAAAA3n2JAIG0IAAAAAArt3toAMV+XfEAAAAAL1uzPlQBT5qR2AAAAAenZDbm/AAa06SgAAAAerYra/LQADp+YmIAAAAC77J7Q5KAACIPnjwAAAAzbZzY24gAAGq+m4AAA7Zo2cmaoAAANWdOOAAAMl2N2TysAAAApEOj2HgAOyYtl5w5jw4zZPJyuGQ5H2AAAdes+suDUAVyfYbZTLajG8HxjgD153n3IAABH8QxxiVo4XPKpGlyTKjowvCbUAF4mD3AAACgqCzYPiPQAA900XAACmN4favRk+a9wB0xdiNAAAvU1cgAxeDcUoPdL0s1B44atQAACSs8AEewD0gM72I5jjDFiAAAPfO1QGL6z9IAlGdRgkaAAABMmRANZsSADls7k6kFW8AAAJIz4DHtW6AAk+d1jhUAAAGdyWBFcGgAX/AGnYZFgAAAM4k4CF4hAA9u3FcKi4AAAEiSEBCsRgAe3biuGxWAAACXsoAiKFgALttgs0J0AAAHpnvkBhOt4AGebE1pBtsAAAGeySA4an2wAGwEjGFxaAAAe+c+wAjKBgAyfZ3kUh3HAAAO6Yb+AKQLGgBctmb2HXDNjAAD1yzkQAENRF1gyvYG9AcI2wjgAByyuSveAAWWMcQtnoyOQs8qAPFhVh8HADt999y65gAAKKgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAf/8QAGgEBAAMBAQEAAAAAAAAAAAAAAAEFBgIEA//aAAgBAhAAAAAAAAAAAAABEAAJkBEAAB0CIAABMhyAAA6EQAAA6EQAABMiIAAAmREAAAmQiAABMgOQAEyAHIATIACIBMu7H3fT419eACEnps7DoPFQch889Wd3V2TeWIBV0o+eF8I0OrXVoAIyvBm8uDe2Wp6ADO+Mw9WDV6rSgAzvjMNWA1Op1AARlvmZbOA3NnpfSAK6iHnwfnFttZ9Wh7AeXPcB5cxWd3Wk7Pvb+uR8q+rgAAAAAAAAP//EABsBAQABBQEAAAAAAAAAAAAAAAAEAQIDBQYH/9oACAEDEAAAAAAAAAC20AL6gCNDxAArnn3gpro4AAv2l4QIgAAJWwGLVAAAX7cQYYAAFdyNZgAAAy7UazAAABsZI18UAAE6YEfWgACRNygavCACsmZkALNZjAMkqVcAC2FFoKyJWe+fMyYoMAAUw2L8t0jYzqhE0dAzd70eHj+PK7mcAa7UDN7VvBwXmDb7EAU5uw9C9KCnh2n6WoAaKIey9ODy/jN+ADRRD2fpQeY8P0QAU5zGel+gg8V53oc4AgaYTfcJ45Tx5I31wCPobQ2PpPRYuP8APMZm2kqoxQddQAAAAAAAAP/EAFMQAAEDAgIDCQkMBwUIAwAAAAECAwQFEQAGBzFREhMhMEBBYXGBCBQYIjJCRlDSFSBSVGJygpGTobHREDRDc6LBwiMzU3CyFiQlNVVkdISSlLP/2gAIAQEAAT8A/wAo74nVaBAb32bNYitfDfcS2PrURiZpU0dwVFMjN1OVY8O8u7//APkFYc076LmfSVSvmQpB/ox4QGjH/r7v/wBGR7OPCA0YH0ge7IMj2ceEBowPpA92QZHs48IDRgfSB7sgyPZx4QGjA+kD3ZBkezjwgNGB9IHuyDI9nHhAaMD6QPdkGR7OPCA0YH0ge7IMj2ceEBowPpA92QZHs48IDRgfSB7sgyPZx4QGjA+kD3ZBkezjwgNGB9IHuyDI9nHhAaMD6QPdkGR7OPCA0YH0ge7IMj2ceEBowPpA92QZHs48IDRgfSB7sgyPZx4QGjA+kD3ZBkezjwgNGB9IHuyDI9nHhAaMD6QPdkGR7OPCA0Y89fd7IMj2cN6e9GDpCTmRaOuFI9nEDSlo9qakpj5upoJNgH3d4+50JxGlxpbSH4r7bzSvJW0sLSeop5NWsw0fL8RU2rVGPDjJ4C6+4EAnYnaegYzV3StDhFcfK1LdqDuoSZBLDHWlPlqxXtNmkOulaVVxcFg3/sYA73A+kLrxKnTJrpfmSXX3jrcdWVqPWVYudvJ7nbil16s0R7vikVSVDduCVR3lNk9e5IvjKfdG5rpKmo+Yo7NXi8ALlgxJH0kiysZL0l5Uzsz/AMFn2l7m7kJ8BuSj6PnAbU8ieeZitOPPuoQ22krWtZCUpSkXJJOoDGkHui4MBT1MyW2ibITdJnuA97o/dJ1uHFczFXMyzV1Gu1N+bJV57yr7kbEjUkdA5dGlSYb7UqJIcZfaUFtuNLKFoUNRSocIONF3dBb6tih58eSCQEM1PUOqT7eELS4lK0KCkkAgg3BB4/M2Z6NlKlSKtWJiI8VoWueFS1nUhA85ZxpJ0v13Pj7kNorg0NC7tw0K4XNi3yPKPRqHqLQnpkeoD8XKmZZJVSHCG4klw/qijqQs/wCF/pwDfjc1ZqpOUKNLrVXf3qMyLJSLFbrh8ltA51qxn7P9az9V1z6istxWypMSIhRLbCD+Kj5yvUYJHCMdz7pLXWoByfWJBXUILV4bizw
vRk+Z0qa4yoTodKgyZ859DEWO0t11xZslCEC5UrGlHSNOz/XVvBa26RFKkQY+xHO4v5a/UtArU3LlZptbpzm4lQ30ut7DbWk9ChwHGXq5EzHQ6ZWoCv8AdpsdDyRrIKtaFdKTwHi+6I0hrffGRKU/ZloodqSkngW5rQz1I1n1P3M2ZzJpFYyvIXdUJ0SowP8AhP8AAtI6AvitIWbWclZVqlbWElxpvcRmz+0kOcDaf5nEyXJnypM2Y8p2Q+6t11xRupa1m6lHpJ9T6B6uaVpHo7alEMz0PQnepxN0/wASRgauJ7pTNZmVynZTjuXZpzYkSRtkPDgB6UI9UZMlrgZsy1MQqxZqkRy/QHRfA4iZIaiRX5D6ghpptTi1bEIFycZmrL2YcwVitvk7ubLdfsfNClcCewcHqiiX91qbbX3yz/rGBxGmKse4ujnMz6F2dfjiGj/2VBs/ccE3J9UZOirm5ry3EQm5eqkRu3Qp0YHEd01PLGUqPT0mxk1QLV0oZaPteqdBtKNV0kUIkXah77Md6mkcH8RGBq4jupH7JyXG/wDPcP1tj1T3MuWVMQK5mt9FjJWmDGO1tHjuHqJ4nupEnvrJa+beZ4/jR6ooNGnZhrFOotNa3yXMeS02OvWo9CRwk4ytQIeWKDS6HC/V4TCWgq1itWtSz0rPCeJ7qKNenZSl2/upEtonpcShXqcC+NA+jFeW4H+1NbYKatOaswysWMaOrbscc4rujaYZuj/vzccMCpR3yehwFn+r1MAVGwGNDOhVbK4ubc4xLLFnYMB1PCNjrw/BHF58opzDk7MlHSndOSID28ja6gbtH3jChZRHqShZerOZag1S6JT3pcpzUhsahtUTwJTtJxow0G0vKRYreYS1PrIAUhNrx4yvkA+WsfCONXFnGlTLZytnqvU5KLRlvmTG2Fl/xwB0J1eookOXPkNRYUZ1991W5baaQVrWdiUi5JxkbudKzVCzOzg+abE196NWXKWOnWlvGW8p0DKMEU6g01qKzwFe5F1uEDynFnhUeO7pTJ5n0aBmyK3d+mneJVtZjOnxVfQX6ghwZtRktQ4EV6RJcNkNMoK1qOwJTcnGTe5yr9V3qXmuSKXFNj3uizkpY/0oxlbIOVslRt6oVKaZdIst9XjyHPnOK4ezkFVgw6vAmU2ewHYsllbDiFaloWNyoYz1lKZknMtRoEu6gyvdMO8zrC/IXy2j0Cs5glpg0WmyJkk+YwgrIG1WwdJxk7uap75amZyqQit6zChkLe6lueSnGWcl5ayjGEegUliKCAFuAbp5z57irqPI9NOjVOdqB31T2x7tU5KlxNryNa2CenWnDra2XFtOoUhaFFKkqFiCOAgg8qyro7zdnJwCh0Z5xi9lSVje46etarA22DGUe5spEPe5ebqgue78Ui3aj9Sl+WvFIodHoMREGj02PDjJ1NMNhAJ2m2s8m07aIHJi5WdMsxSZFiuoxG08LoGt9sDz/hjGrkzLD0hxDLDSluLISlKQSpRPMAMZU0C54zFvcidHTR4Sv2k24dI+SyPG+u2MqaBskZc3qRLimrzEftZoBaB+S0PFw0y2y2hppCUIQAEpSAAAOYAauU6XtBJmuycy5LjASVXcl05sWDu1bGxe1GHWnGXFtOoUhxCilSVAghSTYgg6iOR5eyfmXNT/AHvQKNJmKBspTaLNo+es2SntOMq9zNIc3uTm+sBoazEgWWvtdWLDGWchZTyk2E0KiR4zlrKkEbt9XW4u6uW6SNDNAzwHZ7BTTq3YkSm0XS7sS+ka/na8ZuyJmbJMwxK9T1NJJs1IR47D3S2vj2mXXlobabUtaiAlKRcknUAMZV0F56zJvT8iEKVCVY77PuhZHyWvLxlTuesl0Te3qqlysy08JMnxI4PQ0n+onEWDFhMNxokdphhsWQ20gIQkbEpFgPeyqnBg/rMhCCBfc3ur6hw4lZ1hNbpMdlbpGokhKT+OHs7zVf3EdpHzgVfzGDnGqnnbHUkYGcqqOZo/OT+VsMZ5eBG/w0K2lJKPaxDzfTJBCXFLZUTbxk3+q2GJTEhAcYdQtB1KSoEckqdLp1ThvQqnEZkxXU7lbLyAtCusKxnPubKVNU9NyhOMB03Pekm7kfsXwqRjM+jfOWUVLNZochEcapLY31gj56LgduLHZxNjjL+TM0ZpcDdCokuWL2LiEWaSflOKskYyt3M8t0tSM31hLCNZiwbLc7XVCwxljR9lHKDaRQ6Kww6BZUlQ32Qr6a7nAAHvFLSkEqUAAMT81UyGClDm/r2N6u1WKhm2oywpDKt4bPMjX/8ALC3HHCVLWSSbm+338adLhuB2O+tChzg4pOdOFDVRRbm31A/EflhiQ1IbS6y4laFaik3HJCkKBBAII4RjMOibIOYCtc/LkZD6tb0W8Zy+0luwVisdzDRX925RMyS4uxMtlD46gUFGKj3NWdY11wajSpbf71bS/qUnErQTpPjXIy2Xk7WZLCv68L0R6R2/KylO+ikK/A4Tom0jL1ZRqHa3bEXQjpPlkBGVXkDa48yj8V4p/c358lEGW/TIaOcOSCtfYG0qxSO5gp6AldczQ+9tbhsBr+NwqxRNDWjygFDjGXmpL4N99nEyVH6K/FGGmGY7SGm20oQgAJSkAJAHMAPeyJ8WEjfJD6EX1XP4DWTioZ1ZRdEBndnmWvgT2DE6tVCoE98SFFPMgGyR2DBN+E8XSq3MpToUyu7ZIK0HUcUmsRapGK46wlfBuknWnk5AOsY3I2YsNmLAagPf1HMFNp+6S68FOD9mjhV+QxUM5THrohJDKNutWHpL8halvOqWo6yokk8fT58inSESI6ylST2EbDtGKRU49VitvtkJI8tOsg7OOJA1nFSzhQKaVIkT21OA23DV3Fdu51Yk6VICCREpzznS4pKPw3WDpXk34KOgD9+fZwxpWB4JNIIG1D1/xTinaSMvylJDy3YyjwDfUXH1pviFPhTGw/FkNuoOpbagofdxU2fHhMqekOBDadus4q+bJcwqahkssfxnrOFKKjckk8iodWcpUxDySS2rgcTfWMMPtvstvNKCkLSFJI5weMzFm6mZfQUvL32UQCiOg+N1q2DFbzlWa2paXHyzGOplolKbfKOtWLnb72FUp9NeD8GU4y4OdBtfr2jGW9JTbqm4tdQlCr2D6fIPzxzYadbdQhxpYUlQBBBuCD7+pVKPTIq5D6uAcCUjWpWwYqtWlVV9Tr6yE6kIHkpHJcl1cqS5TXjfc+O3f7xxedc6IoqTAgEKnqHCdYZB5ztVsGH5D0p5x+Q6px1ZKlKUbknico5zk0J5EWWtTtPWeFOstdKejaMR5TMxhuQw4lbTiQpKkm4UD7151thtbriwlCElSidQAxXaw7VZalXsyglLadg/M8mpstcKbHko1oWDbb0duGXEOtIcQbpUkKB2g8Tm3MSMv0xbySDJduhhB+FtPQMSJD0p5yRIcK3XFFSlK1kni9HealU+UijzFjvZ5X9iVHyHDzdSve5yqqm2kU5pViuynCNnMOUZVld80lgKsVNEtns4QPqPEKNgTjOdbVWq0+
tC7xmCWmRzWTrV2njEqUhQUkkEG4Ixk6ue7dFjPuuXeau08Plp5+0cP6VrS22pSiAACSdgGKpMXPnSJK/PWSBsHMOzlGRX/EmsW8koWOs3B4jONTNNoNQkIUUr3ve27awpzxb4PCTxujGpKYqkinKV4klvdJ+e3+nMkjvakS1DWtIb7FcB+7BNyTyjI67S5CDzsqP1EcRpUkqRTqfFBtvr6l9iE2/nx2V5XeeYKS9/3CEdizuD+OEm4/RnVak0+OhJtd256gm38+U5JTeY+rYyofeniNKyjv8AR0c24f8AxTx1NJTUYKhrD7Z/iGEeSP0Z63Pe8Xc6hur9dxynI7JtNeOqyAO0m/EaVv1mj/Mf/FPHU7/mEL98j8cI8gfozq2pdOZWnmdseopJ5TlKIWKShZFi8tSz2eL/AC4jSsx/Y0qR8FbqD9IA8dQmFSK1S2UjypTQ7N0L4SLJ/RmOOJVIloSk+Ijdjb4nCcEWJB5PDjrlSWWGxdS1hI7TiHHRGjsso8htCUDqSLcRpDppl5ckLABXHUl8DYBwH7jx2juAZeYmXyk7iM2t07L23I/HA/QtIWkpULggjFXgqp8+RHINkrO5O0axyfJlLK3l1F1Pit3S3cecRr7BxMqM3IjusOpCkOoKVjakixGKzTXaTU5cB4HdNOEAnzk6we0cbo3o5g0hU91FnZhCh+7T5PvM6UjfWkTmE3W0LObSnmPZyanQHqjKajMjhUeE2uANpxAhNQYzTDabNtpsOk85PXxWkjLJmRk1mGjdPR0WdA85rb9HjMqUByv1Rtgg97N2W+vYjZ1qww02y2htCQlCEhKUjUAPeLQlxCkLAUlQsQdRBxmKiOUqWopSox1m6FHht0HkjDDsl1DLKCpajYAYoFFRSYw3dlSF8K1bPkji1JCgUkXBxnjJTlJecqVOZvCWbrQn9kT/AEniqVSplYmNQoTRW4s9iRzqUeYDGXaBFoFPbiMC6/KdctYrVt/Ie+qECNMjKjyE7oLHaOkYrVEkUl8hQKmVE7hY1HkUOFInPoYjtla1bMUDLzNKb3xyy5KvKXzDoTxrjaHEKQ4gKSoWIIuCDzYzTo5WlTk2ggEG6lxr6vmH+WHmXWHFtPNqQ4k2UlQIIOwg+/y/lCq19xKm2yzFv4z7g8X6I844oOXoFBiiPDb4TYuOny1kbTxEmOxKaVHebS4hXlA4rWTpEdSnqfdxu5JR5w6tuFtONKKXEFJBsQeOShSzZIvilZTnTShySCwyfhDxj1DFPpcSmtBuM0B8JR4VK6zyCr5apFaQROiJWsCwdT4qx1KGKloseG7XSp4UnmQ+LfxJxJyLmaMoj3OU4n4TakqwrLVfSbGjy/sV4ZyhmN/yKRI+kncf6rYhaM64+QZa2YyOk7tQ7E4o+jyiU0h2SgzHhzu+R2I/PCEIbASgAJAsAOLqFFp84HvphKlkCyhwK4OnZiXkcElUKV9Fz2hh/KdZataPuwfOSoEYXQqog2MJ49Taj/LHuNVPiEj7Jf5Y9xqp8QkfZL/LHuNVPiEj7Jf5Y9xqp8QkfZL/ACx7jVT4hI+yX+WPcaqfEJH2S/yx7jVT4hI+yX+WEUCquaoTw+chQ/EYYyjWHQSpgN9K1C33XOIuR0+VMlfRbH8ziFRKdTwksRkhY89XjK+/VyWwxYf5ef/EADgRAAIBAgMDCQUHBQAAAAAAAAECAwQRAAUgMUFhEhMhIjBAUXGREDJQU6EGFDNCYoGSUnKiwdH/2gAIAQIBAT8A+L37e/wE9zHfj3k90Gk90Gk9ztqPcbd3t3e3b2129qRySGyIScRZY56ZXtwGFoKZfyX8zj7rT/JX0w+X0zbFKngcTZdLHdozyx9cbOg9pbFtENJPNYqlh4nEOWxJYykufQYVFQWRQBw1VVGk4LKAJPHxwysjFWFiNUsscKGSVwqjecVOfgErSxX/AFNhs5r2P4oHkoxHndchHKZXHFf+YpM7gnISYc0/+J0KpYhVFycUtCkQDygM/huHZZjThl59R1l97iNMsqQxvLIbKoucV1dLWykkkRg9VdOUZmyOtLO10PQhO4+Hty6mCrz7jpPu+XZsoZSp2EEYkQxyOh/KSNGf1JAipVO3rNq2EHGW1P3mkikJ6w6reYxGpd0QbyBhVCqFGwC3aV4tUycbHRnLFq+UeAUfTX9nmJhqE3BwfUYoxeqi8+1ryDVPwA0ZwCMwm4hT9Nf2eB5qobcWUfTFM3Inib9Q7QkAEnYMSvzkrv4knRn8BEkVQB0Ecg+Y15RTmCij5Qsz9c/v7KWYTQo28dDefZ5hUBI+aU9Z9vAaamnSqheF9jD0OKmmlpZWilFiNh3Eacqy9quUSSLaFDc8T4YAt7KWpNPJfap94YR1kUOhuD2NTVJTr4vuGHdpHZ3NydVVSQVaciZfIjaMVOR1URJhtKvocNSVSmzU8gP9pxHQVkhASnf9xbFJkJuHq2Fv6F/2cIiRoqIoVQLADRBUSwG6Ho3g7DiLMYX6Huh9RgTwtslT1GOdi+YnqMc7F8xP5DHOxfMT+Qxz0XzE9Rh6ymTbKD5dOJsyY3WFbcThmZiWYkk7z8W//8QAOREAAgECAgYHBwMDBQAAAAAAAQIDAAQFERITICExkQYwQVFSYXEQFCJAQlOBMlChI4KSYnJzsbL/2gAIAQMBAT8A/YCyjiwFa2PxjnWtj8Y51rY/GOda2PxjnWtj8Y51rY/GOda2PxjnWtj8Y51rY/GOda2PxjnWtj8YoMp4EHq5LlV3LvNPNI/FuXW5kcDUdw6cd4pJFkGanbJABJqacvmq7l+RR2Rgy0jiRQw2rmXM6CncOPydq+T6B4HZmfQjJ7eA+UQ6LqfMbN229V/Pyg4j1GzcnOVvlIV0pFH52bgZSt8pbRaC6TcTs3YycHvHyQBJAFQ2+WTyfgbVymlHmOI+Rjt3fe3wio4kj4Df39RNGY38jw60AscgMzSWrHe5yFJEkfBd/f1UiLIpU1JG0ZyPVJE7/pWktRxc/gUqKgyVQOtZVcZMMxUlqw3pvHdRBU5EEbIBO4CktpG3t8IpLeNOzM+fsSN5DkikmosPY75Wy8hS2duv0Z+te7wfaXlT2Nu3BSvoalsJE3xnTH81vG49UVVtzAGjbRH6cq90TxGvdE8RoW0Q7M6Cqu5VA9kVrNLvC5DvNRWEa75CWPIUqqgyVQB5bVzarMCy7n7++mUoxVhkRtW9tPdypBbRNJI3BVFYf0FdlWTErnQP24uP5JqLojgUYyNqznvZ2q46GYLKDq0khPejk/8ArOsU6HX1irTWre8xDeQBk4/FHduPtALEKozJq3skjAaQaT/wOqv4NJdco3jj6bNtby3c8VtAulJIwVRWCYJb4PbKqqGnYDWSdpPcPLZ6V9HEmikxOxjAlQaUqL9Q7x5+2xgCrrmG8/p9OrIDAg8CKkTQd07iRsdBcPV3ucSkX9H9KP1O8naIBBBG410gsBh2K3MCDKNjrE/2tSLpuqDtIFKAqhRwA6y9GVw/mAdjohEEwK2I4u0jH/Lb6exgXljL2tEwP9pq0GdzF69bf
HO4fyAGx0ScPgVpl9JkB/yO309cG6w9O0ROeZq3bQnib/UOsJyBJqV9ZI7952Ogl8DDdYezfEra1B5HcdvpTfC+xicoc44QIl/t4/z7LaUTRK3bwPr1d9PoJqlPxN/A2cOvpsNvIbyA/Eh3jvHaDWHYjbYnapdWzgg/qHap7js9JseTDLZreBwbuVSAB9AP1GiSSSeJ9ltcGB8/pPEUjq6hlOYPU3FykC97dgp3aRi7HMnaw3FbzCptdaSZeJDvVh5isO6aYdcqq3gNvJ25705ikxXDJAGS/gI/5FqfHMIt10pb+H0DBjyGdYr03XRaLCojnw1sg/6FTTSzyPNNIXkc5szHMnYhuJIDmh3doPCo7+F9z5oaE0R4SrzrWR/cXnWsj+4vOtZH9xeYrWx/cXmKe6gTjID6b6lxAnMQrl5mmYsSzEkn92//2Q==" + texts=[ + "hello", + "goodbye" ], + model="embed-v4.0", + input_type="classification", ) ``` @@ -1486,7 +1528,7 @@ client.embed(
-**texts:** `typing.Optional[typing.Sequence[str]]` — An array of strings for the model to embed. Maximum number of texts per call is `96`. +**texts:** `typing.Optional[typing.List[str]]` — An array of strings for the model to embed. Maximum number of texts per call is `96`.
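Because `texts` is capped at 96 strings per request, embedding a larger corpus means batching calls. A sketch; reading float embeddings off the response as `.embeddings` is an assumption about the default response shape:

```python
from cohere import Client
from cohere.environment import ClientEnvironment

client = Client(token="", environment=ClientEnvironment.PRODUCTION)  # placeholder token

texts = [f"document {i}" for i in range(250)]  # illustrative corpus
embeddings = []
for start in range(0, len(texts), 96):  # stay under the 96-text cap
    response = client.embed(
        texts=texts[start : start + 96],
        model="embed-v4.0",
        input_type="classification",
    )
    embeddings.extend(response.embeddings)
```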
@@ -1494,7 +1536,7 @@ client.embed(
-**images:** `typing.Optional[typing.Sequence[str]]` +**images:** `typing.Optional[typing.List[str]]` An array of image data URIs for the model to embed. Maximum number of images per call is `1`. @@ -1524,7 +1566,7 @@ Images are only supported with Embed v3.0 and newer models.
-**embedding_types:** `typing.Optional[typing.Sequence[EmbeddingType]]` +**embedding_types:** `typing.Optional[typing.List[EmbeddingType]]` Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types. @@ -1566,7 +1608,7 @@ If `NONE` is selected, when the input exceeds the maximum input token length an
-
client.rerank(...) -> AsyncHttpResponse[RerankResponse]
+client.rerank(...) -> RerankResponse
@@ -1594,11 +1636,13 @@ This endpoint takes in a query and a list of texts and produces an ordered array ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.rerank( documents=[ { @@ -1615,7 +1659,7 @@ client.rerank( }, { "text": "Capital punishment has existed in the United States since beforethe United States was a country. As of 2017, capital punishment is legal in 30 of the 50 states." - }, + } ], query="What is the capital of the United States?", top_n=3, @@ -1644,7 +1688,7 @@ client.rerank(
-**documents:** `typing.Sequence[RerankRequestDocumentsItem]` +**documents:** `typing.List[RerankRequestDocumentsItem]` A list of document objects or strings to rerank. If a document is provided the text fields is required and all other fields will be preserved in the response. @@ -1675,7 +1719,7 @@ We recommend a maximum of 1,000 documents for optimal endpoint performance.
-**rank_fields:** `typing.Optional[typing.Sequence[str]]` — If a JSON object is provided, you can specify which keys you would like to have considered for reranking. The model will rerank based on order of the fields passed in (i.e. rank_fields=['title','author','text'] will rerank using the values in title, author, text sequentially. If the length of title, author, and text exceeds the context length of the model, the chunking will not re-consider earlier fields). If not provided, the model will use the default text field for ranking. +**rank_fields:** `typing.Optional[typing.List[str]]` — If a JSON object is provided, you can specify which keys you would like to have considered for reranking. The model will rerank based on order of the fields passed in (i.e. rank_fields=['title','author','text'] will rerank using the values in title, author, text sequentially. If the length of title, author, and text exceeds the context length of the model, the chunking will not re-consider earlier fields). If not provided, the model will use the default text field for ranking.
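A sketch of the ordering semantics described above: with `rank_fields=["title", "text"]`, each document's `title` is considered before its `text` when the model scores it (document contents here are illustrative):

```python
from cohere import Client
from cohere.environment import ClientEnvironment

client = Client(token="", environment=ClientEnvironment.PRODUCTION)  # placeholder token

client.rerank(
    query="What is the capital of the United States?",
    documents=[
        {"title": "US Capital", "text": "Washington, D.C. is the capital of the United States."},
        {"title": "State Capitals", "text": "Each US state has its own capital city."},
    ],
    rank_fields=["title", "text"],  # title is ranked on first, then text
    top_n=1,
)
```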
@@ -1714,7 +1758,7 @@ We recommend a maximum of 1,000 documents for optimal endpoint performance.
-
client.classify(...) -> AsyncHttpResponse[ClassifyResponse]
+client.classify(...) -> ClassifyResponse
@@ -1742,20 +1786,22 @@ Note: [Fine-tuned models](https://docs.cohere.com/docs/classify-fine-tuning) tra
```python -from cohere import ClassifyExample, Client +from cohere import Client, ClassifyExample +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.classify( examples=[ ClassifyExample( - text="Dermatologists don't like her!", + text="Dermatologists don\'t like her!", label="Spam", ), ClassifyExample( - text="'Hello, open to this?'", + text="\'Hello, open to this?\'", label="Spam", ), ClassifyExample( @@ -1783,15 +1829,18 @@ client.classify( label="Not spam", ), ClassifyExample( - text="'Re: Follow up from today's meeting'", + text="\'Re: Follow up from today\'s meeting\'", label="Not spam", ), ClassifyExample( text="Pre-read for tomorrow", label="Not spam", - ), + ) + ], + inputs=[ + "Confirm your email address", + "hey i need u to send some $" ], - inputs=["Confirm your email address", "hey i need u to send some $"], model="YOUR-FINE-TUNED-MODEL-ID", ) @@ -1809,7 +1858,7 @@ client.classify(
-**inputs:** `typing.Sequence[str]` +**inputs:** `typing.List[str]` A list of up to 96 texts to be classified. Each one must be a non-empty string. There is, however, no consistent, universal limit to the length a particular input can be. We perform classification on the first `x` tokens of each input, and `x` varies depending on which underlying model is powering classification. The maximum token length for each model is listed in the "max tokens" column [here](https://docs.cohere.com/docs/models). @@ -1821,7 +1870,7 @@ Note: by default the `truncate` parameter is set to `END`, so tokens exceeding t
-**examples:** `typing.Optional[typing.Sequence[ClassifyExample]]` +**examples:** `typing.Optional[typing.List[ClassifyExample]]` An array of examples to provide context to the model. Each example is a text string and its associated label/class. Each unique label requires at least 2 examples associated with it; the maximum number of examples is 2500, and each example has a maximum length of 512 tokens. The values should be structured as `{text: "...",label: "..."}`. Note: [Fine-tuned Models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly. @@ -1872,7 +1921,7 @@ If `NONE` is selected, when the input exceeds the maximum input token length an
-
client.summarize(...) -> AsyncHttpResponse[SummarizeResponse]
+client.summarize(...) -> SummarizeResponse
@@ -1903,13 +1952,15 @@ Generates a summary in English for a given text.

 ```python
 from cohere import Client
+from cohere.environment import ClientEnvironment

 client = Client(
-    client_name="YOUR_CLIENT_NAME",
-    token="YOUR_TOKEN",
+    token="",
+    environment=ClientEnvironment.PRODUCTION,
 )
+
 client.summarize(
-    text='Ice cream is a sweetened frozen food typically eaten as a snack or dessert. It may be made from milk or cream and is flavoured with a sweetener, either sugar or an alternative, and a spice, such as cocoa or vanilla, or with fruit such as strawberries or peaches. It can also be made by whisking a flavored cream base and liquid nitrogen together. Food coloring is sometimes added, in addition to stabilizers. The mixture is cooled below the freezing point of water and stirred to incorporate air spaces and to prevent detectable ice crystals from forming. The result is a smooth, semi-solid foam that is solid at very low temperatures (below 2 °C or 35 °F). It becomes more malleable as its temperature increases.\n\nThe meaning of the name "ice cream" varies from one country to another. In some countries, such as the United States, "ice cream" applies only to a specific variety, and most governments regulate the commercial use of the various terms according to the relative quantities of the main ingredients, notably the amount of cream. Products that do not meet the criteria to be called ice cream are sometimes labelled "frozen dairy dessert" instead. In other countries, such as Italy and Argentina, one word is used fo\r all variants. Analogues made from dairy alternatives, such as goat\'s or sheep\'s milk, or milk substitutes (e.g., soy, cashew, coconut, almond milk or tofu), are available for those who are lactose intolerant, allergic to dairy protein or vegan.',
+    text="Ice cream is a sweetened frozen food typically eaten as a snack or dessert. It may be made from milk or cream and is flavoured with a sweetener, either sugar or an alternative, and a spice, such as cocoa or vanilla, or with fruit such as strawberries or peaches. It can also be made by whisking a flavored cream base and liquid nitrogen together. Food coloring is sometimes added, in addition to stabilizers. The mixture is cooled below the freezing point of water and stirred to incorporate air spaces and to prevent detectable ice crystals from forming. The result is a smooth, semi-solid foam that is solid at very low temperatures (below 2 °C or 35 °F). It becomes more malleable as its temperature increases.\n\nThe meaning of the name \"ice cream\" varies from one country to another. In some countries, such as the United States, \"ice cream\" applies only to a specific variety, and most governments regulate the commercial use of the various terms according to the relative quantities of the main ingredients, notably the amount of cream. Products that do not meet the criteria to be called ice cream are sometimes labelled \"frozen dairy dessert\" instead. In other countries, such as Italy and Argentina, one word is used for all variants. Analogues made from dairy alternatives, such as goat\'s or sheep\'s milk, or milk substitutes (e.g., soy, cashew, coconut, almond milk or tofu), are available for those who are lactose intolerant, allergic to dairy protein or vegan.",
 )

```
@@ -1994,7 +2045,7 @@ client.summarize(
-
client.tokenize(...) -> AsyncHttpResponse[TokenizeResponse]
+client.tokenize(...) -> TokenizeResponse
@@ -2022,11 +2073,13 @@ This endpoint splits input text into smaller units called tokens using byte-pair ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.tokenize( text="tokenize me! :D", model="command", @@ -2074,7 +2127,7 @@ client.tokenize(
-
client.detokenize(...) -> AsyncHttpResponse[DetokenizeResponse]
+client.detokenize(...) -> DetokenizeResponse
@@ -2102,13 +2155,22 @@ This endpoint takes tokens using byte-pair encoding and returns their text repre ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.detokenize( - tokens=[10002, 2261, 2012, 8, 2792, 43], + tokens=[ + 10002, + 2261, + 2012, + 8, + 2792, + 43 + ], model="command", ) @@ -2126,7 +2188,7 @@ client.detokenize(
-**tokens:** `typing.Sequence[int]` — The list of tokens to be detokenized. +**tokens:** `typing.List[int]` — The list of tokens to be detokenized.
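Since `detokenize` is the inverse of `tokenize`, a round trip should recover the original text. A sketch, assuming the responses expose `.tokens` and `.text` fields as their names suggest:

```python
from cohere import Client
from cohere.environment import ClientEnvironment

client = Client(token="", environment=ClientEnvironment.PRODUCTION)  # placeholder token

tokens = client.tokenize(text="tokenize me! :D", model="command").tokens
text = client.detokenize(tokens=tokens, model="command").text
print(text)  # expected to round-trip back to "tokenize me! :D"
```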
@@ -2154,7 +2216,7 @@ client.detokenize(
-
client.check_api_key() -> AsyncHttpResponse[CheckApiKeyResponse]
+client.check_api_key() -> CheckApiKeyResponse
@@ -2182,11 +2244,13 @@ Checks that the api key in the Authorization header is valid and active ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.check_api_key() ``` @@ -2216,9 +2280,7 @@ client.check_api_key()
## V2 -
client.v2.chat_stream(...) -> typing.AsyncIterator[
-    AsyncHttpResponse[typing.AsyncIterator[V2ChatStreamResponse]]
-]
+client.v2.chat_stream(...) -> typing.Iterator[bytes]
@@ -2247,38 +2309,23 @@ Follow the [Migration Guide](https://docs.cohere.com/v2/docs/migrating-v1-to-v2)
```python -from cohere import ( - Client, - ImageUrl, - ImageUrlContent, - TextContent, - UserChatMessageV2, -) +from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) -response = client.v2.chat_stream( - model="command-a-vision-07-2025", + +client.v2.chat_stream( + model="command-a-03-2025", messages=[ - UserChatMessageV2( - content=[ - TextContent( - text="Describe this image", - ), - ImageUrlContent( - image_url=ImageUrl( - url="https://cohere.com/favicon-32x32.png", - detail="auto", - ), - ), - ], - ) + { + "role": "user", + "content": "Tell me about LLMs" + } ], ) -for chunk in response.data: - yield chunk ```
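As in v1, the regenerated v2 snippet above no longer iterates the stream; given the `typing.Iterator[bytes]` signature in the heading, consuming it looks like:

```python
from cohere import Client
from cohere.environment import ClientEnvironment

client = Client(token="", environment=ClientEnvironment.PRODUCTION)  # placeholder token

for chunk in client.v2.chat_stream(
    model="command-a-03-2025",
    messages=[{"role": "user", "content": "Tell me about LLMs"}],
):
    print(chunk)  # SSE events arrive incrementally
```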
@@ -2294,6 +2341,20 @@ for chunk in response.data:
+**stream:** `typing.Literal` + +Defaults to `false`. + +When `true`, the response will be a SSE stream of events. + +Streaming is beneficial for user interfaces that render the contents of the response piece by piece, as it gets generated. + +
+
+ +
+
+ **model:** `str` — The name of a compatible [Cohere model](https://docs.cohere.com/v2/docs/models).
@@ -2310,7 +2371,7 @@ for chunk in response.data:
-**tools:** `typing.Optional[typing.Sequence[ToolV2]]` +**tools:** `typing.Optional[typing.List[ToolV2]]` A list of tools (functions) available to the model. The model response may contain 'tool_calls' to the specified tools. @@ -2334,7 +2395,7 @@ When set to `true`, tool calls in the Assistant message will be forced to follow
-**documents:** `typing.Optional[typing.Sequence[V2ChatStreamRequestDocumentsItem]]` — A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata. +**documents:** `typing.Optional[typing.List[V2ChatStreamRequestDocumentsItem]]` — A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata.
@@ -2389,7 +2450,7 @@ The maximum number of output tokens the model will generate in the response. If
-**stop_sequences:** `typing.Optional[typing.Sequence[str]]` — A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence. +**stop_sequences:** `typing.Optional[typing.List[str]]` — A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
@@ -2518,7 +2579,7 @@ If tool_choice isn't specified, then the model is free to choose whether to use
-
client.v2.chat(...) -> AsyncHttpResponse[V2ChatResponse]
+client.v2.chat(...) -> V2ChatResponse
@@ -2547,34 +2608,21 @@ Follow the [Migration Guide](https://docs.cohere.com/v2/docs/migrating-v1-to-v2)
```python
-from cohere import (
-    Client,
-    ImageUrl,
-    ImageUrlContent,
-    TextContent,
-    UserChatMessageV2,
-)
+from cohere import Client
+from cohere.environment import ClientEnvironment

 client = Client(
-    client_name="YOUR_CLIENT_NAME",
-    token="YOUR_TOKEN",
+    token="",
+    environment=ClientEnvironment.PRODUCTION,
 )
-client.v2.chat(
-    model="command-a-vision-07-2025",
+
+client.v2.chat(
+    model="command-a-03-2025",
     messages=[
-        UserChatMessageV2(
-            content=[
-                TextContent(
-                    text="Describe this image",
-                ),
-                ImageUrlContent(
-                    image_url=ImageUrl(
-                        url="https://cohere.com/favicon-32x32.png",
-                        detail="auto",
-                    ),
-                ),
-            ],
-        )
+        {
+            "role": "user",
+            "content": "Tell me about LLMs"
+        }
     ],
 )

@@ -2592,6 +2640,20 @@ client.v2.chat(
+**stream:** `typing.Literal` + +Defaults to `false`. + +When `true`, the response will be a SSE stream of events. + +Streaming is beneficial for user interfaces that render the contents of the response piece by piece, as it gets generated. + +
+
+ +
+
+ **model:** `str` — The name of a compatible [Cohere model](https://docs.cohere.com/v2/docs/models).
@@ -2608,7 +2670,7 @@ client.v2.chat(
-**tools:** `typing.Optional[typing.Sequence[ToolV2]]` +**tools:** `typing.Optional[typing.List[ToolV2]]` A list of tools (functions) available to the model. The model response may contain 'tool_calls' to the specified tools. @@ -2632,7 +2694,7 @@ When set to `true`, tool calls in the Assistant message will be forced to follow
-**documents:** `typing.Optional[typing.Sequence[V2ChatRequestDocumentsItem]]` — A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata. +**documents:** `typing.Optional[typing.List[V2ChatRequestDocumentsItem]]` — A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata.
@@ -2687,7 +2749,7 @@ The maximum number of output tokens the model will generate in the response. If
-**stop_sequences:** `typing.Optional[typing.Sequence[str]]` — A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence. +**stop_sequences:** `typing.Optional[typing.List[str]]` — A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
@@ -2816,7 +2878,7 @@ If tool_choice isn't specified, then the model is free to choose whether to use
-
client.v2.embed(...) -> AsyncHttpResponse[EmbedByTypeResponse]
+client.v2.embed(...) -> EmbedByTypeResponse
@@ -2848,17 +2910,22 @@ If you want to learn more how to use the embedding model, have a look at the [Se ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.v2.embed( + texts=[ + "hello", + "goodbye" + ], model="embed-v4.0", - input_type="image", - embedding_types=["float"], - images=[ - "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD//gAfQ29tcHJlc3NlZCBieSBqcGVnLXJlY29tcHJlc3P/2wCEAAQEBAQEBAQEBAQGBgUGBggHBwcHCAwJCQkJCQwTDA4MDA4MExEUEA8QFBEeFxUVFx4iHRsdIiolJSo0MjRERFwBBAQEBAQEBAQEBAYGBQYGCAcHBwcIDAkJCQkJDBMMDgwMDgwTERQQDxAUER4XFRUXHiIdGx0iKiUlKjQyNEREXP/CABEIAZABkAMBIgACEQEDEQH/xAAdAAEAAQQDAQAAAAAAAAAAAAAABwEFBggCAwQJ/9oACAEBAAAAAN/gAAAAAAAAAAAAAAAAAAAAAAAAAAHTg9j6agAAp23/ADjsAAAPFrlAUYeagAAArdZ12uzcAAKax6jWUAAAAO/bna+oAC1aBxAAAAAAbM7rVABYvnRgYAAAAAbwbIABw+cMYAAAAAAvH1CuwA091RAAAAAAbpbPAGJfMXzAAAAAAJk+hdQGlmsQAAAAABk31JqBx+V1iAAAAAALp9W6gRp826AAAAAAGS/UqoGuGjwAAAAAAl76I1A1K1EAAAAAAG5G1ADUHU0AAAAAAu/1Cu4DVbTgAAAAAA3n2JAIG0IAAAAAArt3toAMV+XfEAAAAAL1uzPlQBT5qR2AAAAAenZDbm/AAa06SgAAAAerYra/LQADp+YmIAAAAC77J7Q5KAACIPnjwAAAAzbZzY24gAAGq+m4AAA7Zo2cmaoAAANWdOOAAAMl2N2TysAAAApEOj2HgAOyYtl5w5jw4zZPJyuGQ5H2AAAdes+suDUAVyfYbZTLajG8HxjgD153n3IAABH8QxxiVo4XPKpGlyTKjowvCbUAF4mD3AAACgqCzYPiPQAA900XAACmN4favRk+a9wB0xdiNAAAvU1cgAxeDcUoPdL0s1B44atQAACSs8AEewD0gM72I5jjDFiAAAPfO1QGL6z9IAlGdRgkaAAABMmRANZsSADls7k6kFW8AAAJIz4DHtW6AAk+d1jhUAAAGdyWBFcGgAX/AGnYZFgAAAM4k4CF4hAA9u3FcKi4AAAEiSEBCsRgAe3biuGxWAAACXsoAiKFgALttgs0J0AAAHpnvkBhOt4AGebE1pBtsAAAGeySA4an2wAGwEjGFxaAAAe+c+wAjKBgAyfZ3kUh3HAAAO6Yb+AKQLGgBctmb2HXDNjAAD1yzkQAENRF1gyvYG9AcI2wjgAByyuSveAAWWMcQtnoyOQs8qAPFhVh8HADt999y65gAAKKgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAf/8QAGgEBAAMBAQEAAAAAAAAAAAAAAAEFBgIEA//aAAgBAhAAAAAAAAAAAAABEAAJkBEAAB0CIAABMhyAAA6EQAAA6EQAABMiIAAAmREAAAmQiAABMgOQAEyAHIATIACIBMu7H3fT419eACEnps7DoPFQch889Wd3V2TeWIBV0o+eF8I0OrXVoAIyvBm8uDe2Wp6ADO+Mw9WDV6rSgAzvjMNWA1Op1AARlvmZbOA3NnpfSAK6iHnwfnFttZ9Wh7AeXPcB5cxWd3Wk7Pvb+uR8q+rgAAAAAAAAP//EABsBAQABBQEAAAAAAAAAAAAAAAAEAQIDBQYH/9oACAEDEAAAAAAAAAC20AL6gCNDxAArnn3gpro4AAv2l4QIgAAJWwGLVAAAX7cQYYAAFdyNZgAAAy7UazAAABsZI18UAAE6YEfWgACRNygavCACsmZkALNZjAMkqVcAC2FFoKyJWe+fMyYoMAAUw2L8t0jYzqhE0dAzd70eHj+PK7mcAa7UDN7VvBwXmDb7EAU5uw9C9KCnh2n6WoAaKIey9ODy/jN+ADRRD2fpQeY8P0QAU5zGel+gg8V53oc4AgaYTfcJ45Tx5I31wCPobQ2PpPRYuP8APMZm2kqoxQddQAAAAAAAAP/EAFMQAAEDAgIDCQkMBwUIAwAAAAECAwQFEQAGBzFREhMhMEBBYXGBCBQYIjJCRlDSFSBSVGJygpGTobHREDRDc6LBwiMzU3CyFiQlNVVkdISSlLP/2gAIAQEAAT8A/wAo74nVaBAb32bNYitfDfcS2PrURiZpU0dwVFMjN1OVY8O8u7//APkFYc076LmfSVSvmQpB/ox4QGjH/r7v/wBGR7OPCA0YH0ge7IMj2ceEBowPpA92QZHs48IDRgfSB7sgyPZx4QGjA+kD3ZBkezjwgNGB9IHuyDI9nHhAaMD6QPdkGR7OPCA0YH0ge7IMj2ceEBowPpA92QZHs48IDRgfSB7sgyPZx4QGjA+kD3ZBkezjwgNGB9IHuyDI9nHhAaMD6QPdkGR7OPCA0YH0ge7IMj2ceEBowPpA92QZHs48IDRgfSB7sgyPZx4QGjA+kD3ZBkezjwgNGB9IHuyDI9nHhAaMD6QPdkGR7OPCA0Y89fd7IMj2cN6e9GDpCTmRaOuFI9nEDSlo9qakpj5upoJNgH3d4+50JxGlxpbSH4r7bzSvJW0sLSeop5NWsw0fL8RU2rVGPDjJ4C6+4EAnYnaegYzV3StDhFcfK1LdqDuoSZBLDHWlPlqxXtNmkOulaVVxcFg3/sYA73A+kLrxKnTJrpfmSXX3jrcdWVqPWVYudvJ7nbil16s0R7vikVSVDduCVR3lNk9e5IvjKfdG5rpKmo+Yo7NXi8ALlgxJH0kiysZL0l5Uzsz/AMFn2l7m7kJ8BuSj6PnAbU8ieeZitOPPuoQ22krWtZCUpSkXJJOoDGkHui4MBT1MyW2ibITdJnuA97o/dJ1uHFczFXMyzV1Gu1N+bJV57yr7kbEjUkdA5dGlSYb7UqJIcZfaUFtuNLKFoUNRSocIONF3dBb6tih58eSCQEM1PUOqT7eELS4lK0KCkkAgg3BB4/M2Z6NlKlSKtWJiI8VoWueFS1nUhA85ZxpJ0v13Pj7kNorg0NC7tw0K4XNi3yPKPRqHqLQnpkeoD8XKmZZJVSHCG4klw/qijqQs/wCF/pwDfjc1ZqpOUKNLrVXf3qMyLJSLFbrh8ltA51qxn7P9az9V1z6istxWypMSIhRL
bCD+Kj5yvUYJHCMdz7pLXWoByfWJBXUILV4bizwvRk+Z0qa4yoTodKgyZ859DEWO0t11xZslCEC5UrGlHSNOz/XVvBa26RFKkQY+xHO4v5a/UtArU3LlZptbpzm4lQ30ut7DbWk9ChwHGXq5EzHQ6ZWoCv8AdpsdDyRrIKtaFdKTwHi+6I0hrffGRKU/ZloodqSkngW5rQz1I1n1P3M2ZzJpFYyvIXdUJ0SowP8AhP8AAtI6AvitIWbWclZVqlbWElxpvcRmz+0kOcDaf5nEyXJnypM2Y8p2Q+6t11xRupa1m6lHpJ9T6B6uaVpHo7alEMz0PQnepxN0/wASRgauJ7pTNZmVynZTjuXZpzYkSRtkPDgB6UI9UZMlrgZsy1MQqxZqkRy/QHRfA4iZIaiRX5D6ghpptTi1bEIFycZmrL2YcwVitvk7ubLdfsfNClcCewcHqiiX91qbbX3yz/rGBxGmKse4ujnMz6F2dfjiGj/2VBs/ccE3J9UZOirm5ry3EQm5eqkRu3Qp0YHEd01PLGUqPT0mxk1QLV0oZaPteqdBtKNV0kUIkXah77Md6mkcH8RGBq4jupH7JyXG/wDPcP1tj1T3MuWVMQK5mt9FjJWmDGO1tHjuHqJ4nupEnvrJa+beZ4/jR6ooNGnZhrFOotNa3yXMeS02OvWo9CRwk4ytQIeWKDS6HC/V4TCWgq1itWtSz0rPCeJ7qKNenZSl2/upEtonpcShXqcC+NA+jFeW4H+1NbYKatOaswysWMaOrbscc4rujaYZuj/vzccMCpR3yehwFn+r1MAVGwGNDOhVbK4ubc4xLLFnYMB1PCNjrw/BHF58opzDk7MlHSndOSID28ja6gbtH3jChZRHqShZerOZag1S6JT3pcpzUhsahtUTwJTtJxow0G0vKRYreYS1PrIAUhNrx4yvkA+WsfCONXFnGlTLZytnqvU5KLRlvmTG2Fl/xwB0J1eookOXPkNRYUZ1991W5baaQVrWdiUi5JxkbudKzVCzOzg+abE196NWXKWOnWlvGW8p0DKMEU6g01qKzwFe5F1uEDynFnhUeO7pTJ5n0aBmyK3d+mneJVtZjOnxVfQX6ghwZtRktQ4EV6RJcNkNMoK1qOwJTcnGTe5yr9V3qXmuSKXFNj3uizkpY/0oxlbIOVslRt6oVKaZdIst9XjyHPnOK4ezkFVgw6vAmU2ewHYsllbDiFaloWNyoYz1lKZknMtRoEu6gyvdMO8zrC/IXy2j0Cs5glpg0WmyJkk+YwgrIG1WwdJxk7uap75amZyqQit6zChkLe6lueSnGWcl5ayjGEegUliKCAFuAbp5z57irqPI9NOjVOdqB31T2x7tU5KlxNryNa2CenWnDra2XFtOoUhaFFKkqFiCOAgg8qyro7zdnJwCh0Z5xi9lSVje46etarA22DGUe5spEPe5ebqgue78Ui3aj9Sl+WvFIodHoMREGj02PDjJ1NMNhAJ2m2s8m07aIHJi5WdMsxSZFiuoxG08LoGt9sDz/hjGrkzLD0hxDLDSluLISlKQSpRPMAMZU0C54zFvcidHTR4Sv2k24dI+SyPG+u2MqaBskZc3qRLimrzEftZoBaB+S0PFw0y2y2hppCUIQAEpSAAAOYAauU6XtBJmuycy5LjASVXcl05sWDu1bGxe1GHWnGXFtOoUhxCilSVAghSTYgg6iOR5eyfmXNT/AHvQKNJmKBspTaLNo+es2SntOMq9zNIc3uTm+sBoazEgWWvtdWLDGWchZTyk2E0KiR4zlrKkEbt9XW4u6uW6SNDNAzwHZ7BTTq3YkSm0XS7sS+ka/na8ZuyJmbJMwxK9T1NJJs1IR47D3S2vj2mXXlobabUtaiAlKRcknUAMZV0F56zJvT8iEKVCVY77PuhZHyWvLxlTuesl0Te3qqlysy08JMnxI4PQ0n+onEWDFhMNxokdphhsWQ20gIQkbEpFgPeyqnBg/rMhCCBfc3ur6hw4lZ1hNbpMdlbpGokhKT+OHs7zVf3EdpHzgVfzGDnGqnnbHUkYGcqqOZo/OT+VsMZ5eBG/w0K2lJKPaxDzfTJBCXFLZUTbxk3+q2GJTEhAcYdQtB1KSoEckqdLp1ThvQqnEZkxXU7lbLyAtCusKxnPubKVNU9NyhOMB03Pekm7kfsXwqRjM+jfOWUVLNZochEcapLY31gj56LgduLHZxNjjL+TM0ZpcDdCokuWL2LiEWaSflOKskYyt3M8t0tSM31hLCNZiwbLc7XVCwxljR9lHKDaRQ6Kww6BZUlQ32Qr6a7nAAHvFLSkEqUAAMT81UyGClDm/r2N6u1WKhm2oywpDKt4bPMjX/8ALC3HHCVLWSSbm+338adLhuB2O+tChzg4pOdOFDVRRbm31A/EflhiQ1IbS6y4laFaik3HJCkKBBAII4RjMOibIOYCtc/LkZD6tb0W8Zy+0luwVisdzDRX925RMyS4uxMtlD46gUFGKj3NWdY11wajSpbf71bS/qUnErQTpPjXIy2Xk7WZLCv68L0R6R2/KylO+ikK/A4Tom0jL1ZRqHa3bEXQjpPlkBGVXkDa48yj8V4p/c358lEGW/TIaOcOSCtfYG0qxSO5gp6AldczQ+9tbhsBr+NwqxRNDWjygFDjGXmpL4N99nEyVH6K/FGGmGY7SGm20oQgAJSkAJAHMAPeyJ8WEjfJD6EX1XP4DWTioZ1ZRdEBndnmWvgT2DE6tVCoE98SFFPMgGyR2DBN+E8XSq3MpToUyu7ZIK0HUcUmsRapGK46wlfBuknWnk5AOsY3I2YsNmLAagPf1HMFNp+6S68FOD9mjhV+QxUM5THrohJDKNutWHpL8halvOqWo6yokk8fT58inSESI6ylST2EbDtGKRU49VitvtkJI8tOsg7OOJA1nFSzhQKaVIkT21OA23DV3Fdu51Yk6VICCREpzznS4pKPw3WDpXk34KOgD9+fZwxpWB4JNIIG1D1/xTinaSMvylJDy3YyjwDfUXH1pviFPhTGw/FkNuoOpbagofdxU2fHhMqekOBDadus4q+bJcwqahkssfxnrOFKKjckk8iodWcpUxDySS2rgcTfWMMPtvstvNKCkLSFJI5weMzFm6mZfQUvL32UQCiOg+N1q2DFbzlWa2paXHyzGOplolKbfKOtWLnb72FUp9NeD8GU4y4OdBtfr2jGW9JTbqm4tdQlCr2D6fIPzxzYadbdQhxpYUlQBBBuCD7+pVKPTIq5D6uAcCUjWpWwYqtWlVV9Tr6yE6kIHkpHJcl1cqS5TXjfc+O3f7xxedc6IoqTAgEKnqHCdYZB5ztVsGH5D0p5x+Q6px1ZKlKUbknico5zk0J5EWWtTtPWeFOstdKejaMR5TMxhuQw4lbTiQpKkm4UD7151thtbriwlCElSidQAxXaw7VZalXsyglLadg/M8mpstcKbHko1oWDbb0duGXEOtIcQbpUkKB2g8Tm3MSMv0xbySDJduhhB+FtPQMSJD0p5yRIcK3XFFSlK1kni9HealU+UijzFjvZ5X9iVHyHDzdSve5yqqm2kU5pViuynCN
nMOUZVld80lgKsVNEtns4QPqPEKNgTjOdbVWq0+tC7xmCWmRzWTrV2njEqUhQUkkEG4Ixk6ue7dFjPuuXeau08Plp5+0cP6VrS22pSiAACSdgGKpMXPnSJK/PWSBsHMOzlGRX/EmsW8koWOs3B4jONTNNoNQkIUUr3ve27awpzxb4PCTxujGpKYqkinKV4klvdJ+e3+nMkjvakS1DWtIb7FcB+7BNyTyjI67S5CDzsqP1EcRpUkqRTqfFBtvr6l9iE2/nx2V5XeeYKS9/3CEdizuD+OEm4/RnVak0+OhJtd256gm38+U5JTeY+rYyofeniNKyjv8AR0c24f8AxTx1NJTUYKhrD7Z/iGEeSP0Z63Pe8Xc6hur9dxynI7JtNeOqyAO0m/EaVv1mj/Mf/FPHU7/mEL98j8cI8gfozq2pdOZWnmdseopJ5TlKIWKShZFi8tSz2eL/AC4jSsx/Y0qR8FbqD9IA8dQmFSK1S2UjypTQ7N0L4SLJ/RmOOJVIloSk+Ijdjb4nCcEWJB5PDjrlSWWGxdS1hI7TiHHRGjsso8htCUDqSLcRpDppl5ckLABXHUl8DYBwH7jx2juAZeYmXyk7iM2t07L23I/HA/QtIWkpULggjFXgqp8+RHINkrO5O0axyfJlLK3l1F1Pit3S3cecRr7BxMqM3IjusOpCkOoKVjakixGKzTXaTU5cB4HdNOEAnzk6we0cbo3o5g0hU91FnZhCh+7T5PvM6UjfWkTmE3W0LObSnmPZyanQHqjKajMjhUeE2uANpxAhNQYzTDabNtpsOk85PXxWkjLJmRk1mGjdPR0WdA85rb9HjMqUByv1Rtgg97N2W+vYjZ1qww02y2htCQlCEhKUjUAPeLQlxCkLAUlQsQdRBxmKiOUqWopSox1m6FHht0HkjDDsl1DLKCpajYAYoFFRSYw3dlSF8K1bPkji1JCgUkXBxnjJTlJecqVOZvCWbrQn9kT/AEniqVSplYmNQoTRW4s9iRzqUeYDGXaBFoFPbiMC6/KdctYrVt/Ie+qECNMjKjyE7oLHaOkYrVEkUl8hQKmVE7hY1HkUOFInPoYjtla1bMUDLzNKb3xyy5KvKXzDoTxrjaHEKQ4gKSoWIIuCDzYzTo5WlTk2ggEG6lxr6vmH+WHmXWHFtPNqQ4k2UlQIIOwg+/y/lCq19xKm2yzFv4z7g8X6I844oOXoFBiiPDb4TYuOny1kbTxEmOxKaVHebS4hXlA4rWTpEdSnqfdxu5JR5w6tuFtONKKXEFJBsQeOShSzZIvilZTnTShySCwyfhDxj1DFPpcSmtBuM0B8JR4VK6zyCr5apFaQROiJWsCwdT4qx1KGKloseG7XSp4UnmQ+LfxJxJyLmaMoj3OU4n4TakqwrLVfSbGjy/sV4ZyhmN/yKRI+kncf6rYhaM64+QZa2YyOk7tQ7E4o+jyiU0h2SgzHhzu+R2I/PCEIbASgAJAsAOLqFFp84HvphKlkCyhwK4OnZiXkcElUKV9Fz2hh/KdZataPuwfOSoEYXQqog2MJ49Taj/LHuNVPiEj7Jf5Y9xqp8QkfZL/LHuNVPiEj7Jf5Y9xqp8QkfZL/ACx7jVT4hI+yX+WPcaqfEJH2S/yx7jVT4hI+yX+WEUCquaoTw+chQ/EYYyjWHQSpgN9K1C33XOIuR0+VMlfRbH8ziFRKdTwksRkhY89XjK+/VyWwxYf5ef/EADgRAAIBAgMDCQUHBQAAAAAAAAECAwQRAAUgMUFhEhMhIjBAUXGREDJQU6EGFDNCYoGSUnKiwdH/2gAIAQIBAT8A+L37e/wE9zHfj3k90Gk90Gk9ztqPcbd3t3e3b2129qRySGyIScRZY56ZXtwGFoKZfyX8zj7rT/JX0w+X0zbFKngcTZdLHdozyx9cbOg9pbFtENJPNYqlh4nEOWxJYykufQYVFQWRQBw1VVGk4LKAJPHxwysjFWFiNUsscKGSVwqjecVOfgErSxX/AFNhs5r2P4oHkoxHndchHKZXHFf+YpM7gnISYc0/+J0KpYhVFycUtCkQDygM/huHZZjThl59R1l97iNMsqQxvLIbKoucV1dLWykkkRg9VdOUZmyOtLO10PQhO4+Hty6mCrz7jpPu+XZsoZSp2EEYkQxyOh/KSNGf1JAipVO3rNq2EHGW1P3mkikJ6w6reYxGpd0QbyBhVCqFGwC3aV4tUycbHRnLFq+UeAUfTX9nmJhqE3BwfUYoxeqi8+1ryDVPwA0ZwCMwm4hT9Nf2eB5qobcWUfTFM3Inib9Q7QkAEnYMSvzkrv4knRn8BEkVQB0Ecg+Y15RTmCij5Qsz9c/v7KWYTQo28dDefZ5hUBI+aU9Z9vAaamnSqheF9jD0OKmmlpZWilFiNh3Eacqy9quUSSLaFDc8T4YAt7KWpNPJfap94YR1kUOhuD2NTVJTr4vuGHdpHZ3NydVVSQVaciZfIjaMVOR1URJhtKvocNSVSmzU8gP9pxHQVkhASnf9xbFJkJuHq2Fv6F/2cIiRoqIoVQLADRBUSwG6Ho3g7DiLMYX6Huh9RgTwtslT1GOdi+YnqMc7F8xP5DHOxfMT+Qxz0XzE9Rh6ymTbKD5dOJsyY3WFbcThmZiWYkk7z8W//8QAOREAAgECAgYHBwMDBQAAAAAAAQIDAAQFERITICExkQYwQVFSYXEQFCJAQlOBMlChI4KSYnJzsbL/2gAIAQMBAT8A/YCyjiwFa2PxjnWtj8Y51rY/GOda2PxjnWtj8Y51rY/GOda2PxjnWtj8Y51rY/GOda2PxjnWtj8YoMp4EHq5LlV3LvNPNI/FuXW5kcDUdw6cd4pJFkGanbJABJqacvmq7l+RR2Rgy0jiRQw2rmXM6CncOPydq+T6B4HZmfQjJ7eA+UQ6LqfMbN229V/Pyg4j1GzcnOVvlIV0pFH52bgZSt8pbRaC6TcTs3YycHvHyQBJAFQ2+WTyfgbVymlHmOI+Rjt3fe3wio4kj4Df39RNGY38jw60AscgMzSWrHe5yFJEkfBd/f1UiLIpU1JG0ZyPVJE7/pWktRxc/gUqKgyVQOtZVcZMMxUlqw3pvHdRBU5EEbIBO4CktpG3t8IpLeNOzM+fsSN5DkikmosPY75Wy8hS2duv0Z+te7wfaXlT2Nu3BSvoalsJE3xnTH81vG49UVVtzAGjbRH6cq90TxGvdE8RoW0Q7M6Cqu5VA9kVrNLvC5DvNRWEa75CWPIUqqgyVQB5bVzarMCy7n7++mUoxVhkRtW9tPdypBbRNJI3BVFYf0FdlWTErnQP24uP5JqLojgUYyNqznvZ2q46GYLKDq0khPejk/8ArOsU6HX1irTWre8xDeQBk4/FHduPtALEKozJq3skjAaQaT/wOqv4NJdco3jj6bNtby3c8VtAulJIwVRWCYJb4PbKqqGnYDWSdpPcPLZ6V9HEmikxOxjAlQaUqL9Q7x5+2xgCrrmG8/p9OrIDAg8CKkTQd07iRsdBcPV3ucSkX9H9KP1O8naIBBBG410gsBh2K3MCDKNjrE/2tSLpuqDtIFKAqhRwA6y9GVw/mAdjoh
EEwK2I4u0jH/Lb6exgXljL2tEwP9pq0GdzF69bfHO4fyAGx0ScPgVpl9JkB/yO309cG6w9O0ROeZq3bQnib/UOsJyBJqV9ZI7952Ogl8DDdYezfEra1B5HcdvpTfC+xicoc44QIl/t4/z7LaUTRK3bwPr1d9PoJqlPxN/A2cOvpsNvIbyA/Eh3jvHaDWHYjbYnapdWzgg/qHap7js9JseTDLZreBwbuVSAB9AP1GiSSSeJ9ltcGB8/pPEUjq6hlOYPU3FykC97dgp3aRi7HMnaw3FbzCptdaSZeJDvVh5isO6aYdcqq3gNvJ25705ikxXDJAGS/gI/5FqfHMIt10pb+H0DBjyGdYr03XRaLCojnw1sg/6FTTSzyPNNIXkc5szHMnYhuJIDmh3doPCo7+F9z5oaE0R4SrzrWR/cXnWsj+4vOtZH9xeYrWx/cXmKe6gTjID6b6lxAnMQrl5mmYsSzEkn92//2Q==" + input_type="classification", + embedding_types=[ + "float" ], ) @@ -2892,7 +2959,7 @@ client.v2.embed(
-**texts:** `typing.Optional[typing.Sequence[str]]` — An array of strings for the model to embed. Maximum number of texts per call is `96`. +**texts:** `typing.Optional[typing.List[str]]` — An array of strings for the model to embed. Maximum number of texts per call is `96`.
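To make the regenerated signature concrete, here is a minimal sketch of embedding a text batch and reading the vectors back. The `embeddings.float_` attribute is an assumption based on the SDK aliasing the reserved name `float`; everything else mirrors the example above.

```python
from cohere import Client
from cohere.environment import ClientEnvironment

client = Client(
    token="",  # your API key
    environment=ClientEnvironment.PRODUCTION,
)

# Embed up to 96 texts per call and request float vectors.
response = client.v2.embed(
    texts=["hello", "goodbye"],
    model="embed-v4.0",
    input_type="classification",
    embedding_types=["float"],
)

# Assumed accessor: the SDK aliases the JSON `float` field as `float_`
# because `float` is reserved in Python. One vector per input text.
vectors = response.embeddings.float_
print(len(vectors), len(vectors[0]))
```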
@@ -2900,7 +2967,7 @@ client.v2.embed(
-**images:** `typing.Optional[typing.Sequence[str]]` +**images:** `typing.Optional[typing.List[str]]` An array of image data URIs for the model to embed. Maximum number of images per call is `1`. @@ -2914,7 +2981,7 @@ Image embeddings are supported with Embed v3.0 and newer models.
-**inputs:** `typing.Optional[typing.Sequence[EmbedInput]]` — An array of inputs for the model to embed. Maximum number of inputs per call is `96`. An input can contain a mix of text and image components. +**inputs:** `typing.Optional[typing.List[EmbedInput]]` — An array of inputs for the model to embed. Maximum number of inputs per call is `96`. An input can contain a mix of text and image components.
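Since no example in this section exercises `inputs`, here is a hedged sketch of a single mixed text-and-image input. The content-part shape (`{"type": "text"}` / `{"type": "image_url"}`) is an assumption; consult the `EmbedInput` type in the SDK for the exact fields.

```python
from cohere import Client
from cohere.environment import ClientEnvironment

client = Client(
    token="",
    environment=ClientEnvironment.PRODUCTION,
)

# One input combining a caption with an image data URI (base64 elided).
response = client.v2.embed(
    model="embed-v4.0",
    input_type="search_document",
    embedding_types=["float"],
    inputs=[
        {
            "content": [
                {"type": "text", "text": "A short caption for the image"},
                {
                    "type": "image_url",
                    "image_url": {"url": "data:image/jpeg;base64,<BASE64_DATA>"},
                },
            ]
        }
    ],
)
```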
@@ -2941,7 +3008,7 @@ Possible values are `256`, `512`, `1024`, and `1536`. The default is `1536`.
-**embedding_types:** `typing.Optional[typing.Sequence[EmbeddingType]]` +**embedding_types:** `typing.Optional[typing.List[EmbeddingType]]` Specifies the types of embeddings you want to get back. Can be one or more of the following types. @@ -2992,7 +3059,7 @@ If `NONE` is selected, when the input exceeds the maximum input token length an
-
client.v2.rerank(...) -> AsyncHttpResponse[V2RerankResponse] +
client.v2.rerank(...) -> V2RerankResponse
@@ -3020,18 +3087,20 @@ This endpoint takes in a query and a list of texts and produces an ordered array ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.v2.rerank( documents=[ "Carson City is the capital city of the American state of Nevada.", "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean. Its capital is Saipan.", "Capitalization or capitalisation in English grammar is the use of a capital letter at the start of a word. English usage varies from capitalization in other languages.", "Washington, D.C. (also known as simply Washington or D.C., and officially as the District of Columbia) is the capital of the United States. It is a federal district.", - "Capital punishment has existed in the United States since beforethe United States was a country. As of 2017, capital punishment is legal in 30 of the 50 states.", + "Capital punishment has existed in the United States since beforethe United States was a country. As of 2017, capital punishment is legal in 30 of the 50 states." ], query="What is the capital of the United States?", top_n=3, @@ -3068,7 +3137,7 @@ client.v2.rerank(
-**documents:** `typing.Sequence[str]` +**documents:** `typing.List[str]` A list of texts that will be compared to the `query`. For optimal performance we recommend against sending more than 1,000 documents in a single request. @@ -3120,7 +3189,7 @@ For optimal performance we recommend against sending more than 1,000 documents i
## Batches -
client.batches.list(...) -> AsyncHttpResponse[ListBatchesResponse] +
client.batches.list(...) -> ListBatchesResponse
@@ -3148,11 +3217,13 @@ List the batches for the current user ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.batches.list( page_size=1, page_token="page_token", @@ -3220,7 +3291,7 @@ Use `created_at` for creation time or `updated_at` for last updated time.
-
client.batches.create(...) -> AsyncHttpResponse[CreateBatchResponse] +
client.batches.create(...) -> CreateBatchResponse
@@ -3248,12 +3319,14 @@ Creates and executes a batch from an uploaded dataset of requests ```python from cohere import Client +from cohere.environment import ClientEnvironment from cohere.batches import Batch client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.batches.create( request=Batch( name="name", @@ -3296,7 +3369,7 @@ client.batches.create(
-
client.batches.retrieve(...) -> AsyncHttpResponse[GetBatchResponse] +
client.batches.retrieve(...) -> GetBatchResponse
@@ -3324,11 +3397,13 @@ Retrieves a batch ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.batches.retrieve( id="id", ) @@ -3367,7 +3442,7 @@ client.batches.retrieve(
-
client.batches.cancel(...) -> AsyncHttpResponse[CancelBatchResponse] +
client.batches.cancel(...) -> CancelBatchResponse
@@ -3395,11 +3470,13 @@ Cancels an in-progress batch ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.batches.cancel( id="id", ) @@ -3439,7 +3516,7 @@ client.batches.cancel(
## EmbedJobs -
client.embed_jobs.list() -> AsyncHttpResponse[ListEmbedJobResponse] +
client.embed_jobs.list() -> ListEmbedJobResponse
@@ -3467,11 +3544,13 @@ The list embed job endpoint allows users to view all embed jobs history for that ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.embed_jobs.list() ``` @@ -3500,7 +3579,7 @@ client.embed_jobs.list()
-
client.embed_jobs.create(...) -> AsyncHttpResponse[CreateEmbedJobResponse] +
client.embed_jobs.create(...) -> CreateEmbedJobResponse
@@ -3528,11 +3607,13 @@ This API launches an async Embed job for a [Dataset](https://docs.cohere.com/doc ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.embed_jobs.create( model="model", dataset_id="dataset_id", @@ -3594,7 +3675,7 @@ Available models and corresponding embedding dimensions:
-**embedding_types:** `typing.Optional[typing.Sequence[EmbeddingType]]` +**embedding_types:** `typing.Optional[typing.List[EmbeddingType]]` Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types. @@ -3634,7 +3715,7 @@ Passing `START` will discard the start of the input. `END` will discard the end
-
client.embed_jobs.get(...) -> AsyncHttpResponse[EmbedJob] +
client.embed_jobs.get(...) -> EmbedJob
@@ -3662,11 +3743,13 @@ This API retrieves the details about an embed job started by the same user. ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.embed_jobs.get( id="id", ) @@ -3705,7 +3788,7 @@ client.embed_jobs.get(
-
client.embed_jobs.cancel(...) -> AsyncHttpResponse[None] +
client.embed_jobs.cancel(...)
@@ -3733,11 +3816,13 @@ This API allows users to cancel an active embed job. Once invoked, the embedding ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.embed_jobs.cancel( id="id", ) @@ -3777,7 +3862,7 @@ client.embed_jobs.cancel(
## Datasets -
client.datasets.list(...) -> AsyncHttpResponse[DatasetsListResponse] +
client.datasets.list(...) -> DatasetsListResponse
@@ -3804,22 +3889,19 @@ List datasets that have been created.
```python -import datetime - from cohere import Client +from cohere.environment import ClientEnvironment +import datetime client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.datasets.list( dataset_type="datasetType", - before=datetime.datetime.fromisoformat( - "2024-01-15 09:30:00+00:00", - ), - after=datetime.datetime.fromisoformat( - "2024-01-15 09:30:00+00:00", - ), + before=datetime.datetime.fromisoformat("2024-01-15T09:30:00+00:00"), + after=datetime.datetime.fromisoformat("2024-01-15T09:30:00+00:00"), limit=1.1, offset=1.1, validation_status="unknown", @@ -3847,7 +3929,7 @@ client.datasets.list(
-**before:** `typing.Optional[dt.datetime]` — optional filter before a date +**before:** `typing.Optional[datetime.datetime]` — optional filter before a date
@@ -3855,7 +3937,7 @@ client.datasets.list(
-**after:** `typing.Optional[dt.datetime]` — optional filter after a date +**after:** `typing.Optional[datetime.datetime]` — optional filter after a date
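As a sketch of how the two filters combine, the snippet below lists datasets created within the last week. The `embed-input` filter value is illustrative; both bounds take timezone-aware `datetime` objects, matching the regenerated example above.

```python
import datetime

from cohere import Client
from cohere.environment import ClientEnvironment

client = Client(
    token="",
    environment=ClientEnvironment.PRODUCTION,
)

# Filter to datasets created in the last 7 days (illustrative dataset_type).
now = datetime.datetime.now(datetime.timezone.utc)
recent = client.datasets.list(
    dataset_type="embed-input",
    after=now - datetime.timedelta(days=7),
    before=now,
)
```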
@@ -3899,7 +3981,7 @@ client.datasets.list(
-
client.datasets.create(...) -> AsyncHttpResponse[DatasetsCreateResponse] +
client.datasets.create(...) -> DatasetsCreateResponse
@@ -3927,11 +4009,13 @@ Create a dataset by uploading a file. See ['Dataset Creation'](https://docs.cohe ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.datasets.create( name="name", type="embed-input", @@ -3939,6 +4023,8 @@ client.datasets.create( skip_malformed_input=True, text_separator="text_separator", csv_delimiter="csv_delimiter", + data="example_data", + eval_data="example_eval_data", ) ``` @@ -3971,9 +4057,7 @@ client.datasets.create(
-**data:** `from __future__ import annotations - -core.File` — See core.File for more documentation +**data:** `core.File` — The file to upload
@@ -4029,9 +4113,7 @@ core.File` — See core.File for more documentation
-**eval_data:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation +**eval_data:** `typing.Optional[core.File]` — An optional evaluation file to upload
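For the two file parameters, a hedged sketch: `core.File` follows the httpx upload shapes, so an open binary handle or a `(filename, file)` tuple should both be accepted. The JSONL path and dataset name are placeholders.

```python
from cohere import Client
from cohere.environment import ClientEnvironment

client = Client(
    token="",
    environment=ClientEnvironment.PRODUCTION,
)

# Upload a local JSONL file as an embed-input dataset. The tuple form
# (filename, file object) is one of the httpx-style shapes core.File accepts.
with open("embed_inputs.jsonl", "rb") as f:
    dataset = client.datasets.create(
        name="my-embed-dataset",
        type="embed-input",
        data=("embed_inputs.jsonl", f),
    )
```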
@@ -4051,7 +4133,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
client.datasets.get_usage() -> AsyncHttpResponse[DatasetsGetUsageResponse] +
client.datasets.get_usage() -> DatasetsGetUsageResponse
@@ -4079,11 +4161,13 @@ View the dataset storage usage for your Organization. Each Organization can have ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.datasets.get_usage() ``` @@ -4112,7 +4196,7 @@ client.datasets.get_usage()
-
client.datasets.get(...) -> AsyncHttpResponse[DatasetsGetResponse] +
client.datasets.get(...) -> DatasetsGetResponse
@@ -4140,11 +4224,13 @@ Retrieve a dataset by ID. See ['Datasets'](https://docs.cohere.com/docs/datasets ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.datasets.get( id="id", ) @@ -4183,7 +4269,7 @@ client.datasets.get(
-
client.datasets.delete(...) -> AsyncHttpResponse[typing.Dict[str, typing.Any]] +
client.datasets.delete(...) -> typing.Dict[str, typing.Any]
@@ -4211,11 +4297,13 @@ Delete a dataset by ID. Datasets are automatically deleted after 30 days, but th ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.datasets.delete( id="id", ) @@ -4255,7 +4343,7 @@ client.datasets.delete(
## Connectors -
client.connectors.list(...) -> AsyncHttpResponse[ListConnectorsResponse] +
client.connectors.list(...) -> ListConnectorsResponse
@@ -4283,11 +4371,13 @@ Returns a list of connectors ordered by descending creation date (newer first). ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.connectors.list( limit=1.1, offset=1.1, @@ -4335,7 +4425,7 @@ client.connectors.list(
-
client.connectors.create(...) -> AsyncHttpResponse[CreateConnectorResponse] +
client.connectors.create(...) -> CreateConnectorResponse
@@ -4363,11 +4453,13 @@ Creates a new connector. The connector is tested during registration and will ca ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.connectors.create( name="name", url="url", @@ -4411,7 +4503,7 @@ client.connectors.create(
-**excludes:** `typing.Optional[typing.Sequence[str]]` — A list of fields to exclude from the prompt (fields remain in the document). +**excludes:** `typing.Optional[typing.List[str]]` — A list of fields to exclude from the prompt (fields remain in the document).
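To show `excludes` in context, here is a hedged registration sketch; the connector URL and field names are placeholders. Excluded fields are dropped from the prompt but remain on the documents themselves.

```python
from cohere import Client
from cohere.environment import ClientEnvironment

client = Client(
    token="",
    environment=ClientEnvironment.PRODUCTION,
)

# Keep noisy fields out of the prompt while leaving them on the documents.
connector = client.connectors.create(
    name="internal-wiki",
    url="https://example.com/search",  # placeholder search endpoint
    excludes=["raw_html", "permissions"],
)
```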
@@ -4463,7 +4555,7 @@ client.connectors.create(
-
client.connectors.get(...) -> AsyncHttpResponse[GetConnectorResponse] +
client.connectors.get(...) -> GetConnectorResponse
@@ -4491,11 +4583,13 @@ Retrieve a connector by ID. See ['Connectors'](https://docs.cohere.com/docs/conn ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.connectors.get( id="id", ) @@ -4534,7 +4628,7 @@ client.connectors.get(
-
client.connectors.delete(...) -> AsyncHttpResponse[DeleteConnectorResponse] +
client.connectors.delete(...) -> DeleteConnectorResponse
@@ -4562,11 +4656,13 @@ Delete a connector by ID. See ['Connectors'](https://docs.cohere.com/docs/connec ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.connectors.delete( id="id", ) @@ -4605,7 +4701,7 @@ client.connectors.delete(
-
client.connectors.update(...) -> AsyncHttpResponse[UpdateConnectorResponse] +
client.connectors.update(...) -> UpdateConnectorResponse
@@ -4633,11 +4729,13 @@ Update a connector by ID. Omitted fields will not be updated. See ['Managing you ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.connectors.update( id="id", ) @@ -4680,7 +4778,7 @@ client.connectors.update(
-**excludes:** `typing.Optional[typing.Sequence[str]]` — A list of fields to exclude from the prompt (fields remain in the document). +**excludes:** `typing.Optional[typing.List[str]]` — A list of fields to exclude from the prompt (fields remain in the document).
@@ -4732,7 +4830,7 @@ client.connectors.update(
-
client.connectors.o_auth_authorize(...) -> AsyncHttpResponse[OAuthAuthorizeResponse] +
client.connectors.o_auth_authorize(...) -> OAuthAuthorizeResponse
@@ -4760,11 +4858,13 @@ Authorize the connector with the given ID for the connector oauth app. See ['Co ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.connectors.o_auth_authorize( id="id", after_token_redirect="after_token_redirect", @@ -4813,7 +4913,7 @@ client.connectors.o_auth_authorize(
## Models -
client.models.get(...) -> AsyncHttpResponse[GetModelResponse] +
client.models.get(...) -> GetModelResponse
@@ -4841,11 +4941,13 @@ Returns the details of a model, provided its name. ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.models.get( model="command-a-03-2025", ) @@ -4884,7 +4986,7 @@ client.models.get(
-
client.models.list(...) -> AsyncHttpResponse[ListModelsResponse] +
client.models.list(...) -> ListModelsResponse
@@ -4912,11 +5014,13 @@ Returns a list of models available for use. ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.models.list( page_size=1.1, page_token="page_token", @@ -4986,7 +5090,7 @@ Defaults to `20`, min value of `1`, max value of `1000`.
## /finetuning -
client.finetuning.list_finetuned_models(...) -> AsyncHttpResponse[ListFinetunedModelsResponse] +
client.finetuning.list_finetuned_models(...) -> ListFinetunedModelsResponse
@@ -5014,11 +5118,13 @@ Returns a list of fine-tuned models that the user has access to. ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.finetuning.list_finetuned_models( page_size=1, page_token="page_token", @@ -5085,7 +5191,7 @@ Supported sorting fields:
-
client.finetuning.create_finetuned_model(...) -> AsyncHttpResponse[CreateFinetunedModelResponse] +
client.finetuning.create_finetuned_model(...) -> CreateFinetunedModelResponse
@@ -5113,12 +5219,14 @@ Creates a new fine-tuned model. The model will be trained on the dataset specifi ```python from cohere import Client -from cohere.finetuning.finetuning import BaseModel, FinetunedModel, Settings +from cohere.environment import ClientEnvironment +from cohere.finetuning.finetuning import FinetunedModel, Settings, BaseModel client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.finetuning.create_finetuned_model( request=FinetunedModel( name="name", @@ -5165,7 +5273,7 @@ client.finetuning.create_finetuned_model(
-
client.finetuning.get_finetuned_model(...) -> AsyncHttpResponse[GetFinetunedModelResponse] +
client.finetuning.get_finetuned_model(...) -> GetFinetunedModelResponse
@@ -5193,11 +5301,13 @@ Retrieve a fine-tuned model by its ID. ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.finetuning.get_finetuned_model( id="id", ) @@ -5236,7 +5346,7 @@ client.finetuning.get_finetuned_model(
-
client.finetuning.delete_finetuned_model(...) -> AsyncHttpResponse[DeleteFinetunedModelResponse] +
client.finetuning.delete_finetuned_model(...) -> DeleteFinetunedModelResponse
@@ -5265,11 +5375,13 @@ This operation is irreversible. ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.finetuning.delete_finetuned_model( id="id", ) @@ -5308,7 +5420,7 @@ client.finetuning.delete_finetuned_model(
-
client.finetuning.update_finetuned_model(...) -> AsyncHttpResponse[UpdateFinetunedModelResponse] +
client.finetuning.update_finetuned_model(...) -> UpdateFinetunedModelResponse
@@ -5336,12 +5448,14 @@ Updates the fine-tuned model with the given ID. The model will be updated with t ```python from cohere import Client -from cohere.finetuning.finetuning import BaseModel, Settings +from cohere.environment import ClientEnvironment +from cohere.finetuning.finetuning import Settings, BaseModel client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.finetuning.update_finetuned_model( id="id", name="name", @@ -5411,7 +5525,7 @@ client.finetuning.update_finetuned_model(
-
client.finetuning.list_events(...) -> AsyncHttpResponse[ListEventsResponse] +
client.finetuning.list_events(...) -> ListEventsResponse
@@ -5441,11 +5555,13 @@ The list can be paginated using `page_size` and `page_token` parameters. ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.finetuning.list_events( finetuned_model_id="finetuned_model_id", page_size=1, @@ -5521,7 +5637,7 @@ Supported sorting fields:
-
client.finetuning.list_training_step_metrics(...) -> AsyncHttpResponse[ListTrainingStepMetricsResponse] +
client.finetuning.list_training_step_metrics(...) -> ListTrainingStepMetricsResponse
@@ -5551,11 +5667,13 @@ The list can be paginated using `page_size` and `page_token` parameters. ```python from cohere import Client +from cohere.environment import ClientEnvironment client = Client( - client_name="YOUR_CLIENT_NAME", - token="YOUR_TOKEN", + token="", + environment=ClientEnvironment.PRODUCTION, ) + client.finetuning.list_training_step_metrics( finetuned_model_id="finetuned_model_id", page_size=1, @@ -5615,3 +5733,103 @@ Maximum number of results to be returned by the server. If 0, defaults to
+## Audio Transcriptions +
client.audio.transcriptions.create(...) -> AudioTranscriptionsCreateResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Transcribe an audio file. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client +from cohere.environment import ClientEnvironment + +client = Client( + token="", + environment=ClientEnvironment.PRODUCTION, +) + +client.audio.transcriptions.create( + file="example_file", + model="model", + language="language", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**model:** `str` — ID of the model to use. + +
+
+ +
+
+ +**language:** `str` — The language of the input audio, supplied in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes) format. + +
+
+ +
+
+ +**file:** `core.File` — The audio file object to transcribe. Supported file extensions are flac, mp3, mpeg, mpga, ogg, and wav. + +
+
+ +
+
+ +**temperature:** `typing.Optional[float]` — The sampling temperature, between 0 and 1. Higher values like 0.8 make the output more random, while lower values like 0.2 make it more focused and deterministic. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
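The generated snippet above passes a placeholder string for `file`, but the parameter is typed `core.File`, so an open binary handle (or a `(filename, file)` tuple) is the intended shape. A hedged end-to-end sketch, with the model ID and file name as placeholders:

```python
from cohere import Client
from cohere.environment import ClientEnvironment

client = Client(
    token="",
    environment=ClientEnvironment.PRODUCTION,
)

# Transcribe a local mp3; a lower temperature keeps the output more deterministic.
with open("meeting.mp3", "rb") as f:
    transcription = client.audio.transcriptions.create(
        file=f,
        model="model",  # placeholder model ID
        language="en",
        temperature=0.2,
    )

# AudioTranscriptionsCreateResponse exposes the transcribed text directly.
print(transcription.text)
```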
+ diff --git a/src/cohere/__init__.py b/src/cohere/__init__.py index 5a9a38251..fbadbfd07 100644 --- a/src/cohere/__init__.py +++ b/src/cohere/__init__.py @@ -256,7 +256,7 @@ UnauthorizedError, UnprocessableEntityError, ) - from . import batches, connectors, datasets, embed_jobs, finetuning, models, v2 + from . import audio, batches, connectors, datasets, embed_jobs, finetuning, models, v2 from .aliases import ( ChatResponse, ContentDeltaStreamedChatResponseV2, @@ -613,6 +613,7 @@ "V2RerankResponse": ".v2", "V2RerankResponseResultsItem": ".v2", "__version__": ".version", + "audio": ".audio", "batches": ".batches", "connectors": ".connectors", "datasets": ".datasets", @@ -946,6 +947,7 @@ def __dir__(): "V2RerankResponse", "V2RerankResponseResultsItem", "__version__", + "audio", "batches", "connectors", "datasets", diff --git a/src/cohere/audio/__init__.py b/src/cohere/audio/__init__.py new file mode 100644 index 000000000..a05014114 --- /dev/null +++ b/src/cohere/audio/__init__.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from . import transcriptions + from .transcriptions import AudioTranscriptionsCreateResponse +_dynamic_imports: typing.Dict[str, str] = { + "AudioTranscriptionsCreateResponse": ".transcriptions", + "transcriptions": ".transcriptions", +} + + +def __getattr__(attr_name: str) -> typing.Any: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + + +__all__ = ["AudioTranscriptionsCreateResponse", "transcriptions"] diff --git a/src/cohere/audio/client.py b/src/cohere/audio/client.py new file mode 100644 index 000000000..9634c9296 --- /dev/null +++ b/src/cohere/audio/client.py @@ -0,0 +1,63 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from .raw_client import AsyncRawAudioClient, RawAudioClient + +if typing.TYPE_CHECKING: + from .transcriptions.client import AsyncTranscriptionsClient, TranscriptionsClient + + +class AudioClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawAudioClient(client_wrapper=client_wrapper) + self._client_wrapper = client_wrapper + self._transcriptions: typing.Optional[TranscriptionsClient] = None + + @property + def with_raw_response(self) -> RawAudioClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
+ + Returns + ------- + RawAudioClient + """ + return self._raw_client + + @property + def transcriptions(self): + if self._transcriptions is None: + from .transcriptions.client import TranscriptionsClient # noqa: E402 + + self._transcriptions = TranscriptionsClient(client_wrapper=self._client_wrapper) + return self._transcriptions + + +class AsyncAudioClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._raw_client = AsyncRawAudioClient(client_wrapper=client_wrapper) + self._client_wrapper = client_wrapper + self._transcriptions: typing.Optional[AsyncTranscriptionsClient] = None + + @property + def with_raw_response(self) -> AsyncRawAudioClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + AsyncRawAudioClient + """ + return self._raw_client + + @property + def transcriptions(self): + if self._transcriptions is None: + from .transcriptions.client import AsyncTranscriptionsClient # noqa: E402 + + self._transcriptions = AsyncTranscriptionsClient(client_wrapper=self._client_wrapper) + return self._transcriptions diff --git a/src/cohere/audio/raw_client.py b/src/cohere/audio/raw_client.py new file mode 100644 index 000000000..af8a831c2 --- /dev/null +++ b/src/cohere/audio/raw_client.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper + + +class RawAudioClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + +class AsyncRawAudioClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper diff --git a/src/cohere/audio/transcriptions/__init__.py b/src/cohere/audio/transcriptions/__init__.py new file mode 100644 index 000000000..558d96a77 --- /dev/null +++ b/src/cohere/audio/transcriptions/__init__.py @@ -0,0 +1,34 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from .types import AudioTranscriptionsCreateResponse +_dynamic_imports: typing.Dict[str, str] = {"AudioTranscriptionsCreateResponse": ".types"} + + +def __getattr__(attr_name: str) -> typing.Any: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + + +__all__ = ["AudioTranscriptionsCreateResponse"] diff --git a/src/cohere/audio/transcriptions/client.py b/src/cohere/audio/transcriptions/client.py new file mode 100644 index 000000000..1f8ced078 --- /dev/null +++ b/src/cohere/audio/transcriptions/client.py @@ -0,0 +1,156 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ... 
import core +from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ...core.request_options import RequestOptions +from .raw_client import AsyncRawTranscriptionsClient, RawTranscriptionsClient +from .types.audio_transcriptions_create_response import AudioTranscriptionsCreateResponse + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class TranscriptionsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawTranscriptionsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawTranscriptionsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + RawTranscriptionsClient + """ + return self._raw_client + + def create( + self, + *, + model: str, + language: str, + file: core.File, + temperature: typing.Optional[float] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AudioTranscriptionsCreateResponse: + """ + Transcribe an audio file. + + Parameters + ---------- + model : str + ID of the model to use. + + language : str + The language of the input audio, supplied in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes) format. + + file : core.File + See core.File for more documentation + + temperature : typing.Optional[float] + The sampling temperature, between 0 and 1. Higher values like 0.8 make the output more random, while lower values like 0.2 make it more focused and deterministic. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AudioTranscriptionsCreateResponse + A successful response. + + Examples + -------- + from cohere import Client + + client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + client.audio.transcriptions.create( + model="model", + language="language", + ) + """ + _response = self._raw_client.create( + model=model, language=language, file=file, temperature=temperature, request_options=request_options + ) + return _response.data + + +class AsyncTranscriptionsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._raw_client = AsyncRawTranscriptionsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawTranscriptionsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + AsyncRawTranscriptionsClient + """ + return self._raw_client + + async def create( + self, + *, + model: str, + language: str, + file: core.File, + temperature: typing.Optional[float] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AudioTranscriptionsCreateResponse: + """ + Transcribe an audio file. + + Parameters + ---------- + model : str + ID of the model to use. + + language : str + The language of the input audio, supplied in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes) format. + + file : core.File + See core.File for more documentation + + temperature : typing.Optional[float] + The sampling temperature, between 0 and 1. Higher values like 0.8 make the output more random, while lower values like 0.2 make it more focused and deterministic. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AudioTranscriptionsCreateResponse + A successful response. 
+ + Examples + -------- + import asyncio + + from cohere import AsyncClient + + client = AsyncClient( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + + + async def main() -> None: + await client.audio.transcriptions.create( + model="model", + language="language", + ) + + + asyncio.run(main()) + """ + _response = await self._raw_client.create( + model=model, language=language, file=file, temperature=temperature, request_options=request_options + ) + return _response.data diff --git a/src/cohere/audio/transcriptions/raw_client.py b/src/cohere/audio/transcriptions/raw_client.py new file mode 100644 index 000000000..d597cd970 --- /dev/null +++ b/src/cohere/audio/transcriptions/raw_client.py @@ -0,0 +1,439 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from json.decoder import JSONDecodeError + +from ... import core +from ...core.api_error import ApiError +from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ...core.http_response import AsyncHttpResponse, HttpResponse +from ...core.parse_error import ParsingError +from ...core.request_options import RequestOptions +from ...core.unchecked_base_model import construct_type +from ...errors.bad_request_error import BadRequestError +from ...errors.client_closed_request_error import ClientClosedRequestError +from ...errors.forbidden_error import ForbiddenError +from ...errors.gateway_timeout_error import GatewayTimeoutError +from ...errors.internal_server_error import InternalServerError +from ...errors.invalid_token_error import InvalidTokenError +from ...errors.not_found_error import NotFoundError +from ...errors.not_implemented_error import NotImplementedError +from ...errors.service_unavailable_error import ServiceUnavailableError +from ...errors.too_many_requests_error import TooManyRequestsError +from ...errors.unauthorized_error import UnauthorizedError +from ...errors.unprocessable_entity_error import UnprocessableEntityError +from .types.audio_transcriptions_create_response import AudioTranscriptionsCreateResponse +from pydantic import ValidationError + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class RawTranscriptionsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def create( + self, + *, + model: str, + language: str, + file: core.File, + temperature: typing.Optional[float] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[AudioTranscriptionsCreateResponse]: + """ + Transcribe an audio file. + + Parameters + ---------- + model : str + ID of the model to use. + + language : str + The language of the input audio, supplied in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes) format. + + file : core.File + See core.File for more documentation + + temperature : typing.Optional[float] + The sampling temperature, between 0 and 1. Higher values like 0.8 make the output more random, while lower values like 0.2 make it more focused and deterministic. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[AudioTranscriptionsCreateResponse] + A successful response. 
+ """ + _response = self._client_wrapper.httpx_client.request( + "v2/audio/transcriptions", + method="POST", + data={ + "model": model, + "language": language, + "temperature": temperature, + }, + files={ + "file": file, + }, + request_options=request_options, + omit=OMIT, + force_multipart=True, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AudioTranscriptionsCreateResponse, + construct_type( + type_=AudioTranscriptionsCreateResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 401: + raise UnauthorizedError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 403: + raise ForbiddenError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 404: + raise NotFoundError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 429: + raise TooManyRequestsError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 498: + raise InvalidTokenError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 499: + raise ClientClosedRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 500: + raise InternalServerError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 501: + raise NotImplementedError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 503: + raise ServiceUnavailableError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 504: + raise GatewayTimeoutError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + 
status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + +class AsyncRawTranscriptionsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def create( + self, + *, + model: str, + language: str, + file: core.File, + temperature: typing.Optional[float] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[AudioTranscriptionsCreateResponse]: + """ + Transcribe an audio file. + + Parameters + ---------- + model : str + ID of the model to use. + + language : str + The language of the input audio, supplied in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes) format. + + file : core.File + See core.File for more documentation + + temperature : typing.Optional[float] + The sampling temperature, between 0 and 1. Higher values like 0.8 make the output more random, while lower values like 0.2 make it more focused and deterministic. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[AudioTranscriptionsCreateResponse] + A successful response. + """ + _response = await self._client_wrapper.httpx_client.request( + "v2/audio/transcriptions", + method="POST", + data={ + "model": model, + "language": language, + "temperature": temperature, + }, + files={ + "file": file, + }, + request_options=request_options, + omit=OMIT, + force_multipart=True, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AudioTranscriptionsCreateResponse, + construct_type( + type_=AudioTranscriptionsCreateResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 401: + raise UnauthorizedError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 403: + raise ForbiddenError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 404: + raise NotFoundError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 429: + raise TooManyRequestsError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 498: + raise InvalidTokenError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 499: + raise ClientClosedRequestError( + 
headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 500: + raise InternalServerError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 501: + raise NotImplementedError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 503: + raise ServiceUnavailableError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 504: + raise GatewayTimeoutError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/cohere/audio/transcriptions/types/__init__.py b/src/cohere/audio/transcriptions/types/__init__.py new file mode 100644 index 000000000..ff4238773 --- /dev/null +++ b/src/cohere/audio/transcriptions/types/__init__.py @@ -0,0 +1,34 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from .audio_transcriptions_create_response import AudioTranscriptionsCreateResponse +_dynamic_imports: typing.Dict[str, str] = {"AudioTranscriptionsCreateResponse": ".audio_transcriptions_create_response"} + + +def __getattr__(attr_name: str) -> typing.Any: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + + +__all__ = ["AudioTranscriptionsCreateResponse"] diff --git a/src/cohere/audio/transcriptions/types/audio_transcriptions_create_response.py b/src/cohere/audio/transcriptions/types/audio_transcriptions_create_response.py new file mode 100644 index 000000000..ede95d73b --- /dev/null +++ b/src/cohere/audio/transcriptions/types/audio_transcriptions_create_response.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import pydantic +from ....core.pydantic_utilities import IS_PYDANTIC_V2 +from ....core.unchecked_base_model import UncheckedBaseModel + + +class AudioTranscriptionsCreateResponse(UncheckedBaseModel): + text: str = pydantic.Field() + """ + The transcribed text. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow") # type: ignore # Pydantic v2 + else: + + class Config: + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/cohere/base_client.py b/src/cohere/base_client.py index b5a96a388..c895ff4e2 100644 --- a/src/cohere/base_client.py +++ b/src/cohere/base_client.py @@ -8,6 +8,7 @@ import httpx from .core.api_error import ApiError from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from .core.logging import LogConfig, Logger from .core.request_options import RequestOptions from .environment import ClientEnvironment from .raw_base_client import AsyncRawBaseCohere, RawBaseCohere @@ -49,6 +50,7 @@ from .types.tool_result import ToolResult if typing.TYPE_CHECKING: + from .audio.client import AsyncAudioClient, AudioClient from .batches.client import AsyncBatchesClient, BatchesClient from .connectors.client import AsyncConnectorsClient, ConnectorsClient from .datasets.client import AsyncDatasetsClient, DatasetsClient @@ -92,6 +94,9 @@ class BaseCohere: httpx_client : typing.Optional[httpx.Client] The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. + logging : typing.Optional[typing.Union[LogConfig, Logger]] + Configure logging for the SDK. Accepts a LogConfig dict with 'level' (debug/info/warn/error), 'logger' (custom logger implementation), and 'silent' (boolean, defaults to True) fields. You can also pass a pre-configured Logger instance. + Examples -------- from cohere import Client @@ -113,6 +118,7 @@ def __init__( timeout: typing.Optional[float] = None, follow_redirects: typing.Optional[bool] = True, httpx_client: typing.Optional[httpx.Client] = None, + logging: typing.Optional[typing.Union[LogConfig, Logger]] = None, ): _defaulted_timeout = ( timeout if timeout is not None else 300 if httpx_client is None else httpx_client.timeout.read @@ -130,6 +136,7 @@ def __init__( if follow_redirects is not None else httpx.Client(timeout=_defaulted_timeout), timeout=_defaulted_timeout, + logging=logging, ) self._raw_client = RawBaseCohere(client_wrapper=self._client_wrapper) self._v2: typing.Optional[V2Client] = None @@ -139,6 +146,7 @@ def __init__( self._connectors: typing.Optional[ConnectorsClient] = None self._models: typing.Optional[ModelsClient] = None self._finetuning: typing.Optional[FinetuningClient] = None + self._audio: typing.Optional[AudioClient] = None @property def with_raw_response(self) -> RawBaseCohere: @@ -1564,6 +1572,14 @@ def finetuning(self): self._finetuning = FinetuningClient(client_wrapper=self._client_wrapper) return self._finetuning + @property + def audio(self): + if self._audio is None: + from .audio.client import AudioClient # noqa: E402 + + self._audio = AudioClient(client_wrapper=self._client_wrapper) + return self._audio + class AsyncBaseCohere: """ @@ -1597,6 +1613,9 @@ class AsyncBaseCohere: httpx_client : typing.Optional[httpx.AsyncClient] The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. 
+ logging : typing.Optional[typing.Union[LogConfig, Logger]] + Configure logging for the SDK. Accepts a LogConfig dict with 'level' (debug/info/warn/error), 'logger' (custom logger implementation), and 'silent' (boolean, defaults to True) fields. You can also pass a pre-configured Logger instance. + Examples -------- from cohere import AsyncClient @@ -1618,6 +1637,7 @@ def __init__( timeout: typing.Optional[float] = None, follow_redirects: typing.Optional[bool] = True, httpx_client: typing.Optional[httpx.AsyncClient] = None, + logging: typing.Optional[typing.Union[LogConfig, Logger]] = None, ): _defaulted_timeout = ( timeout if timeout is not None else 300 if httpx_client is None else httpx_client.timeout.read @@ -1635,6 +1655,7 @@ def __init__( if follow_redirects is not None else httpx.AsyncClient(timeout=_defaulted_timeout), timeout=_defaulted_timeout, + logging=logging, ) self._raw_client = AsyncRawBaseCohere(client_wrapper=self._client_wrapper) self._v2: typing.Optional[AsyncV2Client] = None @@ -1644,6 +1665,7 @@ def __init__( self._connectors: typing.Optional[AsyncConnectorsClient] = None self._models: typing.Optional[AsyncModelsClient] = None self._finetuning: typing.Optional[AsyncFinetuningClient] = None + self._audio: typing.Optional[AsyncAudioClient] = None @property def with_raw_response(self) -> AsyncRawBaseCohere: @@ -3159,6 +3181,14 @@ def finetuning(self): self._finetuning = AsyncFinetuningClient(client_wrapper=self._client_wrapper) return self._finetuning + @property + def audio(self): + if self._audio is None: + from .audio.client import AsyncAudioClient # noqa: E402 + + self._audio = AsyncAudioClient(client_wrapper=self._client_wrapper) + return self._audio + def _get_base_url(*, base_url: typing.Optional[str] = None, environment: ClientEnvironment) -> str: if base_url is not None: diff --git a/src/cohere/batches/raw_client.py b/src/cohere/batches/raw_client.py index cf3ca51e3..36caa9854 100644 --- a/src/cohere/batches/raw_client.py +++ b/src/cohere/batches/raw_client.py @@ -7,6 +7,7 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.request_options import RequestOptions from ..core.serialization import convert_and_respect_annotation_metadata from ..core.unchecked_base_model import construct_type @@ -21,6 +22,7 @@ from .types.create_batch_response import CreateBatchResponse from .types.get_batch_response import GetBatchResponse from .types.list_batches_response import ListBatchesResponse +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
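# A minimal usage sketch (not part of the generated diff): the `logging`
# argument added to BaseCohere/AsyncBaseCohere above is threaded through the
# client wrapper into the HTTP layer (see core/logging.py later in this patch).
# Assumes the generated constructor arguments (client_name/token) used in the
# SDK's own docstring examples:

from cohere import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
    # LogConfig keys per core/logging.py; silent defaults to True, so it
    # must be set to False before any output appears.
    logging={"level": "debug", "silent": False},
)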
@@ -154,6 +156,10 @@ def list( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def create( @@ -263,6 +269,10 @@ def create( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def retrieve( @@ -368,6 +378,10 @@ def retrieve( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def cancel( @@ -473,6 +487,10 @@ def cancel( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) @@ -604,6 +622,10 @@ async def list( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def create( @@ -713,6 +735,10 @@ async def create( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def retrieve( @@ -818,6 +844,10 @@ async def retrieve( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def cancel( @@ -923,4 +953,8 @@ async def cancel( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise 
ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/cohere/connectors/raw_client.py b/src/cohere/connectors/raw_client.py index ccc81c605..c3e271395 100644 --- a/src/cohere/connectors/raw_client.py +++ b/src/cohere/connectors/raw_client.py @@ -7,6 +7,7 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.request_options import RequestOptions from ..core.serialization import convert_and_respect_annotation_metadata from ..core.unchecked_base_model import construct_type @@ -30,6 +31,7 @@ from ..types.list_connectors_response import ListConnectorsResponse from ..types.o_auth_authorize_response import OAuthAuthorizeResponse from ..types.update_connector_response import UpdateConnectorResponse +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -219,6 +221,10 @@ def list( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def create( @@ -439,6 +445,10 @@ def create( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def get( @@ -610,6 +620,10 @@ def get( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def delete( @@ -781,6 +795,10 @@ def delete( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def update( @@ -998,6 +1016,10 @@ def update( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def 
o_auth_authorize( @@ -1179,6 +1201,10 @@ def o_auth_authorize( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) @@ -1366,6 +1392,10 @@ async def list( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def create( @@ -1586,6 +1616,10 @@ async def create( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def get( @@ -1757,6 +1791,10 @@ async def get( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def delete( @@ -1928,6 +1966,10 @@ async def delete( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def update( @@ -2145,6 +2187,10 @@ async def update( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def o_auth_authorize( @@ -2326,4 +2372,8 @@ async def o_auth_authorize( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/cohere/core/__init__.py b/src/cohere/core/__init__.py index a7e9f0334..3b227d7f2 100644 --- a/src/cohere/core/__init__.py +++ b/src/cohere/core/__init__.py @@ 
-8,11 +8,13 @@ if typing.TYPE_CHECKING: from .api_error import ApiError from .client_wrapper import AsyncClientWrapper, BaseClientWrapper, SyncClientWrapper - from .datetime_utils import serialize_datetime + from .datetime_utils import Rfc2822DateTime, parse_rfc2822_datetime, serialize_datetime from .file import File, convert_file_dict_to_httpx_tuples, with_content_type from .http_client import AsyncHttpClient, HttpClient from .http_response import AsyncHttpResponse, HttpResponse from .jsonable_encoder import jsonable_encoder + from .logging import ConsoleLogger, ILogger, LogConfig, LogLevel, Logger, create_logger + from .parse_error import ParsingError from .pydantic_utilities import ( IS_PYDANTIC_V2, UniversalBaseModel, @@ -33,12 +35,19 @@ "AsyncHttpClient": ".http_client", "AsyncHttpResponse": ".http_response", "BaseClientWrapper": ".client_wrapper", + "ConsoleLogger": ".logging", "FieldMetadata": ".serialization", "File": ".file", "HttpClient": ".http_client", "HttpResponse": ".http_response", + "ILogger": ".logging", "IS_PYDANTIC_V2": ".pydantic_utilities", + "LogConfig": ".logging", + "LogLevel": ".logging", + "Logger": ".logging", + "ParsingError": ".parse_error", "RequestOptions": ".request_options", + "Rfc2822DateTime": ".datetime_utils", "SyncClientWrapper": ".client_wrapper", "UncheckedBaseModel": ".unchecked_base_model", "UnionMetadata": ".unchecked_base_model", @@ -47,9 +56,11 @@ "construct_type": ".unchecked_base_model", "convert_and_respect_annotation_metadata": ".serialization", "convert_file_dict_to_httpx_tuples": ".file", + "create_logger": ".logging", "encode_query": ".query_encoder", "jsonable_encoder": ".jsonable_encoder", "parse_obj_as": ".pydantic_utilities", + "parse_rfc2822_datetime": ".datetime_utils", "remove_none_from_dict": ".remove_none_from_dict", "serialize_datetime": ".datetime_utils", "universal_field_validator": ".pydantic_utilities", @@ -86,12 +97,19 @@ def __dir__(): "AsyncHttpClient", "AsyncHttpResponse", "BaseClientWrapper", + "ConsoleLogger", "FieldMetadata", "File", "HttpClient", "HttpResponse", + "ILogger", "IS_PYDANTIC_V2", + "LogConfig", + "LogLevel", + "Logger", + "ParsingError", "RequestOptions", + "Rfc2822DateTime", "SyncClientWrapper", "UncheckedBaseModel", "UnionMetadata", @@ -100,9 +118,11 @@ def __dir__(): "construct_type", "convert_and_respect_annotation_metadata", "convert_file_dict_to_httpx_tuples", + "create_logger", "encode_query", "jsonable_encoder", "parse_obj_as", + "parse_rfc2822_datetime", "remove_none_from_dict", "serialize_datetime", "universal_field_validator", diff --git a/src/cohere/core/client_wrapper.py b/src/cohere/core/client_wrapper.py index 7e5579b36..fbd5e76dd 100644 --- a/src/cohere/core/client_wrapper.py +++ b/src/cohere/core/client_wrapper.py @@ -4,6 +4,7 @@ import httpx from .http_client import AsyncHttpClient, HttpClient +from .logging import LogConfig, Logger class BaseClientWrapper: @@ -15,23 +16,25 @@ def __init__( headers: typing.Optional[typing.Dict[str, str]] = None, base_url: str, timeout: typing.Optional[float] = None, + logging: typing.Optional[typing.Union[LogConfig, Logger]] = None, ): self._client_name = client_name self._token = token self._headers = headers self._base_url = base_url self._timeout = timeout + self._logging = logging def get_headers(self) -> typing.Dict[str, str]: import platform headers: typing.Dict[str, str] = { - "User-Agent": "cohere/5.20.7", + "User-Agent": "cohere/5.21.0", "X-Fern-Language": "Python", "X-Fern-Runtime": f"python/{platform.python_version()}", "X-Fern-Platform": 
f"{platform.system().lower()}/{platform.release()}", "X-Fern-SDK-Name": "cohere", - "X-Fern-SDK-Version": "5.20.7", + "X-Fern-SDK-Version": "5.21.0", **(self.get_custom_headers() or {}), } if self._client_name is not None: @@ -64,14 +67,18 @@ def __init__( headers: typing.Optional[typing.Dict[str, str]] = None, base_url: str, timeout: typing.Optional[float] = None, + logging: typing.Optional[typing.Union[LogConfig, Logger]] = None, httpx_client: httpx.Client, ): - super().__init__(client_name=client_name, token=token, headers=headers, base_url=base_url, timeout=timeout) + super().__init__( + client_name=client_name, token=token, headers=headers, base_url=base_url, timeout=timeout, logging=logging + ) self.httpx_client = HttpClient( httpx_client=httpx_client, base_headers=self.get_headers, base_timeout=self.get_timeout, base_url=self.get_base_url, + logging_config=self._logging, ) @@ -84,10 +91,13 @@ def __init__( headers: typing.Optional[typing.Dict[str, str]] = None, base_url: str, timeout: typing.Optional[float] = None, + logging: typing.Optional[typing.Union[LogConfig, Logger]] = None, async_token: typing.Optional[typing.Callable[[], typing.Awaitable[str]]] = None, httpx_client: httpx.AsyncClient, ): - super().__init__(client_name=client_name, token=token, headers=headers, base_url=base_url, timeout=timeout) + super().__init__( + client_name=client_name, token=token, headers=headers, base_url=base_url, timeout=timeout, logging=logging + ) self._async_token = async_token self.httpx_client = AsyncHttpClient( httpx_client=httpx_client, @@ -95,6 +105,7 @@ def __init__( base_timeout=self.get_timeout, base_url=self.get_base_url, async_base_headers=self.async_get_headers, + logging_config=self._logging, ) async def async_get_headers(self) -> typing.Dict[str, str]: diff --git a/src/cohere/core/datetime_utils.py b/src/cohere/core/datetime_utils.py index 7c9864a94..a12b2ad03 100644 --- a/src/cohere/core/datetime_utils.py +++ b/src/cohere/core/datetime_utils.py @@ -1,6 +1,48 @@ # This file was auto-generated by Fern from our API Definition. import datetime as dt +from email.utils import parsedate_to_datetime +from typing import Any + +import pydantic + +IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.") + + +def parse_rfc2822_datetime(v: Any) -> dt.datetime: + """ + Parse an RFC 2822 datetime string (e.g., "Wed, 02 Oct 2002 13:00:00 GMT") + into a datetime object. If the value is already a datetime, return it as-is. + Falls back to ISO 8601 parsing if RFC 2822 parsing fails. + """ + if isinstance(v, dt.datetime): + return v + if isinstance(v, str): + try: + return parsedate_to_datetime(v) + except Exception: + pass + # Fallback to ISO 8601 parsing + return dt.datetime.fromisoformat(v.replace("Z", "+00:00")) + raise ValueError(f"Expected str or datetime, got {type(v)}") + + +class Rfc2822DateTime(dt.datetime): + """A datetime subclass that parses RFC 2822 date strings. + + On Pydantic V1, uses __get_validators__ for pre-validation. + On Pydantic V2, uses __get_pydantic_core_schema__ for BeforeValidator-style parsing. 
+ """ + + @classmethod + def __get_validators__(cls): # type: ignore[no-untyped-def] + yield parse_rfc2822_datetime + + @classmethod + def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> Any: # type: ignore[override] + from pydantic_core import core_schema + + return core_schema.no_info_before_validator_function(parse_rfc2822_datetime, core_schema.datetime_schema()) def serialize_datetime(v: dt.datetime) -> str: diff --git a/src/cohere/core/http_client.py b/src/cohere/core/http_client.py index 7c6c936f9..ee937589c 100644 --- a/src/cohere/core/http_client.py +++ b/src/cohere/core/http_client.py @@ -12,6 +12,7 @@ from .file import File, convert_file_dict_to_httpx_tuples from .force_multipart import FORCE_MULTIPART from .jsonable_encoder import jsonable_encoder +from .logging import LogConfig, Logger, create_logger from .query_encoder import encode_query from .remove_none_from_dict import remove_none_from_dict as remove_none_from_dict from .request_options import RequestOptions @@ -122,6 +123,32 @@ def _should_retry(response: httpx.Response) -> bool: return response.status_code >= 500 or response.status_code in retryable_400s +_SENSITIVE_HEADERS = frozenset( + { + "authorization", + "www-authenticate", + "x-api-key", + "api-key", + "apikey", + "x-api-token", + "x-auth-token", + "auth-token", + "cookie", + "set-cookie", + "proxy-authorization", + "proxy-authenticate", + "x-csrf-token", + "x-xsrf-token", + "x-session-token", + "x-access-token", + } +) + + +def _redact_headers(headers: typing.Dict[str, str]) -> typing.Dict[str, str]: + return {k: ("[REDACTED]" if k.lower() in _SENSITIVE_HEADERS else v) for k, v in headers.items()} + + def _build_url(base_url: str, path: typing.Optional[str]) -> str: """ Build a full URL by joining a base URL with a path. 
@@ -238,11 +265,13 @@ def __init__( base_timeout: typing.Callable[[], typing.Optional[float]], base_headers: typing.Callable[[], typing.Dict[str, str]], base_url: typing.Optional[typing.Callable[[], str]] = None, + logging_config: typing.Optional[typing.Union[LogConfig, Logger]] = None, ): self.base_url = base_url self.base_timeout = base_timeout self.base_headers = base_headers self.httpx_client = httpx_client + self.logger = create_logger(logging_config) def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str: base_url = maybe_base_url @@ -315,18 +344,30 @@ def request( ) ) + _request_url = _build_url(base_url, path) + _request_headers = jsonable_encoder( + remove_none_from_dict( + { + **self.base_headers(), + **(headers if headers is not None else {}), + **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}), + } + ) + ) + + if self.logger.is_debug(): + self.logger.debug( + "Making HTTP request", + method=method, + url=_request_url, + headers=_redact_headers(_request_headers), + has_body=json_body is not None or data_body is not None, + ) + response = self.httpx_client.request( method=method, - url=_build_url(base_url, path), - headers=jsonable_encoder( - remove_none_from_dict( - { - **self.base_headers(), - **(headers if headers is not None else {}), - **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}), - } - ) - ), + url=_request_url, + headers=_request_headers, params=_encoded_params if _encoded_params else None, json=json_body, data=data_body, @@ -353,6 +394,24 @@ def request( omit=omit, ) + if self.logger.is_debug(): + if 200 <= response.status_code < 400: + self.logger.debug( + "HTTP request succeeded", + method=method, + url=_request_url, + status_code=response.status_code, + ) + + if self.logger.is_error(): + if response.status_code >= 400: + self.logger.error( + "HTTP request failed with error status", + method=method, + url=_request_url, + status_code=response.status_code, + ) + return response @contextmanager @@ -418,18 +477,29 @@ def stream( ) ) + _request_url = _build_url(base_url, path) + _request_headers = jsonable_encoder( + remove_none_from_dict( + { + **self.base_headers(), + **(headers if headers is not None else {}), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ) + + if self.logger.is_debug(): + self.logger.debug( + "Making streaming HTTP request", + method=method, + url=_request_url, + headers=_redact_headers(_request_headers), + ) + with self.httpx_client.stream( method=method, - url=_build_url(base_url, path), - headers=jsonable_encoder( - remove_none_from_dict( - { - **self.base_headers(), - **(headers if headers is not None else {}), - **(request_options.get("additional_headers", {}) if request_options is not None else {}), - } - ) - ), + url=_request_url, + headers=_request_headers, params=_encoded_params if _encoded_params else None, json=json_body, data=data_body, @@ -449,12 +519,14 @@ def __init__( base_headers: typing.Callable[[], typing.Dict[str, str]], base_url: typing.Optional[typing.Callable[[], str]] = None, async_base_headers: typing.Optional[typing.Callable[[], typing.Awaitable[typing.Dict[str, str]]]] = None, + logging_config: typing.Optional[typing.Union[LogConfig, Logger]] = None, ): self.base_url = base_url self.base_timeout = base_timeout self.base_headers = base_headers self.async_base_headers = async_base_headers self.httpx_client = httpx_client + self.logger = create_logger(logging_config) 
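# A minimal sketch of the redaction helper above: _redact_headers masks any
# header whose lowercased name appears in _SENSITIVE_HEADERS, preserving the
# original key casing and leaving all other headers untouched.

from cohere.core.http_client import _redact_headers

headers = {
    "Authorization": "Bearer sk-secret",  # sensitive -> masked
    "X-API-Key": "abc123",                # sensitive -> masked
    "Content-Type": "application/json",   # kept as-is
}
assert _redact_headers(headers) == {
    "Authorization": "[REDACTED]",
    "X-API-Key": "[REDACTED]",
    "Content-Type": "application/json",
}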
async def _get_headers(self) -> typing.Dict[str, str]: if self.async_base_headers is not None: @@ -535,19 +607,30 @@ async def request( ) ) - # Add the input to each of these and do None-safety checks + _request_url = _build_url(base_url, path) + _request_headers = jsonable_encoder( + remove_none_from_dict( + { + **_headers, + **(headers if headers is not None else {}), + **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}), + } + ) + ) + + if self.logger.is_debug(): + self.logger.debug( + "Making HTTP request", + method=method, + url=_request_url, + headers=_redact_headers(_request_headers), + has_body=json_body is not None or data_body is not None, + ) + response = await self.httpx_client.request( method=method, - url=_build_url(base_url, path), - headers=jsonable_encoder( - remove_none_from_dict( - { - **_headers, - **(headers if headers is not None else {}), - **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}), - } - ) - ), + url=_request_url, + headers=_request_headers, params=_encoded_params if _encoded_params else None, json=json_body, data=data_body, @@ -573,6 +656,25 @@ async def request( retries=retries + 1, omit=omit, ) + + if self.logger.is_debug(): + if 200 <= response.status_code < 400: + self.logger.debug( + "HTTP request succeeded", + method=method, + url=_request_url, + status_code=response.status_code, + ) + + if self.logger.is_error(): + if response.status_code >= 400: + self.logger.error( + "HTTP request failed with error status", + method=method, + url=_request_url, + status_code=response.status_code, + ) + return response @asynccontextmanager @@ -641,18 +743,29 @@ async def stream( ) ) + _request_url = _build_url(base_url, path) + _request_headers = jsonable_encoder( + remove_none_from_dict( + { + **_headers, + **(headers if headers is not None else {}), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ) + + if self.logger.is_debug(): + self.logger.debug( + "Making streaming HTTP request", + method=method, + url=_request_url, + headers=_redact_headers(_request_headers), + ) + async with self.httpx_client.stream( method=method, - url=_build_url(base_url, path), - headers=jsonable_encoder( - remove_none_from_dict( - { - **_headers, - **(headers if headers is not None else {}), - **(request_options.get("additional_headers", {}) if request_options is not None else {}), - } - ) - ), + url=_request_url, + headers=_request_headers, params=_encoded_params if _encoded_params else None, json=json_body, data=data_body, diff --git a/src/cohere/core/logging.py b/src/cohere/core/logging.py new file mode 100644 index 000000000..e5e572458 --- /dev/null +++ b/src/cohere/core/logging.py @@ -0,0 +1,107 @@ +# This file was auto-generated by Fern from our API Definition. + +import logging +import typing + +LogLevel = typing.Literal["debug", "info", "warn", "error"] + +_LOG_LEVEL_MAP: typing.Dict[LogLevel, int] = { + "debug": 1, + "info": 2, + "warn": 3, + "error": 4, +} + + +class ILogger(typing.Protocol): + def debug(self, message: str, **kwargs: typing.Any) -> None: ... + def info(self, message: str, **kwargs: typing.Any) -> None: ... + def warn(self, message: str, **kwargs: typing.Any) -> None: ... + def error(self, message: str, **kwargs: typing.Any) -> None: ... 
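# ILogger above is a typing.Protocol, so any object with matching
# debug/info/warn/error methods satisfies it structurally; no subclassing is
# needed. A sketch of a custom logger (JsonLogger is hypothetical, not part of
# the SDK), wired in via the LogConfig defined just below:

import json
import typing


class JsonLogger:
    def _emit(self, level: str, message: str, **kwargs: typing.Any) -> None:
        # One JSON object per event; kwargs carry method/url/status_code etc.
        print(json.dumps({"level": level, "message": message, **kwargs}))

    def debug(self, message: str, **kwargs: typing.Any) -> None:
        self._emit("debug", message, **kwargs)

    def info(self, message: str, **kwargs: typing.Any) -> None:
        self._emit("info", message, **kwargs)

    def warn(self, message: str, **kwargs: typing.Any) -> None:
        self._emit("warn", message, **kwargs)

    def error(self, message: str, **kwargs: typing.Any) -> None:
        self._emit("error", message, **kwargs)

# e.g. Client(..., logging={"logger": JsonLogger(), "level": "debug", "silent": False})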
+ + +class ConsoleLogger: + _logger: logging.Logger + + def __init__(self) -> None: + self._logger = logging.getLogger("fern") + if not self._logger.handlers: + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter("%(levelname)s - %(message)s")) + self._logger.addHandler(handler) + self._logger.setLevel(logging.DEBUG) + + def debug(self, message: str, **kwargs: typing.Any) -> None: + self._logger.debug(message, extra=kwargs) + + def info(self, message: str, **kwargs: typing.Any) -> None: + self._logger.info(message, extra=kwargs) + + def warn(self, message: str, **kwargs: typing.Any) -> None: + self._logger.warning(message, extra=kwargs) + + def error(self, message: str, **kwargs: typing.Any) -> None: + self._logger.error(message, extra=kwargs) + + +class LogConfig(typing.TypedDict, total=False): + level: LogLevel + logger: ILogger + silent: bool + + +class Logger: + _level: int + _logger: ILogger + _silent: bool + + def __init__(self, *, level: LogLevel, logger: ILogger, silent: bool) -> None: + self._level = _LOG_LEVEL_MAP[level] + self._logger = logger + self._silent = silent + + def _should_log(self, level: LogLevel) -> bool: + return not self._silent and self._level <= _LOG_LEVEL_MAP[level] + + def is_debug(self) -> bool: + return self._should_log("debug") + + def is_info(self) -> bool: + return self._should_log("info") + + def is_warn(self) -> bool: + return self._should_log("warn") + + def is_error(self) -> bool: + return self._should_log("error") + + def debug(self, message: str, **kwargs: typing.Any) -> None: + if self.is_debug(): + self._logger.debug(message, **kwargs) + + def info(self, message: str, **kwargs: typing.Any) -> None: + if self.is_info(): + self._logger.info(message, **kwargs) + + def warn(self, message: str, **kwargs: typing.Any) -> None: + if self.is_warn(): + self._logger.warn(message, **kwargs) + + def error(self, message: str, **kwargs: typing.Any) -> None: + if self.is_error(): + self._logger.error(message, **kwargs) + + +_default_logger: Logger = Logger(level="info", logger=ConsoleLogger(), silent=True) + + +def create_logger(config: typing.Optional[typing.Union[LogConfig, Logger]] = None) -> Logger: + if config is None: + return _default_logger + if isinstance(config, Logger): + return config + return Logger( + level=config.get("level", "info"), + logger=config.get("logger", ConsoleLogger()), + silent=config.get("silent", True), + ) diff --git a/src/cohere/core/parse_error.py b/src/cohere/core/parse_error.py new file mode 100644 index 000000000..4527c6a8a --- /dev/null +++ b/src/cohere/core/parse_error.py @@ -0,0 +1,36 @@ +# This file was auto-generated by Fern from our API Definition. + +from typing import Any, Dict, Optional + + +class ParsingError(Exception): + """ + Raised when the SDK fails to parse/validate a response from the server. + This typically indicates that the server returned a response whose shape + does not match the expected schema. 
+ """ + + headers: Optional[Dict[str, str]] + status_code: Optional[int] + body: Any + cause: Optional[Exception] + + def __init__( + self, + *, + headers: Optional[Dict[str, str]] = None, + status_code: Optional[int] = None, + body: Any = None, + cause: Optional[Exception] = None, + ) -> None: + self.headers = headers + self.status_code = status_code + self.body = body + self.cause = cause + super().__init__() + if cause is not None: + self.__cause__ = cause + + def __str__(self) -> str: + cause_str = f", cause: {self.cause}" if self.cause is not None else "" + return f"headers: {self.headers}, status_code: {self.status_code}, body: {self.body}{cause_str}" diff --git a/src/cohere/datasets/raw_client.py b/src/cohere/datasets/raw_client.py index 3e3b73ccb..f667c9571 100644 --- a/src/cohere/datasets/raw_client.py +++ b/src/cohere/datasets/raw_client.py @@ -10,6 +10,7 @@ from ..core.datetime_utils import serialize_datetime from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.request_options import RequestOptions from ..core.unchecked_base_model import construct_type from ..errors.bad_request_error import BadRequestError @@ -30,6 +31,7 @@ from .types.datasets_get_response import DatasetsGetResponse from .types.datasets_get_usage_response import DatasetsGetUsageResponse from .types.datasets_list_response import DatasetsListResponse +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -239,6 +241,10 @@ def list( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def create( @@ -466,6 +472,10 @@ def create( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def get_usage( @@ -634,6 +644,10 @@ def get_usage( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def get( @@ -804,6 +818,10 @@ def get( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def delete( @@ -974,6 +992,10 @@ def delete( _response_json = _response.json() except 
JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) @@ -1181,6 +1203,10 @@ async def list( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def create( @@ -1408,6 +1434,10 @@ async def create( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def get_usage( @@ -1576,6 +1606,10 @@ async def get_usage( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def get( @@ -1746,6 +1780,10 @@ async def get( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def delete( @@ -1916,4 +1954,8 @@ async def delete( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/cohere/embed_jobs/raw_client.py b/src/cohere/embed_jobs/raw_client.py index e49127f58..68da40e73 100644 --- a/src/cohere/embed_jobs/raw_client.py +++ b/src/cohere/embed_jobs/raw_client.py @@ -7,6 +7,7 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.request_options import RequestOptions from ..core.unchecked_base_model import construct_type from ..errors.bad_request_error import BadRequestError @@ -27,6 +28,7 @@ from ..types.embedding_type import EmbeddingType from ..types.list_embed_job_response import ListEmbedJobResponse from 
.types.create_embed_job_request_truncate import CreateEmbedJobRequestTruncate +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -200,6 +202,10 @@ def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> Ht _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def create( @@ -420,6 +426,10 @@ def create( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[EmbedJob]: @@ -589,6 +599,10 @@ def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = Non _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def cancel(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: @@ -750,6 +764,10 @@ def cancel(self, id: str, *, request_options: typing.Optional[RequestOptions] = _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) @@ -923,6 +941,10 @@ async def list( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def create( @@ -1143,6 +1165,10 @@ async def create( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def get( @@ -1314,6 +1340,10 @@ async def get( _response_json = _response.json() except JSONDecodeError: raise 
ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def cancel( @@ -1477,4 +1507,8 @@ async def cancel( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/cohere/finetuning/raw_client.py b/src/cohere/finetuning/raw_client.py index a5a56addb..ca8b876f7 100644 --- a/src/cohere/finetuning/raw_client.py +++ b/src/cohere/finetuning/raw_client.py @@ -7,6 +7,7 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.request_options import RequestOptions from ..core.serialization import convert_and_respect_annotation_metadata from ..core.unchecked_base_model import construct_type @@ -26,6 +27,7 @@ from .finetuning.types.settings import Settings from .finetuning.types.status import Status from .finetuning.types.update_finetuned_model_response import UpdateFinetunedModelResponse +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
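# Every raw-client method in this patch gains the same fallthrough: a pydantic
# ValidationError raised while deserializing a response body is re-raised as
# ParsingError, carrying the status code, headers, body, and original cause.
# A sketch of the resulting caller-side handling (ParsingError is not a
# subclass of ApiError, so both must be caught; `client` is assumed to be a
# configured cohere.Client):

from cohere.core import ApiError, ParsingError

try:
    models = client.finetuning.list_finetuned_models()
except ParsingError as e:
    # typically a 2xx response whose body did not match the generated schema
    print("schema mismatch:", e.status_code, e.cause)
except ApiError as e:
    # non-2xx response, or a body that failed to decode as JSON
    print("request failed:", e.status_code, e.body)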
@@ -160,6 +162,10 @@ def list_finetuned_models( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def create_finetuned_model( @@ -269,6 +275,10 @@ def create_finetuned_model( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def get_finetuned_model( @@ -374,6 +384,10 @@ def get_finetuned_model( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def delete_finetuned_model( @@ -480,6 +494,10 @@ def delete_finetuned_model( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def update_finetuned_model( @@ -611,6 +629,10 @@ def update_finetuned_model( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def list_events( @@ -744,6 +766,10 @@ def list_events( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def list_training_step_metrics( @@ -867,6 +893,10 @@ def list_training_step_metrics( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) @@ -999,6 +1029,10 @@ async def list_finetuned_models( _response_json = _response.json() except 
JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def create_finetuned_model( @@ -1108,6 +1142,10 @@ async def create_finetuned_model( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def get_finetuned_model( @@ -1213,6 +1251,10 @@ async def get_finetuned_model( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def delete_finetuned_model( @@ -1319,6 +1361,10 @@ async def delete_finetuned_model( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def update_finetuned_model( @@ -1450,6 +1496,10 @@ async def update_finetuned_model( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def list_events( @@ -1583,6 +1633,10 @@ async def list_events( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def list_training_step_metrics( @@ -1706,4 +1760,8 @@ async def list_training_step_metrics( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/cohere/models/raw_client.py b/src/cohere/models/raw_client.py index 
57037d6de..64bf6aa42 100644 --- a/src/cohere/models/raw_client.py +++ b/src/cohere/models/raw_client.py @@ -7,6 +7,7 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.request_options import RequestOptions from ..core.unchecked_base_model import construct_type from ..errors.bad_request_error import BadRequestError @@ -24,6 +25,7 @@ from ..types.compatible_endpoint import CompatibleEndpoint from ..types.get_model_response import GetModelResponse from ..types.list_models_response import ListModelsResponse +from pydantic import ValidationError class RawModelsClient: @@ -198,6 +200,10 @@ def get( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def list( @@ -391,6 +397,10 @@ def list( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) @@ -566,6 +576,10 @@ async def get( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def list( @@ -759,4 +773,8 @@ async def list( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/cohere/raw_base_client.py b/src/cohere/raw_base_client.py index 645086721..db525c987 100644 --- a/src/cohere/raw_base_client.py +++ b/src/cohere/raw_base_client.py @@ -8,6 +8,7 @@ from .core.api_error import ApiError from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from .core.http_response import AsyncHttpResponse, HttpResponse +from .core.parse_error import ParsingError from .core.request_options import RequestOptions from .core.serialization import convert_and_respect_annotation_metadata from .core.unchecked_base_model import construct_type @@ -59,6 +60,7 @@ from .types.tokenize_response import TokenizeResponse from .types.tool import Tool from .types.tool_result import ToolResult +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
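# The HTTP clients earlier in this patch guard each log call with
# is_debug()/is_error(); those rely on the level gating in core/logging.py:
# create_logger returns a silent logger unless a config explicitly sets
# silent=False, so logging is disabled by default. A minimal sketch:

from cohere.core import create_logger

logger = create_logger({"level": "warn", "silent": False})
assert not logger.is_debug()  # debug (1) is below the configured warn (3)
assert logger.is_error()      # error (4) is at or above warn (3)

default = create_logger(None)  # the shared default logger: silent=True
assert not default.is_error()  # everything suppressed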
@@ -532,6 +534,13 @@ def _iter(): raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), body=_response.text ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) yield _stream() @@ -983,6 +992,10 @@ def chat( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) @contextlib.contextmanager @@ -1287,6 +1300,13 @@ def _iter(): raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), body=_response.text ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) yield _stream() @@ -1577,6 +1597,10 @@ def generate( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def embed( @@ -1800,6 +1824,10 @@ def embed( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def rerank( @@ -2019,6 +2047,10 @@ def rerank( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def classify( @@ -2228,6 +2260,10 @@ def classify( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e + ) raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def summarize( @@ -2442,6 +2478,10 @@ def summarize( _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, headers=dict(_response.headers), 
@@ -2624,6 +2664,10 @@ def tokenize(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
     def detokenize(
@@ -2806,6 +2850,10 @@ def detokenize(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
     def check_api_key(
@@ -2974,6 +3022,10 @@ def check_api_key(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
 
@@ -3445,6 +3497,13 @@ async def _iter():
                 raise ApiError(
                     status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                 )
+            except ValidationError as e:
+                raise ParsingError(
+                    status_code=_response.status_code,
+                    headers=dict(_response.headers),
+                    body=_response.json(),
+                    cause=e,
+                )
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
         yield await _stream()
@@ -3896,6 +3955,10 @@ async def chat(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
     @contextlib.asynccontextmanager
@@ -4200,6 +4263,13 @@ async def _iter():
                 raise ApiError(
                     status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                 )
+            except ValidationError as e:
+                raise ParsingError(
+                    status_code=_response.status_code,
+                    headers=dict(_response.headers),
+                    body=_response.json(),
+                    cause=e,
+                )
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
         yield await _stream()
@@ -4490,6 +4560,10 @@ async def generate(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
     async def embed(
@@ -4713,6 +4787,10 @@ async def embed(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
     async def rerank(
@@ -4932,6 +5010,10 @@ async def rerank(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
     async def classify(
@@ -5141,6 +5223,10 @@ async def classify(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
     async def summarize(
@@ -5355,6 +5441,10 @@ async def summarize(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
     async def tokenize(
@@ -5537,6 +5627,10 @@ async def tokenize(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
     async def detokenize(
@@ -5719,6 +5813,10 @@ async def detokenize(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
     async def check_api_key(
@@ -5887,4 +5985,8 @@ async def check_api_key(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
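The base client applies the same translation to every endpoint, sync and async, streaming and unary. Note that each handler passes `body=_response.json()` rather than the local `_response_json`: the `ValidationError` can be raised before `_response_json` is ever assigned, so the body has to be re-read inside the handler. From the caller's side, the change adds a more specific exception to catch. The sketch below is a hedged usage example, not verified against the SDK: it assumes `cohere.Client` and its `chat` method behave as the hunk contexts suggest, that `ParsingError` lives at the `cohere.core.parse_error` path this patch introduces, and that it subclasses `ApiError`.

```python
# Hedged usage sketch, not SDK-verified: assumes cohere.Client and the
# import paths below match this patch. ParsingError is assumed to subclass
# ApiError, so the more specific handler must come first.
import cohere
from cohere.core.api_error import ApiError
from cohere.core.parse_error import ParsingError

co = cohere.Client(api_key="...")  # placeholder key

try:
    response = co.chat(message="Hello")
except ParsingError as err:
    # The server replied, but the body failed pydantic validation;
    # err.cause carries the underlying ValidationError for debugging.
    print("unparseable response:", err.status_code, err.cause)
except ApiError as err:
    # All other HTTP-level failures (non-2xx status, undecodable body, ...).
    print("request failed:", err.status_code, err.body)
```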
diff --git a/src/cohere/v2/raw_client.py b/src/cohere/v2/raw_client.py
index 8df5500ed..e72a08674 100644
--- a/src/cohere/v2/raw_client.py
+++ b/src/cohere/v2/raw_client.py
@@ -9,6 +9,7 @@
 from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from ..core.http_response import AsyncHttpResponse, HttpResponse
 from ..core.http_sse._api import EventSource
+from ..core.parse_error import ParsingError
 from ..core.pydantic_utilities import parse_sse_obj
 from ..core.request_options import RequestOptions
 from ..core.serialization import convert_and_respect_annotation_metadata
@@ -44,6 +45,7 @@
 from .types.v2chat_stream_response import V2ChatStreamResponse
 from .types.v2embed_request_truncate import V2EmbedRequestTruncate
 from .types.v2rerank_response import V2RerankResponse
+from pydantic import ValidationError
 
 # this is used as the default value for optional parameters
 OMIT = typing.cast(typing.Any, ...)
@@ -393,6 +395,13 @@ def _iter():
                 raise ApiError(
                     status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                 )
+            except ValidationError as e:
+                raise ParsingError(
+                    status_code=_response.status_code,
+                    headers=dict(_response.headers),
+                    body=_response.json(),
+                    cause=e,
+                )
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
         yield _stream()
@@ -712,6 +721,10 @@ def chat(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
     def embed(
@@ -959,6 +972,10 @@ def embed(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
     def rerank(
@@ -1170,6 +1187,10 @@ def rerank(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
 
@@ -1517,6 +1538,13 @@ async def _iter():
                 raise ApiError(
                     status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                 )
+            except ValidationError as e:
+                raise ParsingError(
+                    status_code=_response.status_code,
+                    headers=dict(_response.headers),
+                    body=_response.json(),
+                    cause=e,
+                )
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
         yield await _stream()
@@ -1836,6 +1864,10 @@ async def chat(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
     async def embed(
@@ -2083,6 +2115,10 @@ async def embed(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
 
     async def rerank(
@@ -2294,4 +2330,8 @@ async def rerank(
             _response_json = _response.json()
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        except ValidationError as e:
+            raise ParsingError(
+                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
+            )
         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
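In the v2 client the same pattern also lands inside the `_iter`/`_stream` generators used by the streaming endpoints, so a `ParsingError` for a bad initial response surfaces when the stream is iterated, not when the call is made. A hedged sketch of consuming a v2 stream under that assumption follows; `cohere.ClientV2`, the `chat_stream` signature, and the model id are illustrative, not confirmed by this patch.

```python
# Hedged sketch for the v2 streaming path. Assumes cohere.ClientV2 and the
# chat_stream(model=..., messages=[...]) signature suggested by the hunk
# contexts above; the model name is a placeholder.
import cohere
from cohere.core.api_error import ApiError
from cohere.core.parse_error import ParsingError

co = cohere.ClientV2(api_key="...")  # placeholder key

try:
    # The generator is produced lazily; parsing errors on the initial
    # response are raised once iteration begins.
    for event in co.chat_stream(
        model="command-a-03-2025",  # illustrative model id
        messages=[{"role": "user", "content": "Hi"}],
    ):
        print(event)
except ParsingError as err:
    print("error body failed validation:", err.body, "->", err.cause)
except ApiError as err:
    print("stream failed:", err.status_code)
```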