From 191c5d9b2324f84bb5ba1d9cb46a8d62fad9723d Mon Sep 17 00:00:00 2001
From: "fern-api[bot]" <115122769+fern-api[bot]@users.noreply.github.com>
Date: Thu, 13 Nov 2025 02:33:50 +0000
Subject: [PATCH 1/2] SDK regeneration
---
.fern/metadata.json | 58 +++++++++
poetry.lock | 6 +-
pyproject.toml | 4 +-
reference.md | 114 ++++++++++++++++-
src/elevenlabs/__init__.py | 63 +++++++++-
.../conversational_ai/agents/client.py | 20 ++-
.../conversational_ai/agents/raw_client.py | 10 ++
.../conversational_ai/conversations/client.py | 64 +++++++++-
.../conversations/feedback/client.py | 16 +--
.../conversations/feedback/raw_client.py | 24 ++--
.../conversations/raw_client.py | 66 ++++++++--
.../knowledge_base/documents/client.py | 8 +-
.../knowledge_base/documents/raw_client.py | 16 +--
.../mcp_servers/tool_approvals/client.py | 8 +-
.../mcp_servers/tool_approvals/raw_client.py | 8 +-
.../mcp_servers/tool_configs/raw_client.py | 24 ++--
.../conversational_ai/phone_numbers/client.py | 10 +-
.../phone_numbers/raw_client.py | 16 +--
.../conversational_ai/tests/client.py | 12 +-
.../tests/invocations/client.py | 18 ++-
.../tests/invocations/raw_client.py | 26 ++--
.../conversational_ai/tests/raw_client.py | 16 +--
.../conversational_ai/tools/client.py | 116 ++++++++++++++++--
.../conversational_ai/tools/raw_client.py | 16 +--
src/elevenlabs/core/client_wrapper.py | 3 +-
src/elevenlabs/core/pydantic_utilities.py | 4 +-
src/elevenlabs/dubbing/audio/raw_client.py | 24 ++--
.../dubbing/transcript/raw_client.py | 24 ++--
src/elevenlabs/errors/conflict_error.py | 2 +-
src/elevenlabs/errors/forbidden_error.py | 2 +-
src/elevenlabs/errors/not_found_error.py | 2 +-
src/elevenlabs/errors/too_early_error.py | 2 +-
src/elevenlabs/errors/unauthorized_error.py | 2 +-
src/elevenlabs/music/client.py | 4 +-
src/elevenlabs/music/raw_client.py | 4 +-
.../service_accounts/api_keys/client.py | 16 +--
.../service_accounts/api_keys/raw_client.py | 32 ++---
.../speech_to_text/transcripts/client.py | 10 +-
.../speech_to_text/transcripts/raw_client.py | 40 +++---
...o_text_convert_request_webhook_metadata.py | 2 +-
src/elevenlabs/types/__init__.py | 65 +++++++++-
src/elevenlabs/types/add_project_request.py | 2 +-
.../types/add_sharing_voice_request.py | 2 +-
src/elevenlabs/types/age.py | 2 +-
src/elevenlabs/types/agent_metadata.py | 1 +
...tion_webhook_tool_config_external_input.py | 98 +++++++++++++++
...ion_webhook_tool_config_external_output.py | 98 +++++++++++++++
..._integration_webhook_tool_config_output.py | 16 +--
.../types/array_json_schema_property_input.py | 3 +-
.../array_json_schema_property_output.py | 3 +-
.../types/character_usage_response.py | 2 +-
...ion_config_client_override_config_input.py | 6 +
...on_config_client_override_config_output.py | 6 +
...nversation_config_client_override_input.py | 6 +
...versation_config_client_override_output.py | 6 +
.../types/conversation_feedback_type.py | 5 +
...versation_history_feedback_common_model.py | 4 +
...pt_other_tools_result_common_model_type.py | 2 +-
...rsation_initiation_client_data_internal.py | 2 +-
...on_initiation_client_data_request_input.py | 2 +-
...n_initiation_client_data_request_output.py | 2 +-
.../types/conversation_initiation_source.py | 1 +
.../conversation_summary_response_model.py | 2 +
.../create_audio_native_project_request.py | 2 +-
.../types/create_transcript_request.py | 2 +-
.../data_collection_result_common_model.py | 2 +-
.../types/delete_chapter_request.py | 2 +-
.../types/delete_project_request.py | 2 +-
.../types/discount_resposne_model.py | 28 +++++
.../types/edit_voice_settings_request.py | 2 +-
src/elevenlabs/types/fine_tuning_response.py | 2 +-
src/elevenlabs/types/gender.py | 2 +-
.../types/get_agent_response_model.py | 10 ++
src/elevenlabs/types/get_chapter_request.py | 2 +-
.../types/get_chapter_snapshots_request.py | 2 +-
src/elevenlabs/types/get_chapters_request.py | 2 +-
.../types/get_conversation_response_model.py | 1 +
.../types/get_phone_number_response.py | 2 +-
src/elevenlabs/types/get_project_request.py | 2 +-
src/elevenlabs/types/get_projects_request.py | 2 +-
...get_pronunciation_dictionaries_response.py | 2 +-
.../get_pronunciation_dictionary_response.py | 2 +-
...et_test_suite_invocation_response_model.py | 1 +
src/elevenlabs/types/history_item_response.py | 4 +-
src/elevenlabs/types/invoice_response.py | 10 +-
src/elevenlabs/types/language_preset_input.py | 5 +
.../types/language_preset_output.py | 5 +
.../object_json_schema_property_input.py | 1 +
.../object_json_schema_property_output.py | 1 -
src/elevenlabs/types/object_override_input.py | 4 +-
.../types/object_override_output.py | 4 +-
.../types/project_extended_response.py | 8 +-
...ect_extended_response_model_assets_item.py | 92 ++++++++++++++
.../project_external_audio_response_model.py | 40 ++++++
...roject_snapshot_extended_response_model.py | 8 +-
.../types/project_snapshot_response.py | 4 +-
.../types/project_video_response_model.py | 48 ++++++++
...ct_video_thumbnail_sheet_response_model.py | 22 ++++
src/elevenlabs/types/prompt_agent.py | 2 +-
...prompt_agent_api_model_input_tools_item.py | 2 +-
...rompt_agent_api_model_output_tools_item.py | 18 +--
...odel_workflow_override_input_tools_item.py | 2 +-
...del_workflow_override_output_tools_item.py | 18 +--
src/elevenlabs/types/prompt_agent_db_model.py | 2 +-
.../types/remove_member_from_group_request.py | 2 +-
.../types/save_voice_preview_request.py | 2 +-
src/elevenlabs/types/soft_timeout_config.py | 32 +++++
.../types/soft_timeout_config_override.py | 23 ++++
.../soft_timeout_config_override_config.py | 23 ++++
.../soft_timeout_config_workflow_override.py | 28 +++++
.../types/speech_history_item_response.py | 4 +-
.../test_invocation_summary_response_model.py | 10 ++
.../types/text_to_speech_request.py | 2 +-
.../types/text_to_speech_stream_request.py | 2 +-
...o_speech_stream_with_timestamps_request.py | 2 +-
.../text_to_speech_with_timestamps_request.py | 2 +-
src/elevenlabs/types/tool.py | 12 +-
.../types/tool_request_model_tool_config.py | 1 +
.../types/tool_response_model_tool_config.py | 31 ++---
.../types/tts_conversational_model.py | 7 +-
src/elevenlabs/types/tts_model_family.py | 2 +-
src/elevenlabs/types/turn_config.py | 11 ++
src/elevenlabs/types/turn_config_override.py | 24 ++++
.../types/turn_config_override_config.py | 24 ++++
.../types/turn_config_workflow_override.py | 11 ++
.../types/unit_test_run_response_model.py | 1 +
.../update_audio_native_project_request.py | 2 +-
.../types/update_chapter_request.py | 2 +-
.../types/update_project_request.py | 2 +-
...date_pronunciation_dictionaries_request.py | 2 +-
.../webhook_tool_api_schema_config_input.py | 6 +
...ol_api_schema_config_input_content_type.py | 7 ++
.../webhook_tool_api_schema_config_output.py | 6 +
...l_api_schema_config_output_content_type.py | 7 ++
src/elevenlabs/types/widget_config.py | 6 +
.../types/widget_config_response.py | 6 +
.../types/widget_end_feedback_config.py | 24 ++++
.../types/widget_end_feedback_type.py | 5 +
...flow_tool_nested_tools_step_model_input.py | 8 +-
...low_tool_nested_tools_step_model_output.py | 8 +-
...ow_tool_response_model_input_steps_item.py | 9 +-
...w_tool_response_model_output_steps_item.py | 9 +-
.../types/workspace_resource_type.py | 4 +
src/elevenlabs/workspace/resources/client.py | 16 +--
.../workspace/resources/raw_client.py | 32 ++---
145 files changed, 1668 insertions(+), 369 deletions(-)
create mode 100644 .fern/metadata.json
create mode 100644 src/elevenlabs/types/api_integration_webhook_tool_config_external_input.py
create mode 100644 src/elevenlabs/types/api_integration_webhook_tool_config_external_output.py
create mode 100644 src/elevenlabs/types/conversation_feedback_type.py
create mode 100644 src/elevenlabs/types/discount_resposne_model.py
create mode 100644 src/elevenlabs/types/project_extended_response_model_assets_item.py
create mode 100644 src/elevenlabs/types/project_external_audio_response_model.py
create mode 100644 src/elevenlabs/types/project_video_response_model.py
create mode 100644 src/elevenlabs/types/project_video_thumbnail_sheet_response_model.py
create mode 100644 src/elevenlabs/types/soft_timeout_config.py
create mode 100644 src/elevenlabs/types/soft_timeout_config_override.py
create mode 100644 src/elevenlabs/types/soft_timeout_config_override_config.py
create mode 100644 src/elevenlabs/types/soft_timeout_config_workflow_override.py
create mode 100644 src/elevenlabs/types/turn_config_override.py
create mode 100644 src/elevenlabs/types/turn_config_override_config.py
create mode 100644 src/elevenlabs/types/webhook_tool_api_schema_config_input_content_type.py
create mode 100644 src/elevenlabs/types/webhook_tool_api_schema_config_output_content_type.py
create mode 100644 src/elevenlabs/types/widget_end_feedback_config.py
create mode 100644 src/elevenlabs/types/widget_end_feedback_type.py
diff --git a/.fern/metadata.json b/.fern/metadata.json
new file mode 100644
index 00000000..759a375f
--- /dev/null
+++ b/.fern/metadata.json
@@ -0,0 +1,58 @@
+{
+ "cliVersion": "0.107.3",
+ "generatorName": "fernapi/fern-python-sdk",
+ "generatorVersion": "4.36.1",
+ "generatorConfig": {
+ "timeout_in_seconds": 240,
+ "default_bytes_stream_chunk_size": 1024,
+ "inline_request_params": false,
+ "follow_redirects_by_default": true,
+ "recursion_limit": 5000,
+ "pydantic_config": {
+ "skip_validation": true
+ },
+ "client": {
+ "class_name": "BaseElevenLabs",
+ "filename": "base_client.py",
+ "exported_class_name": "ElevenLabs",
+ "exported_filename": "client.py"
+ },
+ "extra_dependencies": {
+ "requests": ">=2.20",
+ "websockets": ">=11.0",
+ "pyaudio": {
+ "version": ">=0.2.14",
+ "optional": true
+ }
+ },
+ "extra_dev_dependencies": {
+ "types-pyaudio": "^0.2.16.20240516"
+ },
+ "extras": {
+ "pyaudio": [
+ "pyaudio"
+ ]
+ },
+ "additional_init_exports": [
+ {
+ "from": "play",
+ "imports": [
+ "play",
+ "save",
+ "stream"
+ ]
+ },
+ {
+ "from": "realtime",
+ "imports": [
+ "RealtimeEvents",
+ "RealtimeAudioOptions",
+ "RealtimeUrlOptions",
+ "AudioFormat",
+ "CommitStrategy",
+ "RealtimeConnection"
+ ]
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/poetry.lock b/poetry.lock
index 24470df9..b7c94053 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -38,13 +38,13 @@ trio = ["trio (>=0.26.1)"]
[[package]]
name = "certifi"
-version = "2025.10.5"
+version = "2025.11.12"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.7"
files = [
- {file = "certifi-2025.10.5-py3-none-any.whl", hash = "sha256:0f212c2744a9bb6de0c56639a6f68afe01ecd92d91f14ae897c4fe7bbeeef0de"},
- {file = "certifi-2025.10.5.tar.gz", hash = "sha256:47c09d31ccf2acf0be3f701ea53595ee7e0b8fa08801c6624be771df09ae7b43"},
+ {file = "certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b"},
+ {file = "certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316"},
]
[[package]]
diff --git a/pyproject.toml b/pyproject.toml
index 7258ab60..43981146 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "elevenlabs"
[tool.poetry]
name = "elevenlabs"
-version = "v2.22.1"
+version = "0.0.0"
description = ""
readme = "README.md"
authors = []
@@ -31,7 +31,7 @@ packages = [
{ include = "elevenlabs", from = "src"}
]
-[project.urls]
+[tool.poetry.urls]
Repository = 'https://github.com/elevenlabs/elevenlabs-python'
[tool.poetry.dependencies]
diff --git a/reference.md b/reference.md
index d2a3ee9d..0f1714cd 100644
--- a/reference.md
+++ b/reference.md
@@ -7009,6 +7009,8 @@ client.conversational_ai.conversations.list(
call_successful="success",
call_start_before_unix=1,
call_start_after_unix=1,
+ call_duration_min_secs=1,
+ call_duration_max_secs=1,
user_id="user_id",
page_size=1,
summary_mode="exclude",
@@ -7069,6 +7071,22 @@ client.conversational_ai.conversations.list(
-
+**call_duration_min_secs:** `typing.Optional[int]` — Minimum call duration in seconds.
+
+
+
+
+
+-
+
+**call_duration_max_secs:** `typing.Optional[int]` — Maximum call duration in seconds.
+
+
+
+
+
+-
+
**user_id:** `typing.Optional[str]` — Filter conversations by the user ID who initiated them.
@@ -7077,6 +7095,30 @@ client.conversational_ai.conversations.list(
-
+**evaluation_params:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — Evaluation filters. Repeat param. Format: criteria_id:result. Example: eval=value_framing:success
+
+
+
+
+
+-
+
+**data_collection_params:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — Data collection filters. Repeat param. Format: id:op:value where op is one of eq|neq|gt|gte|lt|lte|in|exists|missing. For in, pipe-delimit values.
+
+
+
+
+
+-
+
+**tool_names:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — Filter conversations by tool names used during the call.
+
+
+
+
+
+-
+
**page_size:** `typing.Optional[int]` — How many conversations to return at maximum. Can not exceed 100, defaults to 30.
@@ -8185,6 +8227,14 @@ client.conversational_ai.agents.run_tests(
-
+**branch_id:** `typing.Optional[str]` — ID of the branch to run the tests on. If not provided, the tests will be run on the agent default configuration.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -9522,8 +9572,12 @@ Add a new tool to the available tools in the workspace.
```python
from elevenlabs import (
ElevenLabs,
+ LiteralJsonSchemaProperty,
+ ObjectJsonSchemaPropertyInput,
+ QueryParamsJsonSchema,
ToolRequestModel,
ToolRequestModelToolConfig_ApiIntegrationWebhook,
+ WebhookToolApiSchemaConfigInput,
)
client = ElevenLabs(
@@ -9536,6 +9590,28 @@ client.conversational_ai.tools.create(
description="description",
api_integration_id="api_integration_id",
api_integration_connection_id="api_integration_connection_id",
+ base_api_schema=WebhookToolApiSchemaConfigInput(
+ url="https://example.com/agents/{agent_id}",
+ method="GET",
+ path_params_schema={
+ "agent_id": LiteralJsonSchemaProperty(
+ type="string",
+ )
+ },
+ query_params_schema=QueryParamsJsonSchema(
+ properties={
+ "key": LiteralJsonSchemaProperty(
+ type="string",
+ description="My property",
+ is_system_provided=False,
+ dynamic_variable="",
+ constant_value="",
+ )
+ },
+ ),
+ request_body_schema=ObjectJsonSchemaPropertyInput(),
+ request_headers={"Authorization": "Bearer {api_key}"},
+ ),
),
),
)
@@ -9743,8 +9819,12 @@ Update tool that is available in the workspace.
```python
from elevenlabs import (
ElevenLabs,
+ LiteralJsonSchemaProperty,
+ ObjectJsonSchemaPropertyInput,
+ QueryParamsJsonSchema,
ToolRequestModel,
ToolRequestModelToolConfig_ApiIntegrationWebhook,
+ WebhookToolApiSchemaConfigInput,
)
client = ElevenLabs(
@@ -9758,6 +9838,28 @@ client.conversational_ai.tools.update(
description="description",
api_integration_id="api_integration_id",
api_integration_connection_id="api_integration_connection_id",
+ base_api_schema=WebhookToolApiSchemaConfigInput(
+ url="https://example.com/agents/{agent_id}",
+ method="GET",
+ path_params_schema={
+ "agent_id": LiteralJsonSchemaProperty(
+ type="string",
+ )
+ },
+ query_params_schema=QueryParamsJsonSchema(
+ properties={
+ "key": LiteralJsonSchemaProperty(
+ type="string",
+ description="My property",
+ is_system_provided=False,
+ dynamic_variable="",
+ constant_value="",
+ )
+ },
+ ),
+ request_body_schema=ObjectJsonSchemaPropertyInput(),
+ request_headers={"Authorization": "Bearer {api_key}"},
+ ),
),
),
)
@@ -11714,7 +11816,7 @@ client.conversational_ai.conversations.feedback.create(
-
-**feedback:** `UserFeedbackScore` — Either 'like' or 'dislike' to indicate the feedback for the conversation.
+**feedback:** `typing.Optional[UserFeedbackScore]` — Either 'like' or 'dislike' to indicate the feedback for the conversation.
@@ -12881,7 +12983,7 @@ client.conversational_ai.mcp_servers.tool_approvals.create(
-
-**input_schema:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The input schema of the MCP tool (the schema defined on the MCP server before ElevenLabs does any extra processing)
+**input_schema:** `typing.Optional[typing.Dict[str, typing.Any]]` — The input schema of the MCP tool (the schema defined on the MCP server before ElevenLabs does any extra processing)
@@ -13644,6 +13746,14 @@ client.conversational_ai.tests.invocations.resubmit(
-
+**branch_id:** `typing.Optional[str]` — ID of the branch to run the tests on. If not provided, the tests will be run on the agent default configuration.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
diff --git a/src/elevenlabs/__init__.py b/src/elevenlabs/__init__.py
index cc9b5331..0f5b2b37 100644
--- a/src/elevenlabs/__init__.py
+++ b/src/elevenlabs/__init__.py
@@ -68,6 +68,8 @@
ApiIntegrationWebhookOverridesInputRequestHeadersValue,
ApiIntegrationWebhookOverridesOutput,
ApiIntegrationWebhookOverridesOutputRequestHeadersValue,
+ ApiIntegrationWebhookToolConfigExternalInput,
+ ApiIntegrationWebhookToolConfigExternalOutput,
ApiIntegrationWebhookToolConfigInput,
ApiIntegrationWebhookToolConfigOutput,
ArrayJsonSchemaPropertyInput,
@@ -595,6 +597,7 @@
ConversationConfigOverrideConfig,
ConversationConfigWorkflowOverride,
ConversationDeletionSettings,
+ ConversationFeedbackType,
ConversationHistoryAnalysisCommonModel,
ConversationHistoryBatchCallModel,
ConversationHistoryElevenAssistantCommonModel,
@@ -712,6 +715,7 @@
DetailedMusicResponse,
DialogueInput,
DialogueInputResponseModel,
+ DiscountResposneModel,
DoDubbingResponse,
DocumentUsageModeEnum,
DocxExportOptions,
@@ -971,10 +975,14 @@
ProjectExtendedResponseModelAccessLevel,
ProjectExtendedResponseModelApplyTextNormalization,
ProjectExtendedResponseModelAspectRatio,
+ ProjectExtendedResponseModelAssetsItem,
+ ProjectExtendedResponseModelAssetsItem_Audio,
+ ProjectExtendedResponseModelAssetsItem_Video,
ProjectExtendedResponseModelFiction,
ProjectExtendedResponseModelQualityPreset,
ProjectExtendedResponseModelSourceType,
ProjectExtendedResponseModelTargetAudience,
+ ProjectExternalAudioResponseModel,
ProjectResponse,
ProjectResponseModelAccessLevel,
ProjectResponseModelAspectRatio,
@@ -985,6 +993,8 @@
ProjectSnapshotResponse,
ProjectSnapshotsResponse,
ProjectState,
+ ProjectVideoResponseModel,
+ ProjectVideoThumbnailSheetResponseModel,
PromptAgent,
PromptAgentApiModelInput,
PromptAgentApiModelInputBackupLlmConfig,
@@ -1100,6 +1110,10 @@
SipUriTransferDestination,
SkipTurnToolConfig,
SkipTurnToolResponseModel,
+ SoftTimeoutConfig,
+ SoftTimeoutConfigOverride,
+ SoftTimeoutConfigOverrideConfig,
+ SoftTimeoutConfigWorkflowOverride,
SongMetadata,
SongSection,
SortDirection,
@@ -1184,7 +1198,6 @@
ToolResponseModelToolConfig,
ToolResponseModelToolConfig_ApiIntegrationWebhook,
ToolResponseModelToolConfig_Client,
- ToolResponseModelToolConfig_Mcp,
ToolResponseModelToolConfig_System,
ToolResponseModelToolConfig_Webhook,
ToolType,
@@ -1210,6 +1223,8 @@
TtsOptimizeStreamingLatency,
TtsOutputFormat,
TurnConfig,
+ TurnConfigOverride,
+ TurnConfigOverrideConfig,
TurnConfigWorkflowOverride,
TurnEagerness,
TurnMode,
@@ -1271,9 +1286,11 @@
WebhookAuthMethodType,
WebhookEventType,
WebhookToolApiSchemaConfigInput,
+ WebhookToolApiSchemaConfigInputContentType,
WebhookToolApiSchemaConfigInputMethod,
WebhookToolApiSchemaConfigInputRequestHeadersValue,
WebhookToolApiSchemaConfigOutput,
+ WebhookToolApiSchemaConfigOutputContentType,
WebhookToolApiSchemaConfigOutputMethod,
WebhookToolApiSchemaConfigOutputRequestHeadersValue,
WebhookToolConfigInput,
@@ -1296,6 +1313,8 @@
WidgetConfigResponseModelAvatar_Image,
WidgetConfigResponseModelAvatar_Orb,
WidgetConfigResponseModelAvatar_Url,
+ WidgetEndFeedbackConfig,
+ WidgetEndFeedbackType,
WidgetExpandable,
WidgetFeedbackMode,
WidgetLanguagePreset,
@@ -1601,6 +1620,8 @@
"ApiIntegrationWebhookOverridesInputRequestHeadersValue": ".types",
"ApiIntegrationWebhookOverridesOutput": ".types",
"ApiIntegrationWebhookOverridesOutputRequestHeadersValue": ".types",
+ "ApiIntegrationWebhookToolConfigExternalInput": ".types",
+ "ApiIntegrationWebhookToolConfigExternalOutput": ".types",
"ApiIntegrationWebhookToolConfigInput": ".types",
"ApiIntegrationWebhookToolConfigOutput": ".types",
"ArrayJsonSchemaPropertyInput": ".types",
@@ -2158,6 +2179,7 @@
"ConversationConfigOverrideConfig": ".types",
"ConversationConfigWorkflowOverride": ".types",
"ConversationDeletionSettings": ".types",
+ "ConversationFeedbackType": ".types",
"ConversationHistoryAnalysisCommonModel": ".types",
"ConversationHistoryBatchCallModel": ".types",
"ConversationHistoryElevenAssistantCommonModel": ".types",
@@ -2275,6 +2297,7 @@
"DetailedMusicResponse": ".types",
"DialogueInput": ".types",
"DialogueInputResponseModel": ".types",
+ "DiscountResposneModel": ".types",
"DoDubbingResponse": ".types",
"DocumentUsageModeEnum": ".types",
"DocxExportOptions": ".types",
@@ -2549,10 +2572,14 @@
"ProjectExtendedResponseModelAccessLevel": ".types",
"ProjectExtendedResponseModelApplyTextNormalization": ".types",
"ProjectExtendedResponseModelAspectRatio": ".types",
+ "ProjectExtendedResponseModelAssetsItem": ".types",
+ "ProjectExtendedResponseModelAssetsItem_Audio": ".types",
+ "ProjectExtendedResponseModelAssetsItem_Video": ".types",
"ProjectExtendedResponseModelFiction": ".types",
"ProjectExtendedResponseModelQualityPreset": ".types",
"ProjectExtendedResponseModelSourceType": ".types",
"ProjectExtendedResponseModelTargetAudience": ".types",
+ "ProjectExternalAudioResponseModel": ".types",
"ProjectResponse": ".types",
"ProjectResponseModelAccessLevel": ".types",
"ProjectResponseModelAspectRatio": ".types",
@@ -2563,6 +2590,8 @@
"ProjectSnapshotResponse": ".types",
"ProjectSnapshotsResponse": ".types",
"ProjectState": ".types",
+ "ProjectVideoResponseModel": ".types",
+ "ProjectVideoThumbnailSheetResponseModel": ".types",
"PromptAgent": ".types",
"PromptAgentApiModelInput": ".types",
"PromptAgentApiModelInputBackupLlmConfig": ".types",
@@ -2688,6 +2717,10 @@
"SipUriTransferDestination": ".types",
"SkipTurnToolConfig": ".types",
"SkipTurnToolResponseModel": ".types",
+ "SoftTimeoutConfig": ".types",
+ "SoftTimeoutConfigOverride": ".types",
+ "SoftTimeoutConfigOverrideConfig": ".types",
+ "SoftTimeoutConfigWorkflowOverride": ".types",
"SongMetadata": ".types",
"SongSection": ".types",
"SortDirection": ".types",
@@ -2793,7 +2826,6 @@
"ToolResponseModelToolConfig": ".types",
"ToolResponseModelToolConfig_ApiIntegrationWebhook": ".types",
"ToolResponseModelToolConfig_Client": ".types",
- "ToolResponseModelToolConfig_Mcp": ".types",
"ToolResponseModelToolConfig_System": ".types",
"ToolResponseModelToolConfig_Webhook": ".types",
"ToolType": ".types",
@@ -2819,6 +2851,8 @@
"TtsOptimizeStreamingLatency": ".types",
"TtsOutputFormat": ".types",
"TurnConfig": ".types",
+ "TurnConfigOverride": ".types",
+ "TurnConfigOverrideConfig": ".types",
"TurnConfigWorkflowOverride": ".types",
"TurnEagerness": ".types",
"TurnMode": ".types",
@@ -2884,9 +2918,11 @@
"WebhookAuthMethodType": ".types",
"WebhookEventType": ".types",
"WebhookToolApiSchemaConfigInput": ".types",
+ "WebhookToolApiSchemaConfigInputContentType": ".types",
"WebhookToolApiSchemaConfigInputMethod": ".types",
"WebhookToolApiSchemaConfigInputRequestHeadersValue": ".types",
"WebhookToolApiSchemaConfigOutput": ".types",
+ "WebhookToolApiSchemaConfigOutputContentType": ".types",
"WebhookToolApiSchemaConfigOutputMethod": ".types",
"WebhookToolApiSchemaConfigOutputRequestHeadersValue": ".types",
"WebhookToolConfigInput": ".types",
@@ -2909,6 +2945,8 @@
"WidgetConfigResponseModelAvatar_Image": ".types",
"WidgetConfigResponseModelAvatar_Orb": ".types",
"WidgetConfigResponseModelAvatar_Url": ".types",
+ "WidgetEndFeedbackConfig": ".types",
+ "WidgetEndFeedbackType": ".types",
"WidgetExpandable": ".types",
"WidgetFeedbackMode": ".types",
"WidgetLanguagePreset": ".types",
@@ -3140,6 +3178,8 @@ def __dir__():
"ApiIntegrationWebhookOverridesInputRequestHeadersValue",
"ApiIntegrationWebhookOverridesOutput",
"ApiIntegrationWebhookOverridesOutputRequestHeadersValue",
+ "ApiIntegrationWebhookToolConfigExternalInput",
+ "ApiIntegrationWebhookToolConfigExternalOutput",
"ApiIntegrationWebhookToolConfigInput",
"ApiIntegrationWebhookToolConfigOutput",
"ArrayJsonSchemaPropertyInput",
@@ -3697,6 +3737,7 @@ def __dir__():
"ConversationConfigOverrideConfig",
"ConversationConfigWorkflowOverride",
"ConversationDeletionSettings",
+ "ConversationFeedbackType",
"ConversationHistoryAnalysisCommonModel",
"ConversationHistoryBatchCallModel",
"ConversationHistoryElevenAssistantCommonModel",
@@ -3814,6 +3855,7 @@ def __dir__():
"DetailedMusicResponse",
"DialogueInput",
"DialogueInputResponseModel",
+ "DiscountResposneModel",
"DoDubbingResponse",
"DocumentUsageModeEnum",
"DocxExportOptions",
@@ -4088,10 +4130,14 @@ def __dir__():
"ProjectExtendedResponseModelAccessLevel",
"ProjectExtendedResponseModelApplyTextNormalization",
"ProjectExtendedResponseModelAspectRatio",
+ "ProjectExtendedResponseModelAssetsItem",
+ "ProjectExtendedResponseModelAssetsItem_Audio",
+ "ProjectExtendedResponseModelAssetsItem_Video",
"ProjectExtendedResponseModelFiction",
"ProjectExtendedResponseModelQualityPreset",
"ProjectExtendedResponseModelSourceType",
"ProjectExtendedResponseModelTargetAudience",
+ "ProjectExternalAudioResponseModel",
"ProjectResponse",
"ProjectResponseModelAccessLevel",
"ProjectResponseModelAspectRatio",
@@ -4102,6 +4148,8 @@ def __dir__():
"ProjectSnapshotResponse",
"ProjectSnapshotsResponse",
"ProjectState",
+ "ProjectVideoResponseModel",
+ "ProjectVideoThumbnailSheetResponseModel",
"PromptAgent",
"PromptAgentApiModelInput",
"PromptAgentApiModelInputBackupLlmConfig",
@@ -4227,6 +4275,10 @@ def __dir__():
"SipUriTransferDestination",
"SkipTurnToolConfig",
"SkipTurnToolResponseModel",
+ "SoftTimeoutConfig",
+ "SoftTimeoutConfigOverride",
+ "SoftTimeoutConfigOverrideConfig",
+ "SoftTimeoutConfigWorkflowOverride",
"SongMetadata",
"SongSection",
"SortDirection",
@@ -4332,7 +4384,6 @@ def __dir__():
"ToolResponseModelToolConfig",
"ToolResponseModelToolConfig_ApiIntegrationWebhook",
"ToolResponseModelToolConfig_Client",
- "ToolResponseModelToolConfig_Mcp",
"ToolResponseModelToolConfig_System",
"ToolResponseModelToolConfig_Webhook",
"ToolType",
@@ -4358,6 +4409,8 @@ def __dir__():
"TtsOptimizeStreamingLatency",
"TtsOutputFormat",
"TurnConfig",
+ "TurnConfigOverride",
+ "TurnConfigOverrideConfig",
"TurnConfigWorkflowOverride",
"TurnEagerness",
"TurnMode",
@@ -4423,9 +4476,11 @@ def __dir__():
"WebhookAuthMethodType",
"WebhookEventType",
"WebhookToolApiSchemaConfigInput",
+ "WebhookToolApiSchemaConfigInputContentType",
"WebhookToolApiSchemaConfigInputMethod",
"WebhookToolApiSchemaConfigInputRequestHeadersValue",
"WebhookToolApiSchemaConfigOutput",
+ "WebhookToolApiSchemaConfigOutputContentType",
"WebhookToolApiSchemaConfigOutputMethod",
"WebhookToolApiSchemaConfigOutputRequestHeadersValue",
"WebhookToolConfigInput",
@@ -4448,6 +4503,8 @@ def __dir__():
"WidgetConfigResponseModelAvatar_Image",
"WidgetConfigResponseModelAvatar_Orb",
"WidgetConfigResponseModelAvatar_Url",
+ "WidgetEndFeedbackConfig",
+ "WidgetEndFeedbackType",
"WidgetExpandable",
"WidgetFeedbackMode",
"WidgetLanguagePreset",
diff --git a/src/elevenlabs/conversational_ai/agents/client.py b/src/elevenlabs/conversational_ai/agents/client.py
index 4d00fd01..71ea6a73 100644
--- a/src/elevenlabs/conversational_ai/agents/client.py
+++ b/src/elevenlabs/conversational_ai/agents/client.py
@@ -479,6 +479,7 @@ def run_tests(
*,
tests: typing.Sequence[SingleTestRunRequestModel],
agent_config_override: typing.Optional[AdhocAgentConfigOverrideForTestRequestModel] = OMIT,
+ branch_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> GetTestSuiteInvocationResponseModel:
"""
@@ -495,6 +496,9 @@ def run_tests(
agent_config_override : typing.Optional[AdhocAgentConfigOverrideForTestRequestModel]
Configuration overrides to use for testing. If not provided, the agent's default configuration will be used.
+ branch_id : typing.Optional[str]
+ ID of the branch to run the tests on. If not provided, the tests will be run on the agent default configuration.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -520,7 +524,11 @@ def run_tests(
)
"""
_response = self._raw_client.run_tests(
- agent_id, tests=tests, agent_config_override=agent_config_override, request_options=request_options
+ agent_id,
+ tests=tests,
+ agent_config_override=agent_config_override,
+ branch_id=branch_id,
+ request_options=request_options,
)
return _response.data
@@ -1071,6 +1079,7 @@ async def run_tests(
*,
tests: typing.Sequence[SingleTestRunRequestModel],
agent_config_override: typing.Optional[AdhocAgentConfigOverrideForTestRequestModel] = OMIT,
+ branch_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> GetTestSuiteInvocationResponseModel:
"""
@@ -1087,6 +1096,9 @@ async def run_tests(
agent_config_override : typing.Optional[AdhocAgentConfigOverrideForTestRequestModel]
Configuration overrides to use for testing. If not provided, the agent's default configuration will be used.
+ branch_id : typing.Optional[str]
+ ID of the branch to run the tests on. If not provided, the tests will be run on the agent default configuration.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1120,7 +1132,11 @@ async def main() -> None:
asyncio.run(main())
"""
_response = await self._raw_client.run_tests(
- agent_id, tests=tests, agent_config_override=agent_config_override, request_options=request_options
+ agent_id,
+ tests=tests,
+ agent_config_override=agent_config_override,
+ branch_id=branch_id,
+ request_options=request_options,
)
return _response.data
diff --git a/src/elevenlabs/conversational_ai/agents/raw_client.py b/src/elevenlabs/conversational_ai/agents/raw_client.py
index 0ce5e718..cb3f36b9 100644
--- a/src/elevenlabs/conversational_ai/agents/raw_client.py
+++ b/src/elevenlabs/conversational_ai/agents/raw_client.py
@@ -604,6 +604,7 @@ def run_tests(
*,
tests: typing.Sequence[SingleTestRunRequestModel],
agent_config_override: typing.Optional[AdhocAgentConfigOverrideForTestRequestModel] = OMIT,
+ branch_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[GetTestSuiteInvocationResponseModel]:
"""
@@ -620,6 +621,9 @@ def run_tests(
agent_config_override : typing.Optional[AdhocAgentConfigOverrideForTestRequestModel]
Configuration overrides to use for testing. If not provided, the agent's default configuration will be used.
+ branch_id : typing.Optional[str]
+ ID of the branch to run the tests on. If not provided, the tests will be run on the agent default configuration.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -640,6 +644,7 @@ def run_tests(
annotation=AdhocAgentConfigOverrideForTestRequestModel,
direction="write",
),
+ "branch_id": branch_id,
},
headers={
"content-type": "application/json",
@@ -1249,6 +1254,7 @@ async def run_tests(
*,
tests: typing.Sequence[SingleTestRunRequestModel],
agent_config_override: typing.Optional[AdhocAgentConfigOverrideForTestRequestModel] = OMIT,
+ branch_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[GetTestSuiteInvocationResponseModel]:
"""
@@ -1265,6 +1271,9 @@ async def run_tests(
agent_config_override : typing.Optional[AdhocAgentConfigOverrideForTestRequestModel]
Configuration overrides to use for testing. If not provided, the agent's default configuration will be used.
+ branch_id : typing.Optional[str]
+ ID of the branch to run the tests on. If not provided, the tests will be run on the agent default configuration.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1285,6 +1294,7 @@ async def run_tests(
annotation=AdhocAgentConfigOverrideForTestRequestModel,
direction="write",
),
+ "branch_id": branch_id,
},
headers={
"content-type": "application/json",
diff --git a/src/elevenlabs/conversational_ai/conversations/client.py b/src/elevenlabs/conversational_ai/conversations/client.py
index 5424d3a4..44b459ba 100644
--- a/src/elevenlabs/conversational_ai/conversations/client.py
+++ b/src/elevenlabs/conversational_ai/conversations/client.py
@@ -131,7 +131,12 @@ def list(
call_successful: typing.Optional[EvaluationSuccessResult] = None,
call_start_before_unix: typing.Optional[int] = None,
call_start_after_unix: typing.Optional[int] = None,
+ call_duration_min_secs: typing.Optional[int] = None,
+ call_duration_max_secs: typing.Optional[int] = None,
user_id: typing.Optional[str] = None,
+ evaluation_params: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+ data_collection_params: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+ tool_names: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
page_size: typing.Optional[int] = None,
summary_mode: typing.Optional[ConversationsListRequestSummaryMode] = None,
search: typing.Optional[str] = None,
@@ -157,9 +162,24 @@ def list(
call_start_after_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations after to this start date.
+ call_duration_min_secs : typing.Optional[int]
+ Minimum call duration in seconds.
+
+ call_duration_max_secs : typing.Optional[int]
+ Maximum call duration in seconds.
+
user_id : typing.Optional[str]
Filter conversations by the user ID who initiated them.
+ evaluation_params : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+ Evaluation filters. Repeat param. Format: criteria_id:result. Example: eval=value_framing:success
+
+ data_collection_params : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+ Data collection filters. Repeat param. Format: id:op:value where op is one of eq|neq|gt|gte|lt|lte|in|exists|missing. For in, pipe-delimit values.
+
+ tool_names : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+ Filter conversations by tool names used during the call.
+
page_size : typing.Optional[int]
How many conversations to return at maximum. Can not exceed 100, defaults to 30.
@@ -190,6 +210,8 @@ def list(
call_successful="success",
call_start_before_unix=1,
call_start_after_unix=1,
+ call_duration_min_secs=1,
+ call_duration_max_secs=1,
user_id="user_id",
page_size=1,
summary_mode="exclude",
@@ -202,7 +224,12 @@ def list(
call_successful=call_successful,
call_start_before_unix=call_start_before_unix,
call_start_after_unix=call_start_after_unix,
+ call_duration_min_secs=call_duration_min_secs,
+ call_duration_max_secs=call_duration_max_secs,
user_id=user_id,
+ evaluation_params=evaluation_params,
+ data_collection_params=data_collection_params,
+ tool_names=tool_names,
page_size=page_size,
summary_mode=summary_mode,
search=search,
@@ -243,9 +270,7 @@ def get(
_response = self._raw_client.get(conversation_id, request_options=request_options)
return _response.data
- def delete(
- self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.Optional[typing.Any]:
+ def delete(self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Delete a particular conversation
@@ -259,7 +284,7 @@ def delete(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
@@ -421,7 +446,12 @@ async def list(
call_successful: typing.Optional[EvaluationSuccessResult] = None,
call_start_before_unix: typing.Optional[int] = None,
call_start_after_unix: typing.Optional[int] = None,
+ call_duration_min_secs: typing.Optional[int] = None,
+ call_duration_max_secs: typing.Optional[int] = None,
user_id: typing.Optional[str] = None,
+ evaluation_params: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+ data_collection_params: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+ tool_names: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
page_size: typing.Optional[int] = None,
summary_mode: typing.Optional[ConversationsListRequestSummaryMode] = None,
search: typing.Optional[str] = None,
@@ -447,9 +477,24 @@ async def list(
call_start_after_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations after to this start date.
+ call_duration_min_secs : typing.Optional[int]
+ Minimum call duration in seconds.
+
+ call_duration_max_secs : typing.Optional[int]
+ Maximum call duration in seconds.
+
user_id : typing.Optional[str]
Filter conversations by the user ID who initiated them.
+ evaluation_params : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+ Evaluation filters. Repeat param. Format: criteria_id:result. Example: eval=value_framing:success
+
+ data_collection_params : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+ Data collection filters. Repeat param. Format: id:op:value where op is one of eq|neq|gt|gte|lt|lte|in|exists|missing. For in, pipe-delimit values.
+
+ tool_names : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+ Filter conversations by tool names used during the call.
+
page_size : typing.Optional[int]
How many conversations to return at maximum. Can not exceed 100, defaults to 30.
@@ -485,6 +530,8 @@ async def main() -> None:
call_successful="success",
call_start_before_unix=1,
call_start_after_unix=1,
+ call_duration_min_secs=1,
+ call_duration_max_secs=1,
user_id="user_id",
page_size=1,
summary_mode="exclude",
@@ -500,7 +547,12 @@ async def main() -> None:
call_successful=call_successful,
call_start_before_unix=call_start_before_unix,
call_start_after_unix=call_start_after_unix,
+ call_duration_min_secs=call_duration_min_secs,
+ call_duration_max_secs=call_duration_max_secs,
user_id=user_id,
+ evaluation_params=evaluation_params,
+ data_collection_params=data_collection_params,
+ tool_names=tool_names,
page_size=page_size,
summary_mode=summary_mode,
search=search,
@@ -551,7 +603,7 @@ async def main() -> None:
async def delete(
self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Delete a particular conversation
@@ -565,7 +617,7 @@ async def delete(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
diff --git a/src/elevenlabs/conversational_ai/conversations/feedback/client.py b/src/elevenlabs/conversational_ai/conversations/feedback/client.py
index 51762bb7..63648960 100644
--- a/src/elevenlabs/conversational_ai/conversations/feedback/client.py
+++ b/src/elevenlabs/conversational_ai/conversations/feedback/client.py
@@ -30,9 +30,9 @@ def create(
self,
conversation_id: str,
*,
- feedback: UserFeedbackScore,
+ feedback: typing.Optional[UserFeedbackScore] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Send the feedback for the given conversation
@@ -41,7 +41,7 @@ def create(
conversation_id : str
The id of the conversation you're taking the action on.
- feedback : UserFeedbackScore
+ feedback : typing.Optional[UserFeedbackScore]
Either 'like' or 'dislike' to indicate the feedback for the conversation.
request_options : typing.Optional[RequestOptions]
@@ -49,7 +49,7 @@ def create(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
@@ -87,9 +87,9 @@ async def create(
self,
conversation_id: str,
*,
- feedback: UserFeedbackScore,
+ feedback: typing.Optional[UserFeedbackScore] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Send the feedback for the given conversation
@@ -98,7 +98,7 @@ async def create(
conversation_id : str
The id of the conversation you're taking the action on.
- feedback : UserFeedbackScore
+ feedback : typing.Optional[UserFeedbackScore]
Either 'like' or 'dislike' to indicate the feedback for the conversation.
request_options : typing.Optional[RequestOptions]
@@ -106,7 +106,7 @@ async def create(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
diff --git a/src/elevenlabs/conversational_ai/conversations/feedback/raw_client.py b/src/elevenlabs/conversational_ai/conversations/feedback/raw_client.py
index daf0acee..0f8d1e8d 100644
--- a/src/elevenlabs/conversational_ai/conversations/feedback/raw_client.py
+++ b/src/elevenlabs/conversational_ai/conversations/feedback/raw_client.py
@@ -25,9 +25,9 @@ def create(
self,
conversation_id: str,
*,
- feedback: UserFeedbackScore,
+ feedback: typing.Optional[UserFeedbackScore] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[typing.Optional[typing.Any]]:
+ ) -> HttpResponse[typing.Any]:
"""
Send the feedback for the given conversation
@@ -36,7 +36,7 @@ def create(
conversation_id : str
The id of the conversation you're taking the action on.
- feedback : UserFeedbackScore
+ feedback : typing.Optional[UserFeedbackScore]
Either 'like' or 'dislike' to indicate the feedback for the conversation.
request_options : typing.Optional[RequestOptions]
@@ -44,7 +44,7 @@ def create(
Returns
-------
- HttpResponse[typing.Optional[typing.Any]]
+ HttpResponse[typing.Any]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -64,9 +64,9 @@ def create(
return HttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
@@ -96,9 +96,9 @@ async def create(
self,
conversation_id: str,
*,
- feedback: UserFeedbackScore,
+ feedback: typing.Optional[UserFeedbackScore] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
+ ) -> AsyncHttpResponse[typing.Any]:
"""
Send the feedback for the given conversation
@@ -107,7 +107,7 @@ async def create(
conversation_id : str
The id of the conversation you're taking the action on.
- feedback : UserFeedbackScore
+ feedback : typing.Optional[UserFeedbackScore]
Either 'like' or 'dislike' to indicate the feedback for the conversation.
request_options : typing.Optional[RequestOptions]
@@ -115,7 +115,7 @@ async def create(
Returns
-------
- AsyncHttpResponse[typing.Optional[typing.Any]]
+ AsyncHttpResponse[typing.Any]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -135,9 +135,9 @@ async def create(
return AsyncHttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
diff --git a/src/elevenlabs/conversational_ai/conversations/raw_client.py b/src/elevenlabs/conversational_ai/conversations/raw_client.py
index 7b23b8cc..fb9f0fde 100644
--- a/src/elevenlabs/conversational_ai/conversations/raw_client.py
+++ b/src/elevenlabs/conversational_ai/conversations/raw_client.py
@@ -153,7 +153,12 @@ def list(
call_successful: typing.Optional[EvaluationSuccessResult] = None,
call_start_before_unix: typing.Optional[int] = None,
call_start_after_unix: typing.Optional[int] = None,
+ call_duration_min_secs: typing.Optional[int] = None,
+ call_duration_max_secs: typing.Optional[int] = None,
user_id: typing.Optional[str] = None,
+ evaluation_params: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+ data_collection_params: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+ tool_names: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
page_size: typing.Optional[int] = None,
summary_mode: typing.Optional[ConversationsListRequestSummaryMode] = None,
search: typing.Optional[str] = None,
@@ -179,9 +184,24 @@ def list(
call_start_after_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations after to this start date.
+ call_duration_min_secs : typing.Optional[int]
+ Minimum call duration in seconds.
+
+ call_duration_max_secs : typing.Optional[int]
+ Maximum call duration in seconds.
+
user_id : typing.Optional[str]
Filter conversations by the user ID who initiated them.
+ evaluation_params : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+ Evaluation filters. Repeat param. Format: criteria_id:result. Example: eval=value_framing:success
+
+ data_collection_params : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+ Data collection filters. Repeat param. Format: id:op:value where op is one of eq|neq|gt|gte|lt|lte|in|exists|missing. For in, pipe-delimit values.
+
+ tool_names : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+ Filter conversations by tool names used during the call.
+
page_size : typing.Optional[int]
How many conversations to return at maximum. Can not exceed 100, defaults to 30.
@@ -208,7 +228,12 @@ def list(
"call_successful": call_successful,
"call_start_before_unix": call_start_before_unix,
"call_start_after_unix": call_start_after_unix,
+ "call_duration_min_secs": call_duration_min_secs,
+ "call_duration_max_secs": call_duration_max_secs,
"user_id": user_id,
+ "evaluation_params": evaluation_params,
+ "data_collection_params": data_collection_params,
+ "tool_names": tool_names,
"page_size": page_size,
"summary_mode": summary_mode,
"search": search,
@@ -293,7 +318,7 @@ def get(
def delete(
self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> HttpResponse[typing.Optional[typing.Any]]:
+ ) -> HttpResponse[typing.Any]:
"""
Delete a particular conversation
@@ -307,7 +332,7 @@ def delete(
Returns
-------
- HttpResponse[typing.Optional[typing.Any]]
+ HttpResponse[typing.Any]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -320,9 +345,9 @@ def delete(
return HttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
@@ -478,7 +503,12 @@ async def list(
call_successful: typing.Optional[EvaluationSuccessResult] = None,
call_start_before_unix: typing.Optional[int] = None,
call_start_after_unix: typing.Optional[int] = None,
+ call_duration_min_secs: typing.Optional[int] = None,
+ call_duration_max_secs: typing.Optional[int] = None,
user_id: typing.Optional[str] = None,
+ evaluation_params: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+ data_collection_params: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+ tool_names: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
page_size: typing.Optional[int] = None,
summary_mode: typing.Optional[ConversationsListRequestSummaryMode] = None,
search: typing.Optional[str] = None,
@@ -504,9 +534,24 @@ async def list(
call_start_after_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations after to this start date.
+ call_duration_min_secs : typing.Optional[int]
+ Minimum call duration in seconds.
+
+ call_duration_max_secs : typing.Optional[int]
+ Maximum call duration in seconds.
+
user_id : typing.Optional[str]
Filter conversations by the user ID who initiated them.
+ evaluation_params : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+ Evaluation filters. Repeat param. Format: criteria_id:result. Example: eval=value_framing:success
+
+ data_collection_params : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+ Data collection filters. Repeat param. Format: id:op:value where op is one of eq|neq|gt|gte|lt|lte|in|exists|missing. For in, pipe-delimit values.
+
+ tool_names : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+ Filter conversations by tool names used during the call.
+
page_size : typing.Optional[int]
How many conversations to return at maximum. Can not exceed 100, defaults to 30.
@@ -533,7 +578,12 @@ async def list(
"call_successful": call_successful,
"call_start_before_unix": call_start_before_unix,
"call_start_after_unix": call_start_after_unix,
+ "call_duration_min_secs": call_duration_min_secs,
+ "call_duration_max_secs": call_duration_max_secs,
"user_id": user_id,
+ "evaluation_params": evaluation_params,
+ "data_collection_params": data_collection_params,
+ "tool_names": tool_names,
"page_size": page_size,
"summary_mode": summary_mode,
"search": search,
@@ -618,7 +668,7 @@ async def get(
async def delete(
self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
+ ) -> AsyncHttpResponse[typing.Any]:
"""
Delete a particular conversation
@@ -632,7 +682,7 @@ async def delete(
Returns
-------
- AsyncHttpResponse[typing.Optional[typing.Any]]
+ AsyncHttpResponse[typing.Any]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -645,9 +695,9 @@ async def delete(
return AsyncHttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
diff --git a/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py b/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
index f9838af2..5079b6f5 100644
--- a/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
+++ b/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
@@ -192,7 +192,7 @@ def delete(
*,
force: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Delete a document from the knowledge base
@@ -209,7 +209,7 @@ def delete(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
@@ -556,7 +556,7 @@ async def delete(
*,
force: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Delete a document from the knowledge base
@@ -573,7 +573,7 @@ async def delete(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
diff --git a/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py b/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
index f3937a88..e95d4eb7 100644
--- a/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
+++ b/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
@@ -277,7 +277,7 @@ def delete(
*,
force: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[typing.Optional[typing.Any]]:
+ ) -> HttpResponse[typing.Any]:
"""
Delete a document from the knowledge base
@@ -294,7 +294,7 @@ def delete(
Returns
-------
- HttpResponse[typing.Optional[typing.Any]]
+ HttpResponse[typing.Any]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -310,9 +310,9 @@ def delete(
return HttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
@@ -757,7 +757,7 @@ async def delete(
*,
force: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
+ ) -> AsyncHttpResponse[typing.Any]:
"""
Delete a document from the knowledge base
@@ -774,7 +774,7 @@ async def delete(
Returns
-------
- AsyncHttpResponse[typing.Optional[typing.Any]]
+ AsyncHttpResponse[typing.Any]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -790,9 +790,9 @@ async def delete(
return AsyncHttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
diff --git a/src/elevenlabs/conversational_ai/mcp_servers/tool_approvals/client.py b/src/elevenlabs/conversational_ai/mcp_servers/tool_approvals/client.py
index 0f8228f9..7c142845 100644
--- a/src/elevenlabs/conversational_ai/mcp_servers/tool_approvals/client.py
+++ b/src/elevenlabs/conversational_ai/mcp_servers/tool_approvals/client.py
@@ -33,7 +33,7 @@ def create(
*,
tool_name: str,
tool_description: str,
- input_schema: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ input_schema: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
approval_policy: typing.Optional[McpToolApprovalPolicy] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> McpServerResponseModel:
@@ -51,7 +51,7 @@ def create(
tool_description : str
The description of the MCP tool
- input_schema : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ input_schema : typing.Optional[typing.Dict[str, typing.Any]]
The input schema of the MCP tool (the schema defined on the MCP server before ElevenLabs does any extra processing)
approval_policy : typing.Optional[McpToolApprovalPolicy]
@@ -147,7 +147,7 @@ async def create(
*,
tool_name: str,
tool_description: str,
- input_schema: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ input_schema: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
approval_policy: typing.Optional[McpToolApprovalPolicy] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> McpServerResponseModel:
@@ -165,7 +165,7 @@ async def create(
tool_description : str
The description of the MCP tool
- input_schema : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ input_schema : typing.Optional[typing.Dict[str, typing.Any]]
The input schema of the MCP tool (the schema defined on the MCP server before ElevenLabs does any extra processing)
approval_policy : typing.Optional[McpToolApprovalPolicy]
diff --git a/src/elevenlabs/conversational_ai/mcp_servers/tool_approvals/raw_client.py b/src/elevenlabs/conversational_ai/mcp_servers/tool_approvals/raw_client.py
index 86af213e..44bc7b6b 100644
--- a/src/elevenlabs/conversational_ai/mcp_servers/tool_approvals/raw_client.py
+++ b/src/elevenlabs/conversational_ai/mcp_servers/tool_approvals/raw_client.py
@@ -28,7 +28,7 @@ def create(
*,
tool_name: str,
tool_description: str,
- input_schema: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ input_schema: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
approval_policy: typing.Optional[McpToolApprovalPolicy] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[McpServerResponseModel]:
@@ -46,7 +46,7 @@ def create(
tool_description : str
The description of the MCP tool
- input_schema : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ input_schema : typing.Optional[typing.Dict[str, typing.Any]]
The input schema of the MCP tool (the schema defined on the MCP server before ElevenLabs does any extra processing)
approval_policy : typing.Optional[McpToolApprovalPolicy]
@@ -165,7 +165,7 @@ async def create(
*,
tool_name: str,
tool_description: str,
- input_schema: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ input_schema: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
approval_policy: typing.Optional[McpToolApprovalPolicy] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[McpServerResponseModel]:
@@ -183,7 +183,7 @@ async def create(
tool_description : str
The description of the MCP tool
- input_schema : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ input_schema : typing.Optional[typing.Dict[str, typing.Any]]
The input schema of the MCP tool (the schema defined on the MCP server before ElevenLabs does any extra processing)
approval_policy : typing.Optional[McpToolApprovalPolicy]
diff --git a/src/elevenlabs/conversational_ai/mcp_servers/tool_configs/raw_client.py b/src/elevenlabs/conversational_ai/mcp_servers/tool_configs/raw_client.py
index ebbf05e1..0c3d3f24 100644
--- a/src/elevenlabs/conversational_ai/mcp_servers/tool_configs/raw_client.py
+++ b/src/elevenlabs/conversational_ai/mcp_servers/tool_configs/raw_client.py
@@ -113,9 +113,9 @@ def create(
raise ConflictError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -177,9 +177,9 @@ def get(
raise NotFoundError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -336,9 +336,9 @@ def update(
raise NotFoundError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -448,9 +448,9 @@ async def create(
raise ConflictError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -512,9 +512,9 @@ async def get(
raise NotFoundError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -671,9 +671,9 @@ async def update(
raise NotFoundError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/elevenlabs/conversational_ai/phone_numbers/client.py b/src/elevenlabs/conversational_ai/phone_numbers/client.py
index bda88e04..a949a7f6 100644
--- a/src/elevenlabs/conversational_ai/phone_numbers/client.py
+++ b/src/elevenlabs/conversational_ai/phone_numbers/client.py
@@ -134,9 +134,7 @@ def get(
_response = self._raw_client.get(phone_number_id, request_options=request_options)
return _response.data
- def delete(
- self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.Optional[typing.Any]:
+ def delete(self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Delete Phone Number by ID
@@ -150,7 +148,7 @@ def delete(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
@@ -365,7 +363,7 @@ async def main() -> None:
async def delete(
self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Delete Phone Number by ID
@@ -379,7 +377,7 @@ async def delete(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
diff --git a/src/elevenlabs/conversational_ai/phone_numbers/raw_client.py b/src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
index be9eaa2e..dc0ad666 100644
--- a/src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
+++ b/src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
@@ -184,7 +184,7 @@ def get(
def delete(
self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> HttpResponse[typing.Optional[typing.Any]]:
+ ) -> HttpResponse[typing.Any]:
"""
Delete Phone Number by ID
@@ -198,7 +198,7 @@ def delete(
Returns
-------
- HttpResponse[typing.Optional[typing.Any]]
+ HttpResponse[typing.Any]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -211,9 +211,9 @@ def delete(
return HttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
@@ -473,7 +473,7 @@ async def get(
async def delete(
self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
+ ) -> AsyncHttpResponse[typing.Any]:
"""
Delete Phone Number by ID
@@ -487,7 +487,7 @@ async def delete(
Returns
-------
- AsyncHttpResponse[typing.Optional[typing.Any]]
+ AsyncHttpResponse[typing.Any]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -500,9 +500,9 @@ async def delete(
return AsyncHttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
diff --git a/src/elevenlabs/conversational_ai/tests/client.py b/src/elevenlabs/conversational_ai/tests/client.py
index 0fe0cacb..8da3bc86 100644
--- a/src/elevenlabs/conversational_ai/tests/client.py
+++ b/src/elevenlabs/conversational_ai/tests/client.py
@@ -280,9 +280,7 @@ def update(
)
return _response.data
- def delete(
- self, test_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.Optional[typing.Any]:
+ def delete(self, test_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Deletes an agent response test by ID.
@@ -296,7 +294,7 @@ def delete(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
@@ -683,9 +681,7 @@ async def main() -> None:
)
return _response.data
- async def delete(
- self, test_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.Optional[typing.Any]:
+ async def delete(self, test_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Deletes an agent response test by ID.
@@ -699,7 +695,7 @@ async def delete(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
diff --git a/src/elevenlabs/conversational_ai/tests/invocations/client.py b/src/elevenlabs/conversational_ai/tests/invocations/client.py
index e19bcaea..2597fdc5 100644
--- a/src/elevenlabs/conversational_ai/tests/invocations/client.py
+++ b/src/elevenlabs/conversational_ai/tests/invocations/client.py
@@ -116,8 +116,9 @@ def resubmit(
test_run_ids: typing.Sequence[str],
agent_id: str,
agent_config_override: typing.Optional[AdhocAgentConfigOverrideForTestRequestModel] = OMIT,
+ branch_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Resubmits specific test runs from a test invocation.
@@ -135,12 +136,15 @@ def resubmit(
agent_config_override : typing.Optional[AdhocAgentConfigOverrideForTestRequestModel]
Configuration overrides to use for testing. If not provided, the agent's default configuration will be used.
+ branch_id : typing.Optional[str]
+        ID of the branch to run the tests on. If not provided, the tests will be run on the agent's default configuration.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
@@ -161,6 +165,7 @@ def resubmit(
test_run_ids=test_run_ids,
agent_id=agent_id,
agent_config_override=agent_config_override,
+ branch_id=branch_id,
request_options=request_options,
)
return _response.data
@@ -285,8 +290,9 @@ async def resubmit(
test_run_ids: typing.Sequence[str],
agent_id: str,
agent_config_override: typing.Optional[AdhocAgentConfigOverrideForTestRequestModel] = OMIT,
+ branch_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Resubmits specific test runs from a test invocation.
@@ -304,12 +310,15 @@ async def resubmit(
agent_config_override : typing.Optional[AdhocAgentConfigOverrideForTestRequestModel]
Configuration overrides to use for testing. If not provided, the agent's default configuration will be used.
+ branch_id : typing.Optional[str]
+ ID of the branch to run the tests on. If not provided, the tests will be run on the agent default configuration.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
@@ -338,6 +347,7 @@ async def main() -> None:
test_run_ids=test_run_ids,
agent_id=agent_id,
agent_config_override=agent_config_override,
+ branch_id=branch_id,
request_options=request_options,
)
return _response.data
diff --git a/src/elevenlabs/conversational_ai/tests/invocations/raw_client.py b/src/elevenlabs/conversational_ai/tests/invocations/raw_client.py
index 8c0175d4..1db31cba 100644
--- a/src/elevenlabs/conversational_ai/tests/invocations/raw_client.py
+++ b/src/elevenlabs/conversational_ai/tests/invocations/raw_client.py
@@ -147,8 +147,9 @@ def resubmit(
test_run_ids: typing.Sequence[str],
agent_id: str,
agent_config_override: typing.Optional[AdhocAgentConfigOverrideForTestRequestModel] = OMIT,
+ branch_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[typing.Optional[typing.Any]]:
+ ) -> HttpResponse[typing.Any]:
"""
Resubmits specific test runs from a test invocation.
@@ -166,12 +167,15 @@ def resubmit(
agent_config_override : typing.Optional[AdhocAgentConfigOverrideForTestRequestModel]
Configuration overrides to use for testing. If not provided, the agent's default configuration will be used.
+ branch_id : typing.Optional[str]
+ ID of the branch to run the tests on. If not provided, the tests will be run on the agent default configuration.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- HttpResponse[typing.Optional[typing.Any]]
+ HttpResponse[typing.Any]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -185,6 +189,7 @@ def resubmit(
direction="write",
),
"agent_id": agent_id,
+ "branch_id": branch_id,
},
headers={
"content-type": "application/json",
@@ -197,9 +202,9 @@ def resubmit(
return HttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
@@ -348,8 +353,9 @@ async def resubmit(
test_run_ids: typing.Sequence[str],
agent_id: str,
agent_config_override: typing.Optional[AdhocAgentConfigOverrideForTestRequestModel] = OMIT,
+ branch_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
+ ) -> AsyncHttpResponse[typing.Any]:
"""
Resubmits specific test runs from a test invocation.
@@ -367,12 +373,15 @@ async def resubmit(
agent_config_override : typing.Optional[AdhocAgentConfigOverrideForTestRequestModel]
Configuration overrides to use for testing. If not provided, the agent's default configuration will be used.
+ branch_id : typing.Optional[str]
+ ID of the branch to run the tests on. If not provided, the tests will be run on the agent default configuration.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- AsyncHttpResponse[typing.Optional[typing.Any]]
+ AsyncHttpResponse[typing.Any]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -386,6 +395,7 @@ async def resubmit(
direction="write",
),
"agent_id": agent_id,
+ "branch_id": branch_id,
},
headers={
"content-type": "application/json",
@@ -398,9 +408,9 @@ async def resubmit(
return AsyncHttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
diff --git a/src/elevenlabs/conversational_ai/tests/raw_client.py b/src/elevenlabs/conversational_ai/tests/raw_client.py
index 474f9b85..e0681d89 100644
--- a/src/elevenlabs/conversational_ai/tests/raw_client.py
+++ b/src/elevenlabs/conversational_ai/tests/raw_client.py
@@ -323,7 +323,7 @@ def update(
def delete(
self, test_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> HttpResponse[typing.Optional[typing.Any]]:
+ ) -> HttpResponse[typing.Any]:
"""
Deletes an agent response test by ID.
@@ -337,7 +337,7 @@ def delete(
Returns
-------
- HttpResponse[typing.Optional[typing.Any]]
+ HttpResponse[typing.Any]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -350,9 +350,9 @@ def delete(
return HttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
@@ -791,7 +791,7 @@ async def update(
async def delete(
self, test_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
+ ) -> AsyncHttpResponse[typing.Any]:
"""
Deletes an agent response test by ID.
@@ -805,7 +805,7 @@ async def delete(
Returns
-------
- AsyncHttpResponse[typing.Optional[typing.Any]]
+ AsyncHttpResponse[typing.Any]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -818,9 +818,9 @@ async def delete(
return AsyncHttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
diff --git a/src/elevenlabs/conversational_ai/tools/client.py b/src/elevenlabs/conversational_ai/tools/client.py
index cb745011..abac1764 100644
--- a/src/elevenlabs/conversational_ai/tools/client.py
+++ b/src/elevenlabs/conversational_ai/tools/client.py
@@ -77,8 +77,12 @@ def create(
--------
from elevenlabs import (
ElevenLabs,
+ LiteralJsonSchemaProperty,
+ ObjectJsonSchemaPropertyInput,
+ QueryParamsJsonSchema,
ToolRequestModel,
ToolRequestModelToolConfig_ApiIntegrationWebhook,
+ WebhookToolApiSchemaConfigInput,
)
client = ElevenLabs(
@@ -91,6 +95,28 @@ def create(
description="description",
api_integration_id="api_integration_id",
api_integration_connection_id="api_integration_connection_id",
+ base_api_schema=WebhookToolApiSchemaConfigInput(
+ url="https://example.com/agents/{agent_id}",
+ method="GET",
+ path_params_schema={
+ "agent_id": LiteralJsonSchemaProperty(
+ type="string",
+ )
+ },
+ query_params_schema=QueryParamsJsonSchema(
+ properties={
+ "key": LiteralJsonSchemaProperty(
+ type="string",
+ description="My property",
+ is_system_provided=False,
+ dynamic_variable="",
+ constant_value="",
+ )
+ },
+ ),
+ request_body_schema=ObjectJsonSchemaPropertyInput(),
+ request_headers={"Authorization": "Bearer {api_key}"},
+ ),
),
),
)
@@ -129,9 +155,7 @@ def get(self, tool_id: str, *, request_options: typing.Optional[RequestOptions]
_response = self._raw_client.get(tool_id, request_options=request_options)
return _response.data
- def delete(
- self, tool_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.Optional[typing.Any]:
+ def delete(self, tool_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Delete tool from the workspace.
@@ -145,7 +169,7 @@ def delete(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
@@ -187,8 +211,12 @@ def update(
--------
from elevenlabs import (
ElevenLabs,
+ LiteralJsonSchemaProperty,
+ ObjectJsonSchemaPropertyInput,
+ QueryParamsJsonSchema,
ToolRequestModel,
ToolRequestModelToolConfig_ApiIntegrationWebhook,
+ WebhookToolApiSchemaConfigInput,
)
client = ElevenLabs(
@@ -202,6 +230,28 @@ def update(
description="description",
api_integration_id="api_integration_id",
api_integration_connection_id="api_integration_connection_id",
+ base_api_schema=WebhookToolApiSchemaConfigInput(
+ url="https://example.com/agents/{agent_id}",
+ method="GET",
+ path_params_schema={
+ "agent_id": LiteralJsonSchemaProperty(
+ type="string",
+ )
+ },
+ query_params_schema=QueryParamsJsonSchema(
+ properties={
+ "key": LiteralJsonSchemaProperty(
+ type="string",
+ description="My property",
+ is_system_provided=False,
+ dynamic_variable="",
+ constant_value="",
+ )
+ },
+ ),
+ request_body_schema=ObjectJsonSchemaPropertyInput(),
+ request_headers={"Authorization": "Bearer {api_key}"},
+ ),
),
),
)
@@ -331,8 +381,12 @@ async def create(
from elevenlabs import (
AsyncElevenLabs,
+ LiteralJsonSchemaProperty,
+ ObjectJsonSchemaPropertyInput,
+ QueryParamsJsonSchema,
ToolRequestModel,
ToolRequestModelToolConfig_ApiIntegrationWebhook,
+ WebhookToolApiSchemaConfigInput,
)
client = AsyncElevenLabs(
@@ -348,6 +402,28 @@ async def main() -> None:
description="description",
api_integration_id="api_integration_id",
api_integration_connection_id="api_integration_connection_id",
+ base_api_schema=WebhookToolApiSchemaConfigInput(
+ url="https://example.com/agents/{agent_id}",
+ method="GET",
+ path_params_schema={
+ "agent_id": LiteralJsonSchemaProperty(
+ type="string",
+ )
+ },
+ query_params_schema=QueryParamsJsonSchema(
+ properties={
+ "key": LiteralJsonSchemaProperty(
+ type="string",
+ description="My property",
+ is_system_provided=False,
+ dynamic_variable="",
+ constant_value="",
+ )
+ },
+ ),
+ request_body_schema=ObjectJsonSchemaPropertyInput(),
+ request_headers={"Authorization": "Bearer {api_key}"},
+ ),
),
),
)
@@ -397,9 +473,7 @@ async def main() -> None:
_response = await self._raw_client.get(tool_id, request_options=request_options)
return _response.data
- async def delete(
- self, tool_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.Optional[typing.Any]:
+ async def delete(self, tool_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Delete tool from the workspace.
@@ -413,7 +487,7 @@ async def delete(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
@@ -465,8 +539,12 @@ async def update(
from elevenlabs import (
AsyncElevenLabs,
+ LiteralJsonSchemaProperty,
+ ObjectJsonSchemaPropertyInput,
+ QueryParamsJsonSchema,
ToolRequestModel,
ToolRequestModelToolConfig_ApiIntegrationWebhook,
+ WebhookToolApiSchemaConfigInput,
)
client = AsyncElevenLabs(
@@ -483,6 +561,28 @@ async def main() -> None:
description="description",
api_integration_id="api_integration_id",
api_integration_connection_id="api_integration_connection_id",
+ base_api_schema=WebhookToolApiSchemaConfigInput(
+ url="https://example.com/agents/{agent_id}",
+ method="GET",
+ path_params_schema={
+ "agent_id": LiteralJsonSchemaProperty(
+ type="string",
+ )
+ },
+ query_params_schema=QueryParamsJsonSchema(
+ properties={
+ "key": LiteralJsonSchemaProperty(
+ type="string",
+ description="My property",
+ is_system_provided=False,
+ dynamic_variable="",
+ constant_value="",
+ )
+ },
+ ),
+ request_body_schema=ObjectJsonSchemaPropertyInput(),
+ request_headers={"Authorization": "Bearer {api_key}"},
+ ),
),
),
)
diff --git a/src/elevenlabs/conversational_ai/tools/raw_client.py b/src/elevenlabs/conversational_ai/tools/raw_client.py
index 5b1c1f12..d52aced9 100644
--- a/src/elevenlabs/conversational_ai/tools/raw_client.py
+++ b/src/elevenlabs/conversational_ai/tools/raw_client.py
@@ -178,7 +178,7 @@ def get(
def delete(
self, tool_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> HttpResponse[typing.Optional[typing.Any]]:
+ ) -> HttpResponse[typing.Any]:
"""
Delete tool from the workspace.
@@ -192,7 +192,7 @@ def delete(
Returns
-------
- HttpResponse[typing.Optional[typing.Any]]
+ HttpResponse[typing.Any]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -205,9 +205,9 @@ def delete(
return HttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
@@ -512,7 +512,7 @@ async def get(
async def delete(
self, tool_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
+ ) -> AsyncHttpResponse[typing.Any]:
"""
Delete tool from the workspace.
@@ -526,7 +526,7 @@ async def delete(
Returns
-------
- AsyncHttpResponse[typing.Optional[typing.Any]]
+ AsyncHttpResponse[typing.Any]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -539,9 +539,9 @@ async def delete(
return AsyncHttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py
index e550d1b2..1546f1b7 100644
--- a/src/elevenlabs/core/client_wrapper.py
+++ b/src/elevenlabs/core/client_wrapper.py
@@ -22,10 +22,9 @@ def __init__(
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "elevenlabs/v2.22.1",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "elevenlabs",
- "X-Fern-SDK-Version": "v2.22.1",
+ "X-Fern-SDK-Version": "0.0.0",
**(self.get_custom_headers() or {}),
}
if self._api_key is not None:
diff --git a/src/elevenlabs/core/pydantic_utilities.py b/src/elevenlabs/core/pydantic_utilities.py
index 8906cdfa..185e5c4f 100644
--- a/src/elevenlabs/core/pydantic_utilities.py
+++ b/src/elevenlabs/core/pydantic_utilities.py
@@ -220,7 +220,9 @@ def universal_root_validator(
) -> Callable[[AnyCallable], AnyCallable]:
def decorator(func: AnyCallable) -> AnyCallable:
if IS_PYDANTIC_V2:
- return cast(AnyCallable, pydantic.model_validator(mode="before" if pre else "after")(func)) # type: ignore[attr-defined]
+ # In Pydantic v2, for RootModel we always use "before" mode
+ # The custom validators transform the input value before the model is created
+ return cast(AnyCallable, pydantic.model_validator(mode="before")(func)) # type: ignore[attr-defined]
return cast(AnyCallable, pydantic.root_validator(pre=pre)(func)) # type: ignore[call-overload]
return decorator
diff --git a/src/elevenlabs/dubbing/audio/raw_client.py b/src/elevenlabs/dubbing/audio/raw_client.py
index 47ff559b..0dd63e41 100644
--- a/src/elevenlabs/dubbing/audio/raw_client.py
+++ b/src/elevenlabs/dubbing/audio/raw_client.py
@@ -62,9 +62,9 @@ def _stream() -> HttpResponse[typing.Iterator[bytes]]:
raise ForbiddenError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -73,9 +73,9 @@ def _stream() -> HttpResponse[typing.Iterator[bytes]]:
raise NotFoundError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -95,9 +95,9 @@ def _stream() -> HttpResponse[typing.Iterator[bytes]]:
raise TooEarlyError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -158,9 +158,9 @@ async def _stream() -> AsyncHttpResponse[typing.AsyncIterator[bytes]]:
raise ForbiddenError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -169,9 +169,9 @@ async def _stream() -> AsyncHttpResponse[typing.AsyncIterator[bytes]]:
raise NotFoundError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -191,9 +191,9 @@ async def _stream() -> AsyncHttpResponse[typing.AsyncIterator[bytes]]:
raise TooEarlyError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/elevenlabs/dubbing/transcript/raw_client.py b/src/elevenlabs/dubbing/transcript/raw_client.py
index c5f069c1..cedf721f 100644
--- a/src/elevenlabs/dubbing/transcript/raw_client.py
+++ b/src/elevenlabs/dubbing/transcript/raw_client.py
@@ -66,9 +66,9 @@ def get_transcript_for_dub(
raise ForbiddenError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -77,9 +77,9 @@ def get_transcript_for_dub(
raise NotFoundError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -99,9 +99,9 @@ def get_transcript_for_dub(
raise TooEarlyError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -161,9 +161,9 @@ async def get_transcript_for_dub(
raise ForbiddenError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -172,9 +172,9 @@ async def get_transcript_for_dub(
raise NotFoundError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -194,9 +194,9 @@ async def get_transcript_for_dub(
raise TooEarlyError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/elevenlabs/errors/conflict_error.py b/src/elevenlabs/errors/conflict_error.py
index d340b91c..be04e01a 100644
--- a/src/elevenlabs/errors/conflict_error.py
+++ b/src/elevenlabs/errors/conflict_error.py
@@ -6,5 +6,5 @@
class ConflictError(ApiError):
- def __init__(self, body: typing.Optional[typing.Any], headers: typing.Optional[typing.Dict[str, str]] = None):
+ def __init__(self, body: typing.Any, headers: typing.Optional[typing.Dict[str, str]] = None):
super().__init__(status_code=409, headers=headers, body=body)
diff --git a/src/elevenlabs/errors/forbidden_error.py b/src/elevenlabs/errors/forbidden_error.py
index 3e390b0d..07d7e45b 100644
--- a/src/elevenlabs/errors/forbidden_error.py
+++ b/src/elevenlabs/errors/forbidden_error.py
@@ -6,5 +6,5 @@
class ForbiddenError(ApiError):
- def __init__(self, body: typing.Optional[typing.Any], headers: typing.Optional[typing.Dict[str, str]] = None):
+ def __init__(self, body: typing.Any, headers: typing.Optional[typing.Dict[str, str]] = None):
super().__init__(status_code=403, headers=headers, body=body)
diff --git a/src/elevenlabs/errors/not_found_error.py b/src/elevenlabs/errors/not_found_error.py
index dcd60e38..75f557df 100644
--- a/src/elevenlabs/errors/not_found_error.py
+++ b/src/elevenlabs/errors/not_found_error.py
@@ -6,5 +6,5 @@
class NotFoundError(ApiError):
- def __init__(self, body: typing.Optional[typing.Any], headers: typing.Optional[typing.Dict[str, str]] = None):
+ def __init__(self, body: typing.Any, headers: typing.Optional[typing.Dict[str, str]] = None):
super().__init__(status_code=404, headers=headers, body=body)
diff --git a/src/elevenlabs/errors/too_early_error.py b/src/elevenlabs/errors/too_early_error.py
index 422acaaa..418b8abb 100644
--- a/src/elevenlabs/errors/too_early_error.py
+++ b/src/elevenlabs/errors/too_early_error.py
@@ -6,5 +6,5 @@
class TooEarlyError(ApiError):
- def __init__(self, body: typing.Optional[typing.Any], headers: typing.Optional[typing.Dict[str, str]] = None):
+ def __init__(self, body: typing.Any, headers: typing.Optional[typing.Dict[str, str]] = None):
super().__init__(status_code=425, headers=headers, body=body)
diff --git a/src/elevenlabs/errors/unauthorized_error.py b/src/elevenlabs/errors/unauthorized_error.py
index c83b25c2..7e48bb6f 100644
--- a/src/elevenlabs/errors/unauthorized_error.py
+++ b/src/elevenlabs/errors/unauthorized_error.py
@@ -6,5 +6,5 @@
class UnauthorizedError(ApiError):
- def __init__(self, body: typing.Optional[typing.Any], headers: typing.Optional[typing.Dict[str, str]] = None):
+ def __init__(self, body: typing.Any, headers: typing.Optional[typing.Dict[str, str]] = None):
super().__init__(status_code=401, headers=headers, body=body)
diff --git a/src/elevenlabs/music/client.py b/src/elevenlabs/music/client.py
index 93f77371..b5af9fc0 100644
--- a/src/elevenlabs/music/client.py
+++ b/src/elevenlabs/music/client.py
@@ -253,7 +253,7 @@ def separate_stems(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[bytes]:
"""
- Separate a music file into individual stems
+ Separate an audio file into individual stems. This endpoint might have high latency, depending on the length of the audio file.
Parameters
----------
@@ -547,7 +547,7 @@ async def separate_stems(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[bytes]:
"""
- Separate a music file into individual stems
+ Separate an audio file into individual stems. This endpoint might have high latency, depending on the length of the audio file.
Parameters
----------
diff --git a/src/elevenlabs/music/raw_client.py b/src/elevenlabs/music/raw_client.py
index 422ab168..1ae1e05a 100644
--- a/src/elevenlabs/music/raw_client.py
+++ b/src/elevenlabs/music/raw_client.py
@@ -337,7 +337,7 @@ def separate_stems(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[HttpResponse[typing.Iterator[bytes]]]:
"""
- Separate a music file into individual stems
+ Separate an audio file into individual stems. This endpoint might have high latency, depending on the length of the audio file.
Parameters
----------
@@ -720,7 +720,7 @@ async def separate_stems(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]:
"""
- Separate a music file into individual stems
+ Separate an audio file into individual stems. This endpoint might have high latency, depending on the length of the audio file.
Parameters
----------
diff --git a/src/elevenlabs/service_accounts/api_keys/client.py b/src/elevenlabs/service_accounts/api_keys/client.py
index abf4bb4d..e915b0cf 100644
--- a/src/elevenlabs/service_accounts/api_keys/client.py
+++ b/src/elevenlabs/service_accounts/api_keys/client.py
@@ -121,7 +121,7 @@ def create(
def delete(
self, service_account_user_id: str, api_key_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Delete an existing API key for a service account
@@ -136,7 +136,7 @@ def delete(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
@@ -164,7 +164,7 @@ def update(
permissions: BodyEditServiceAccountApiKeyV1ServiceAccountsServiceAccountUserIdApiKeysApiKeyIdPatchPermissions,
character_limit: typing.Optional[int] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Update an existing API key for a service account
@@ -191,7 +191,7 @@ def update(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
@@ -340,7 +340,7 @@ async def main() -> None:
async def delete(
self, service_account_user_id: str, api_key_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Delete an existing API key for a service account
@@ -355,7 +355,7 @@ async def delete(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
@@ -391,7 +391,7 @@ async def update(
permissions: BodyEditServiceAccountApiKeyV1ServiceAccountsServiceAccountUserIdApiKeysApiKeyIdPatchPermissions,
character_limit: typing.Optional[int] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Update an existing API key for a service account
@@ -418,7 +418,7 @@ async def update(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
diff --git a/src/elevenlabs/service_accounts/api_keys/raw_client.py b/src/elevenlabs/service_accounts/api_keys/raw_client.py
index 32ed3ab5..5853780b 100644
--- a/src/elevenlabs/service_accounts/api_keys/raw_client.py
+++ b/src/elevenlabs/service_accounts/api_keys/raw_client.py
@@ -156,7 +156,7 @@ def create(
def delete(
self, service_account_user_id: str, api_key_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> HttpResponse[typing.Optional[typing.Any]]:
+ ) -> HttpResponse[typing.Any]:
"""
Delete an existing API key for a service account
@@ -171,7 +171,7 @@ def delete(
Returns
-------
- HttpResponse[typing.Optional[typing.Any]]
+ HttpResponse[typing.Any]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -184,9 +184,9 @@ def delete(
return HttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
@@ -217,7 +217,7 @@ def update(
permissions: BodyEditServiceAccountApiKeyV1ServiceAccountsServiceAccountUserIdApiKeysApiKeyIdPatchPermissions,
character_limit: typing.Optional[int] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[typing.Optional[typing.Any]]:
+ ) -> HttpResponse[typing.Any]:
"""
Update an existing API key for a service account
@@ -244,7 +244,7 @@ def update(
Returns
-------
- HttpResponse[typing.Optional[typing.Any]]
+ HttpResponse[typing.Any]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -271,9 +271,9 @@ def update(
return HttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
@@ -426,7 +426,7 @@ async def create(
async def delete(
self, service_account_user_id: str, api_key_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
+ ) -> AsyncHttpResponse[typing.Any]:
"""
Delete an existing API key for a service account
@@ -441,7 +441,7 @@ async def delete(
Returns
-------
- AsyncHttpResponse[typing.Optional[typing.Any]]
+ AsyncHttpResponse[typing.Any]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -454,9 +454,9 @@ async def delete(
return AsyncHttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
@@ -487,7 +487,7 @@ async def update(
permissions: BodyEditServiceAccountApiKeyV1ServiceAccountsServiceAccountUserIdApiKeysApiKeyIdPatchPermissions,
character_limit: typing.Optional[int] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
+ ) -> AsyncHttpResponse[typing.Any]:
"""
Update an existing API key for a service account
@@ -514,7 +514,7 @@ async def update(
Returns
-------
- AsyncHttpResponse[typing.Optional[typing.Any]]
+ AsyncHttpResponse[typing.Any]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -541,9 +541,9 @@ async def update(
return AsyncHttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
diff --git a/src/elevenlabs/speech_to_text/transcripts/client.py b/src/elevenlabs/speech_to_text/transcripts/client.py
index 8eb9d6cf..4e5b66af 100644
--- a/src/elevenlabs/speech_to_text/transcripts/client.py
+++ b/src/elevenlabs/speech_to_text/transcripts/client.py
@@ -56,9 +56,7 @@ def get(
_response = self._raw_client.get(transcription_id, request_options=request_options)
return _response.data
- def delete(
- self, transcription_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.Optional[typing.Any]:
+ def delete(self, transcription_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
"""
Delete a previously generated transcript by its ID.
@@ -72,7 +70,7 @@ def delete(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Delete completed successfully.
Examples
@@ -148,7 +146,7 @@ async def main() -> None:
async def delete(
self, transcription_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Delete a previously generated transcript by its ID.
@@ -162,7 +160,7 @@ async def delete(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Delete completed successfully.
Examples
diff --git a/src/elevenlabs/speech_to_text/transcripts/raw_client.py b/src/elevenlabs/speech_to_text/transcripts/raw_client.py
index a0e207c3..189f6e67 100644
--- a/src/elevenlabs/speech_to_text/transcripts/raw_client.py
+++ b/src/elevenlabs/speech_to_text/transcripts/raw_client.py
@@ -58,9 +58,9 @@ def get(
raise UnauthorizedError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -69,9 +69,9 @@ def get(
raise NotFoundError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -94,7 +94,7 @@ def get(
def delete(
self, transcription_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> HttpResponse[typing.Optional[typing.Any]]:
+ ) -> HttpResponse[typing.Any]:
"""
Delete a previously generated transcript by its ID.
@@ -108,7 +108,7 @@ def delete(
Returns
-------
- HttpResponse[typing.Optional[typing.Any]]
+ HttpResponse[typing.Any]
Delete completed successfully.
"""
_response = self._client_wrapper.httpx_client.request(
@@ -121,9 +121,9 @@ def delete(
return HttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
@@ -132,9 +132,9 @@ def delete(
raise UnauthorizedError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -198,9 +198,9 @@ async def get(
raise UnauthorizedError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -209,9 +209,9 @@ async def get(
raise NotFoundError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -234,7 +234,7 @@ async def get(
async def delete(
self, transcription_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
+ ) -> AsyncHttpResponse[typing.Any]:
"""
Delete a previously generated transcript by its ID.
@@ -248,7 +248,7 @@ async def delete(
Returns
-------
- AsyncHttpResponse[typing.Optional[typing.Any]]
+ AsyncHttpResponse[typing.Any]
Delete completed successfully.
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -261,9 +261,9 @@ async def delete(
return AsyncHttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
@@ -272,9 +272,9 @@ async def delete(
raise UnauthorizedError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/elevenlabs/speech_to_text/types/speech_to_text_convert_request_webhook_metadata.py b/src/elevenlabs/speech_to_text/types/speech_to_text_convert_request_webhook_metadata.py
index 9095fa48..a6c7a4e3 100644
--- a/src/elevenlabs/speech_to_text/types/speech_to_text_convert_request_webhook_metadata.py
+++ b/src/elevenlabs/speech_to_text/types/speech_to_text_convert_request_webhook_metadata.py
@@ -2,4 +2,4 @@
import typing
-SpeechToTextConvertRequestWebhookMetadata = typing.Union[str, typing.Dict[str, typing.Optional[typing.Any]]]
+SpeechToTextConvertRequestWebhookMetadata = typing.Union[str, typing.Dict[str, typing.Any]]
diff --git a/src/elevenlabs/types/__init__.py b/src/elevenlabs/types/__init__.py
index 81714956..e513cb15 100644
--- a/src/elevenlabs/types/__init__.py
+++ b/src/elevenlabs/types/__init__.py
@@ -74,6 +74,8 @@
from .api_integration_webhook_overrides_output_request_headers_value import (
ApiIntegrationWebhookOverridesOutputRequestHeadersValue,
)
+ from .api_integration_webhook_tool_config_external_input import ApiIntegrationWebhookToolConfigExternalInput
+ from .api_integration_webhook_tool_config_external_output import ApiIntegrationWebhookToolConfigExternalOutput
from .api_integration_webhook_tool_config_input import ApiIntegrationWebhookToolConfigInput
from .api_integration_webhook_tool_config_output import ApiIntegrationWebhookToolConfigOutput
from .array_json_schema_property_input import ArrayJsonSchemaPropertyInput
@@ -669,6 +671,7 @@
from .conversation_config_override_config import ConversationConfigOverrideConfig
from .conversation_config_workflow_override import ConversationConfigWorkflowOverride
from .conversation_deletion_settings import ConversationDeletionSettings
+ from .conversation_feedback_type import ConversationFeedbackType
from .conversation_history_analysis_common_model import ConversationHistoryAnalysisCommonModel
from .conversation_history_batch_call_model import ConversationHistoryBatchCallModel
from .conversation_history_eleven_assistant_common_model import ConversationHistoryElevenAssistantCommonModel
@@ -838,6 +841,7 @@
from .detailed_music_response import DetailedMusicResponse
from .dialogue_input import DialogueInput
from .dialogue_input_response_model import DialogueInputResponseModel
+ from .discount_resposne_model import DiscountResposneModel
from .do_dubbing_response import DoDubbingResponse
from .document_usage_mode_enum import DocumentUsageModeEnum
from .docx_export_options import DocxExportOptions
@@ -1137,10 +1141,16 @@
ProjectExtendedResponseModelApplyTextNormalization,
)
from .project_extended_response_model_aspect_ratio import ProjectExtendedResponseModelAspectRatio
+ from .project_extended_response_model_assets_item import (
+ ProjectExtendedResponseModelAssetsItem,
+ ProjectExtendedResponseModelAssetsItem_Audio,
+ ProjectExtendedResponseModelAssetsItem_Video,
+ )
from .project_extended_response_model_fiction import ProjectExtendedResponseModelFiction
from .project_extended_response_model_quality_preset import ProjectExtendedResponseModelQualityPreset
from .project_extended_response_model_source_type import ProjectExtendedResponseModelSourceType
from .project_extended_response_model_target_audience import ProjectExtendedResponseModelTargetAudience
+ from .project_external_audio_response_model import ProjectExternalAudioResponseModel
from .project_response import ProjectResponse
from .project_response_model_access_level import ProjectResponseModelAccessLevel
from .project_response_model_aspect_ratio import ProjectResponseModelAspectRatio
@@ -1151,6 +1161,8 @@
from .project_snapshot_response import ProjectSnapshotResponse
from .project_snapshots_response import ProjectSnapshotsResponse
from .project_state import ProjectState
+ from .project_video_response_model import ProjectVideoResponseModel
+ from .project_video_thumbnail_sheet_response_model import ProjectVideoThumbnailSheetResponseModel
from .prompt_agent import PromptAgent
from .prompt_agent_api_model_input import PromptAgentApiModelInput
from .prompt_agent_api_model_input_backup_llm_config import (
@@ -1286,6 +1298,10 @@
from .sip_uri_transfer_destination import SipUriTransferDestination
from .skip_turn_tool_config import SkipTurnToolConfig
from .skip_turn_tool_response_model import SkipTurnToolResponseModel
+ from .soft_timeout_config import SoftTimeoutConfig
+ from .soft_timeout_config_override import SoftTimeoutConfigOverride
+ from .soft_timeout_config_override_config import SoftTimeoutConfigOverrideConfig
+ from .soft_timeout_config_workflow_override import SoftTimeoutConfigWorkflowOverride
from .song_metadata import SongMetadata
from .song_section import SongSection
from .sort_direction import SortDirection
@@ -1379,7 +1395,6 @@
ToolResponseModelToolConfig,
ToolResponseModelToolConfig_ApiIntegrationWebhook,
ToolResponseModelToolConfig_Client,
- ToolResponseModelToolConfig_Mcp,
ToolResponseModelToolConfig_System,
ToolResponseModelToolConfig_Webhook,
)
@@ -1406,6 +1421,8 @@
from .tts_optimize_streaming_latency import TtsOptimizeStreamingLatency
from .tts_output_format import TtsOutputFormat
from .turn_config import TurnConfig
+ from .turn_config_override import TurnConfigOverride
+ from .turn_config_override_config import TurnConfigOverrideConfig
from .turn_config_workflow_override import TurnConfigWorkflowOverride
from .turn_eagerness import TurnEagerness
from .turn_mode import TurnMode
@@ -1469,11 +1486,13 @@
from .webhook_auth_method_type import WebhookAuthMethodType
from .webhook_event_type import WebhookEventType
from .webhook_tool_api_schema_config_input import WebhookToolApiSchemaConfigInput
+ from .webhook_tool_api_schema_config_input_content_type import WebhookToolApiSchemaConfigInputContentType
from .webhook_tool_api_schema_config_input_method import WebhookToolApiSchemaConfigInputMethod
from .webhook_tool_api_schema_config_input_request_headers_value import (
WebhookToolApiSchemaConfigInputRequestHeadersValue,
)
from .webhook_tool_api_schema_config_output import WebhookToolApiSchemaConfigOutput
+ from .webhook_tool_api_schema_config_output_content_type import WebhookToolApiSchemaConfigOutputContentType
from .webhook_tool_api_schema_config_output_method import WebhookToolApiSchemaConfigOutputMethod
from .webhook_tool_api_schema_config_output_request_headers_value import (
WebhookToolApiSchemaConfigOutputRequestHeadersValue,
@@ -1504,6 +1523,8 @@
WidgetConfigResponseModelAvatar_Orb,
WidgetConfigResponseModelAvatar_Url,
)
+ from .widget_end_feedback_config import WidgetEndFeedbackConfig
+ from .widget_end_feedback_type import WidgetEndFeedbackType
from .widget_expandable import WidgetExpandable
from .widget_feedback_mode import WidgetFeedbackMode
from .widget_language_preset import WidgetLanguagePreset
@@ -1705,6 +1726,8 @@
"ApiIntegrationWebhookOverridesInputRequestHeadersValue": ".api_integration_webhook_overrides_input_request_headers_value",
"ApiIntegrationWebhookOverridesOutput": ".api_integration_webhook_overrides_output",
"ApiIntegrationWebhookOverridesOutputRequestHeadersValue": ".api_integration_webhook_overrides_output_request_headers_value",
+ "ApiIntegrationWebhookToolConfigExternalInput": ".api_integration_webhook_tool_config_external_input",
+ "ApiIntegrationWebhookToolConfigExternalOutput": ".api_integration_webhook_tool_config_external_output",
"ApiIntegrationWebhookToolConfigInput": ".api_integration_webhook_tool_config_input",
"ApiIntegrationWebhookToolConfigOutput": ".api_integration_webhook_tool_config_output",
"ArrayJsonSchemaPropertyInput": ".array_json_schema_property_input",
@@ -2232,6 +2255,7 @@
"ConversationConfigOverrideConfig": ".conversation_config_override_config",
"ConversationConfigWorkflowOverride": ".conversation_config_workflow_override",
"ConversationDeletionSettings": ".conversation_deletion_settings",
+ "ConversationFeedbackType": ".conversation_feedback_type",
"ConversationHistoryAnalysisCommonModel": ".conversation_history_analysis_common_model",
"ConversationHistoryBatchCallModel": ".conversation_history_batch_call_model",
"ConversationHistoryElevenAssistantCommonModel": ".conversation_history_eleven_assistant_common_model",
@@ -2349,6 +2373,7 @@
"DetailedMusicResponse": ".detailed_music_response",
"DialogueInput": ".dialogue_input",
"DialogueInputResponseModel": ".dialogue_input_response_model",
+ "DiscountResposneModel": ".discount_resposne_model",
"DoDubbingResponse": ".do_dubbing_response",
"DocumentUsageModeEnum": ".document_usage_mode_enum",
"DocxExportOptions": ".docx_export_options",
@@ -2608,10 +2633,14 @@
"ProjectExtendedResponseModelAccessLevel": ".project_extended_response_model_access_level",
"ProjectExtendedResponseModelApplyTextNormalization": ".project_extended_response_model_apply_text_normalization",
"ProjectExtendedResponseModelAspectRatio": ".project_extended_response_model_aspect_ratio",
+ "ProjectExtendedResponseModelAssetsItem": ".project_extended_response_model_assets_item",
+ "ProjectExtendedResponseModelAssetsItem_Audio": ".project_extended_response_model_assets_item",
+ "ProjectExtendedResponseModelAssetsItem_Video": ".project_extended_response_model_assets_item",
"ProjectExtendedResponseModelFiction": ".project_extended_response_model_fiction",
"ProjectExtendedResponseModelQualityPreset": ".project_extended_response_model_quality_preset",
"ProjectExtendedResponseModelSourceType": ".project_extended_response_model_source_type",
"ProjectExtendedResponseModelTargetAudience": ".project_extended_response_model_target_audience",
+ "ProjectExternalAudioResponseModel": ".project_external_audio_response_model",
"ProjectResponse": ".project_response",
"ProjectResponseModelAccessLevel": ".project_response_model_access_level",
"ProjectResponseModelAspectRatio": ".project_response_model_aspect_ratio",
@@ -2622,6 +2651,8 @@
"ProjectSnapshotResponse": ".project_snapshot_response",
"ProjectSnapshotsResponse": ".project_snapshots_response",
"ProjectState": ".project_state",
+ "ProjectVideoResponseModel": ".project_video_response_model",
+ "ProjectVideoThumbnailSheetResponseModel": ".project_video_thumbnail_sheet_response_model",
"PromptAgent": ".prompt_agent",
"PromptAgentApiModelInput": ".prompt_agent_api_model_input",
"PromptAgentApiModelInputBackupLlmConfig": ".prompt_agent_api_model_input_backup_llm_config",
@@ -2737,6 +2768,10 @@
"SipUriTransferDestination": ".sip_uri_transfer_destination",
"SkipTurnToolConfig": ".skip_turn_tool_config",
"SkipTurnToolResponseModel": ".skip_turn_tool_response_model",
+ "SoftTimeoutConfig": ".soft_timeout_config",
+ "SoftTimeoutConfigOverride": ".soft_timeout_config_override",
+ "SoftTimeoutConfigOverrideConfig": ".soft_timeout_config_override_config",
+ "SoftTimeoutConfigWorkflowOverride": ".soft_timeout_config_workflow_override",
"SongMetadata": ".song_metadata",
"SongSection": ".song_section",
"SortDirection": ".sort_direction",
@@ -2821,7 +2856,6 @@
"ToolResponseModelToolConfig": ".tool_response_model_tool_config",
"ToolResponseModelToolConfig_ApiIntegrationWebhook": ".tool_response_model_tool_config",
"ToolResponseModelToolConfig_Client": ".tool_response_model_tool_config",
- "ToolResponseModelToolConfig_Mcp": ".tool_response_model_tool_config",
"ToolResponseModelToolConfig_System": ".tool_response_model_tool_config",
"ToolResponseModelToolConfig_Webhook": ".tool_response_model_tool_config",
"ToolType": ".tool_type",
@@ -2847,6 +2881,8 @@
"TtsOptimizeStreamingLatency": ".tts_optimize_streaming_latency",
"TtsOutputFormat": ".tts_output_format",
"TurnConfig": ".turn_config",
+ "TurnConfigOverride": ".turn_config_override",
+ "TurnConfigOverrideConfig": ".turn_config_override_config",
"TurnConfigWorkflowOverride": ".turn_config_workflow_override",
"TurnEagerness": ".turn_eagerness",
"TurnMode": ".turn_mode",
@@ -2908,9 +2944,11 @@
"WebhookAuthMethodType": ".webhook_auth_method_type",
"WebhookEventType": ".webhook_event_type",
"WebhookToolApiSchemaConfigInput": ".webhook_tool_api_schema_config_input",
+ "WebhookToolApiSchemaConfigInputContentType": ".webhook_tool_api_schema_config_input_content_type",
"WebhookToolApiSchemaConfigInputMethod": ".webhook_tool_api_schema_config_input_method",
"WebhookToolApiSchemaConfigInputRequestHeadersValue": ".webhook_tool_api_schema_config_input_request_headers_value",
"WebhookToolApiSchemaConfigOutput": ".webhook_tool_api_schema_config_output",
+ "WebhookToolApiSchemaConfigOutputContentType": ".webhook_tool_api_schema_config_output_content_type",
"WebhookToolApiSchemaConfigOutputMethod": ".webhook_tool_api_schema_config_output_method",
"WebhookToolApiSchemaConfigOutputRequestHeadersValue": ".webhook_tool_api_schema_config_output_request_headers_value",
"WebhookToolConfigInput": ".webhook_tool_config_input",
@@ -2933,6 +2971,8 @@
"WidgetConfigResponseModelAvatar_Image": ".widget_config_response_model_avatar",
"WidgetConfigResponseModelAvatar_Orb": ".widget_config_response_model_avatar",
"WidgetConfigResponseModelAvatar_Url": ".widget_config_response_model_avatar",
+ "WidgetEndFeedbackConfig": ".widget_end_feedback_config",
+ "WidgetEndFeedbackType": ".widget_end_feedback_type",
"WidgetExpandable": ".widget_expandable",
"WidgetFeedbackMode": ".widget_feedback_mode",
"WidgetLanguagePreset": ".widget_language_preset",
@@ -3134,6 +3174,8 @@ def __dir__():
"ApiIntegrationWebhookOverridesInputRequestHeadersValue",
"ApiIntegrationWebhookOverridesOutput",
"ApiIntegrationWebhookOverridesOutputRequestHeadersValue",
+ "ApiIntegrationWebhookToolConfigExternalInput",
+ "ApiIntegrationWebhookToolConfigExternalOutput",
"ApiIntegrationWebhookToolConfigInput",
"ApiIntegrationWebhookToolConfigOutput",
"ArrayJsonSchemaPropertyInput",
@@ -3661,6 +3703,7 @@ def __dir__():
"ConversationConfigOverrideConfig",
"ConversationConfigWorkflowOverride",
"ConversationDeletionSettings",
+ "ConversationFeedbackType",
"ConversationHistoryAnalysisCommonModel",
"ConversationHistoryBatchCallModel",
"ConversationHistoryElevenAssistantCommonModel",
@@ -3778,6 +3821,7 @@ def __dir__():
"DetailedMusicResponse",
"DialogueInput",
"DialogueInputResponseModel",
+ "DiscountResposneModel",
"DoDubbingResponse",
"DocumentUsageModeEnum",
"DocxExportOptions",
@@ -4037,10 +4081,14 @@ def __dir__():
"ProjectExtendedResponseModelAccessLevel",
"ProjectExtendedResponseModelApplyTextNormalization",
"ProjectExtendedResponseModelAspectRatio",
+ "ProjectExtendedResponseModelAssetsItem",
+ "ProjectExtendedResponseModelAssetsItem_Audio",
+ "ProjectExtendedResponseModelAssetsItem_Video",
"ProjectExtendedResponseModelFiction",
"ProjectExtendedResponseModelQualityPreset",
"ProjectExtendedResponseModelSourceType",
"ProjectExtendedResponseModelTargetAudience",
+ "ProjectExternalAudioResponseModel",
"ProjectResponse",
"ProjectResponseModelAccessLevel",
"ProjectResponseModelAspectRatio",
@@ -4051,6 +4099,8 @@ def __dir__():
"ProjectSnapshotResponse",
"ProjectSnapshotsResponse",
"ProjectState",
+ "ProjectVideoResponseModel",
+ "ProjectVideoThumbnailSheetResponseModel",
"PromptAgent",
"PromptAgentApiModelInput",
"PromptAgentApiModelInputBackupLlmConfig",
@@ -4166,6 +4216,10 @@ def __dir__():
"SipUriTransferDestination",
"SkipTurnToolConfig",
"SkipTurnToolResponseModel",
+ "SoftTimeoutConfig",
+ "SoftTimeoutConfigOverride",
+ "SoftTimeoutConfigOverrideConfig",
+ "SoftTimeoutConfigWorkflowOverride",
"SongMetadata",
"SongSection",
"SortDirection",
@@ -4250,7 +4304,6 @@ def __dir__():
"ToolResponseModelToolConfig",
"ToolResponseModelToolConfig_ApiIntegrationWebhook",
"ToolResponseModelToolConfig_Client",
- "ToolResponseModelToolConfig_Mcp",
"ToolResponseModelToolConfig_System",
"ToolResponseModelToolConfig_Webhook",
"ToolType",
@@ -4276,6 +4329,8 @@ def __dir__():
"TtsOptimizeStreamingLatency",
"TtsOutputFormat",
"TurnConfig",
+ "TurnConfigOverride",
+ "TurnConfigOverrideConfig",
"TurnConfigWorkflowOverride",
"TurnEagerness",
"TurnMode",
@@ -4337,9 +4392,11 @@ def __dir__():
"WebhookAuthMethodType",
"WebhookEventType",
"WebhookToolApiSchemaConfigInput",
+ "WebhookToolApiSchemaConfigInputContentType",
"WebhookToolApiSchemaConfigInputMethod",
"WebhookToolApiSchemaConfigInputRequestHeadersValue",
"WebhookToolApiSchemaConfigOutput",
+ "WebhookToolApiSchemaConfigOutputContentType",
"WebhookToolApiSchemaConfigOutputMethod",
"WebhookToolApiSchemaConfigOutputRequestHeadersValue",
"WebhookToolConfigInput",
@@ -4362,6 +4419,8 @@ def __dir__():
"WidgetConfigResponseModelAvatar_Image",
"WidgetConfigResponseModelAvatar_Orb",
"WidgetConfigResponseModelAvatar_Url",
+ "WidgetEndFeedbackConfig",
+ "WidgetEndFeedbackType",
"WidgetExpandable",
"WidgetFeedbackMode",
"WidgetLanguagePreset",
diff --git a/src/elevenlabs/types/add_project_request.py b/src/elevenlabs/types/add_project_request.py
index 7cbbb349..31f40351 100644
--- a/src/elevenlabs/types/add_project_request.py
+++ b/src/elevenlabs/types/add_project_request.py
@@ -2,4 +2,4 @@
import typing
-AddProjectRequest = typing.Optional[typing.Any]
+AddProjectRequest = typing.Any
diff --git a/src/elevenlabs/types/add_sharing_voice_request.py b/src/elevenlabs/types/add_sharing_voice_request.py
index f6a04a45..1a815a22 100644
--- a/src/elevenlabs/types/add_sharing_voice_request.py
+++ b/src/elevenlabs/types/add_sharing_voice_request.py
@@ -2,4 +2,4 @@
import typing
-AddSharingVoiceRequest = typing.Optional[typing.Any]
+AddSharingVoiceRequest = typing.Any
diff --git a/src/elevenlabs/types/age.py b/src/elevenlabs/types/age.py
index 4064f860..080c4d91 100644
--- a/src/elevenlabs/types/age.py
+++ b/src/elevenlabs/types/age.py
@@ -2,4 +2,4 @@
import typing
-Age = typing.Optional[typing.Any]
+Age = typing.Any
diff --git a/src/elevenlabs/types/agent_metadata.py b/src/elevenlabs/types/agent_metadata.py
index 57dac2b8..a789a217 100644
--- a/src/elevenlabs/types/agent_metadata.py
+++ b/src/elevenlabs/types/agent_metadata.py
@@ -9,6 +9,7 @@
class AgentMetadata(UncheckedBaseModel):
agent_id: str
+ branch_id: typing.Optional[str] = None
workflow_node_id: typing.Optional[str] = None
if IS_PYDANTIC_V2:
diff --git a/src/elevenlabs/types/api_integration_webhook_tool_config_external_input.py b/src/elevenlabs/types/api_integration_webhook_tool_config_external_input.py
new file mode 100644
index 00000000..54159293
--- /dev/null
+++ b/src/elevenlabs/types/api_integration_webhook_tool_config_external_input.py
@@ -0,0 +1,98 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .api_integration_webhook_overrides_input import ApiIntegrationWebhookOverridesInput
+from .dynamic_variable_assignment import DynamicVariableAssignment
+from .dynamic_variables_config import DynamicVariablesConfig
+from .tool_call_sound_behavior import ToolCallSoundBehavior
+from .tool_call_sound_type import ToolCallSoundType
+from .tool_execution_mode import ToolExecutionMode
+from .webhook_tool_api_schema_config_input import WebhookToolApiSchemaConfigInput
+
+
+class ApiIntegrationWebhookToolConfigExternalInput(UncheckedBaseModel):
+ """
+    When consumed by clients it is convenient to include the base API schema even though
+ the stored tool config does not include it.
+ """
+
+ name: str
+ description: str = pydantic.Field()
+ """
+ Description of when the tool should be used and what it does.
+ """
+
+ response_timeout_secs: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum time in seconds to wait for the tool call to complete. Must be between 5 and 120 seconds (inclusive).
+ """
+
+ disable_interruptions: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ If true, the user will not be able to interrupt the agent while this tool is running.
+ """
+
+ force_pre_tool_speech: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ If true, the agent will speak before the tool call.
+ """
+
+ assignments: typing.Optional[typing.List[DynamicVariableAssignment]] = pydantic.Field(default=None)
+ """
+ Configuration for extracting values from tool responses and assigning them to dynamic variables
+ """
+
+ tool_call_sound: typing.Optional[ToolCallSoundType] = pydantic.Field(default=None)
+ """
+ Predefined tool call sound type to play during tool execution. If not specified, no tool call sound will be played.
+ """
+
+ tool_call_sound_behavior: typing.Optional[ToolCallSoundBehavior] = pydantic.Field(default=None)
+ """
+ Determines when the tool call sound should play. 'auto' only plays when there's pre-tool speech, 'always' plays for every tool call.
+ """
+
+ dynamic_variables: typing.Optional[DynamicVariablesConfig] = pydantic.Field(default=None)
+ """
+ Configuration for dynamic variables
+ """
+
+ execution_mode: typing.Optional[ToolExecutionMode] = pydantic.Field(default=None)
+ """
+ Determines when and how the tool executes: 'immediate' executes the tool right away when requested by the LLM, 'post_tool_speech' waits for the agent to finish speaking before executing, 'async' runs the tool in the background without blocking - best for long-running operations.
+ """
+
+ tool_version: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The version of the API integration tool
+ """
+
+ api_integration_id: str
+ api_integration_connection_id: str
+ api_schema_overrides: typing.Optional[ApiIntegrationWebhookOverridesInput] = pydantic.Field(default=None)
+ """
+ User overrides applied on top of the base api_schema
+ """
+
+ base_api_schema: WebhookToolApiSchemaConfigInput = pydantic.Field()
+ """
+ The base API schema from the integration definition
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+update_forward_refs(ApiIntegrationWebhookToolConfigExternalInput)
diff --git a/src/elevenlabs/types/api_integration_webhook_tool_config_external_output.py b/src/elevenlabs/types/api_integration_webhook_tool_config_external_output.py
new file mode 100644
index 00000000..4a102cb3
--- /dev/null
+++ b/src/elevenlabs/types/api_integration_webhook_tool_config_external_output.py
@@ -0,0 +1,98 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .api_integration_webhook_overrides_output import ApiIntegrationWebhookOverridesOutput
+from .dynamic_variable_assignment import DynamicVariableAssignment
+from .dynamic_variables_config import DynamicVariablesConfig
+from .tool_call_sound_behavior import ToolCallSoundBehavior
+from .tool_call_sound_type import ToolCallSoundType
+from .tool_execution_mode import ToolExecutionMode
+from .webhook_tool_api_schema_config_output import WebhookToolApiSchemaConfigOutput
+
+
+class ApiIntegrationWebhookToolConfigExternalOutput(UncheckedBaseModel):
+ """
+    When consumed by clients it is convenient to include the base API schema even though
+ the stored tool config does not include it.
+ """
+
+ name: str
+ description: str = pydantic.Field()
+ """
+ Description of when the tool should be used and what it does.
+ """
+
+ response_timeout_secs: int = pydantic.Field()
+ """
+ The maximum time in seconds to wait for the tool call to complete. Must be between 5 and 120 seconds (inclusive).
+ """
+
+ disable_interruptions: bool = pydantic.Field()
+ """
+ If true, the user will not be able to interrupt the agent while this tool is running.
+ """
+
+ force_pre_tool_speech: bool = pydantic.Field()
+ """
+ If true, the agent will speak before the tool call.
+ """
+
+ assignments: typing.List[DynamicVariableAssignment] = pydantic.Field()
+ """
+ Configuration for extracting values from tool responses and assigning them to dynamic variables
+ """
+
+ tool_call_sound: typing.Optional[ToolCallSoundType] = pydantic.Field(default=None)
+ """
+ Predefined tool call sound type to play during tool execution. If not specified, no tool call sound will be played.
+ """
+
+ tool_call_sound_behavior: ToolCallSoundBehavior = pydantic.Field()
+ """
+ Determines when the tool call sound should play. 'auto' only plays when there's pre-tool speech, 'always' plays for every tool call.
+ """
+
+ dynamic_variables: DynamicVariablesConfig = pydantic.Field()
+ """
+ Configuration for dynamic variables
+ """
+
+ execution_mode: ToolExecutionMode = pydantic.Field()
+ """
+ Determines when and how the tool executes: 'immediate' executes the tool right away when requested by the LLM, 'post_tool_speech' waits for the agent to finish speaking before executing, 'async' runs the tool in the background without blocking - best for long-running operations.
+ """
+
+ tool_version: str = pydantic.Field()
+ """
+ The version of the API integration tool
+ """
+
+ api_integration_id: str
+ api_integration_connection_id: str
+ api_schema_overrides: typing.Optional[ApiIntegrationWebhookOverridesOutput] = pydantic.Field(default=None)
+ """
+ User overrides applied on top of the base api_schema
+ """
+
+ base_api_schema: WebhookToolApiSchemaConfigOutput = pydantic.Field()
+ """
+ The base API schema from the integration definition
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+update_forward_refs(ApiIntegrationWebhookToolConfigExternalOutput)
diff --git a/src/elevenlabs/types/api_integration_webhook_tool_config_output.py b/src/elevenlabs/types/api_integration_webhook_tool_config_output.py
index 723f829d..cb556239 100644
--- a/src/elevenlabs/types/api_integration_webhook_tool_config_output.py
+++ b/src/elevenlabs/types/api_integration_webhook_tool_config_output.py
@@ -22,22 +22,22 @@ class ApiIntegrationWebhookToolConfigOutput(UncheckedBaseModel):
Description of when the tool should be used and what it does.
"""
- response_timeout_secs: typing.Optional[int] = pydantic.Field(default=None)
+ response_timeout_secs: int = pydantic.Field()
"""
The maximum time in seconds to wait for the tool call to complete. Must be between 5 and 120 seconds (inclusive).
"""
- disable_interruptions: typing.Optional[bool] = pydantic.Field(default=None)
+ disable_interruptions: bool = pydantic.Field()
"""
If true, the user will not be able to interrupt the agent while this tool is running.
"""
- force_pre_tool_speech: typing.Optional[bool] = pydantic.Field(default=None)
+ force_pre_tool_speech: bool = pydantic.Field()
"""
If true, the agent will speak before the tool call.
"""
- assignments: typing.Optional[typing.List[DynamicVariableAssignment]] = pydantic.Field(default=None)
+ assignments: typing.List[DynamicVariableAssignment] = pydantic.Field()
"""
Configuration for extracting values from tool responses and assigning them to dynamic variables
"""
@@ -47,22 +47,22 @@ class ApiIntegrationWebhookToolConfigOutput(UncheckedBaseModel):
Predefined tool call sound type to play during tool execution. If not specified, no tool call sound will be played.
"""
- tool_call_sound_behavior: typing.Optional[ToolCallSoundBehavior] = pydantic.Field(default=None)
+ tool_call_sound_behavior: ToolCallSoundBehavior = pydantic.Field()
"""
Determines when the tool call sound should play. 'auto' only plays when there's pre-tool speech, 'always' plays for every tool call.
"""
- dynamic_variables: typing.Optional[DynamicVariablesConfig] = pydantic.Field(default=None)
+ dynamic_variables: DynamicVariablesConfig = pydantic.Field()
"""
Configuration for dynamic variables
"""
- execution_mode: typing.Optional[ToolExecutionMode] = pydantic.Field(default=None)
+ execution_mode: ToolExecutionMode = pydantic.Field()
"""
Determines when and how the tool executes: 'immediate' executes the tool right away when requested by the LLM, 'post_tool_speech' waits for the agent to finish speaking before executing, 'async' runs the tool in the background without blocking - best for long-running operations.
"""
- tool_version: typing.Optional[str] = pydantic.Field(default=None)
+ tool_version: str = pydantic.Field()
"""
The version of the API integration tool
"""
diff --git a/src/elevenlabs/types/array_json_schema_property_input.py b/src/elevenlabs/types/array_json_schema_property_input.py
index e3ed3e60..507a6b9c 100644
--- a/src/elevenlabs/types/array_json_schema_property_input.py
+++ b/src/elevenlabs/types/array_json_schema_property_input.py
@@ -24,6 +24,7 @@ class Config:
extra = pydantic.Extra.allow
+from .object_json_schema_property_input import ObjectJsonSchemaPropertyInput # noqa: E402, I001
from .array_json_schema_property_input_items import ArrayJsonSchemaPropertyInputItems # noqa: E402, I001
-update_forward_refs(ArrayJsonSchemaPropertyInput)
+update_forward_refs(ArrayJsonSchemaPropertyInput, ObjectJsonSchemaPropertyInput=ObjectJsonSchemaPropertyInput)
diff --git a/src/elevenlabs/types/array_json_schema_property_output.py b/src/elevenlabs/types/array_json_schema_property_output.py
index be0abc8e..b5a72592 100644
--- a/src/elevenlabs/types/array_json_schema_property_output.py
+++ b/src/elevenlabs/types/array_json_schema_property_output.py
@@ -24,6 +24,7 @@ class Config:
extra = pydantic.Extra.allow
+from .object_json_schema_property_output import ObjectJsonSchemaPropertyOutput # noqa: E402, I001
from .array_json_schema_property_output_items import ArrayJsonSchemaPropertyOutputItems # noqa: E402, I001
-update_forward_refs(ArrayJsonSchemaPropertyOutput)
+update_forward_refs(ArrayJsonSchemaPropertyOutput, ObjectJsonSchemaPropertyOutput=ObjectJsonSchemaPropertyOutput)
diff --git a/src/elevenlabs/types/character_usage_response.py b/src/elevenlabs/types/character_usage_response.py
index dbff3ef0..25d6dece 100644
--- a/src/elevenlabs/types/character_usage_response.py
+++ b/src/elevenlabs/types/character_usage_response.py
@@ -2,4 +2,4 @@
import typing
-CharacterUsageResponse = typing.Optional[typing.Any]
+CharacterUsageResponse = typing.Any
diff --git a/src/elevenlabs/types/conversation_config_client_override_config_input.py b/src/elevenlabs/types/conversation_config_client_override_config_input.py
index 00cbe2ba..cc947979 100644
--- a/src/elevenlabs/types/conversation_config_client_override_config_input.py
+++ b/src/elevenlabs/types/conversation_config_client_override_config_input.py
@@ -8,9 +8,15 @@
from .agent_config_override_config import AgentConfigOverrideConfig
from .conversation_config_override_config import ConversationConfigOverrideConfig
from .tts_conversational_config_override_config import TtsConversationalConfigOverrideConfig
+from .turn_config_override_config import TurnConfigOverrideConfig
class ConversationConfigClientOverrideConfigInput(UncheckedBaseModel):
+ turn: typing.Optional[TurnConfigOverrideConfig] = pydantic.Field(default=None)
+ """
+ Configures overrides for nested fields.
+ """
+
tts: typing.Optional[TtsConversationalConfigOverrideConfig] = pydantic.Field(default=None)
"""
Configures overrides for nested fields.
diff --git a/src/elevenlabs/types/conversation_config_client_override_config_output.py b/src/elevenlabs/types/conversation_config_client_override_config_output.py
index f6ff96af..88ec4fc4 100644
--- a/src/elevenlabs/types/conversation_config_client_override_config_output.py
+++ b/src/elevenlabs/types/conversation_config_client_override_config_output.py
@@ -8,9 +8,15 @@
from .agent_config_override_config import AgentConfigOverrideConfig
from .conversation_config_override_config import ConversationConfigOverrideConfig
from .tts_conversational_config_override_config import TtsConversationalConfigOverrideConfig
+from .turn_config_override_config import TurnConfigOverrideConfig
class ConversationConfigClientOverrideConfigOutput(UncheckedBaseModel):
+ turn: typing.Optional[TurnConfigOverrideConfig] = pydantic.Field(default=None)
+ """
+ Configures overrides for nested fields.
+ """
+
tts: typing.Optional[TtsConversationalConfigOverrideConfig] = pydantic.Field(default=None)
"""
Configures overrides for nested fields.
diff --git a/src/elevenlabs/types/conversation_config_client_override_input.py b/src/elevenlabs/types/conversation_config_client_override_input.py
index c8eaeeec..8a04d1e9 100644
--- a/src/elevenlabs/types/conversation_config_client_override_input.py
+++ b/src/elevenlabs/types/conversation_config_client_override_input.py
@@ -8,9 +8,15 @@
from .agent_config_override_input import AgentConfigOverrideInput
from .conversation_config_override import ConversationConfigOverride
from .tts_conversational_config_override import TtsConversationalConfigOverride
+from .turn_config_override import TurnConfigOverride
class ConversationConfigClientOverrideInput(UncheckedBaseModel):
+ turn: typing.Optional[TurnConfigOverride] = pydantic.Field(default=None)
+ """
+ Configuration for turn detection
+ """
+
tts: typing.Optional[TtsConversationalConfigOverride] = pydantic.Field(default=None)
"""
Configuration for conversational text to speech
diff --git a/src/elevenlabs/types/conversation_config_client_override_output.py b/src/elevenlabs/types/conversation_config_client_override_output.py
index ce918274..abaad9bc 100644
--- a/src/elevenlabs/types/conversation_config_client_override_output.py
+++ b/src/elevenlabs/types/conversation_config_client_override_output.py
@@ -8,9 +8,15 @@
from .agent_config_override_output import AgentConfigOverrideOutput
from .conversation_config_override import ConversationConfigOverride
from .tts_conversational_config_override import TtsConversationalConfigOverride
+from .turn_config_override import TurnConfigOverride
class ConversationConfigClientOverrideOutput(UncheckedBaseModel):
+ turn: typing.Optional[TurnConfigOverride] = pydantic.Field(default=None)
+ """
+ Configuration for turn detection
+ """
+
tts: typing.Optional[TtsConversationalConfigOverride] = pydantic.Field(default=None)
"""
Configuration for conversational text to speech
diff --git a/src/elevenlabs/types/conversation_feedback_type.py b/src/elevenlabs/types/conversation_feedback_type.py
new file mode 100644
index 00000000..dad58f57
--- /dev/null
+++ b/src/elevenlabs/types/conversation_feedback_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ConversationFeedbackType = typing.Union[typing.Literal["thumbs", "rating"], typing.Any]
diff --git a/src/elevenlabs/types/conversation_history_feedback_common_model.py b/src/elevenlabs/types/conversation_history_feedback_common_model.py
index f130bfdc..4b678082 100644
--- a/src/elevenlabs/types/conversation_history_feedback_common_model.py
+++ b/src/elevenlabs/types/conversation_history_feedback_common_model.py
@@ -5,13 +5,17 @@
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.unchecked_base_model import UncheckedBaseModel
+from .conversation_feedback_type import ConversationFeedbackType
from .user_feedback_score import UserFeedbackScore
class ConversationHistoryFeedbackCommonModel(UncheckedBaseModel):
+ type: typing.Optional[ConversationFeedbackType] = None
overall_score: typing.Optional[UserFeedbackScore] = None
likes: typing.Optional[int] = None
dislikes: typing.Optional[int] = None
+ rating: typing.Optional[int] = None
+ comment: typing.Optional[str] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/conversation_history_transcript_other_tools_result_common_model_type.py b/src/elevenlabs/types/conversation_history_transcript_other_tools_result_common_model_type.py
index 36472fb8..75dd1be8 100644
--- a/src/elevenlabs/types/conversation_history_transcript_other_tools_result_common_model_type.py
+++ b/src/elevenlabs/types/conversation_history_transcript_other_tools_result_common_model_type.py
@@ -3,5 +3,5 @@
import typing
ConversationHistoryTranscriptOtherToolsResultCommonModelType = typing.Union[
- typing.Literal["client", "webhook", "mcp"], typing.Any
+ typing.Literal["client", "webhook", "mcp", "api_integration_webhook"], typing.Any
]
diff --git a/src/elevenlabs/types/conversation_initiation_client_data_internal.py b/src/elevenlabs/types/conversation_initiation_client_data_internal.py
index 393a090d..8b4bc04c 100644
--- a/src/elevenlabs/types/conversation_initiation_client_data_internal.py
+++ b/src/elevenlabs/types/conversation_initiation_client_data_internal.py
@@ -14,7 +14,7 @@
class ConversationInitiationClientDataInternal(UncheckedBaseModel):
conversation_config_override: typing.Optional[ConversationConfigClientOverrideOutput] = None
- custom_llm_extra_body: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
+ custom_llm_extra_body: typing.Optional[typing.Dict[str, typing.Any]] = None
user_id: typing.Optional[str] = pydantic.Field(default=None)
"""
ID of the end user participating in this conversation (for agent owner's user identification)
diff --git a/src/elevenlabs/types/conversation_initiation_client_data_request_input.py b/src/elevenlabs/types/conversation_initiation_client_data_request_input.py
index 7a4d5fef..156a9ebf 100644
--- a/src/elevenlabs/types/conversation_initiation_client_data_request_input.py
+++ b/src/elevenlabs/types/conversation_initiation_client_data_request_input.py
@@ -14,7 +14,7 @@
class ConversationInitiationClientDataRequestInput(UncheckedBaseModel):
conversation_config_override: typing.Optional[ConversationConfigClientOverrideInput] = None
- custom_llm_extra_body: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
+ custom_llm_extra_body: typing.Optional[typing.Dict[str, typing.Any]] = None
user_id: typing.Optional[str] = pydantic.Field(default=None)
"""
ID of the end user participating in this conversation (for agent owner's user identification)
diff --git a/src/elevenlabs/types/conversation_initiation_client_data_request_output.py b/src/elevenlabs/types/conversation_initiation_client_data_request_output.py
index 7795d780..ed6fc457 100644
--- a/src/elevenlabs/types/conversation_initiation_client_data_request_output.py
+++ b/src/elevenlabs/types/conversation_initiation_client_data_request_output.py
@@ -14,7 +14,7 @@
class ConversationInitiationClientDataRequestOutput(UncheckedBaseModel):
conversation_config_override: typing.Optional[ConversationConfigClientOverrideOutput] = None
- custom_llm_extra_body: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
+ custom_llm_extra_body: typing.Optional[typing.Dict[str, typing.Any]] = None
user_id: typing.Optional[str] = pydantic.Field(default=None)
"""
ID of the end user participating in this conversation (for agent owner's user identification)
diff --git a/src/elevenlabs/types/conversation_initiation_source.py b/src/elevenlabs/types/conversation_initiation_source.py
index d44015d8..b6afc603 100644
--- a/src/elevenlabs/types/conversation_initiation_source.py
+++ b/src/elevenlabs/types/conversation_initiation_source.py
@@ -17,6 +17,7 @@
"genesys",
"swift_sdk",
"whatsapp",
+ "flutter_sdk",
],
typing.Any,
]
diff --git a/src/elevenlabs/types/conversation_summary_response_model.py b/src/elevenlabs/types/conversation_summary_response_model.py
index ab3028a3..b4ea5f60 100644
--- a/src/elevenlabs/types/conversation_summary_response_model.py
+++ b/src/elevenlabs/types/conversation_summary_response_model.py
@@ -12,6 +12,7 @@
class ConversationSummaryResponseModel(UncheckedBaseModel):
agent_id: str
+ branch_id: typing.Optional[str] = None
agent_name: typing.Optional[str] = None
conversation_id: str
start_time_unix_secs: int
@@ -22,6 +23,7 @@ class ConversationSummaryResponseModel(UncheckedBaseModel):
transcript_summary: typing.Optional[str] = None
call_summary_title: typing.Optional[str] = None
direction: typing.Optional[ConversationSummaryResponseModelDirection] = None
+ rating: typing.Optional[float] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/create_audio_native_project_request.py b/src/elevenlabs/types/create_audio_native_project_request.py
index 8932716a..2f1f9f9e 100644
--- a/src/elevenlabs/types/create_audio_native_project_request.py
+++ b/src/elevenlabs/types/create_audio_native_project_request.py
@@ -2,4 +2,4 @@
import typing
-CreateAudioNativeProjectRequest = typing.Optional[typing.Any]
+CreateAudioNativeProjectRequest = typing.Any
diff --git a/src/elevenlabs/types/create_transcript_request.py b/src/elevenlabs/types/create_transcript_request.py
index e9e6bc95..9b7c4ae1 100644
--- a/src/elevenlabs/types/create_transcript_request.py
+++ b/src/elevenlabs/types/create_transcript_request.py
@@ -2,4 +2,4 @@
import typing
-CreateTranscriptRequest = typing.Optional[typing.Any]
+CreateTranscriptRequest = typing.Any
diff --git a/src/elevenlabs/types/data_collection_result_common_model.py b/src/elevenlabs/types/data_collection_result_common_model.py
index efcc25b5..50f3eea3 100644
--- a/src/elevenlabs/types/data_collection_result_common_model.py
+++ b/src/elevenlabs/types/data_collection_result_common_model.py
@@ -10,7 +10,7 @@
class DataCollectionResultCommonModel(UncheckedBaseModel):
data_collection_id: str
- value: typing.Optional[typing.Optional[typing.Any]] = None
+ value: typing.Optional[typing.Any] = None
json_schema: typing.Optional[LiteralJsonSchemaProperty] = None
rationale: str
diff --git a/src/elevenlabs/types/delete_chapter_request.py b/src/elevenlabs/types/delete_chapter_request.py
index 4ac8a50e..732fdfab 100644
--- a/src/elevenlabs/types/delete_chapter_request.py
+++ b/src/elevenlabs/types/delete_chapter_request.py
@@ -2,4 +2,4 @@
import typing
-DeleteChapterRequest = typing.Optional[typing.Any]
+DeleteChapterRequest = typing.Any
diff --git a/src/elevenlabs/types/delete_project_request.py b/src/elevenlabs/types/delete_project_request.py
index 66b9ef92..2fa7a30e 100644
--- a/src/elevenlabs/types/delete_project_request.py
+++ b/src/elevenlabs/types/delete_project_request.py
@@ -2,4 +2,4 @@
import typing
-DeleteProjectRequest = typing.Optional[typing.Any]
+DeleteProjectRequest = typing.Any
diff --git a/src/elevenlabs/types/discount_resposne_model.py b/src/elevenlabs/types/discount_resposne_model.py
new file mode 100644
index 00000000..8c43827e
--- /dev/null
+++ b/src/elevenlabs/types/discount_resposne_model.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+
+
+class DiscountResposneModel(UncheckedBaseModel):
+ discount_percent_off: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ The discount applied to the invoice. E.g. [20.0f] for 20% off.
+ """
+
+ discount_amount_off: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ The discount applied to the invoice. E.g. [20.0f] for 20 cents off.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/edit_voice_settings_request.py b/src/elevenlabs/types/edit_voice_settings_request.py
index 53087110..a293b2df 100644
--- a/src/elevenlabs/types/edit_voice_settings_request.py
+++ b/src/elevenlabs/types/edit_voice_settings_request.py
@@ -2,4 +2,4 @@
import typing
-EditVoiceSettingsRequest = typing.Optional[typing.Any]
+EditVoiceSettingsRequest = typing.Any
diff --git a/src/elevenlabs/types/fine_tuning_response.py b/src/elevenlabs/types/fine_tuning_response.py
index a43c4d04..1020e0b7 100644
--- a/src/elevenlabs/types/fine_tuning_response.py
+++ b/src/elevenlabs/types/fine_tuning_response.py
@@ -81,7 +81,7 @@ class FineTuningResponse(UncheckedBaseModel):
The next maximum verification attempts reset time in Unix milliseconds.
"""
- finetuning_state: typing.Optional[typing.Optional[typing.Any]] = None
+ finetuning_state: typing.Optional[typing.Any] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/gender.py b/src/elevenlabs/types/gender.py
index d7a687b8..2da834a9 100644
--- a/src/elevenlabs/types/gender.py
+++ b/src/elevenlabs/types/gender.py
@@ -2,4 +2,4 @@
import typing
-Gender = typing.Optional[typing.Any]
+Gender = typing.Any
diff --git a/src/elevenlabs/types/get_agent_response_model.py b/src/elevenlabs/types/get_agent_response_model.py
index 46c1b68b..8e722c48 100644
--- a/src/elevenlabs/types/get_agent_response_model.py
+++ b/src/elevenlabs/types/get_agent_response_model.py
@@ -61,6 +61,16 @@ class GetAgentResponseModel(UncheckedBaseModel):
Agent tags used to categorize the agent
"""
+ version_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the version the agent is on
+ """
+
+ branch_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the branch the agent is on
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
diff --git a/src/elevenlabs/types/get_chapter_request.py b/src/elevenlabs/types/get_chapter_request.py
index 171f7651..5edd4811 100644
--- a/src/elevenlabs/types/get_chapter_request.py
+++ b/src/elevenlabs/types/get_chapter_request.py
@@ -2,4 +2,4 @@
import typing
-GetChapterRequest = typing.Optional[typing.Any]
+GetChapterRequest = typing.Any
diff --git a/src/elevenlabs/types/get_chapter_snapshots_request.py b/src/elevenlabs/types/get_chapter_snapshots_request.py
index 320aee57..2c5cbefe 100644
--- a/src/elevenlabs/types/get_chapter_snapshots_request.py
+++ b/src/elevenlabs/types/get_chapter_snapshots_request.py
@@ -2,4 +2,4 @@
import typing
-GetChapterSnapshotsRequest = typing.Optional[typing.Any]
+GetChapterSnapshotsRequest = typing.Any
diff --git a/src/elevenlabs/types/get_chapters_request.py b/src/elevenlabs/types/get_chapters_request.py
index bbf7b866..006e8188 100644
--- a/src/elevenlabs/types/get_chapters_request.py
+++ b/src/elevenlabs/types/get_chapters_request.py
@@ -2,4 +2,4 @@
import typing
-GetChaptersRequest = typing.Optional[typing.Any]
+GetChaptersRequest = typing.Any
diff --git a/src/elevenlabs/types/get_conversation_response_model.py b/src/elevenlabs/types/get_conversation_response_model.py
index 29e48163..93175661 100644
--- a/src/elevenlabs/types/get_conversation_response_model.py
+++ b/src/elevenlabs/types/get_conversation_response_model.py
@@ -19,6 +19,7 @@ class GetConversationResponseModel(UncheckedBaseModel):
conversation_id: str
status: GetConversationResponseModelStatus
user_id: typing.Optional[str] = None
+ branch_id: typing.Optional[str] = None
transcript: typing.List[ConversationHistoryTranscriptCommonModelOutput]
metadata: ConversationHistoryMetadataCommonModel
analysis: typing.Optional[ConversationHistoryAnalysisCommonModel] = None
diff --git a/src/elevenlabs/types/get_phone_number_response.py b/src/elevenlabs/types/get_phone_number_response.py
index dbfa4178..da642dcb 100644
--- a/src/elevenlabs/types/get_phone_number_response.py
+++ b/src/elevenlabs/types/get_phone_number_response.py
@@ -2,4 +2,4 @@
import typing
-GetPhoneNumberResponse = typing.Optional[typing.Any]
+GetPhoneNumberResponse = typing.Any
diff --git a/src/elevenlabs/types/get_project_request.py b/src/elevenlabs/types/get_project_request.py
index f6298923..ea6d71b0 100644
--- a/src/elevenlabs/types/get_project_request.py
+++ b/src/elevenlabs/types/get_project_request.py
@@ -2,4 +2,4 @@
import typing
-GetProjectRequest = typing.Optional[typing.Any]
+GetProjectRequest = typing.Any
diff --git a/src/elevenlabs/types/get_projects_request.py b/src/elevenlabs/types/get_projects_request.py
index 7a0b5935..3d4ff95d 100644
--- a/src/elevenlabs/types/get_projects_request.py
+++ b/src/elevenlabs/types/get_projects_request.py
@@ -2,4 +2,4 @@
import typing
-GetProjectsRequest = typing.Optional[typing.Any]
+GetProjectsRequest = typing.Any
diff --git a/src/elevenlabs/types/get_pronunciation_dictionaries_response.py b/src/elevenlabs/types/get_pronunciation_dictionaries_response.py
index b89626c4..8879dcd7 100644
--- a/src/elevenlabs/types/get_pronunciation_dictionaries_response.py
+++ b/src/elevenlabs/types/get_pronunciation_dictionaries_response.py
@@ -2,4 +2,4 @@
import typing
-GetPronunciationDictionariesResponse = typing.Optional[typing.Any]
+GetPronunciationDictionariesResponse = typing.Any
diff --git a/src/elevenlabs/types/get_pronunciation_dictionary_response.py b/src/elevenlabs/types/get_pronunciation_dictionary_response.py
index 1fb041ca..06dd7eab 100644
--- a/src/elevenlabs/types/get_pronunciation_dictionary_response.py
+++ b/src/elevenlabs/types/get_pronunciation_dictionary_response.py
@@ -2,4 +2,4 @@
import typing
-GetPronunciationDictionaryResponse = typing.Optional[typing.Any]
+GetPronunciationDictionaryResponse = typing.Any
diff --git a/src/elevenlabs/types/get_test_suite_invocation_response_model.py b/src/elevenlabs/types/get_test_suite_invocation_response_model.py
index e9d2d6a4..eca5125f 100644
--- a/src/elevenlabs/types/get_test_suite_invocation_response_model.py
+++ b/src/elevenlabs/types/get_test_suite_invocation_response_model.py
@@ -13,6 +13,7 @@
class GetTestSuiteInvocationResponseModel(UncheckedBaseModel):
id: str
agent_id: typing.Optional[str] = None
+ branch_id: typing.Optional[str] = None
created_at: typing.Optional[int] = None
test_runs: typing.List[UnitTestRunResponseModel]
diff --git a/src/elevenlabs/types/history_item_response.py b/src/elevenlabs/types/history_item_response.py
index 6e6bbb81..2e242648 100644
--- a/src/elevenlabs/types/history_item_response.py
+++ b/src/elevenlabs/types/history_item_response.py
@@ -8,8 +8,8 @@
class HistoryItemResponse(UncheckedBaseModel):
- state: typing.Optional[typing.Optional[typing.Any]] = None
- voice_category: typing.Optional[typing.Optional[typing.Any]] = None
+ state: typing.Optional[typing.Any] = None
+ voice_category: typing.Optional[typing.Any] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/invoice_response.py b/src/elevenlabs/types/invoice_response.py
index 0e9f27f3..6065e880 100644
--- a/src/elevenlabs/types/invoice_response.py
+++ b/src/elevenlabs/types/invoice_response.py
@@ -5,6 +5,7 @@
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.unchecked_base_model import UncheckedBaseModel
+from .discount_resposne_model import DiscountResposneModel
from .invoice_response_model_payment_intent_status import InvoiceResponseModelPaymentIntentStatus
@@ -26,12 +27,17 @@ class InvoiceResponse(UncheckedBaseModel):
discount_percent_off: typing.Optional[float] = pydantic.Field(default=None)
"""
- The discount applied to the invoice. E.g. [20.0f] for 20% off.
+ Deprecated. Use [discounts] instead. The discount applied to the invoice. E.g. [20.0f] for 20% off.
"""
discount_amount_off: typing.Optional[float] = pydantic.Field(default=None)
"""
- The discount applied to the invoice. E.g. [20.0f] for 20% off.
+ Deprecated. Use [discounts] instead. The discount applied to the invoice. E.g. [20.0f] for 20 cents off.
+ """
+
+ discounts: typing.List[DiscountResposneModel] = pydantic.Field()
+ """
+ The discounts applied to the invoice.
"""
next_payment_attempt_unix: int = pydantic.Field()
diff --git a/src/elevenlabs/types/language_preset_input.py b/src/elevenlabs/types/language_preset_input.py
index 9559254d..11239cc0 100644
--- a/src/elevenlabs/types/language_preset_input.py
+++ b/src/elevenlabs/types/language_preset_input.py
@@ -20,6 +20,11 @@ class LanguagePresetInput(UncheckedBaseModel):
The translation of the first message
"""
+ soft_timeout_translation: typing.Optional[LanguagePresetTranslation] = pydantic.Field(default=None)
+ """
+ The translation of the soft timeout message
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
diff --git a/src/elevenlabs/types/language_preset_output.py b/src/elevenlabs/types/language_preset_output.py
index 86ce3934..231ec5fb 100644
--- a/src/elevenlabs/types/language_preset_output.py
+++ b/src/elevenlabs/types/language_preset_output.py
@@ -20,6 +20,11 @@ class LanguagePresetOutput(UncheckedBaseModel):
The translation of the first message
"""
+ soft_timeout_translation: typing.Optional[LanguagePresetTranslation] = pydantic.Field(default=None)
+ """
+ The translation of the soft timeout message
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
diff --git a/src/elevenlabs/types/object_json_schema_property_input.py b/src/elevenlabs/types/object_json_schema_property_input.py
index fcf8fea5..c9785454 100644
--- a/src/elevenlabs/types/object_json_schema_property_input.py
+++ b/src/elevenlabs/types/object_json_schema_property_input.py
@@ -24,6 +24,7 @@ class Config:
smart_union = True
extra = pydantic.Extra.allow
+
from .array_json_schema_property_input import ArrayJsonSchemaPropertyInput # noqa: E402, I001
from .object_json_schema_property_input_properties_value import ObjectJsonSchemaPropertyInputPropertiesValue # noqa: E402, I001
diff --git a/src/elevenlabs/types/object_json_schema_property_output.py b/src/elevenlabs/types/object_json_schema_property_output.py
index 53eccae7..4c5fb590 100644
--- a/src/elevenlabs/types/object_json_schema_property_output.py
+++ b/src/elevenlabs/types/object_json_schema_property_output.py
@@ -25,7 +25,6 @@ class Config:
extra = pydantic.Extra.allow
-
from .array_json_schema_property_output import ArrayJsonSchemaPropertyOutput # noqa: E402, I001
from .object_json_schema_property_output_properties_value import ObjectJsonSchemaPropertyOutputPropertiesValue # noqa: E402, I001
diff --git a/src/elevenlabs/types/object_override_input.py b/src/elevenlabs/types/object_override_input.py
index 7ea9ebdd..3af58f0d 100644
--- a/src/elevenlabs/types/object_override_input.py
+++ b/src/elevenlabs/types/object_override_input.py
@@ -5,7 +5,7 @@
import typing
import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.unchecked_base_model import UncheckedBaseModel
@@ -25,5 +25,3 @@ class Config:
from .object_override_input_properties_value import ObjectOverrideInputPropertiesValue # noqa: E402, I001
-
-update_forward_refs(ObjectOverrideInput)
diff --git a/src/elevenlabs/types/object_override_output.py b/src/elevenlabs/types/object_override_output.py
index 9e7d1cd6..c832ac66 100644
--- a/src/elevenlabs/types/object_override_output.py
+++ b/src/elevenlabs/types/object_override_output.py
@@ -5,7 +5,7 @@
import typing
import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.unchecked_base_model import UncheckedBaseModel
@@ -25,5 +25,3 @@ class Config:
from .object_override_output_properties_value import ObjectOverrideOutputPropertiesValue # noqa: E402, I001
-
-update_forward_refs(ObjectOverrideOutput)
diff --git a/src/elevenlabs/types/project_extended_response.py b/src/elevenlabs/types/project_extended_response.py
index 7dc8d8d5..7d303b14 100644
--- a/src/elevenlabs/types/project_extended_response.py
+++ b/src/elevenlabs/types/project_extended_response.py
@@ -11,6 +11,7 @@
from .project_extended_response_model_access_level import ProjectExtendedResponseModelAccessLevel
from .project_extended_response_model_apply_text_normalization import ProjectExtendedResponseModelApplyTextNormalization
from .project_extended_response_model_aspect_ratio import ProjectExtendedResponseModelAspectRatio
+from .project_extended_response_model_assets_item import ProjectExtendedResponseModelAssetsItem
from .project_extended_response_model_fiction import ProjectExtendedResponseModelFiction
from .project_extended_response_model_quality_preset import ProjectExtendedResponseModelQualityPreset
from .project_extended_response_model_source_type import ProjectExtendedResponseModelSourceType
@@ -211,11 +212,16 @@ class ProjectExtendedResponse(UncheckedBaseModel):
Whether text normalization is applied to the project.
"""
- experimental: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ experimental: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
"""
Experimental features for the project.
"""
+ assets: typing.List[ProjectExtendedResponseModelAssetsItem] = pydantic.Field()
+ """
+ List of uploaded assets e.g. videos, audios.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
diff --git a/src/elevenlabs/types/project_extended_response_model_assets_item.py b/src/elevenlabs/types/project_extended_response_model_assets_item.py
new file mode 100644
index 00000000..06832b7b
--- /dev/null
+++ b/src/elevenlabs/types/project_extended_response_model_assets_item.py
@@ -0,0 +1,92 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import pydantic
+import typing_extensions
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel, UnionMetadata
+from .project_video_thumbnail_sheet_response_model import ProjectVideoThumbnailSheetResponseModel
+
+
+class ProjectExtendedResponseModelAssetsItem_Video(UncheckedBaseModel):
+ type: typing.Literal["video"] = "video"
+ video_id: str
+ filename: str
+ signed_url: str
+ signed_preview_url: typing.Optional[str] = None
+ offset_ms: int
+ duration_ms: int
+ volume_gain_db: float
+ muted: bool
+ width: int
+ height: int
+ codec: str
+ order: str
+ preview_job_progress: float
+ created_at_ms: int
+ updated_at_ms: int
+ error: typing.Optional[str] = None
+ thumbnail_interval_seconds: float
+ thumbnail_size: typing.List[int]
+ thumbnail_sheets: typing.List[ProjectVideoThumbnailSheetResponseModel]
+ start_time_ms: int
+ end_time_ms: int
+ asset_preview_signed_url: typing.Optional[str] = None
+ source_video_id: typing.Optional[str] = None
+ source_asset_id: typing.Optional[str] = None
+ pending_block_ids: typing.List[str]
+ import_speech_progress: typing.Optional[float] = None
+ speech_imported: typing.Optional[bool] = None
+ current_snapshot_id: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class ProjectExtendedResponseModelAssetsItem_Audio(UncheckedBaseModel):
+ type: typing.Literal["audio"] = "audio"
+ external_audio_id: str
+ filename: str
+ signed_url: str
+ offset_ms: int
+ duration_ms: int
+ start_time_ms: int
+ end_time_ms: int
+ order: str
+ track_id: str
+ created_at_ms: int
+ updated_at_ms: int
+ volume_gain_db: typing.Optional[float] = None
+ muted: typing.Optional[bool] = None
+ fade_in_ms: typing.Optional[int] = None
+ fade_out_ms: typing.Optional[int] = None
+ source_external_audio_id: typing.Optional[str] = None
+ source_asset_id: typing.Optional[str] = None
+ pending_block_ids: typing.List[str]
+ import_speech_progress: typing.Optional[float] = None
+ speech_imported: typing.Optional[bool] = None
+ current_snapshot_id: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+ProjectExtendedResponseModelAssetsItem = typing_extensions.Annotated[
+ typing.Union[ProjectExtendedResponseModelAssetsItem_Video, ProjectExtendedResponseModelAssetsItem_Audio],
+ UnionMetadata(discriminant="type"),
+]
diff --git a/src/elevenlabs/types/project_external_audio_response_model.py b/src/elevenlabs/types/project_external_audio_response_model.py
new file mode 100644
index 00000000..437ca4f4
--- /dev/null
+++ b/src/elevenlabs/types/project_external_audio_response_model.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+
+
+class ProjectExternalAudioResponseModel(UncheckedBaseModel):
+ external_audio_id: str
+ filename: str
+ signed_url: str
+ offset_ms: int
+ duration_ms: int
+ start_time_ms: int
+ end_time_ms: int
+ order: str
+ track_id: str
+ created_at_ms: int
+ updated_at_ms: int
+ volume_gain_db: typing.Optional[float] = None
+ muted: typing.Optional[bool] = None
+ fade_in_ms: typing.Optional[int] = None
+ fade_out_ms: typing.Optional[int] = None
+ source_external_audio_id: typing.Optional[str] = None
+ source_asset_id: typing.Optional[str] = None
+ pending_block_ids: typing.List[str]
+ import_speech_progress: typing.Optional[float] = None
+ speech_imported: typing.Optional[bool] = None
+ current_snapshot_id: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/project_snapshot_extended_response_model.py b/src/elevenlabs/types/project_snapshot_extended_response_model.py
index 7e71977c..1b9e4844 100644
--- a/src/elevenlabs/types/project_snapshot_extended_response_model.py
+++ b/src/elevenlabs/types/project_snapshot_extended_response_model.py
@@ -29,17 +29,21 @@ class ProjectSnapshotExtendedResponseModel(UncheckedBaseModel):
The name of the project snapshot.
"""
- audio_upload: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ audio_upload: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
"""
(Deprecated)
"""
- zip_upload: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ zip_upload: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
"""
(Deprecated)
"""
character_alignments: typing.List[CharacterAlignmentModel]
+ audio_duration_secs: float = pydantic.Field()
+ """
+ The total duration of the audio in seconds.
+ """
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/project_snapshot_response.py b/src/elevenlabs/types/project_snapshot_response.py
index da74a85e..0602a1f9 100644
--- a/src/elevenlabs/types/project_snapshot_response.py
+++ b/src/elevenlabs/types/project_snapshot_response.py
@@ -28,12 +28,12 @@ class ProjectSnapshotResponse(UncheckedBaseModel):
The name of the project snapshot.
"""
- audio_upload: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ audio_upload: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
"""
(Deprecated)
"""
- zip_upload: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ zip_upload: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
"""
(Deprecated)
"""
diff --git a/src/elevenlabs/types/project_video_response_model.py b/src/elevenlabs/types/project_video_response_model.py
new file mode 100644
index 00000000..90a72b72
--- /dev/null
+++ b/src/elevenlabs/types/project_video_response_model.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .project_video_thumbnail_sheet_response_model import ProjectVideoThumbnailSheetResponseModel
+
+
+class ProjectVideoResponseModel(UncheckedBaseModel):
+ video_id: str
+ filename: str
+ signed_url: str
+ signed_preview_url: typing.Optional[str] = None
+ offset_ms: int
+ duration_ms: int
+ volume_gain_db: float
+ muted: bool
+ width: int
+ height: int
+ codec: str
+ order: str
+ preview_job_progress: float
+ created_at_ms: int
+ updated_at_ms: int
+ error: typing.Optional[str] = None
+ thumbnail_interval_seconds: float
+ thumbnail_size: typing.List[int]
+ thumbnail_sheets: typing.List[ProjectVideoThumbnailSheetResponseModel]
+ start_time_ms: int
+ end_time_ms: int
+ asset_preview_signed_url: typing.Optional[str] = None
+ source_video_id: typing.Optional[str] = None
+ source_asset_id: typing.Optional[str] = None
+ pending_block_ids: typing.List[str]
+ import_speech_progress: typing.Optional[float] = None
+ speech_imported: typing.Optional[bool] = None
+ current_snapshot_id: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/project_video_thumbnail_sheet_response_model.py b/src/elevenlabs/types/project_video_thumbnail_sheet_response_model.py
new file mode 100644
index 00000000..63238cdc
--- /dev/null
+++ b/src/elevenlabs/types/project_video_thumbnail_sheet_response_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+
+
+class ProjectVideoThumbnailSheetResponseModel(UncheckedBaseModel):
+ start_thumbnail_index: int
+ thumbnail_count: int
+ signed_cloud_url: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/prompt_agent.py b/src/elevenlabs/types/prompt_agent.py
index 6d3b60eb..85b35a95 100644
--- a/src/elevenlabs/types/prompt_agent.py
+++ b/src/elevenlabs/types/prompt_agent.py
@@ -2,4 +2,4 @@
import typing
-PromptAgent = typing.Optional[typing.Any]
+PromptAgent = typing.Any
diff --git a/src/elevenlabs/types/prompt_agent_api_model_input_tools_item.py b/src/elevenlabs/types/prompt_agent_api_model_input_tools_item.py
index 7f9cea6e..970d7a9b 100644
--- a/src/elevenlabs/types/prompt_agent_api_model_input_tools_item.py
+++ b/src/elevenlabs/types/prompt_agent_api_model_input_tools_item.py
@@ -79,7 +79,7 @@ class Config:
class PromptAgentApiModelInputToolsItem_Mcp(UncheckedBaseModel):
- value: typing.Optional[typing.Any] = None
+ value: typing.Any
type: typing.Literal["mcp"] = "mcp"
if IS_PYDANTIC_V2:
diff --git a/src/elevenlabs/types/prompt_agent_api_model_output_tools_item.py b/src/elevenlabs/types/prompt_agent_api_model_output_tools_item.py
index d7f25380..3c952873 100644
--- a/src/elevenlabs/types/prompt_agent_api_model_output_tools_item.py
+++ b/src/elevenlabs/types/prompt_agent_api_model_output_tools_item.py
@@ -26,15 +26,15 @@ class PromptAgentApiModelOutputToolsItem_ApiIntegrationWebhook(UncheckedBaseMode
type: typing.Literal["api_integration_webhook"] = "api_integration_webhook"
name: str
description: str
- response_timeout_secs: typing.Optional[int] = None
- disable_interruptions: typing.Optional[bool] = None
- force_pre_tool_speech: typing.Optional[bool] = None
- assignments: typing.Optional[typing.List[DynamicVariableAssignment]] = None
+ response_timeout_secs: int
+ disable_interruptions: bool
+ force_pre_tool_speech: bool
+ assignments: typing.List[DynamicVariableAssignment]
tool_call_sound: typing.Optional[ToolCallSoundType] = None
- tool_call_sound_behavior: typing.Optional[ToolCallSoundBehavior] = None
- dynamic_variables: typing.Optional[DynamicVariablesConfig] = None
- execution_mode: typing.Optional[ToolExecutionMode] = None
- tool_version: typing.Optional[str] = None
+ tool_call_sound_behavior: ToolCallSoundBehavior
+ dynamic_variables: DynamicVariablesConfig
+ execution_mode: ToolExecutionMode
+ tool_version: str
api_integration_id: str
api_integration_connection_id: str
api_schema_overrides: typing.Optional[ApiIntegrationWebhookOverridesOutput] = None
@@ -79,7 +79,7 @@ class Config:
class PromptAgentApiModelOutputToolsItem_Mcp(UncheckedBaseModel):
- value: typing.Optional[typing.Any] = None
+ value: typing.Any
type: typing.Literal["mcp"] = "mcp"
if IS_PYDANTIC_V2:
diff --git a/src/elevenlabs/types/prompt_agent_api_model_workflow_override_input_tools_item.py b/src/elevenlabs/types/prompt_agent_api_model_workflow_override_input_tools_item.py
index c07b8a75..a43e177e 100644
--- a/src/elevenlabs/types/prompt_agent_api_model_workflow_override_input_tools_item.py
+++ b/src/elevenlabs/types/prompt_agent_api_model_workflow_override_input_tools_item.py
@@ -79,7 +79,7 @@ class Config:
class PromptAgentApiModelWorkflowOverrideInputToolsItem_Mcp(UncheckedBaseModel):
- value: typing.Optional[typing.Any] = None
+ value: typing.Any
type: typing.Literal["mcp"] = "mcp"
if IS_PYDANTIC_V2:
diff --git a/src/elevenlabs/types/prompt_agent_api_model_workflow_override_output_tools_item.py b/src/elevenlabs/types/prompt_agent_api_model_workflow_override_output_tools_item.py
index e2cd45d7..f051e910 100644
--- a/src/elevenlabs/types/prompt_agent_api_model_workflow_override_output_tools_item.py
+++ b/src/elevenlabs/types/prompt_agent_api_model_workflow_override_output_tools_item.py
@@ -26,15 +26,15 @@ class PromptAgentApiModelWorkflowOverrideOutputToolsItem_ApiIntegrationWebhook(U
type: typing.Literal["api_integration_webhook"] = "api_integration_webhook"
name: str
description: str
- response_timeout_secs: typing.Optional[int] = None
- disable_interruptions: typing.Optional[bool] = None
- force_pre_tool_speech: typing.Optional[bool] = None
- assignments: typing.Optional[typing.List[DynamicVariableAssignment]] = None
+ response_timeout_secs: int
+ disable_interruptions: bool
+ force_pre_tool_speech: bool
+ assignments: typing.List[DynamicVariableAssignment]
tool_call_sound: typing.Optional[ToolCallSoundType] = None
- tool_call_sound_behavior: typing.Optional[ToolCallSoundBehavior] = None
- dynamic_variables: typing.Optional[DynamicVariablesConfig] = None
- execution_mode: typing.Optional[ToolExecutionMode] = None
- tool_version: typing.Optional[str] = None
+ tool_call_sound_behavior: ToolCallSoundBehavior
+ dynamic_variables: DynamicVariablesConfig
+ execution_mode: ToolExecutionMode
+ tool_version: str
api_integration_id: str
api_integration_connection_id: str
api_schema_overrides: typing.Optional[ApiIntegrationWebhookOverridesOutput] = None
@@ -79,7 +79,7 @@ class Config:
class PromptAgentApiModelWorkflowOverrideOutputToolsItem_Mcp(UncheckedBaseModel):
- value: typing.Optional[typing.Any] = None
+ value: typing.Any
type: typing.Literal["mcp"] = "mcp"
if IS_PYDANTIC_V2:
diff --git a/src/elevenlabs/types/prompt_agent_db_model.py b/src/elevenlabs/types/prompt_agent_db_model.py
index ba6ae76d..00fdabdf 100644
--- a/src/elevenlabs/types/prompt_agent_db_model.py
+++ b/src/elevenlabs/types/prompt_agent_db_model.py
@@ -8,7 +8,7 @@
class PromptAgentDbModel(UncheckedBaseModel):
- tools: typing.Optional[typing.Optional[typing.Any]] = None
+ tools: typing.Optional[typing.Any] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/remove_member_from_group_request.py b/src/elevenlabs/types/remove_member_from_group_request.py
index 4d73447d..bbc1fd8c 100644
--- a/src/elevenlabs/types/remove_member_from_group_request.py
+++ b/src/elevenlabs/types/remove_member_from_group_request.py
@@ -2,4 +2,4 @@
import typing
-RemoveMemberFromGroupRequest = typing.Optional[typing.Any]
+RemoveMemberFromGroupRequest = typing.Any
diff --git a/src/elevenlabs/types/save_voice_preview_request.py b/src/elevenlabs/types/save_voice_preview_request.py
index a843ff6c..0603e2af 100644
--- a/src/elevenlabs/types/save_voice_preview_request.py
+++ b/src/elevenlabs/types/save_voice_preview_request.py
@@ -2,4 +2,4 @@
import typing
-SaveVoicePreviewRequest = typing.Optional[typing.Any]
+SaveVoicePreviewRequest = typing.Any
diff --git a/src/elevenlabs/types/soft_timeout_config.py b/src/elevenlabs/types/soft_timeout_config.py
new file mode 100644
index 00000000..9bdbff7f
--- /dev/null
+++ b/src/elevenlabs/types/soft_timeout_config.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+
+
+class SoftTimeoutConfig(UncheckedBaseModel):
+ """
+ Configuration for soft timeout functionality during LLM response generation.
+ """
+
+ timeout_seconds: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Time in seconds before showing the predefined message while waiting for LLM response. Set to -1 to disable.
+ """
+
+ message: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Message to show when soft timeout is reached while waiting for LLM response
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/soft_timeout_config_override.py b/src/elevenlabs/types/soft_timeout_config_override.py
new file mode 100644
index 00000000..24f9f1eb
--- /dev/null
+++ b/src/elevenlabs/types/soft_timeout_config_override.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+
+
+class SoftTimeoutConfigOverride(UncheckedBaseModel):
+ message: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Message to show when soft timeout is reached while waiting for LLM response
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/soft_timeout_config_override_config.py b/src/elevenlabs/types/soft_timeout_config_override_config.py
new file mode 100644
index 00000000..e48931f1
--- /dev/null
+++ b/src/elevenlabs/types/soft_timeout_config_override_config.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+
+
+class SoftTimeoutConfigOverrideConfig(UncheckedBaseModel):
+ message: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether to allow overriding the message field.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/soft_timeout_config_workflow_override.py b/src/elevenlabs/types/soft_timeout_config_workflow_override.py
new file mode 100644
index 00000000..1c479b6a
--- /dev/null
+++ b/src/elevenlabs/types/soft_timeout_config_workflow_override.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+
+
+class SoftTimeoutConfigWorkflowOverride(UncheckedBaseModel):
+ timeout_seconds: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Time in seconds before showing the predefined message while waiting for LLM response. Set to -1 to disable.
+ """
+
+ message: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Message to show when soft timeout is reached while waiting for LLM response
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/speech_history_item_response.py b/src/elevenlabs/types/speech_history_item_response.py
index 3b1a2989..89fe603f 100644
--- a/src/elevenlabs/types/speech_history_item_response.py
+++ b/src/elevenlabs/types/speech_history_item_response.py
@@ -68,8 +68,8 @@ class SpeechHistoryItemResponse(UncheckedBaseModel):
The content type of the generated item.
"""
- state: typing.Optional[typing.Any] = None
- settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ state: typing.Any
+ settings: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
"""
The settings of the history item.
"""
diff --git a/src/elevenlabs/types/test_invocation_summary_response_model.py b/src/elevenlabs/types/test_invocation_summary_response_model.py
index 2afc25ed..aad50951 100644
--- a/src/elevenlabs/types/test_invocation_summary_response_model.py
+++ b/src/elevenlabs/types/test_invocation_summary_response_model.py
@@ -14,6 +14,16 @@ class TestInvocationSummaryResponseModel(UncheckedBaseModel):
The ID of the test invocation
"""
+ agent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the agent this test invocation belongs to
+ """
+
+ branch_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the branch this test invocation was run on
+ """
+
created_at_unix_secs: int = pydantic.Field()
"""
Creation time of the test invocation in unix seconds
diff --git a/src/elevenlabs/types/text_to_speech_request.py b/src/elevenlabs/types/text_to_speech_request.py
index c357bce4..823ca827 100644
--- a/src/elevenlabs/types/text_to_speech_request.py
+++ b/src/elevenlabs/types/text_to_speech_request.py
@@ -2,4 +2,4 @@
import typing
-TextToSpeechRequest = typing.Optional[typing.Any]
+TextToSpeechRequest = typing.Any
diff --git a/src/elevenlabs/types/text_to_speech_stream_request.py b/src/elevenlabs/types/text_to_speech_stream_request.py
index abf27475..f3ba4782 100644
--- a/src/elevenlabs/types/text_to_speech_stream_request.py
+++ b/src/elevenlabs/types/text_to_speech_stream_request.py
@@ -2,4 +2,4 @@
import typing
-TextToSpeechStreamRequest = typing.Optional[typing.Any]
+TextToSpeechStreamRequest = typing.Any
diff --git a/src/elevenlabs/types/text_to_speech_stream_with_timestamps_request.py b/src/elevenlabs/types/text_to_speech_stream_with_timestamps_request.py
index 9d32037d..43f05139 100644
--- a/src/elevenlabs/types/text_to_speech_stream_with_timestamps_request.py
+++ b/src/elevenlabs/types/text_to_speech_stream_with_timestamps_request.py
@@ -2,4 +2,4 @@
import typing
-TextToSpeechStreamWithTimestampsRequest = typing.Optional[typing.Any]
+TextToSpeechStreamWithTimestampsRequest = typing.Any
diff --git a/src/elevenlabs/types/text_to_speech_with_timestamps_request.py b/src/elevenlabs/types/text_to_speech_with_timestamps_request.py
index 5206f704..3119e8b0 100644
--- a/src/elevenlabs/types/text_to_speech_with_timestamps_request.py
+++ b/src/elevenlabs/types/text_to_speech_with_timestamps_request.py
@@ -2,4 +2,4 @@
import typing
-TextToSpeechWithTimestampsRequest = typing.Optional[typing.Any]
+TextToSpeechWithTimestampsRequest = typing.Any
diff --git a/src/elevenlabs/types/tool.py b/src/elevenlabs/types/tool.py
index 182de2ff..ade9928e 100644
--- a/src/elevenlabs/types/tool.py
+++ b/src/elevenlabs/types/tool.py
@@ -18,16 +18,14 @@ class Tool(UncheckedBaseModel):
name: str
title: typing.Optional[str] = None
description: typing.Optional[str] = None
- input_schema: typing_extensions.Annotated[
- typing.Dict[str, typing.Optional[typing.Any]], FieldMetadata(alias="inputSchema")
- ]
+ input_schema: typing_extensions.Annotated[typing.Dict[str, typing.Any], FieldMetadata(alias="inputSchema")]
output_schema: typing_extensions.Annotated[
- typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="outputSchema")
+ typing.Optional[typing.Dict[str, typing.Any]], FieldMetadata(alias="outputSchema")
] = None
annotations: typing.Optional[ToolAnnotations] = None
- meta: typing_extensions.Annotated[
- typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="_meta")
- ] = None
+ meta: typing_extensions.Annotated[typing.Optional[typing.Dict[str, typing.Any]], FieldMetadata(alias="_meta")] = (
+ None
+ )
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/tool_request_model_tool_config.py b/src/elevenlabs/types/tool_request_model_tool_config.py
index 2c6a64e6..bfb23fe9 100644
--- a/src/elevenlabs/types/tool_request_model_tool_config.py
+++ b/src/elevenlabs/types/tool_request_model_tool_config.py
@@ -38,6 +38,7 @@ class ToolRequestModelToolConfig_ApiIntegrationWebhook(UncheckedBaseModel):
api_integration_id: str
api_integration_connection_id: str
api_schema_overrides: typing.Optional[ApiIntegrationWebhookOverridesInput] = None
+ base_api_schema: WebhookToolApiSchemaConfigInput
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/tool_response_model_tool_config.py b/src/elevenlabs/types/tool_response_model_tool_config.py
index c968c71b..aa00e14d 100644
--- a/src/elevenlabs/types/tool_response_model_tool_config.py
+++ b/src/elevenlabs/types/tool_response_model_tool_config.py
@@ -26,18 +26,19 @@ class ToolResponseModelToolConfig_ApiIntegrationWebhook(UncheckedBaseModel):
type: typing.Literal["api_integration_webhook"] = "api_integration_webhook"
name: str
description: str
- response_timeout_secs: typing.Optional[int] = None
- disable_interruptions: typing.Optional[bool] = None
- force_pre_tool_speech: typing.Optional[bool] = None
- assignments: typing.Optional[typing.List[DynamicVariableAssignment]] = None
+ response_timeout_secs: int
+ disable_interruptions: bool
+ force_pre_tool_speech: bool
+ assignments: typing.List[DynamicVariableAssignment]
tool_call_sound: typing.Optional[ToolCallSoundType] = None
- tool_call_sound_behavior: typing.Optional[ToolCallSoundBehavior] = None
- dynamic_variables: typing.Optional[DynamicVariablesConfig] = None
- execution_mode: typing.Optional[ToolExecutionMode] = None
- tool_version: typing.Optional[str] = None
+ tool_call_sound_behavior: ToolCallSoundBehavior
+ dynamic_variables: DynamicVariablesConfig
+ execution_mode: ToolExecutionMode
+ tool_version: str
api_integration_id: str
api_integration_connection_id: str
api_schema_overrides: typing.Optional[ApiIntegrationWebhookOverridesOutput] = None
+ base_api_schema: WebhookToolApiSchemaConfigOutput
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
@@ -78,19 +79,6 @@ class Config:
extra = pydantic.Extra.allow
-class ToolResponseModelToolConfig_Mcp(UncheckedBaseModel):
- value: typing.Optional[typing.Any] = None
- type: typing.Literal["mcp"] = "mcp"
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
-
-
class ToolResponseModelToolConfig_System(UncheckedBaseModel):
"""
The type of tool
@@ -151,7 +139,6 @@ class Config:
typing.Union[
ToolResponseModelToolConfig_ApiIntegrationWebhook,
ToolResponseModelToolConfig_Client,
- ToolResponseModelToolConfig_Mcp,
ToolResponseModelToolConfig_System,
ToolResponseModelToolConfig_Webhook,
],
diff --git a/src/elevenlabs/types/tts_conversational_model.py b/src/elevenlabs/types/tts_conversational_model.py
index 6461071f..969b8f5a 100644
--- a/src/elevenlabs/types/tts_conversational_model.py
+++ b/src/elevenlabs/types/tts_conversational_model.py
@@ -4,7 +4,12 @@
TtsConversationalModel = typing.Union[
typing.Literal[
- "eleven_turbo_v2", "eleven_turbo_v2_5", "eleven_flash_v2", "eleven_flash_v2_5", "eleven_multilingual_v2"
+ "eleven_turbo_v2",
+ "eleven_turbo_v2_5",
+ "eleven_flash_v2",
+ "eleven_flash_v2_5",
+ "eleven_multilingual_v2",
+ "eleven_expressive",
],
typing.Any,
]
diff --git a/src/elevenlabs/types/tts_model_family.py b/src/elevenlabs/types/tts_model_family.py
index e8973140..3f950cad 100644
--- a/src/elevenlabs/types/tts_model_family.py
+++ b/src/elevenlabs/types/tts_model_family.py
@@ -2,4 +2,4 @@
import typing
-TtsModelFamily = typing.Union[typing.Literal["turbo", "flash", "multilingual"], typing.Any]
+TtsModelFamily = typing.Union[typing.Literal["turbo", "flash", "multilingual", "expressive"], typing.Any]
diff --git a/src/elevenlabs/types/turn_config.py b/src/elevenlabs/types/turn_config.py
index 9d762482..47069e51 100644
--- a/src/elevenlabs/types/turn_config.py
+++ b/src/elevenlabs/types/turn_config.py
@@ -5,6 +5,7 @@
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.unchecked_base_model import UncheckedBaseModel
+from .soft_timeout_config import SoftTimeoutConfig
from .turn_eagerness import TurnEagerness
@@ -14,11 +15,21 @@ class TurnConfig(UncheckedBaseModel):
Maximum wait time for the user's reply before re-engaging the user
"""
+ initial_wait_time: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ How long the agent will wait for the user to start the conversation if the first message is empty. If not set, uses the regular turn_timeout.
+ """
+
silence_end_call_timeout: typing.Optional[float] = pydantic.Field(default=None)
"""
Maximum wait time since the user last spoke before terminating the call
"""
+ soft_timeout_config: typing.Optional[SoftTimeoutConfig] = pydantic.Field(default=None)
+ """
+ Configuration for soft timeout functionality. Provides immediate feedback during longer LLM responses.
+ """
+
turn_eagerness: typing.Optional[TurnEagerness] = pydantic.Field(default=None)
"""
Controls how eager the agent is to respond. Low = less eager (waits longer), Standard = default eagerness, High = more eager (responds sooner)
diff --git a/src/elevenlabs/types/turn_config_override.py b/src/elevenlabs/types/turn_config_override.py
new file mode 100644
index 00000000..4820f18f
--- /dev/null
+++ b/src/elevenlabs/types/turn_config_override.py
@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .soft_timeout_config_override import SoftTimeoutConfigOverride
+
+
+class TurnConfigOverride(UncheckedBaseModel):
+ soft_timeout_config: typing.Optional[SoftTimeoutConfigOverride] = pydantic.Field(default=None)
+ """
+ Configuration for soft timeout functionality. Provides immediate feedback during longer LLM responses.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/turn_config_override_config.py b/src/elevenlabs/types/turn_config_override_config.py
new file mode 100644
index 00000000..af5124c7
--- /dev/null
+++ b/src/elevenlabs/types/turn_config_override_config.py
@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .soft_timeout_config_override_config import SoftTimeoutConfigOverrideConfig
+
+
+class TurnConfigOverrideConfig(UncheckedBaseModel):
+ soft_timeout_config: typing.Optional[SoftTimeoutConfigOverrideConfig] = pydantic.Field(default=None)
+ """
+ Configures overrides for nested fields.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/turn_config_workflow_override.py b/src/elevenlabs/types/turn_config_workflow_override.py
index e3a5d3bd..1ed9434f 100644
--- a/src/elevenlabs/types/turn_config_workflow_override.py
+++ b/src/elevenlabs/types/turn_config_workflow_override.py
@@ -5,6 +5,7 @@
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.unchecked_base_model import UncheckedBaseModel
+from .soft_timeout_config_workflow_override import SoftTimeoutConfigWorkflowOverride
from .turn_eagerness import TurnEagerness
@@ -14,11 +15,21 @@ class TurnConfigWorkflowOverride(UncheckedBaseModel):
Maximum wait time for the user's reply before re-engaging the user
"""
+ initial_wait_time: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ How long the agent will wait for the user to start the conversation if the first message is empty. If not set, uses the regular turn_timeout.
+ """
+
silence_end_call_timeout: typing.Optional[float] = pydantic.Field(default=None)
"""
Maximum wait time since the user last spoke before terminating the call
"""
+ soft_timeout_config: typing.Optional[SoftTimeoutConfigWorkflowOverride] = pydantic.Field(default=None)
+ """
+ Configuration for soft timeout functionality. Provides immediate feedback during longer LLM responses.
+ """
+
turn_eagerness: typing.Optional[TurnEagerness] = pydantic.Field(default=None)
"""
Controls how eager the agent is to respond. Low = less eager (waits longer), Standard = default eagerness, High = more eager (responds sooner)
diff --git a/src/elevenlabs/types/unit_test_run_response_model.py b/src/elevenlabs/types/unit_test_run_response_model.py
index f89590bb..70229ebb 100644
--- a/src/elevenlabs/types/unit_test_run_response_model.py
+++ b/src/elevenlabs/types/unit_test_run_response_model.py
@@ -19,6 +19,7 @@ class UnitTestRunResponseModel(UncheckedBaseModel):
test_info: typing.Optional[UnitTestCommonModel] = None
test_invocation_id: str
agent_id: str
+ branch_id: typing.Optional[str] = None
workflow_node_id: typing.Optional[str] = None
status: TestRunStatus
agent_responses: typing.Optional[typing.List[ConversationHistoryTranscriptCommonModelOutput]] = None
diff --git a/src/elevenlabs/types/update_audio_native_project_request.py b/src/elevenlabs/types/update_audio_native_project_request.py
index ead582cc..bda627d4 100644
--- a/src/elevenlabs/types/update_audio_native_project_request.py
+++ b/src/elevenlabs/types/update_audio_native_project_request.py
@@ -2,4 +2,4 @@
import typing
-UpdateAudioNativeProjectRequest = typing.Optional[typing.Any]
+UpdateAudioNativeProjectRequest = typing.Any
diff --git a/src/elevenlabs/types/update_chapter_request.py b/src/elevenlabs/types/update_chapter_request.py
index b06208a5..aa906135 100644
--- a/src/elevenlabs/types/update_chapter_request.py
+++ b/src/elevenlabs/types/update_chapter_request.py
@@ -2,4 +2,4 @@
import typing
-UpdateChapterRequest = typing.Optional[typing.Any]
+UpdateChapterRequest = typing.Any
diff --git a/src/elevenlabs/types/update_project_request.py b/src/elevenlabs/types/update_project_request.py
index 7c531e3c..606d3e18 100644
--- a/src/elevenlabs/types/update_project_request.py
+++ b/src/elevenlabs/types/update_project_request.py
@@ -2,4 +2,4 @@
import typing
-UpdateProjectRequest = typing.Optional[typing.Any]
+UpdateProjectRequest = typing.Any
diff --git a/src/elevenlabs/types/update_pronunciation_dictionaries_request.py b/src/elevenlabs/types/update_pronunciation_dictionaries_request.py
index 3740632b..a5848f69 100644
--- a/src/elevenlabs/types/update_pronunciation_dictionaries_request.py
+++ b/src/elevenlabs/types/update_pronunciation_dictionaries_request.py
@@ -2,4 +2,4 @@
import typing
-UpdatePronunciationDictionariesRequest = typing.Optional[typing.Any]
+UpdatePronunciationDictionariesRequest = typing.Any
diff --git a/src/elevenlabs/types/webhook_tool_api_schema_config_input.py b/src/elevenlabs/types/webhook_tool_api_schema_config_input.py
index 78de7627..ceb43f6b 100644
--- a/src/elevenlabs/types/webhook_tool_api_schema_config_input.py
+++ b/src/elevenlabs/types/webhook_tool_api_schema_config_input.py
@@ -10,6 +10,7 @@
from .auth_connection_locator import AuthConnectionLocator
from .literal_json_schema_property import LiteralJsonSchemaProperty
from .query_params_json_schema import QueryParamsJsonSchema
+from .webhook_tool_api_schema_config_input_content_type import WebhookToolApiSchemaConfigInputContentType
from .webhook_tool_api_schema_config_input_method import WebhookToolApiSchemaConfigInputMethod
from .webhook_tool_api_schema_config_input_request_headers_value import (
WebhookToolApiSchemaConfigInputRequestHeadersValue,
@@ -53,6 +54,11 @@ class WebhookToolApiSchemaConfigInput(UncheckedBaseModel):
Headers that should be included in the request
"""
+ content_type: typing.Optional[WebhookToolApiSchemaConfigInputContentType] = pydantic.Field(default=None)
+ """
+ Content type for the request body. Only applies to POST/PUT/PATCH requests.
+ """
+
auth_connection: typing.Optional[AuthConnectionLocator] = pydantic.Field(default=None)
"""
Optional auth connection to use for authentication with this webhook
diff --git a/src/elevenlabs/types/webhook_tool_api_schema_config_input_content_type.py b/src/elevenlabs/types/webhook_tool_api_schema_config_input_content_type.py
new file mode 100644
index 00000000..aa2d7d63
--- /dev/null
+++ b/src/elevenlabs/types/webhook_tool_api_schema_config_input_content_type.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+WebhookToolApiSchemaConfigInputContentType = typing.Union[
+ typing.Literal["application/json", "application/x-www-form-urlencoded"], typing.Any
+]
diff --git a/src/elevenlabs/types/webhook_tool_api_schema_config_output.py b/src/elevenlabs/types/webhook_tool_api_schema_config_output.py
index a110d14f..9cbd75fe 100644
--- a/src/elevenlabs/types/webhook_tool_api_schema_config_output.py
+++ b/src/elevenlabs/types/webhook_tool_api_schema_config_output.py
@@ -10,6 +10,7 @@
from .auth_connection_locator import AuthConnectionLocator
from .literal_json_schema_property import LiteralJsonSchemaProperty
from .query_params_json_schema import QueryParamsJsonSchema
+from .webhook_tool_api_schema_config_output_content_type import WebhookToolApiSchemaConfigOutputContentType
from .webhook_tool_api_schema_config_output_method import WebhookToolApiSchemaConfigOutputMethod
from .webhook_tool_api_schema_config_output_request_headers_value import (
WebhookToolApiSchemaConfigOutputRequestHeadersValue,
@@ -53,6 +54,11 @@ class WebhookToolApiSchemaConfigOutput(UncheckedBaseModel):
Headers that should be included in the request
"""
+ content_type: typing.Optional[WebhookToolApiSchemaConfigOutputContentType] = pydantic.Field(default=None)
+ """
+ Content type for the request body. Only applies to POST/PUT/PATCH requests.
+ """
+
auth_connection: typing.Optional[AuthConnectionLocator] = pydantic.Field(default=None)
"""
Optional auth connection to use for authentication with this webhook
diff --git a/src/elevenlabs/types/webhook_tool_api_schema_config_output_content_type.py b/src/elevenlabs/types/webhook_tool_api_schema_config_output_content_type.py
new file mode 100644
index 00000000..b3d9b9e1
--- /dev/null
+++ b/src/elevenlabs/types/webhook_tool_api_schema_config_output_content_type.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+WebhookToolApiSchemaConfigOutputContentType = typing.Union[
+ typing.Literal["application/json", "application/x-www-form-urlencoded"], typing.Any
+]
diff --git a/src/elevenlabs/types/widget_config.py b/src/elevenlabs/types/widget_config.py
index 543364d9..9396e3b0 100644
--- a/src/elevenlabs/types/widget_config.py
+++ b/src/elevenlabs/types/widget_config.py
@@ -7,6 +7,7 @@
from ..core.unchecked_base_model import UncheckedBaseModel
from .embed_variant import EmbedVariant
from .widget_config_output_avatar import WidgetConfigOutputAvatar
+from .widget_end_feedback_config import WidgetEndFeedbackConfig
from .widget_expandable import WidgetExpandable
from .widget_feedback_mode import WidgetFeedbackMode
from .widget_language_preset import WidgetLanguagePreset
@@ -41,6 +42,11 @@ class WidgetConfig(UncheckedBaseModel):
The feedback mode of the widget
"""
+ end_feedback: typing.Optional[WidgetEndFeedbackConfig] = pydantic.Field(default=None)
+ """
+ Configuration for feedback collected at the end of the conversation
+ """
+
bg_color: typing.Optional[str] = pydantic.Field(default=None)
"""
The background color of the widget
diff --git a/src/elevenlabs/types/widget_config_response.py b/src/elevenlabs/types/widget_config_response.py
index 9031b1dc..57594c47 100644
--- a/src/elevenlabs/types/widget_config_response.py
+++ b/src/elevenlabs/types/widget_config_response.py
@@ -7,6 +7,7 @@
from ..core.unchecked_base_model import UncheckedBaseModel
from .embed_variant import EmbedVariant
from .widget_config_response_model_avatar import WidgetConfigResponseModelAvatar
+from .widget_end_feedback_config import WidgetEndFeedbackConfig
from .widget_expandable import WidgetExpandable
from .widget_feedback_mode import WidgetFeedbackMode
from .widget_language_preset_response import WidgetLanguagePresetResponse
@@ -41,6 +42,11 @@ class WidgetConfigResponse(UncheckedBaseModel):
The feedback mode of the widget
"""
+ end_feedback: typing.Optional[WidgetEndFeedbackConfig] = pydantic.Field(default=None)
+ """
+ Configuration for feedback collected at the end of the conversation
+ """
+
bg_color: typing.Optional[str] = pydantic.Field(default=None)
"""
The background color of the widget
diff --git a/src/elevenlabs/types/widget_end_feedback_config.py b/src/elevenlabs/types/widget_end_feedback_config.py
new file mode 100644
index 00000000..c5de7a8f
--- /dev/null
+++ b/src/elevenlabs/types/widget_end_feedback_config.py
@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .widget_end_feedback_type import WidgetEndFeedbackType
+
+
+class WidgetEndFeedbackConfig(UncheckedBaseModel):
+ type: typing.Optional[WidgetEndFeedbackType] = pydantic.Field(default=None)
+ """
+ The type of feedback to collect at the end of the conversation
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/widget_end_feedback_type.py b/src/elevenlabs/types/widget_end_feedback_type.py
new file mode 100644
index 00000000..9502550e
--- /dev/null
+++ b/src/elevenlabs/types/widget_end_feedback_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+WidgetEndFeedbackType = typing.Literal["rating"]
diff --git a/src/elevenlabs/types/workflow_tool_nested_tools_step_model_input.py b/src/elevenlabs/types/workflow_tool_nested_tools_step_model_input.py
index 4dc93686..70687f80 100644
--- a/src/elevenlabs/types/workflow_tool_nested_tools_step_model_input.py
+++ b/src/elevenlabs/types/workflow_tool_nested_tools_step_model_input.py
@@ -27,6 +27,12 @@ class Config:
extra = pydantic.Extra.allow
+from .conversation_history_transcript_workflow_tools_result_common_model_input import (
+ ConversationHistoryTranscriptWorkflowToolsResultCommonModelInput,
+) # noqa: E402, I001
from .workflow_tool_nested_tools_step_model_input_results_item import WorkflowToolNestedToolsStepModelInputResultsItem # noqa: E402, I001
-update_forward_refs(WorkflowToolNestedToolsStepModelInput)
+update_forward_refs(
+ WorkflowToolNestedToolsStepModelInput,
+ ConversationHistoryTranscriptWorkflowToolsResultCommonModelInput=ConversationHistoryTranscriptWorkflowToolsResultCommonModelInput,
+)
diff --git a/src/elevenlabs/types/workflow_tool_nested_tools_step_model_output.py b/src/elevenlabs/types/workflow_tool_nested_tools_step_model_output.py
index 7c1ed4c6..e6992491 100644
--- a/src/elevenlabs/types/workflow_tool_nested_tools_step_model_output.py
+++ b/src/elevenlabs/types/workflow_tool_nested_tools_step_model_output.py
@@ -27,6 +27,12 @@ class Config:
extra = pydantic.Extra.allow
+from .conversation_history_transcript_workflow_tools_result_common_model_output import (
+ ConversationHistoryTranscriptWorkflowToolsResultCommonModelOutput,
+) # noqa: E402, I001
from .workflow_tool_nested_tools_step_model_output_results_item import WorkflowToolNestedToolsStepModelOutputResultsItem # noqa: E402, I001
-update_forward_refs(WorkflowToolNestedToolsStepModelOutput)
+update_forward_refs(
+ WorkflowToolNestedToolsStepModelOutput,
+ ConversationHistoryTranscriptWorkflowToolsResultCommonModelOutput=ConversationHistoryTranscriptWorkflowToolsResultCommonModelOutput,
+)
diff --git a/src/elevenlabs/types/workflow_tool_response_model_input_steps_item.py b/src/elevenlabs/types/workflow_tool_response_model_input_steps_item.py
index b0992e08..b50b37a7 100644
--- a/src/elevenlabs/types/workflow_tool_response_model_input_steps_item.py
+++ b/src/elevenlabs/types/workflow_tool_response_model_input_steps_item.py
@@ -60,6 +60,10 @@ class Config:
extra = pydantic.Extra.allow
+from .conversation_history_transcript_workflow_tools_result_common_model_input import (
+ ConversationHistoryTranscriptWorkflowToolsResultCommonModelInput,
+) # noqa: E402, I001
+
WorkflowToolResponseModelInputStepsItem = typing_extensions.Annotated[
typing.Union[
WorkflowToolResponseModelInputStepsItem_Edge,
@@ -70,4 +74,7 @@ class Config:
]
from .workflow_tool_nested_tools_step_model_input_results_item import WorkflowToolNestedToolsStepModelInputResultsItem # noqa: E402, I001
-update_forward_refs(WorkflowToolResponseModelInputStepsItem_NestedTools)
+update_forward_refs(
+ WorkflowToolResponseModelInputStepsItem_NestedTools,
+ ConversationHistoryTranscriptWorkflowToolsResultCommonModelInput=ConversationHistoryTranscriptWorkflowToolsResultCommonModelInput,
+)
diff --git a/src/elevenlabs/types/workflow_tool_response_model_output_steps_item.py b/src/elevenlabs/types/workflow_tool_response_model_output_steps_item.py
index e7b6e178..a2d4f0df 100644
--- a/src/elevenlabs/types/workflow_tool_response_model_output_steps_item.py
+++ b/src/elevenlabs/types/workflow_tool_response_model_output_steps_item.py
@@ -60,6 +60,10 @@ class Config:
extra = pydantic.Extra.allow
+from .conversation_history_transcript_workflow_tools_result_common_model_output import (
+ ConversationHistoryTranscriptWorkflowToolsResultCommonModelOutput,
+) # noqa: E402, I001
+
WorkflowToolResponseModelOutputStepsItem = typing_extensions.Annotated[
typing.Union[
WorkflowToolResponseModelOutputStepsItem_Edge,
@@ -70,4 +74,7 @@ class Config:
]
from .workflow_tool_nested_tools_step_model_output_results_item import WorkflowToolNestedToolsStepModelOutputResultsItem # noqa: E402, I001
-update_forward_refs(WorkflowToolResponseModelOutputStepsItem_NestedTools)
+update_forward_refs(
+ WorkflowToolResponseModelOutputStepsItem_NestedTools,
+ ConversationHistoryTranscriptWorkflowToolsResultCommonModelOutput=ConversationHistoryTranscriptWorkflowToolsResultCommonModelOutput,
+)
diff --git a/src/elevenlabs/types/workspace_resource_type.py b/src/elevenlabs/types/workspace_resource_type.py
index 90a54c04..214bcaf1 100644
--- a/src/elevenlabs/types/workspace_resource_type.py
+++ b/src/elevenlabs/types/workspace_resource_type.py
@@ -18,12 +18,16 @@
"convai_phone_numbers",
"convai_mcp_servers",
"convai_api_integration_connections",
+ "convai_api_integration_trigger_connections",
"convai_batch_calls",
"convai_agent_response_tests",
"convai_test_suite_invocations",
"convai_crawl_jobs",
"convai_crawl_tasks",
"convai_whatsapp_accounts",
+ "convai_agent_versions",
+ "convai_agent_branches",
+ "convai_agent_versions_deployments",
"dashboard",
"dashboard_configuration",
],
diff --git a/src/elevenlabs/workspace/resources/client.py b/src/elevenlabs/workspace/resources/client.py
index 912900f6..052dc433 100644
--- a/src/elevenlabs/workspace/resources/client.py
+++ b/src/elevenlabs/workspace/resources/client.py
@@ -81,7 +81,7 @@ def share(
group_id: typing.Optional[str] = OMIT,
workspace_api_key_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Grants a role on a workspace resource to a user or a group. It overrides any existing role this user/service account/group/workspace api key has on the resource. To target a user or service account, pass only the user email. The user must be in your workspace. To target a group, pass only the group id. To target a workspace api key, pass the api key id. The resource will be shared with the service account associated with the api key. You must have admin access to the resource to share it.
@@ -110,7 +110,7 @@ def share(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
@@ -146,7 +146,7 @@ def unshare(
group_id: typing.Optional[str] = OMIT,
workspace_api_key_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Removes any existing role on a workspace resource from a user, service account, group or workspace api key. To target a user or service account, pass only the user email. The user must be in your workspace. To target a group, pass only the group id. To target a workspace api key, pass the api key id. The resource will be unshared from the service account associated with the api key. You must have admin access to the resource to unshare it. You cannot remove permissions from the user who created the resource.
@@ -172,7 +172,7 @@ def unshare(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
@@ -274,7 +274,7 @@ async def share(
group_id: typing.Optional[str] = OMIT,
workspace_api_key_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Grants a role on a workspace resource to a user or a group. It overrides any existing role this user/service account/group/workspace api key has on the resource. To target a user or service account, pass only the user email. The user must be in your workspace. To target a group, pass only the group id. To target a workspace api key, pass the api key id. The resource will be shared with the service account associated with the api key. You must have admin access to the resource to share it.
@@ -303,7 +303,7 @@ async def share(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
@@ -347,7 +347,7 @@ async def unshare(
group_id: typing.Optional[str] = OMIT,
workspace_api_key_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Optional[typing.Any]:
+ ) -> typing.Any:
"""
Removes any existing role on a workspace resource from a user, service account, group or workspace api key. To target a user or service account, pass only the user email. The user must be in your workspace. To target a group, pass only the group id. To target a workspace api key, pass the api key id. The resource will be unshared from the service account associated with the api key. You must have admin access to the resource to unshare it. You cannot remove permissions from the user who created the resource.
@@ -373,7 +373,7 @@ async def unshare(
Returns
-------
- typing.Optional[typing.Any]
+ typing.Any
Successful Response
Examples
diff --git a/src/elevenlabs/workspace/resources/raw_client.py b/src/elevenlabs/workspace/resources/raw_client.py
index e90ead9d..a58334e8 100644
--- a/src/elevenlabs/workspace/resources/raw_client.py
+++ b/src/elevenlabs/workspace/resources/raw_client.py
@@ -95,7 +95,7 @@ def share(
group_id: typing.Optional[str] = OMIT,
workspace_api_key_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[typing.Optional[typing.Any]]:
+ ) -> HttpResponse[typing.Any]:
"""
Grants a role on a workspace resource to a user or a group. It overrides any existing role this user/service account/group/workspace api key has on the resource. To target a user or service account, pass only the user email. The user must be in your workspace. To target a group, pass only the group id. To target a workspace api key, pass the api key id. The resource will be shared with the service account associated with the api key. You must have admin access to the resource to share it.
@@ -124,7 +124,7 @@ def share(
Returns
-------
- HttpResponse[typing.Optional[typing.Any]]
+ HttpResponse[typing.Any]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -148,9 +148,9 @@ def share(
return HttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
@@ -180,7 +180,7 @@ def unshare(
group_id: typing.Optional[str] = OMIT,
workspace_api_key_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[typing.Optional[typing.Any]]:
+ ) -> HttpResponse[typing.Any]:
"""
Removes any existing role on a workspace resource from a user, service account, group or workspace api key. To target a user or service account, pass only the user email. The user must be in your workspace. To target a group, pass only the group id. To target a workspace api key, pass the api key id. The resource will be unshared from the service account associated with the api key. You must have admin access to the resource to unshare it. You cannot remove permissions from the user who created the resource.
@@ -206,7 +206,7 @@ def unshare(
Returns
-------
- HttpResponse[typing.Optional[typing.Any]]
+ HttpResponse[typing.Any]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -229,9 +229,9 @@ def unshare(
return HttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
@@ -327,7 +327,7 @@ async def share(
group_id: typing.Optional[str] = OMIT,
workspace_api_key_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
+ ) -> AsyncHttpResponse[typing.Any]:
"""
Grants a role on a workspace resource to a user or a group. It overrides any existing role this user/service account/group/workspace api key has on the resource. To target a user or service account, pass only the user email. The user must be in your workspace. To target a group, pass only the group id. To target a workspace api key, pass the api key id. The resource will be shared with the service account associated with the api key. You must have admin access to the resource to share it.
@@ -356,7 +356,7 @@ async def share(
Returns
-------
- AsyncHttpResponse[typing.Optional[typing.Any]]
+ AsyncHttpResponse[typing.Any]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -380,9 +380,9 @@ async def share(
return AsyncHttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
@@ -412,7 +412,7 @@ async def unshare(
group_id: typing.Optional[str] = OMIT,
workspace_api_key_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
+ ) -> AsyncHttpResponse[typing.Any]:
"""
Removes any existing role on a workspace resource from a user, service account, group or workspace api key. To target a user or service account, pass only the user email. The user must be in your workspace. To target a group, pass only the group id. To target a workspace api key, pass the api key id. The resource will be unshared from the service account associated with the api key. You must have admin access to the resource to unshare it. You cannot remove permissions from the user who created the resource.
@@ -438,7 +438,7 @@ async def unshare(
Returns
-------
- AsyncHttpResponse[typing.Optional[typing.Any]]
+ AsyncHttpResponse[typing.Any]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -461,9 +461,9 @@ async def unshare(
return AsyncHttpResponse(response=_response, data=None)
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
)
From 0889e5b37c8eb7bf5d7c028447628b9db5bfcee6 Mon Sep 17 00:00:00 2001
From: Thomas Baker
Date: Wed, 12 Nov 2025 21:50:03 -0500
Subject: [PATCH 2/2] fix middle imports, verification script
---
...ow_tool_response_model_input_steps_item.py | 6 +-
...w_tool_response_model_output_steps_item.py | 6 +-
verify_types.py | 153 ++++++++++++++++++
3 files changed, 159 insertions(+), 6 deletions(-)
create mode 100644 verify_types.py
diff --git a/src/elevenlabs/types/workflow_tool_response_model_input_steps_item.py b/src/elevenlabs/types/workflow_tool_response_model_input_steps_item.py
index b50b37a7..4d1957e7 100644
--- a/src/elevenlabs/types/workflow_tool_response_model_input_steps_item.py
+++ b/src/elevenlabs/types/workflow_tool_response_model_input_steps_item.py
@@ -60,9 +60,6 @@ class Config:
extra = pydantic.Extra.allow
-from .conversation_history_transcript_workflow_tools_result_common_model_input import (
- ConversationHistoryTranscriptWorkflowToolsResultCommonModelInput,
-) # noqa: E402, I001
WorkflowToolResponseModelInputStepsItem = typing_extensions.Annotated[
typing.Union[
@@ -72,6 +69,9 @@ class Config:
],
UnionMetadata(discriminant="type"),
]
+from .conversation_history_transcript_workflow_tools_result_common_model_input import (
+ ConversationHistoryTranscriptWorkflowToolsResultCommonModelInput,
+) # noqa: E402, I001
from .workflow_tool_nested_tools_step_model_input_results_item import WorkflowToolNestedToolsStepModelInputResultsItem # noqa: E402, I001
update_forward_refs(
diff --git a/src/elevenlabs/types/workflow_tool_response_model_output_steps_item.py b/src/elevenlabs/types/workflow_tool_response_model_output_steps_item.py
index a2d4f0df..218ce95e 100644
--- a/src/elevenlabs/types/workflow_tool_response_model_output_steps_item.py
+++ b/src/elevenlabs/types/workflow_tool_response_model_output_steps_item.py
@@ -60,9 +60,6 @@ class Config:
extra = pydantic.Extra.allow
-from .conversation_history_transcript_workflow_tools_result_common_model_output import (
- ConversationHistoryTranscriptWorkflowToolsResultCommonModelOutput,
-) # noqa: E402, I001
WorkflowToolResponseModelOutputStepsItem = typing_extensions.Annotated[
typing.Union[
@@ -72,6 +69,9 @@ class Config:
],
UnionMetadata(discriminant="type"),
]
+from .conversation_history_transcript_workflow_tools_result_common_model_output import (
+ ConversationHistoryTranscriptWorkflowToolsResultCommonModelOutput,
+) # noqa: E402, I001
from .workflow_tool_nested_tools_step_model_output_results_item import WorkflowToolNestedToolsStepModelOutputResultsItem # noqa: E402, I001
update_forward_refs(
diff --git a/verify_types.py b/verify_types.py
new file mode 100644
index 00000000..549f76a3
--- /dev/null
+++ b/verify_types.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python3
+"""
+Script to verify all types in src/elevenlabs/types/ can be imported and instantiated.
+Allows pydantic ValidationError but catches other errors (like circular imports).
+Each type is tested in a separate Python process to avoid import cache issues.
+"""
+import subprocess
+import sys
+from pathlib import Path
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+# Color codes for terminal output
+RED = '\033[91m'
+GREEN = '\033[92m'
+YELLOW = '\033[93m'
+RESET = '\033[0m'
+
+def get_class_name_from_file(file_path: Path) -> str | None:
+ """Convert a file name to the expected class name using PascalCase."""
+ # Remove .py extension
+ name = file_path.stem
+
+ # Skip special files
+ if name.startswith('__'):
+ return None
+
+ # Convert snake_case to PascalCase
+ parts = name.split('_')
+ class_name = ''.join(word.capitalize() for word in parts)
+
+ return class_name
+
+def test_type_import_subprocess(module_name: str, class_name: str) -> tuple[bool, str | None]:
+ """
+ Test importing and instantiating a type in a separate subprocess.
+ Returns (success, error_message)
+ """
+ # Python code to test import in isolation
+ test_code = f"""
+import sys
+from pydantic import ValidationError as PydanticValidationError
+
+try:
+ # Try to import the type
+ from elevenlabs.types.{module_name} import {class_name}
+
+ # Try to instantiate with no args (expect ValidationError)
+ try:
+ {class_name}()
+ except PydanticValidationError:
+ pass # Expected - type requires arguments
+ except TypeError:
+ pass # Some types might not be Pydantic models (enums, etc.)
+ except Exception:
+ pass # Still fine, as long as import worked
+
+ sys.exit(0) # Success
+
+except ImportError as e:
+ print(f"ImportError: {{e}}", file=sys.stderr)
+ sys.exit(1)
+except Exception as e:
+ print(f"{{type(e).__name__}}: {{e}}", file=sys.stderr)
+ sys.exit(1)
+"""
+
+ try:
+ result = subprocess.run(
+ ["poetry", "run", "python", "-c", test_code],
+ capture_output=True,
+ text=True,
+ timeout=10,
+            cwd=Path(__file__).resolve().parent
+ )
+
+ if result.returncode == 0:
+ return True, None
+ else:
+ error_msg = result.stderr.strip() if result.stderr else "Unknown error"
+ return False, error_msg
+
+ except subprocess.TimeoutExpired:
+ return False, "Timeout: Import took longer than 10 seconds"
+ except Exception as e:
+ return False, f"Subprocess error: {str(e)}"
+
+def main():
+    types_dir = Path(__file__).resolve().parent / "src" / "elevenlabs" / "types"
+
+ if not types_dir.exists():
+ print(f"{RED}Error: {types_dir} does not exist{RESET}")
+ sys.exit(1)
+
+ # Get all Python files
+ py_files = sorted([f for f in types_dir.glob("*.py") if not f.name.startswith("__")])
+
+ print(f"Testing {len(py_files)} type files in separate processes...\n")
+    print("This may take a few minutes...\n")
+
+ failures = []
+ successes = []
+ skipped = []
+
+ # Test types in parallel for speed
+ with ThreadPoolExecutor(max_workers=10) as executor:
+ future_to_file = {}
+
+ for py_file in py_files:
+ module_name = py_file.stem
+ class_name = get_class_name_from_file(py_file)
+
+ if class_name is None:
+ skipped.append(module_name)
+ continue
+
+ future = executor.submit(test_type_import_subprocess, module_name, class_name)
+ future_to_file[future] = (module_name, class_name)
+
+ for future in as_completed(future_to_file):
+ module_name, class_name = future_to_file[future]
+ try:
+ success, error = future.result()
+
+ if success:
+ successes.append((module_name, class_name))
+ print(f"{GREEN}✓{RESET} {module_name}.{class_name}")
+ else:
+ failures.append((module_name, class_name, error))
+ print(f"{RED}✗{RESET} {module_name}.{class_name}")
+ print(f" {RED}{error}{RESET}")
+ except Exception as e:
+ failures.append((module_name, class_name, f"Test execution failed: {str(e)}"))
+ print(f"{RED}✗{RESET} {module_name}.{class_name}")
+ print(f" {RED}Test execution failed: {str(e)}{RESET}")
+
+ # Print summary
+ print(f"\n{'='*80}")
+ print(f"Summary:")
+ print(f" {GREEN}Successful: {len(successes)}{RESET}")
+ print(f" {RED}Failed: {len(failures)}{RESET}")
+ print(f" {YELLOW}Skipped: {len(skipped)}{RESET}")
+
+ if failures:
+ print(f"\n{RED}Failed imports:{RESET}")
+ for module_name, class_name, error in failures:
+ print(f" - {module_name}.{class_name}: {error}")
+ sys.exit(1)
+ else:
+ print(f"\n{GREEN}All types can be imported successfully!{RESET}")
+ sys.exit(0)
+
+if __name__ == "__main__":
+ main()