From 3c851cf3d3ee34538c4352631ab676ab14b4aa0f Mon Sep 17 00:00:00 2001 From: Zixin Yao Date: Mon, 27 Oct 2025 20:15:46 -0700 Subject: [PATCH] update --- .../azure-search-documents/CHANGELOG.md | 84 +- sdk/search/azure-search-documents/assets.json | 2 +- .../azure/search/documents/_api_versions.py | 4 +- .../search/documents/_generated/__init__.py | 2 +- .../documents/_generated/_configuration.py | 6 +- .../_generated/_search_index_client.py | 4 +- .../documents/_generated/_utils/__init__.py | 2 +- .../_generated/_utils/serialization.py | 4 +- .../documents/_generated/aio/__init__.py | 2 +- .../_generated/aio/_configuration.py | 6 +- .../_generated/aio/_search_index_client.py | 4 +- .../_generated/aio/operations/__init__.py | 2 +- .../aio/operations/_documents_operations.py | 70 +- .../documents/_generated/models/__init__.py | 2 +- .../_generated/models/_models_py3.py | 105 +- .../models/_search_index_client_enums.py | 4 +- .../_generated/operations/__init__.py | 2 +- .../operations/_documents_operations.py | 103 +- .../azure/search/documents/_search_client.py | 5 + .../agent/_generated/models/__init__.py | 80 - ..._knowledge_agent_retrieval_client_enums.py | 26 - .../agent/_generated/models/_models_py3.py | 1112 ------- .../search/documents/agent/models/__init__.py | 78 - .../documents/aio/_search_client_async.py | 5 + .../documents/indexes/_generated/__init__.py | 2 +- .../indexes/_generated/_configuration.py | 6 +- .../_generated/_search_service_client.py | 14 +- .../indexes/_generated/_utils/__init__.py | 2 +- .../_generated/_utils/serialization.py | 4 +- .../indexes/_generated/_utils/utils.py | 2 +- .../indexes/_generated/aio/__init__.py | 2 +- .../indexes/_generated/aio/_configuration.py | 6 +- .../_generated/aio/_search_service_client.py | 16 +- .../_generated/aio/operations/__init__.py | 6 +- .../aio/operations/_aliases_operations.py | 29 +- .../operations/_data_sources_operations.py | 29 +- .../aio/operations/_indexers_operations.py | 54 +- .../aio/operations/_indexes_operations.py | 39 +- ...ions.py => _knowledge_bases_operations.py} | 207 +- .../_knowledge_sources_operations.py | 96 +- .../_search_service_client_operations.py | 13 +- .../aio/operations/_skillsets_operations.py | 34 +- .../operations/_synonym_maps_operations.py | 29 +- .../indexes/_generated/models/__init__.py | 98 +- .../indexes/_generated/models/_models_py3.py | 2126 +++++++++--- .../models/_search_service_client_enums.py | 100 +- .../indexes/_generated/operations/__init__.py | 6 +- .../operations/_aliases_operations.py | 39 +- .../operations/_data_sources_operations.py | 39 +- .../operations/_indexers_operations.py | 74 +- .../operations/_indexes_operations.py | 53 +- ...ions.py => _knowledge_bases_operations.py} | 239 +- .../_knowledge_sources_operations.py | 133 +- .../_search_service_client_operations.py | 17 +- .../operations/_skillsets_operations.py | 46 +- .../operations/_synonym_maps_operations.py | 39 +- .../documents/indexes/_search_index_client.py | 109 +- .../indexes/aio/_search_index_client.py | 109 +- .../documents/indexes/models/__init__.py | 92 +- .../search/documents/indexes/models/_index.py | 192 +- .../documents/indexes/models/_models.py | 196 +- .../{agent => knowledgebases}/__init__.py | 4 +- .../_generated/__init__.py | 6 +- .../_generated/_configuration.py | 24 +- .../_knowledge_base_retrieval_client.py} | 28 +- .../_generated/_patch.py | 0 .../_generated/_utils/__init__.py | 2 +- .../_generated/_utils/serialization.py | 4 +- .../_generated/aio/__init__.py | 6 +- 
.../_generated/aio/_configuration.py | 24 +- .../aio/_knowledge_base_retrieval_client.py} | 28 +- .../_generated/aio/_patch.py | 0 .../_generated/aio}/operations/__init__.py | 2 +- .../_knowledge_retrieval_operations.py | 68 +- .../_generated/aio/operations/_patch.py | 0 .../_generated/models/__init__.py | 140 + .../_knowledge_base_retrieval_client_enums.py | 61 + .../_generated/models/_models_py3.py | 2875 +++++++++++++++++ .../_generated/models/_patch.py | 0 .../_generated}/operations/__init__.py | 2 +- .../_knowledge_retrieval_operations.py | 70 +- .../_generated/operations/_patch.py | 0 .../_generated/py.typed | 0 .../_knowledgebase_client.py} | 46 +- .../{agent => knowledgebases}/aio/__init__.py | 4 +- .../aio/_knowledgebase_client_async.py} | 46 +- .../knowledgebases/models/__init__.py | 146 + .../async_tests/test_buffered_sender_async.py | 20 +- ...knowledge_base_configuration_live_async.py | 163 + .../test_knowledge_base_live_async.py | 236 ++ ...dge_source_remote_sharepoint_live_async.py | 138 + .../test_knowledge_source_web_live_async.py | 139 + .../async_tests/test_search_client_async.py | 63 +- ...earch_client_buffered_sender_live_async.py | 32 +- ...search_client_index_document_live_async.py | 4 +- .../test_search_client_search_live_async.py | 53 +- ...st_search_index_client_alias_live_async.py | 12 +- .../test_search_index_client_async.py | 23 +- ...rch_index_client_data_source_live_async.py | 52 +- .../test_search_index_client_live_async.py | 147 +- ...search_index_client_skillset_live_async.py | 116 +- ...rch_index_client_synonym_map_live_async.py | 8 +- .../test_search_indexer_client_live_async.py | 150 +- .../azure-search-documents/tests/conftest.py | 4 +- .../tests/perfstress_tests/autocomplete.py | 22 +- .../perfstress_tests/search_documents.py | 18 +- .../tests/perfstress_tests/suggest.py | 22 +- .../tests/search_service_preparer.py | 5 +- .../tests/test_buffered_sender.py | 24 +- .../tests/test_index_documents_batch.py | 4 +- .../tests/test_index_field_helpers.py | 4 +- .../test_knowledge_base_configuration_live.py | 163 + .../tests/test_knowledge_base_live.py | 235 ++ ...knowledge_source_remote_sharepoint_live.py | 129 + .../tests/test_knowledge_source_web_live.py | 136 + .../tests/test_models.py | 153 +- .../tests/test_queries.py | 4 +- .../tests/test_regex_flags.py | 40 +- .../tests/test_search_client.py | 182 +- .../tests/test_search_client_basic_live.py | 12 +- ...test_search_client_buffered_sender_live.py | 32 +- .../test_search_client_index_document_live.py | 4 +- .../tests/test_search_client_search_live.py | 58 +- .../tests/test_search_index_client.py | 12 +- .../test_search_index_client_alias_live.py | 4 +- ...st_search_index_client_data_source_live.py | 60 +- .../tests/test_search_index_client_live.py | 129 +- .../test_search_index_client_skillset_live.py | 148 +- ...st_search_index_client_synonym_map_live.py | 4 +- .../tests/test_search_indexer_client_live.py | 100 +- .../tests/test_serialization.py | 26 +- 131 files changed, 9717 insertions(+), 2970 deletions(-) delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/agent/_generated/models/__init__.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/agent/_generated/models/_knowledge_agent_retrieval_client_enums.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/agent/_generated/models/_models_py3.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/agent/models/__init__.py rename 
sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/{_knowledge_agents_operations.py => _knowledge_bases_operations.py} (77%) rename sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/{_knowledge_agents_operations.py => _knowledge_bases_operations.py} (79%) rename sdk/search/azure-search-documents/azure/search/documents/{agent => knowledgebases}/__init__.py (92%) rename sdk/search/azure-search-documents/azure/search/documents/{agent => knowledgebases}/_generated/__init__.py (80%) rename sdk/search/azure-search-documents/azure/search/documents/{agent => knowledgebases}/_generated/_configuration.py (70%) rename sdk/search/azure-search-documents/azure/search/documents/{agent/_generated/_knowledge_agent_retrieval_client.py => knowledgebases/_generated/_knowledge_base_retrieval_client.py} (81%) rename sdk/search/azure-search-documents/azure/search/documents/{agent => knowledgebases}/_generated/_patch.py (100%) rename sdk/search/azure-search-documents/azure/search/documents/{agent => knowledgebases}/_generated/_utils/__init__.py (86%) rename sdk/search/azure-search-documents/azure/search/documents/{agent => knowledgebases}/_generated/_utils/serialization.py (99%) rename sdk/search/azure-search-documents/azure/search/documents/{agent => knowledgebases}/_generated/aio/__init__.py (80%) rename sdk/search/azure-search-documents/azure/search/documents/{agent => knowledgebases}/_generated/aio/_configuration.py (70%) rename sdk/search/azure-search-documents/azure/search/documents/{agent/_generated/aio/_knowledge_agent_retrieval_client.py => knowledgebases/_generated/aio/_knowledge_base_retrieval_client.py} (81%) rename sdk/search/azure-search-documents/azure/search/documents/{agent => knowledgebases}/_generated/aio/_patch.py (100%) rename sdk/search/azure-search-documents/azure/search/documents/{agent/_generated => knowledgebases/_generated/aio}/operations/__init__.py (94%) rename sdk/search/azure-search-documents/azure/search/documents/{agent => knowledgebases}/_generated/aio/operations/_knowledge_retrieval_operations.py (73%) rename sdk/search/azure-search-documents/azure/search/documents/{agent => knowledgebases}/_generated/aio/operations/_patch.py (100%) create mode 100644 sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/models/__init__.py create mode 100644 sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/models/_knowledge_base_retrieval_client_enums.py create mode 100644 sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/models/_models_py3.py rename sdk/search/azure-search-documents/azure/search/documents/{agent => knowledgebases}/_generated/models/_patch.py (100%) rename sdk/search/azure-search-documents/azure/search/documents/{agent/_generated/aio => knowledgebases/_generated}/operations/__init__.py (94%) rename sdk/search/azure-search-documents/azure/search/documents/{agent => knowledgebases}/_generated/operations/_knowledge_retrieval_operations.py (76%) rename sdk/search/azure-search-documents/azure/search/documents/{agent => knowledgebases}/_generated/operations/_patch.py (100%) rename sdk/search/azure-search-documents/azure/search/documents/{agent => knowledgebases}/_generated/py.typed (100%) rename sdk/search/azure-search-documents/azure/search/documents/{agent/_agent_client.py => knowledgebases/_knowledgebase_client.py} (70%) rename sdk/search/azure-search-documents/azure/search/documents/{agent => 
knowledgebases}/aio/__init__.py (91%) rename sdk/search/azure-search-documents/azure/search/documents/{agent/aio/_agent_client_async.py => knowledgebases/aio/_knowledgebase_client_async.py} (71%) create mode 100644 sdk/search/azure-search-documents/azure/search/documents/knowledgebases/models/__init__.py create mode 100644 sdk/search/azure-search-documents/tests/async_tests/test_knowledge_base_configuration_live_async.py create mode 100644 sdk/search/azure-search-documents/tests/async_tests/test_knowledge_base_live_async.py create mode 100644 sdk/search/azure-search-documents/tests/async_tests/test_knowledge_source_remote_sharepoint_live_async.py create mode 100644 sdk/search/azure-search-documents/tests/async_tests/test_knowledge_source_web_live_async.py create mode 100644 sdk/search/azure-search-documents/tests/test_knowledge_base_configuration_live.py create mode 100644 sdk/search/azure-search-documents/tests/test_knowledge_base_live.py create mode 100644 sdk/search/azure-search-documents/tests/test_knowledge_source_remote_sharepoint_live.py create mode 100644 sdk/search/azure-search-documents/tests/test_knowledge_source_web_live.py diff --git a/sdk/search/azure-search-documents/CHANGELOG.md b/sdk/search/azure-search-documents/CHANGELOG.md index b444766db8e2..a640496c1019 100644 --- a/sdk/search/azure-search-documents/CHANGELOG.md +++ b/sdk/search/azure-search-documents/CHANGELOG.md @@ -1,14 +1,90 @@ # Release History -## 11.7.0b2 (Unreleased) +## 11.7.0b2 (2025-11-13) ### Features Added -### Breaking Changes +- Added new models: + - `azure.search.documents.indexes.models.AIServices` + - `azure.search.documents.indexes.models.CompletedSynchronizationState` + - `azure.search.documents.indexes.models.ContentUnderstandingSkill` + - `azure.search.documents.indexes.models.ContentUnderstandingSkillChunkingProperties` + - `azure.search.documents.indexes.models.ContentUnderstandingSkillChunkingUnit` + - `azure.search.documents.indexes.models.ContentUnderstandingSkillExtractionOptions` + - `azure.search.documents.indexes.models.IndexedOneLakeKnowledgeSource` + - `azure.search.documents.indexes.models.IndexedOneLakeKnowledgeSourceParameters` + - `azure.search.documents.indexes.models.IndexedSharePointContainerName` + - `azure.search.documents.indexes.models.IndexedSharePointKnowledgeSource` + - `azure.search.documents.indexes.models.IndexedSharePointKnowledgeSourceParameters` + - `azure.search.documents.indexes.models.IndexerRuntime` + - `azure.search.documents.indexes.models.KnowledgeRetrievalLowReasoningEffort` + - `azure.search.documents.indexes.models.KnowledgeRetrievalMediumReasoningEffort` + - `azure.search.documents.indexes.models.KnowledgeRetrievalMinimalReasoningEffort` + - `azure.search.documents.indexes.models.KnowledgeRetrievalOutputMode` + - `azure.search.documents.indexes.models.KnowledgeRetrievalReasoningEffort` + - `azure.search.documents.indexes.models.KnowledgeRetrievalReasoningEffortKind` + - `azure.search.documents.indexes.models.KnowledgeSourceAzureOpenAIVectorizer` + - `azure.search.documents.indexes.models.KnowledgeSourceContentExtractionMode` + - `azure.search.documents.indexes.models.KnowledgeSourceIngestionParameters` + - `azure.search.documents.indexes.models.KnowledgeSourceIngestionPermissionOption` + - `azure.search.documents.indexes.models.KnowledgeSourceStatistics` + - `azure.search.documents.indexes.models.KnowledgeSourceStatus` + - `azure.search.documents.indexes.models.KnowledgeSourceSynchronizationStatus` + - 
`azure.search.documents.indexes.models.KnowledgeSourceVectorizer` + - `azure.search.documents.indexes.models.RemoteSharePointKnowledgeSource` + - `azure.search.documents.indexes.models.RemoteSharePointKnowledgeSourceParameters` + - `azure.search.documents.indexes.models.SearchIndexFieldReference` + - `azure.search.documents.indexes.models.ServiceIndexersRuntime` + - `azure.search.documents.indexes.models.SynchronizationState` + - `azure.search.documents.indexes.models.WebKnowledgeSource` + - `azure.search.documents.indexes.models.WebKnowledgeSourceDomain` + - `azure.search.documents.indexes.models.WebKnowledgeSourceDomains` + - `azure.search.documents.indexes.models.WebKnowledgeSourceParameters` + +- Expanded existing models and enums: + - Added support for `avg`, `min`, `max`, and `cardinality` metrics on `azure.search.documents.models.FacetResult`. + - Added `is_adls_gen2` and `ingestion_parameters` options on `azure.search.documents.indexes.models.AzureBlobKnowledgeSourceParameters`. + - Added support for `gpt-5`, `gpt-5-mini`, and `gpt-5-nano` values on `azure.search.documents.indexes.models.AzureOpenAIModelName`. + - Added support for `web`, `remoteSharePoint`, `indexedSharePoint`, and `indexedOneLake` values on `azure.search.documents.indexes.models.KnowledgeSourceKind`. + - Added support for `onelake` and `sharepoint` values on `azure.search.documents.indexes.models.SearchIndexerDataSourceConnection.type`. + - Added `azure.search.documents.indexes.models.SearchField.sensitivity_label`. + - Added `azure.search.documents.indexes.models.SearchIndexerStatus.runtime`. + - Added `azure.search.documents.indexes.models.SearchIndex.purview_enabled`. + - Added `azure.search.documents.indexes.models.SearchServiceLimits.max_cumulative_indexer_runtime_seconds`. + - Added `azure.search.documents.indexes.models.SearchServiceStatistics.indexers_runtime`. + - Added `product` aggregation support to `azure.search.documents.indexes.models.ScoringFunctionAggregation`. + - Added `share_point` to `azure.search.documents.indexes.models.SearchIndexerDataSourceType`. + - Added `include_references`, `include_reference_source_data`, `always_query_source`, and `reranker_threshold` options on `azure.search.documents.knowledgebases.models.SearchIndexKnowledgeSourceParams`. + - Added `error` tracking details on `azure.search.documents.knowledgebases.models.KnowledgeBaseActivityRecord` derivatives. + +- Client and service enhancements: + - Added support for HTTP 206 partial content responses when calling `azure.search.documents.knowledgebases.KnowledgeBaseRetrievalClient.knowledge_retrieval.retrieve`. + - Added optional `x_ms_enable_elevated_read` keyword to `azure.search.documents.SearchClient.search` and `azure.search.documents.aio.SearchClient.search` for elevated document reads. -### Bugs Fixed +### Breaking Changes -### Other Changes +> These changes apply to the latest preview release only and do not affect generally available versions. 
+ +- Knowledge base naming and routing refresh: + - Renamed the knowledge agent surface area to the knowledge base equivalents: + - `azure.search.documents.indexes.models.KnowledgeAgent` -> `azure.search.documents.indexes.models.KnowledgeBase` + - `azure.search.documents.indexes.models.KnowledgeAgentAzureOpenAIModel` -> `azure.search.documents.indexes.models.KnowledgeBaseAzureOpenAIModel` + - `azure.search.documents.indexes.models.KnowledgeAgentModel` -> `azure.search.documents.indexes.models.KnowledgeBaseModel` + - `azure.search.documents.indexes.models.KnowledgeAgentModelKind` -> `azure.search.documents.indexes.models.KnowledgeBaseModelKind` + - Knowledge base clients now target `/knowledgebases` REST routes and accept `knowledge_base_name` instead of the agent name parameter. + - Replaced `azure.search.documents.indexes.models.KnowledgeAgentOutputConfiguration` with `azure.search.documents.indexes.models.KnowledgeBase.output_mode`. + - Replaced `azure.search.documents.indexes.models.KnowledgeAgentOutputConfigurationModality` with `azure.search.documents.indexes.models.KnowledgeRetrievalOutputMode`. + - Removed `azure.search.documents.indexes.models.KnowledgeAgentRequestLimits`; callers should apply request guardrails at the service level. +- Knowledge source parameterization updates: + - Updated `azure.search.documents.indexes.models.AzureBlobKnowledgeSourceParameters` to use `azure.search.documents.indexes.models.KnowledgeSourceIngestionParameters`, replacing the previous `identity`, `embedding_model`, `chat_completion_model`, `ingestion_schedule`, and `disable_image_verbalization` properties with the new `is_adls_gen2` and `ingestion_parameters` shape. + - Updated `azure.search.documents.indexes.models.KnowledgeSourceReference` to carry only the source name, moving the `include_references`, `include_reference_source_data`, `always_query_source`, `max_sub_queries`, and `reranker_threshold` options onto the concrete parameter types. +- Compression configuration cleanup: + - Removed the `default_oversampling` property from `azure.search.documents.indexes.models.BinaryQuantizationCompression`, `azure.search.documents.indexes.models.ScalarQuantizationCompression`, and `azure.search.documents.indexes.models.VectorSearchCompression`. + - Removed the `rerank_with_original_vectors` property from `azure.search.documents.indexes.models.BinaryQuantizationCompression`, `azure.search.documents.indexes.models.ScalarQuantizationCompression`, and `azure.search.documents.indexes.models.VectorSearchCompression`. +- Knowledge source parameter field realignment: + - Replaced `azure.search.documents.indexes.models.SearchIndexKnowledgeSourceParameters.source_data_select` with `azure.search.documents.indexes.models.SearchIndexKnowledgeSourceParameters.source_data_fields`. + - Added `azure.search.documents.indexes.models.SearchIndexKnowledgeSourceParameters.search_fields` for field mapping. + - Added optional `azure.search.documents.indexes.models.SearchIndexKnowledgeSourceParameters.semantic_configuration_name`. 
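+
+The sketch below illustrates how the renamed preview surface described above might be used. It is illustrative only: the exact `KnowledgeBaseRetrievalClient` constructor signature is assumed from the rename notes in this entry, and the `x_ms_enable_elevated_read` keyword is taken from the client enhancements listed above, not from the package reference.
+
+```python
+from azure.core.credentials import AzureKeyCredential
+from azure.search.documents import SearchClient
+from azure.search.documents.knowledgebases import KnowledgeBaseRetrievalClient
+
+credential = AzureKeyCredential("<api-key>")
+endpoint = "https://<service-name>.search.windows.net"
+
+# Knowledge base clients now accept `knowledge_base_name` (instead of the former
+# agent name parameter) and target the /knowledgebases REST routes.
+kb_client = KnowledgeBaseRetrievalClient(
+    endpoint=endpoint,
+    knowledge_base_name="<knowledge-base-name>",
+    credential=credential,
+)
+
+# The new optional keyword performs an elevated read that bypasses document-level
+# permission checks for this query only.
+search_client = SearchClient(endpoint=endpoint, index_name="<index-name>", credential=credential)
+results = search_client.search(search_text="contoso", x_ms_enable_elevated_read=True)
+```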
## 11.7.0b1 (2025-09-05) diff --git a/sdk/search/azure-search-documents/assets.json b/sdk/search/azure-search-documents/assets.json index c43acc0bbf7d..4ab0734b6c11 100644 --- a/sdk/search/azure-search-documents/assets.json +++ b/sdk/search/azure-search-documents/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/search/azure-search-documents", - "Tag": "python/search/azure-search-documents_5fa507ca6f" + "Tag": "python/search/azure-search-documents_5403fd605d" } diff --git a/sdk/search/azure-search-documents/azure/search/documents/_api_versions.py b/sdk/search/azure-search-documents/azure/search/documents/_api_versions.py index 63bbb287e1c8..d5a084e0fd25 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_api_versions.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_api_versions.py @@ -11,7 +11,7 @@ class ApiVersion(str, Enum, metaclass=CaseInsensitiveEnumMeta): V2020_06_30 = "2020-06-30" V2023_11_01 = "2023-11-01" V2024_07_01 = "2024-07-01" - V2025_08_01_PREVIEW = "2025-08-01-preview" + V2025_11_01_PREVIEW = "2025-11-01-preview" -DEFAULT_VERSION = ApiVersion.V2025_08_01_PREVIEW +DEFAULT_VERSION = ApiVersion.V2025_11_01_PREVIEW diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/__init__.py index b24a07c02d65..c6cd95396f11 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- # pylint: disable=wrong-import-position diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_configuration.py index 5ebd4a3efbe9..5677faaf28d6 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_configuration.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -21,13 +21,13 @@ class SearchIndexClientConfiguration: # pylint: disable=too-many-instance-attri :type endpoint: str :param index_name: The name of the index. Required. :type index_name: str - :keyword api_version: Api Version. Default value is "2025-08-01-preview". Note that overriding + :keyword api_version: Api Version. Default value is "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ def __init__(self, endpoint: str, index_name: str, **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2025-08-01-preview") + api_version: str = kwargs.pop("api_version", "2025-11-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_search_index_client.py index 122aa5e41f78..8cde28c7d859 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_search_index_client.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -27,7 +27,7 @@ class SearchIndexClient: :type endpoint: str :param index_name: The name of the index. Required. :type index_name: str - :keyword api_version: Api Version. Default value is "2025-08-01-preview". Note that overriding + :keyword api_version: Api Version. Default value is "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_utils/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_utils/__init__.py index f986b371549e..4473821c4ebe 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/_utils/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_utils/__init__.py @@ -1,4 +1,4 @@ # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_utils/serialization.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_utils/serialization.py index 003e1c89fb35..9b8154c91dc2 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/_utils/serialization.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_utils/serialization.py @@ -1,7 +1,7 @@ # pylint: disable=line-too-long,useless-suppression,too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- @@ -785,7 +785,7 @@ def serialize_data(self, data, data_type, **kwargs): # If dependencies is empty, try with current data class # It has to be a subclass of Enum anyway - enum_type = self.dependencies.get(data_type, data.__class__) + enum_type = self.dependencies.get(data_type, cast(type, data.__class__)) if issubclass(enum_type, Enum): return Serializer.serialize_enum(data, enum_obj=enum_type) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/__init__.py index b24a07c02d65..c6cd95396f11 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- # pylint: disable=wrong-import-position diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_configuration.py index ed4c0a0a8a8d..562d704a6973 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_configuration.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -21,13 +21,13 @@ class SearchIndexClientConfiguration: # pylint: disable=too-many-instance-attri :type endpoint: str :param index_name: The name of the index. Required. :type index_name: str - :keyword api_version: Api Version. Default value is "2025-08-01-preview". Note that overriding + :keyword api_version: Api Version. Default value is "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ def __init__(self, endpoint: str, index_name: str, **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2025-08-01-preview") + api_version: str = kwargs.pop("api_version", "2025-11-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_search_index_client.py index 916d6e5380e5..fbcf048dfbbd 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_search_index_client.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -27,7 +27,7 @@ class SearchIndexClient: :type endpoint: str :param index_name: The name of the index. Required. :type index_name: str - :keyword api_version: Api Version. Default value is "2025-08-01-preview". Note that overriding + :keyword api_version: Api Version. Default value is "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/__init__.py index de96b0faddf2..749e5a6ba198 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- # pylint: disable=wrong-import-position diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py index 29df61ea153e..f35b4acec1c7 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py @@ -1,7 +1,7 @@ # pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -112,7 +112,10 @@ async def count(self, request_options: Optional[_models.RequestOptions] = None, if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("int", pipeline_response.http_response) @@ -127,6 +130,7 @@ async def search_get( self, search_text: Optional[str] = None, x_ms_query_source_authorization: Optional[str] = None, + x_ms_enable_elevated_read: Optional[bool] = None, search_options: Optional[_models.SearchOptions] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any @@ -143,6 +147,9 @@ async def search_get( executed. This token is used to enforce security restrictions on documents. Default value is None. :type x_ms_query_source_authorization: str + :param x_ms_enable_elevated_read: A value that enables elevated read that bypass document level + permission checks for the query operation. Default value is None. + :type x_ms_enable_elevated_read: bool :param search_options: Parameter group. Default value is None. :type search_options: ~azure.search.documents.models.SearchOptions :param request_options: Parameter group. Default value is None. 
@@ -261,6 +268,7 @@ async def search_get( speller=_speller, semantic_fields=_semantic_fields, x_ms_query_source_authorization=x_ms_query_source_authorization, + x_ms_enable_elevated_read=x_ms_enable_elevated_read, api_version=api_version, headers=_headers, params=_params, @@ -280,7 +288,10 @@ async def search_get( if response.status_code not in [200, 206]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchDocumentsResult", pipeline_response.http_response) @@ -295,6 +306,7 @@ async def search_post( self, search_request: _models.SearchRequest, x_ms_query_source_authorization: Optional[str] = None, + x_ms_enable_elevated_read: Optional[bool] = None, request_options: Optional[_models.RequestOptions] = None, *, content_type: str = "application/json", @@ -311,6 +323,9 @@ async def search_post( executed. This token is used to enforce security restrictions on documents. Default value is None. :type x_ms_query_source_authorization: str + :param x_ms_enable_elevated_read: A value that enables elevated read that bypass document level + permission checks for the query operation. Default value is None. + :type x_ms_enable_elevated_read: bool :param request_options: Parameter group. Default value is None. :type request_options: ~azure.search.documents.models.RequestOptions :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -326,6 +341,7 @@ async def search_post( self, search_request: IO[bytes], x_ms_query_source_authorization: Optional[str] = None, + x_ms_enable_elevated_read: Optional[bool] = None, request_options: Optional[_models.RequestOptions] = None, *, content_type: str = "application/json", @@ -342,6 +358,9 @@ async def search_post( executed. This token is used to enforce security restrictions on documents. Default value is None. :type x_ms_query_source_authorization: str + :param x_ms_enable_elevated_read: A value that enables elevated read that bypass document level + permission checks for the query operation. Default value is None. + :type x_ms_enable_elevated_read: bool :param request_options: Parameter group. Default value is None. :type request_options: ~azure.search.documents.models.RequestOptions :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -357,6 +376,7 @@ async def search_post( self, search_request: Union[_models.SearchRequest, IO[bytes]], x_ms_query_source_authorization: Optional[str] = None, + x_ms_enable_elevated_read: Optional[bool] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any ) -> _models.SearchDocumentsResult: @@ -372,6 +392,9 @@ async def search_post( executed. This token is used to enforce security restrictions on documents. Default value is None. :type x_ms_query_source_authorization: str + :param x_ms_enable_elevated_read: A value that enables elevated read that bypass document level + permission checks for the query operation. Default value is None. + :type x_ms_enable_elevated_read: bool :param request_options: Parameter group. Default value is None. 
:type request_options: ~azure.search.documents.models.RequestOptions :return: SearchDocumentsResult or the result of cls(response) @@ -407,6 +430,7 @@ async def search_post( _request = build_search_post_request( x_ms_client_request_id=_x_ms_client_request_id, x_ms_query_source_authorization=x_ms_query_source_authorization, + x_ms_enable_elevated_read=x_ms_enable_elevated_read, api_version=api_version, content_type=content_type, json=_json, @@ -429,7 +453,10 @@ async def search_post( if response.status_code not in [200, 206]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchDocumentsResult", pipeline_response.http_response) @@ -445,6 +472,7 @@ async def get( key: str, selected_fields: Optional[list[str]] = None, x_ms_query_source_authorization: Optional[str] = None, + x_ms_enable_elevated_read: Optional[bool] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any ) -> dict[str, Any]: @@ -462,6 +490,9 @@ async def get( executed. This token is used to enforce security restrictions on documents. Default value is None. :type x_ms_query_source_authorization: str + :param x_ms_enable_elevated_read: A value that enables elevated read that bypass document level + permission checks for the query operation. Default value is None. + :type x_ms_enable_elevated_read: bool :param request_options: Parameter group. Default value is None. :type request_options: ~azure.search.documents.models.RequestOptions :return: dict mapping str to any or the result of cls(response) @@ -491,6 +522,7 @@ async def get( selected_fields=selected_fields, x_ms_client_request_id=_x_ms_client_request_id, x_ms_query_source_authorization=x_ms_query_source_authorization, + x_ms_enable_elevated_read=x_ms_enable_elevated_read, api_version=api_version, headers=_headers, params=_params, @@ -510,7 +542,10 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("{object}", pipeline_response.http_response) @@ -617,7 +652,10 @@ async def suggest_get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SuggestDocumentsResult", pipeline_response.http_response) @@ -750,7 +788,10 @@ async def suggest_post( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = 
self._deserialize("SuggestDocumentsResult", pipeline_response.http_response) @@ -883,7 +924,10 @@ async def index( if response.status_code not in [200, 207]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("IndexDocumentsResult", pipeline_response.http_response) @@ -986,7 +1030,10 @@ async def autocomplete_get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("AutocompleteResult", pipeline_response.http_response) @@ -1119,7 +1166,10 @@ async def autocomplete_post( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("AutocompleteResult", pipeline_response.http_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py index 061cb503b94a..e91cb0f48750 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- # pylint: disable=wrong-import-position diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models_py3.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models_py3.py index 4e74e0b6cc7a..3d761266bc2b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models_py3.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models_py3.py @@ -1,18 +1,18 @@ # pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from typing import Any, Optional, TYPE_CHECKING, Union, MutableMapping +from collections.abc import MutableMapping +from typing import Any, Optional, TYPE_CHECKING, Union from .._utils import serialization as _serialization -JSON = MutableMapping[str, Any] - if TYPE_CHECKING: from .. import models as _models +JSON = MutableMapping[str, Any] class AutocompleteItem(_serialization.Model): @@ -465,8 +465,17 @@ class FacetResult(_serialization.Model): :ivar count: The approximate count of documents falling within the bucket described by this facet. :vartype count: int + :ivar avg: The resulting total avg for the facet when a avg metric is requested. + :vartype avg: float + :ivar min: The resulting total min for the facet when a min metric is requested. + :vartype min: float + :ivar max: The resulting total max for the facet when a max metric is requested. + :vartype max: float :ivar sum: The resulting total sum for the facet when a sum metric is requested. :vartype sum: float + :ivar cardinality: The resulting total cardinality for the facet when a cardinality metric is + requested. + :vartype cardinality: int :ivar facets: The nested facet query results for the search operation, organized as a collection of buckets for each faceted field; null if the query did not contain any nested facets. @@ -475,14 +484,22 @@ class FacetResult(_serialization.Model): _validation = { "count": {"readonly": True}, + "avg": {"readonly": True}, + "min": {"readonly": True}, + "max": {"readonly": True}, "sum": {"readonly": True}, + "cardinality": {"readonly": True}, "facets": {"readonly": True}, } _attribute_map = { "additional_properties": {"key": "", "type": "{object}"}, "count": {"key": "count", "type": "int"}, + "avg": {"key": "avg", "type": "float"}, + "min": {"key": "min", "type": "float"}, + "max": {"key": "max", "type": "float"}, "sum": {"key": "sum", "type": "float"}, + "cardinality": {"key": "cardinality", "type": "int"}, "facets": {"key": "@search\\.facets", "type": "{[FacetResult]}"}, } @@ -495,7 +512,11 @@ def __init__(self, *, additional_properties: Optional[dict[str, Any]] = None, ** super().__init__(**kwargs) self.additional_properties = additional_properties self.count: Optional[int] = None + self.avg: Optional[float] = None + self.min: Optional[float] = None + self.max: Optional[float] = None self.sum: Optional[float] = None + self.cardinality: Optional[int] = None self.facets: Optional[dict[str, list["_models.FacetResult"]]] = None @@ -1920,7 +1941,7 @@ def __init__(self, **kwargs: Any) -> None: class SearchScoreThreshold(VectorThreshold): - """The results of the vector query will filter based on the '\\@search.score' value. Note this is + """The results of the vector query will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. @@ -1929,7 +1950,7 @@ class SearchScoreThreshold(VectorThreshold): :ivar kind: The kind of threshold used to filter vector queries. Required. Known values are: "vectorSimilarity" and "searchScore". :vartype kind: str or ~azure.search.documents.models.VectorThresholdKind - :ivar value: The threshold will filter based on the '\\@search.score' value. Note this is the + :ivar value: The threshold will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. 
The threshold direction will be chosen for higher @search.score. Required. :vartype value: float @@ -1947,7 +1968,7 @@ class SearchScoreThreshold(VectorThreshold): def __init__(self, *, value: float, **kwargs: Any) -> None: """ - :keyword value: The threshold will filter based on the '\\@search.score' value. Note this is the + :keyword value: The threshold will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. Required. :paramtype value: float @@ -2385,8 +2406,8 @@ class VectorQuery(_serialization.Model): :ivar kind: The kind of vector query being performed. Required. Known values are: "vector", "text", "imageUrl", and "imageBinary". :vartype kind: str or ~azure.search.documents.models.VectorQueryKind - :ivar k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :vartype k_nearest_neighbors: int + :ivar k: Number of nearest neighbors to return as top hits. + :vartype k: int :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector searched. :vartype fields: str @@ -2426,7 +2447,7 @@ class VectorQuery(_serialization.Model): _attribute_map = { "kind": {"key": "kind", "type": "str"}, - "k_nearest_neighbors": {"key": "k", "type": "int"}, + "k": {"key": "k", "type": "int"}, "fields": {"key": "fields", "type": "str"}, "exhaustive": {"key": "exhaustive", "type": "bool"}, "oversampling": {"key": "oversampling", "type": "float"}, @@ -2448,7 +2469,7 @@ class VectorQuery(_serialization.Model): def __init__( self, *, - k_nearest_neighbors: Optional[int] = None, + k: Optional[int] = None, fields: Optional[str] = None, exhaustive: Optional[bool] = None, oversampling: Optional[float] = None, @@ -2459,8 +2480,8 @@ def __init__( **kwargs: Any ) -> None: """ - :keyword k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :paramtype k_nearest_neighbors: int + :keyword k: Number of nearest neighbors to return as top hits. + :paramtype k: int :keyword fields: Vector Fields of type Collection(Edm.Single) to be included in the vector searched. :paramtype fields: str @@ -2495,7 +2516,7 @@ def __init__( """ super().__init__(**kwargs) self.kind: Optional[str] = None - self.k_nearest_neighbors = k_nearest_neighbors + self.k = k self.fields = fields self.exhaustive = exhaustive self.oversampling = oversampling @@ -2514,8 +2535,8 @@ class VectorizableImageBinaryQuery(VectorQuery): :ivar kind: The kind of vector query being performed. Required. Known values are: "vector", "text", "imageUrl", and "imageBinary". :vartype kind: str or ~azure.search.documents.models.VectorQueryKind - :ivar k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :vartype k_nearest_neighbors: int + :ivar k: Number of nearest neighbors to return as top hits. + :vartype k: int :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector searched. 
:vartype fields: str @@ -2558,7 +2579,7 @@ class VectorizableImageBinaryQuery(VectorQuery): _attribute_map = { "kind": {"key": "kind", "type": "str"}, - "k_nearest_neighbors": {"key": "k", "type": "int"}, + "k": {"key": "k", "type": "int"}, "fields": {"key": "fields", "type": "str"}, "exhaustive": {"key": "exhaustive", "type": "bool"}, "oversampling": {"key": "oversampling", "type": "float"}, @@ -2572,7 +2593,7 @@ class VectorizableImageBinaryQuery(VectorQuery): def __init__( self, *, - k_nearest_neighbors: Optional[int] = None, + k: Optional[int] = None, fields: Optional[str] = None, exhaustive: Optional[bool] = None, oversampling: Optional[float] = None, @@ -2584,8 +2605,8 @@ def __init__( **kwargs: Any ) -> None: """ - :keyword k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :paramtype k_nearest_neighbors: int + :keyword k: Number of nearest neighbors to return as top hits. + :paramtype k: int :keyword fields: Vector Fields of type Collection(Edm.Single) to be included in the vector searched. :paramtype fields: str @@ -2622,7 +2643,7 @@ def __init__( :paramtype base64_image: str """ super().__init__( - k_nearest_neighbors=k_nearest_neighbors, + k=k, fields=fields, exhaustive=exhaustive, oversampling=oversampling, @@ -2645,8 +2666,8 @@ class VectorizableImageUrlQuery(VectorQuery): :ivar kind: The kind of vector query being performed. Required. Known values are: "vector", "text", "imageUrl", and "imageBinary". :vartype kind: str or ~azure.search.documents.models.VectorQueryKind - :ivar k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :vartype k_nearest_neighbors: int + :ivar k: Number of nearest neighbors to return as top hits. + :vartype k: int :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector searched. :vartype fields: str @@ -2688,7 +2709,7 @@ class VectorizableImageUrlQuery(VectorQuery): _attribute_map = { "kind": {"key": "kind", "type": "str"}, - "k_nearest_neighbors": {"key": "k", "type": "int"}, + "k": {"key": "k", "type": "int"}, "fields": {"key": "fields", "type": "str"}, "exhaustive": {"key": "exhaustive", "type": "bool"}, "oversampling": {"key": "oversampling", "type": "float"}, @@ -2702,7 +2723,7 @@ class VectorizableImageUrlQuery(VectorQuery): def __init__( self, *, - k_nearest_neighbors: Optional[int] = None, + k: Optional[int] = None, fields: Optional[str] = None, exhaustive: Optional[bool] = None, oversampling: Optional[float] = None, @@ -2714,8 +2735,8 @@ def __init__( **kwargs: Any ) -> None: """ - :keyword k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :paramtype k_nearest_neighbors: int + :keyword k: Number of nearest neighbors to return as top hits. + :paramtype k: int :keyword fields: Vector Fields of type Collection(Edm.Single) to be included in the vector searched. :paramtype fields: str @@ -2751,7 +2772,7 @@ def __init__( :paramtype url: str """ super().__init__( - k_nearest_neighbors=k_nearest_neighbors, + k=k, fields=fields, exhaustive=exhaustive, oversampling=oversampling, @@ -2774,8 +2795,8 @@ class VectorizableTextQuery(VectorQuery): :ivar kind: The kind of vector query being performed. Required. Known values are: "vector", "text", "imageUrl", and "imageBinary". :vartype kind: str or ~azure.search.documents.models.VectorQueryKind - :ivar k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :vartype k_nearest_neighbors: int + :ivar k: Number of nearest neighbors to return as top hits. 
+ :vartype k: int :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector searched. :vartype fields: str @@ -2821,7 +2842,7 @@ class VectorizableTextQuery(VectorQuery): _attribute_map = { "kind": {"key": "kind", "type": "str"}, - "k_nearest_neighbors": {"key": "k", "type": "int"}, + "k": {"key": "k", "type": "int"}, "fields": {"key": "fields", "type": "str"}, "exhaustive": {"key": "exhaustive", "type": "bool"}, "oversampling": {"key": "oversampling", "type": "float"}, @@ -2837,7 +2858,7 @@ def __init__( self, *, text: str, - k_nearest_neighbors: Optional[int] = None, + k: Optional[int] = None, fields: Optional[str] = None, exhaustive: Optional[bool] = None, oversampling: Optional[float] = None, @@ -2849,8 +2870,8 @@ def __init__( **kwargs: Any ) -> None: """ - :keyword k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :paramtype k_nearest_neighbors: int + :keyword k: Number of nearest neighbors to return as top hits. + :paramtype k: int :keyword fields: Vector Fields of type Collection(Edm.Single) to be included in the vector searched. :paramtype fields: str @@ -2889,7 +2910,7 @@ def __init__( :paramtype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType """ super().__init__( - k_nearest_neighbors=k_nearest_neighbors, + k=k, fields=fields, exhaustive=exhaustive, oversampling=oversampling, @@ -2912,8 +2933,8 @@ class VectorizedQuery(VectorQuery): :ivar kind: The kind of vector query being performed. Required. Known values are: "vector", "text", "imageUrl", and "imageBinary". :vartype kind: str or ~azure.search.documents.models.VectorQueryKind - :ivar k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :vartype k_nearest_neighbors: int + :ivar k: Number of nearest neighbors to return as top hits. + :vartype k: int :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector searched. :vartype fields: str @@ -2956,7 +2977,7 @@ class VectorizedQuery(VectorQuery): _attribute_map = { "kind": {"key": "kind", "type": "str"}, - "k_nearest_neighbors": {"key": "k", "type": "int"}, + "k": {"key": "k", "type": "int"}, "fields": {"key": "fields", "type": "str"}, "exhaustive": {"key": "exhaustive", "type": "bool"}, "oversampling": {"key": "oversampling", "type": "float"}, @@ -2971,7 +2992,7 @@ def __init__( self, *, vector: list[float], - k_nearest_neighbors: Optional[int] = None, + k: Optional[int] = None, fields: Optional[str] = None, exhaustive: Optional[bool] = None, oversampling: Optional[float] = None, @@ -2982,8 +3003,8 @@ def __init__( **kwargs: Any ) -> None: """ - :keyword k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :paramtype k_nearest_neighbors: int + :keyword k: Number of nearest neighbors to return as top hits. + :paramtype k: int :keyword fields: Vector Fields of type Collection(Edm.Single) to be included in the vector searched. 
:paramtype fields: str @@ -3019,7 +3040,7 @@ def __init__( :paramtype vector: list[float] """ super().__init__( - k_nearest_neighbors=k_nearest_neighbors, + k=k, fields=fields, exhaustive=exhaustive, oversampling=oversampling, diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_search_index_client_enums.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_search_index_client_enums.py index 8297cadb55f0..4fcd7d8d579c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_search_index_client_enums.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_search_index_client_enums.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -321,6 +321,8 @@ class ScoringStatistics(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The scoring statistics will be calculated locally for lower latency.""" GLOBAL = "global" """The scoring statistics will be calculated globally for more consistent scoring.""" + GLOBAL_ENUM = "global" + """The scoring statistics will be calculated globally for more consistent scoring.""" class SearchMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/__init__.py index de96b0faddf2..749e5a6ba198 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
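
Note on the vector-query hunks above: the constructor keyword is renamed from k_nearest_neighbors to k across VectorizedQuery, VectorizableTextQuery, VectorizableImageUrlQuery, and VectorizableImageBinaryQuery, while the serialized attribute key stays "k", so the wire payload is unchanged. A minimal sketch of caller code against the renamed keyword, assuming a placeholder endpoint, index name, API key, a vector field named "contentVector", and a precomputed query embedding (all illustrative, not taken from this patch):

    from azure.core.credentials import AzureKeyCredential
    from azure.search.documents import SearchClient
    from azure.search.documents.models import VectorizedQuery

    client = SearchClient(
        endpoint="https://<service>.search.windows.net",
        index_name="<index>",
        credential=AzureKeyCredential("<api-key>"),
    )

    query_embedding = [0.01, 0.02, 0.03]  # placeholder embedding values
    vector_query = VectorizedQuery(
        vector=query_embedding,
        k=5,                     # previously k_nearest_neighbors
        fields="contentVector",  # assumed vector field name
    )
    results = client.search(search_text=None, vector_queries=[vector_query])
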
# -------------------------------------------------------------------------- # pylint: disable=wrong-import-position diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py index 64518595356a..75b753900867 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py @@ -1,7 +1,7 @@ # pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -37,7 +37,7 @@ def build_count_request(*, x_ms_client_request_id: Optional[str] = None, **kwarg _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -88,12 +88,13 @@ def build_search_get_request( speller: Optional[Union[str, _models.QuerySpellerType]] = None, semantic_fields: Optional[list[str]] = None, x_ms_query_source_authorization: Optional[str] = None, + x_ms_enable_elevated_read: Optional[bool] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -173,6 +174,10 @@ def build_search_get_request( _headers["x-ms-query-source-authorization"] = _SERIALIZER.header( "x_ms_query_source_authorization", x_ms_query_source_authorization, "str" ) + if x_ms_enable_elevated_read is not None: + _headers["x-ms-enable-elevated-read"] = _SERIALIZER.header( + "x_ms_enable_elevated_read", x_ms_enable_elevated_read, "bool" + ) _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -182,12 +187,13 @@ def build_search_post_request( *, x_ms_client_request_id: Optional[str] = None, x_ms_query_source_authorization: Optional[str] = None, + x_ms_enable_elevated_read: Optional[bool] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = 
_headers.pop("Accept", "application/json") @@ -204,6 +210,10 @@ def build_search_post_request( _headers["x-ms-query-source-authorization"] = _SERIALIZER.header( "x_ms_query_source_authorization", x_ms_query_source_authorization, "str" ) + if x_ms_enable_elevated_read is not None: + _headers["x-ms-enable-elevated-read"] = _SERIALIZER.header( + "x_ms_enable_elevated_read", x_ms_enable_elevated_read, "bool" + ) if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -217,12 +227,13 @@ def build_get_request( selected_fields: Optional[list[str]] = None, x_ms_client_request_id: Optional[str] = None, x_ms_query_source_authorization: Optional[str] = None, + x_ms_enable_elevated_read: Optional[bool] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -245,6 +256,10 @@ def build_get_request( _headers["x-ms-query-source-authorization"] = _SERIALIZER.header( "x_ms_query_source_authorization", x_ms_query_source_authorization, "str" ) + if x_ms_enable_elevated_read is not None: + _headers["x-ms-enable-elevated-read"] = _SERIALIZER.header( + "x_ms_enable_elevated_read", x_ms_enable_elevated_read, "bool" + ) _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -269,7 +284,7 @@ def build_suggest_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -310,7 +325,7 @@ def build_suggest_post_request(*, x_ms_client_request_id: Optional[str] = None, _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -334,7 +349,7 @@ def build_index_request(*, x_ms_client_request_id: Optional[str] = None, **kwarg _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -372,7 +387,7 @@ def build_autocomplete_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: 
str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -411,7 +426,7 @@ def build_autocomplete_post_request(*, x_ms_client_request_id: Optional[str] = N _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -502,7 +517,10 @@ def count(self, request_options: Optional[_models.RequestOptions] = None, **kwar if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("int", pipeline_response.http_response) @@ -517,6 +535,7 @@ def search_get( self, search_text: Optional[str] = None, x_ms_query_source_authorization: Optional[str] = None, + x_ms_enable_elevated_read: Optional[bool] = None, search_options: Optional[_models.SearchOptions] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any @@ -533,6 +552,9 @@ def search_get( executed. This token is used to enforce security restrictions on documents. Default value is None. :type x_ms_query_source_authorization: str + :param x_ms_enable_elevated_read: A value that enables elevated read that bypass document level + permission checks for the query operation. Default value is None. + :type x_ms_enable_elevated_read: bool :param search_options: Parameter group. Default value is None. :type search_options: ~azure.search.documents.models.SearchOptions :param request_options: Parameter group. Default value is None. @@ -651,6 +673,7 @@ def search_get( speller=_speller, semantic_fields=_semantic_fields, x_ms_query_source_authorization=x_ms_query_source_authorization, + x_ms_enable_elevated_read=x_ms_enable_elevated_read, api_version=api_version, headers=_headers, params=_params, @@ -670,7 +693,10 @@ def search_get( if response.status_code not in [200, 206]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchDocumentsResult", pipeline_response.http_response) @@ -685,6 +711,7 @@ def search_post( self, search_request: _models.SearchRequest, x_ms_query_source_authorization: Optional[str] = None, + x_ms_enable_elevated_read: Optional[bool] = None, request_options: Optional[_models.RequestOptions] = None, *, content_type: str = "application/json", @@ -701,6 +728,9 @@ def search_post( executed. This token is used to enforce security restrictions on documents. Default value is None. 
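
For context on the documents request-builder hunks above: every builder now defaults to api-version 2025-11-01-preview, and the search/get builders accept an optional x_ms_enable_elevated_read flag that is emitted as the x-ms-enable-elevated-read header only when a value is provided. A rough sketch of exercising one builder directly, assuming the private generated module path shown in this diff (illustration only, not a supported public entry point):

    from azure.search.documents._generated.operations._documents_operations import (
        build_search_post_request,
    )

    request = build_search_post_request(
        x_ms_enable_elevated_read=True,     # omitted from the request entirely when None
        json={"search": "hotel", "top": 5},  # illustrative request body
    )
    # Per the hunks above, the builder adds the x-ms-enable-elevated-read header and
    # defaults the api-version query parameter to 2025-11-01-preview.
    print(request.headers)

Callers normally reach this through DocumentsOperations.search_get/search_post rather than the builder itself.
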
:type x_ms_query_source_authorization: str + :param x_ms_enable_elevated_read: A value that enables elevated read that bypass document level + permission checks for the query operation. Default value is None. + :type x_ms_enable_elevated_read: bool :param request_options: Parameter group. Default value is None. :type request_options: ~azure.search.documents.models.RequestOptions :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -716,6 +746,7 @@ def search_post( self, search_request: IO[bytes], x_ms_query_source_authorization: Optional[str] = None, + x_ms_enable_elevated_read: Optional[bool] = None, request_options: Optional[_models.RequestOptions] = None, *, content_type: str = "application/json", @@ -732,6 +763,9 @@ def search_post( executed. This token is used to enforce security restrictions on documents. Default value is None. :type x_ms_query_source_authorization: str + :param x_ms_enable_elevated_read: A value that enables elevated read that bypass document level + permission checks for the query operation. Default value is None. + :type x_ms_enable_elevated_read: bool :param request_options: Parameter group. Default value is None. :type request_options: ~azure.search.documents.models.RequestOptions :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -747,6 +781,7 @@ def search_post( self, search_request: Union[_models.SearchRequest, IO[bytes]], x_ms_query_source_authorization: Optional[str] = None, + x_ms_enable_elevated_read: Optional[bool] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any ) -> _models.SearchDocumentsResult: @@ -762,6 +797,9 @@ def search_post( executed. This token is used to enforce security restrictions on documents. Default value is None. :type x_ms_query_source_authorization: str + :param x_ms_enable_elevated_read: A value that enables elevated read that bypass document level + permission checks for the query operation. Default value is None. + :type x_ms_enable_elevated_read: bool :param request_options: Parameter group. Default value is None. :type request_options: ~azure.search.documents.models.RequestOptions :return: SearchDocumentsResult or the result of cls(response) @@ -797,6 +835,7 @@ def search_post( _request = build_search_post_request( x_ms_client_request_id=_x_ms_client_request_id, x_ms_query_source_authorization=x_ms_query_source_authorization, + x_ms_enable_elevated_read=x_ms_enable_elevated_read, api_version=api_version, content_type=content_type, json=_json, @@ -819,7 +858,10 @@ def search_post( if response.status_code not in [200, 206]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchDocumentsResult", pipeline_response.http_response) @@ -835,6 +877,7 @@ def get( key: str, selected_fields: Optional[list[str]] = None, x_ms_query_source_authorization: Optional[str] = None, + x_ms_enable_elevated_read: Optional[bool] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any ) -> dict[str, Any]: @@ -852,6 +895,9 @@ def get( executed. This token is used to enforce security restrictions on documents. Default value is None. 
:type x_ms_query_source_authorization: str + :param x_ms_enable_elevated_read: A value that enables elevated read that bypass document level + permission checks for the query operation. Default value is None. + :type x_ms_enable_elevated_read: bool :param request_options: Parameter group. Default value is None. :type request_options: ~azure.search.documents.models.RequestOptions :return: dict mapping str to any or the result of cls(response) @@ -881,6 +927,7 @@ def get( selected_fields=selected_fields, x_ms_client_request_id=_x_ms_client_request_id, x_ms_query_source_authorization=x_ms_query_source_authorization, + x_ms_enable_elevated_read=x_ms_enable_elevated_read, api_version=api_version, headers=_headers, params=_params, @@ -900,7 +947,10 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("{object}", pipeline_response.http_response) @@ -1007,7 +1057,10 @@ def suggest_get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SuggestDocumentsResult", pipeline_response.http_response) @@ -1140,7 +1193,10 @@ def suggest_post( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SuggestDocumentsResult", pipeline_response.http_response) @@ -1273,7 +1329,10 @@ def index( if response.status_code not in [200, 207]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("IndexDocumentsResult", pipeline_response.http_response) @@ -1376,7 +1435,10 @@ def autocomplete_get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("AutocompleteResult", pipeline_response.http_response) @@ -1509,7 +1571,10 @@ def autocomplete_post( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + 
pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("AutocompleteResult", pipeline_response.http_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_search_client.py b/sdk/search/azure-search-documents/azure/search/documents/_search_client.py index 432df9673d5b..7b46cf50f5e8 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_search_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_search_client.py @@ -183,6 +183,7 @@ def search( debug: Optional[Union[str, QueryDebugMode]] = None, hybrid_search: Optional[HybridSearch] = None, x_ms_query_source_authorization: Optional[str] = None, + x_ms_enable_elevated_read: Optional[bool] = None, **kwargs: Any ) -> SearchItemPaged[Dict]: # pylint:disable=too-many-locals, disable=redefined-builtin @@ -314,6 +315,9 @@ def search( executed. This token is used to enforce security restrictions on documents. Default value is None. :paramtype x_ms_query_source_authorization: str + :keyword x_ms_enable_elevated_read: A value that enables elevated read that bypass document level + permission checks for the query operation. Default value is None. + :paramtype x_ms_enable_elevated_read: bool :return: List of search results. :rtype: SearchItemPaged[dict] @@ -403,6 +407,7 @@ def search( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) kwargs["x_ms_query_source_authorization"] = x_ms_query_source_authorization + kwargs["x_ms_enable_elevated_read"] = x_ms_enable_elevated_read kwargs["api_version"] = self._api_version return SearchItemPaged(self._client, query, kwargs, page_iterator_class=SearchPageIterator) diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/models/__init__.py deleted file mode 100644 index 39e57d0fa7c6..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/models/__init__.py +++ /dev/null @@ -1,80 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
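
At the public surface, SearchClient.search gains the matching x_ms_enable_elevated_read keyword and forwards it, alongside x_ms_query_source_authorization, into the generated operation's kwargs. A hedged usage sketch with placeholder endpoint, index name, key, token, and result field (none of which come from this patch):

    from azure.core.credentials import AzureKeyCredential
    from azure.search.documents import SearchClient

    client = SearchClient(
        endpoint="https://<service>.search.windows.net",
        index_name="<index>",
        credential=AzureKeyCredential("<api-key>"),
    )

    results = client.search(
        search_text="budget hotel",
        x_ms_query_source_authorization="<user-token>",  # optional per-query authorization token
        x_ms_enable_elevated_read=True,  # new keyword: elevated read that bypasses document-level permission checks
    )
    for doc in results:
        print(doc.get("hotelName"))  # "hotelName" is an assumed index field

Leaving the flag as None preserves the previous behavior, since the header is only sent when a value is supplied.
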
-# -------------------------------------------------------------------------- -# pylint: disable=wrong-import-position - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from ._patch import * # pylint: disable=unused-wildcard-import - - -from ._models_py3 import ( # type: ignore - ErrorAdditionalInfo, - ErrorDetail, - ErrorResponse, - KnowledgeAgentActivityRecord, - KnowledgeAgentAzureBlobActivityArguments, - KnowledgeAgentAzureBlobActivityRecord, - KnowledgeAgentAzureBlobReference, - KnowledgeAgentMessage, - KnowledgeAgentMessageContent, - KnowledgeAgentMessageImageContent, - KnowledgeAgentMessageImageContentImage, - KnowledgeAgentMessageTextContent, - KnowledgeAgentModelAnswerSynthesisActivityRecord, - KnowledgeAgentModelQueryPlanningActivityRecord, - KnowledgeAgentReference, - KnowledgeAgentRetrievalActivityRecord, - KnowledgeAgentRetrievalRequest, - KnowledgeAgentRetrievalResponse, - KnowledgeAgentSearchIndexActivityArguments, - KnowledgeAgentSearchIndexActivityRecord, - KnowledgeAgentSearchIndexReference, - KnowledgeAgentSemanticRerankerActivityRecord, - KnowledgeSourceParams, - RequestOptions, - SearchIndexKnowledgeSourceParams, -) - -from ._knowledge_agent_retrieval_client_enums import ( # type: ignore - KnowledgeAgentMessageContentType, - KnowledgeSourceKind, -) -from ._patch import __all__ as _patch_all -from ._patch import * -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "ErrorAdditionalInfo", - "ErrorDetail", - "ErrorResponse", - "KnowledgeAgentActivityRecord", - "KnowledgeAgentAzureBlobActivityArguments", - "KnowledgeAgentAzureBlobActivityRecord", - "KnowledgeAgentAzureBlobReference", - "KnowledgeAgentMessage", - "KnowledgeAgentMessageContent", - "KnowledgeAgentMessageImageContent", - "KnowledgeAgentMessageImageContentImage", - "KnowledgeAgentMessageTextContent", - "KnowledgeAgentModelAnswerSynthesisActivityRecord", - "KnowledgeAgentModelQueryPlanningActivityRecord", - "KnowledgeAgentReference", - "KnowledgeAgentRetrievalActivityRecord", - "KnowledgeAgentRetrievalRequest", - "KnowledgeAgentRetrievalResponse", - "KnowledgeAgentSearchIndexActivityArguments", - "KnowledgeAgentSearchIndexActivityRecord", - "KnowledgeAgentSearchIndexReference", - "KnowledgeAgentSemanticRerankerActivityRecord", - "KnowledgeSourceParams", - "RequestOptions", - "SearchIndexKnowledgeSourceParams", - "KnowledgeAgentMessageContentType", - "KnowledgeSourceKind", -] -__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore -_patch_sdk() diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/models/_knowledge_agent_retrieval_client_enums.py b/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/models/_knowledge_agent_retrieval_client_enums.py deleted file mode 100644 index ceabcf4a87b1..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/models/_knowledge_agent_retrieval_client_enums.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from enum import Enum -from azure.core import CaseInsensitiveEnumMeta - - -class KnowledgeAgentMessageContentType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The type of message content.""" - - TEXT = "text" - """Text message content kind.""" - IMAGE = "image" - """Image message content kind.""" - - -class KnowledgeSourceKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The kind of the knowledge source.""" - - SEARCH_INDEX = "searchIndex" - """A knowledge source that reads data from a Search Index.""" - AZURE_BLOB = "azureBlob" - """A knowledge source that read and ingest data from Azure Blob Storage to a Search Index.""" diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/models/_models_py3.py b/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/models/_models_py3.py deleted file mode 100644 index 8d63f3d827f2..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/models/_models_py3.py +++ /dev/null @@ -1,1112 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import Any, Optional, TYPE_CHECKING, MutableMapping - -from .._utils import serialization as _serialization - -JSON = MutableMapping[str, Any] - -if TYPE_CHECKING: - from .. import models as _models - - -class ErrorAdditionalInfo(_serialization.Model): - """The resource management error additional info. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar type: The additional info type. - :vartype type: str - :ivar info: The additional info. - :vartype info: JSON - """ - - _validation = { - "type": {"readonly": True}, - "info": {"readonly": True}, - } - - _attribute_map = { - "type": {"key": "type", "type": "str"}, - "info": {"key": "info", "type": "object"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.type: Optional[str] = None - self.info: Optional[JSON] = None - - -class ErrorDetail(_serialization.Model): - """The error detail. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar code: The error code. - :vartype code: str - :ivar message: The error message. - :vartype message: str - :ivar target: The error target. - :vartype target: str - :ivar details: The error details. - :vartype details: list[~azure.search.documents.agent.models.ErrorDetail] - :ivar additional_info: The error additional info. 
- :vartype additional_info: list[~azure.search.documents.agent.models.ErrorAdditionalInfo] - """ - - _validation = { - "code": {"readonly": True}, - "message": {"readonly": True}, - "target": {"readonly": True}, - "details": {"readonly": True}, - "additional_info": {"readonly": True}, - } - - _attribute_map = { - "code": {"key": "code", "type": "str"}, - "message": {"key": "message", "type": "str"}, - "target": {"key": "target", "type": "str"}, - "details": {"key": "details", "type": "[ErrorDetail]"}, - "additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.code: Optional[str] = None - self.message: Optional[str] = None - self.target: Optional[str] = None - self.details: Optional[list["_models.ErrorDetail"]] = None - self.additional_info: Optional[list["_models.ErrorAdditionalInfo"]] = None - - -class ErrorResponse(_serialization.Model): - """Common error response for all Azure Resource Manager APIs to return error details for failed - operations. (This also follows the OData error response format.). - - :ivar error: The error object. - :vartype error: ~azure.search.documents.agent.models.ErrorDetail - """ - - _attribute_map = { - "error": {"key": "error", "type": "ErrorDetail"}, - } - - def __init__(self, *, error: Optional["_models.ErrorDetail"] = None, **kwargs: Any) -> None: - """ - :keyword error: The error object. - :paramtype error: ~azure.search.documents.agent.models.ErrorDetail - """ - super().__init__(**kwargs) - self.error = error - - -class KnowledgeAgentActivityRecord(_serialization.Model): - """Base type for activity records. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - KnowledgeAgentRetrievalActivityRecord, KnowledgeAgentModelAnswerSynthesisActivityRecord, - KnowledgeAgentModelQueryPlanningActivityRecord, KnowledgeAgentSemanticRerankerActivityRecord - - All required parameters must be populated in order to send to server. - - :ivar id: The ID of the activity record. Required. - :vartype id: int - :ivar type: The type of the activity record. Required. - :vartype type: str - :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. - :vartype elapsed_ms: int - """ - - _validation = { - "id": {"required": True}, - "type": {"required": True}, - } - - _attribute_map = { - "id": {"key": "id", "type": "int"}, - "type": {"key": "type", "type": "str"}, - "elapsed_ms": {"key": "elapsedMs", "type": "int"}, - } - - _subtype_map = { - "type": { - "KnowledgeAgentRetrievalActivityRecord": "KnowledgeAgentRetrievalActivityRecord", - "modelAnswerSynthesis": "KnowledgeAgentModelAnswerSynthesisActivityRecord", - "modelQueryPlanning": "KnowledgeAgentModelQueryPlanningActivityRecord", - "semanticReranker": "KnowledgeAgentSemanticRerankerActivityRecord", - } - } - - def __init__( - self, *, id: int, elapsed_ms: Optional[int] = None, **kwargs: Any # pylint: disable=redefined-builtin - ) -> None: - """ - :keyword id: The ID of the activity record. Required. - :paramtype id: int - :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity. - :paramtype elapsed_ms: int - """ - super().__init__(**kwargs) - self.id = id - self.type: Optional[str] = None - self.elapsed_ms = elapsed_ms - - -class KnowledgeAgentAzureBlobActivityArguments(_serialization.Model): - """Represents the arguments the azure blob retrieval activity was run with. 
- - :ivar search: The search string used to query blob contents. - :vartype search: str - """ - - _attribute_map = { - "search": {"key": "search", "type": "str"}, - } - - def __init__(self, *, search: Optional[str] = None, **kwargs: Any) -> None: - """ - :keyword search: The search string used to query blob contents. - :paramtype search: str - """ - super().__init__(**kwargs) - self.search = search - - -class KnowledgeAgentRetrievalActivityRecord(KnowledgeAgentActivityRecord): - """Represents a retrieval activity record. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - KnowledgeAgentAzureBlobActivityRecord, KnowledgeAgentSearchIndexActivityRecord - - All required parameters must be populated in order to send to server. - - :ivar id: The ID of the activity record. Required. - :vartype id: int - :ivar type: The type of the activity record. Required. - :vartype type: str - :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. - :vartype elapsed_ms: int - :ivar knowledge_source_name: The knowledge source for the retrieval activity. - :vartype knowledge_source_name: str - :ivar query_time: The query time for this retrieval activity. - :vartype query_time: ~datetime.datetime - :ivar count: The count of documents retrieved that were sufficiently relevant to pass the - reranker threshold. - :vartype count: int - """ - - _validation = { - "id": {"required": True}, - "type": {"required": True}, - } - - _attribute_map = { - "id": {"key": "id", "type": "int"}, - "type": {"key": "type", "type": "str"}, - "elapsed_ms": {"key": "elapsedMs", "type": "int"}, - "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, - "query_time": {"key": "queryTime", "type": "iso-8601"}, - "count": {"key": "count", "type": "int"}, - } - - _subtype_map = { - "type": { - "azureBlob": "KnowledgeAgentAzureBlobActivityRecord", - "searchIndex": "KnowledgeAgentSearchIndexActivityRecord", - } - } - - def __init__( - self, - *, - id: int, # pylint: disable=redefined-builtin - elapsed_ms: Optional[int] = None, - knowledge_source_name: Optional[str] = None, - query_time: Optional[datetime.datetime] = None, - count: Optional[int] = None, - **kwargs: Any - ) -> None: - """ - :keyword id: The ID of the activity record. Required. - :paramtype id: int - :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity. - :paramtype elapsed_ms: int - :keyword knowledge_source_name: The knowledge source for the retrieval activity. - :paramtype knowledge_source_name: str - :keyword query_time: The query time for this retrieval activity. - :paramtype query_time: ~datetime.datetime - :keyword count: The count of documents retrieved that were sufficiently relevant to pass the - reranker threshold. - :paramtype count: int - """ - super().__init__(id=id, elapsed_ms=elapsed_ms, **kwargs) - self.type: str = "KnowledgeAgentRetrievalActivityRecord" - self.knowledge_source_name = knowledge_source_name - self.query_time = query_time - self.count = count - - -class KnowledgeAgentAzureBlobActivityRecord(KnowledgeAgentRetrievalActivityRecord): - """Represents a azure blob retrieval activity record. - - All required parameters must be populated in order to send to server. - - :ivar id: The ID of the activity record. Required. - :vartype id: int - :ivar type: The type of the activity record. Required. - :vartype type: str - :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. 
- :vartype elapsed_ms: int - :ivar knowledge_source_name: The knowledge source for the retrieval activity. - :vartype knowledge_source_name: str - :ivar query_time: The query time for this retrieval activity. - :vartype query_time: ~datetime.datetime - :ivar count: The count of documents retrieved that were sufficiently relevant to pass the - reranker threshold. - :vartype count: int - :ivar azure_blob_arguments: The azure blob arguments for the retrieval activity. - :vartype azure_blob_arguments: - ~azure.search.documents.agent.models.KnowledgeAgentAzureBlobActivityArguments - """ - - _validation = { - "id": {"required": True}, - "type": {"required": True}, - } - - _attribute_map = { - "id": {"key": "id", "type": "int"}, - "type": {"key": "type", "type": "str"}, - "elapsed_ms": {"key": "elapsedMs", "type": "int"}, - "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, - "query_time": {"key": "queryTime", "type": "iso-8601"}, - "count": {"key": "count", "type": "int"}, - "azure_blob_arguments": {"key": "azureBlobArguments", "type": "KnowledgeAgentAzureBlobActivityArguments"}, - } - - def __init__( - self, - *, - id: int, # pylint: disable=redefined-builtin - elapsed_ms: Optional[int] = None, - knowledge_source_name: Optional[str] = None, - query_time: Optional[datetime.datetime] = None, - count: Optional[int] = None, - azure_blob_arguments: Optional["_models.KnowledgeAgentAzureBlobActivityArguments"] = None, - **kwargs: Any - ) -> None: - """ - :keyword id: The ID of the activity record. Required. - :paramtype id: int - :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity. - :paramtype elapsed_ms: int - :keyword knowledge_source_name: The knowledge source for the retrieval activity. - :paramtype knowledge_source_name: str - :keyword query_time: The query time for this retrieval activity. - :paramtype query_time: ~datetime.datetime - :keyword count: The count of documents retrieved that were sufficiently relevant to pass the - reranker threshold. - :paramtype count: int - :keyword azure_blob_arguments: The azure blob arguments for the retrieval activity. - :paramtype azure_blob_arguments: - ~azure.search.documents.agent.models.KnowledgeAgentAzureBlobActivityArguments - """ - super().__init__( - id=id, - elapsed_ms=elapsed_ms, - knowledge_source_name=knowledge_source_name, - query_time=query_time, - count=count, - **kwargs - ) - self.type: str = "azureBlob" - self.azure_blob_arguments = azure_blob_arguments - - -class KnowledgeAgentReference(_serialization.Model): - """Base type for references. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - KnowledgeAgentAzureBlobReference, KnowledgeAgentSearchIndexReference - - All required parameters must be populated in order to send to server. - - :ivar type: The type of the reference. Required. - :vartype type: str - :ivar id: The ID of the reference. Required. - :vartype id: str - :ivar activity_source: The source activity ID for the reference. Required. - :vartype activity_source: int - :ivar source_data: Dictionary of :code:``. - :vartype source_data: dict[str, any] - :ivar reranker_score: The reranker score for the document reference. 
- :vartype reranker_score: float - """ - - _validation = { - "type": {"required": True}, - "id": {"required": True}, - "activity_source": {"required": True}, - } - - _attribute_map = { - "type": {"key": "type", "type": "str"}, - "id": {"key": "id", "type": "str"}, - "activity_source": {"key": "activitySource", "type": "int"}, - "source_data": {"key": "sourceData", "type": "{object}"}, - "reranker_score": {"key": "rerankerScore", "type": "float"}, - } - - _subtype_map = { - "type": {"azureBlob": "KnowledgeAgentAzureBlobReference", "searchIndex": "KnowledgeAgentSearchIndexReference"} - } - - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - activity_source: int, - source_data: Optional[dict[str, Any]] = None, - reranker_score: Optional[float] = None, - **kwargs: Any - ) -> None: - """ - :keyword id: The ID of the reference. Required. - :paramtype id: str - :keyword activity_source: The source activity ID for the reference. Required. - :paramtype activity_source: int - :keyword source_data: Dictionary of :code:``. - :paramtype source_data: dict[str, any] - :keyword reranker_score: The reranker score for the document reference. - :paramtype reranker_score: float - """ - super().__init__(**kwargs) - self.type: Optional[str] = None - self.id = id - self.activity_source = activity_source - self.source_data = source_data - self.reranker_score = reranker_score - - -class KnowledgeAgentAzureBlobReference(KnowledgeAgentReference): - """Represents an Azure Blob Storage document reference. - - All required parameters must be populated in order to send to server. - - :ivar type: The type of the reference. Required. - :vartype type: str - :ivar id: The ID of the reference. Required. - :vartype id: str - :ivar activity_source: The source activity ID for the reference. Required. - :vartype activity_source: int - :ivar source_data: Dictionary of :code:``. - :vartype source_data: dict[str, any] - :ivar reranker_score: The reranker score for the document reference. - :vartype reranker_score: float - :ivar blob_url: The blob URL for the reference. - :vartype blob_url: str - """ - - _validation = { - "type": {"required": True}, - "id": {"required": True}, - "activity_source": {"required": True}, - } - - _attribute_map = { - "type": {"key": "type", "type": "str"}, - "id": {"key": "id", "type": "str"}, - "activity_source": {"key": "activitySource", "type": "int"}, - "source_data": {"key": "sourceData", "type": "{object}"}, - "reranker_score": {"key": "rerankerScore", "type": "float"}, - "blob_url": {"key": "blobUrl", "type": "str"}, - } - - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - activity_source: int, - source_data: Optional[dict[str, Any]] = None, - reranker_score: Optional[float] = None, - blob_url: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword id: The ID of the reference. Required. - :paramtype id: str - :keyword activity_source: The source activity ID for the reference. Required. - :paramtype activity_source: int - :keyword source_data: Dictionary of :code:``. - :paramtype source_data: dict[str, any] - :keyword reranker_score: The reranker score for the document reference. - :paramtype reranker_score: float - :keyword blob_url: The blob URL for the reference. 
- :paramtype blob_url: str - """ - super().__init__( - id=id, activity_source=activity_source, source_data=source_data, reranker_score=reranker_score, **kwargs - ) - self.type: str = "azureBlob" - self.blob_url = blob_url - - -class KnowledgeAgentMessage(_serialization.Model): - """The natural language message style object. - - All required parameters must be populated in order to send to server. - - :ivar role: The role of the tool response. - :vartype role: str - :ivar content: Required. - :vartype content: list[~azure.search.documents.agent.models.KnowledgeAgentMessageContent] - """ - - _validation = { - "content": {"required": True}, - } - - _attribute_map = { - "role": {"key": "role", "type": "str"}, - "content": {"key": "content", "type": "[KnowledgeAgentMessageContent]"}, - } - - def __init__( - self, *, content: list["_models.KnowledgeAgentMessageContent"], role: Optional[str] = None, **kwargs: Any - ) -> None: - """ - :keyword role: The role of the tool response. - :paramtype role: str - :keyword content: Required. - :paramtype content: list[~azure.search.documents.agent.models.KnowledgeAgentMessageContent] - """ - super().__init__(**kwargs) - self.role = role - self.content = content - - -class KnowledgeAgentMessageContent(_serialization.Model): - """Specifies the type of the message content. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - KnowledgeAgentMessageImageContent, KnowledgeAgentMessageTextContent - - All required parameters must be populated in order to send to server. - - :ivar type: The type of the message. Required. Known values are: "text" and "image". - :vartype type: str or ~azure.search.documents.agent.models.KnowledgeAgentMessageContentType - """ - - _validation = { - "type": {"required": True}, - } - - _attribute_map = { - "type": {"key": "type", "type": "str"}, - } - - _subtype_map = {"type": {"image": "KnowledgeAgentMessageImageContent", "text": "KnowledgeAgentMessageTextContent"}} - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.type: Optional[str] = None - - -class KnowledgeAgentMessageImageContent(KnowledgeAgentMessageContent): - """Text message type. - - All required parameters must be populated in order to send to server. - - :ivar type: The type of the message. Required. Known values are: "text" and "image". - :vartype type: str or ~azure.search.documents.agent.models.KnowledgeAgentMessageContentType - :ivar image: Required. - :vartype image: ~azure.search.documents.agent.models.KnowledgeAgentMessageImageContentImage - """ - - _validation = { - "type": {"required": True}, - "image": {"required": True}, - } - - _attribute_map = { - "type": {"key": "type", "type": "str"}, - "image": {"key": "image", "type": "KnowledgeAgentMessageImageContentImage"}, - } - - def __init__(self, *, image: "_models.KnowledgeAgentMessageImageContentImage", **kwargs: Any) -> None: - """ - :keyword image: Required. - :paramtype image: ~azure.search.documents.agent.models.KnowledgeAgentMessageImageContentImage - """ - super().__init__(**kwargs) - self.type: str = "image" - self.image = image - - -class KnowledgeAgentMessageImageContentImage(_serialization.Model): - """KnowledgeAgentMessageImageContentImage. - - All required parameters must be populated in order to send to server. - - :ivar url: The url of the image. Required. 
- :vartype url: str - """ - - _validation = { - "url": {"required": True}, - } - - _attribute_map = { - "url": {"key": "url", "type": "str"}, - } - - def __init__(self, *, url: str, **kwargs: Any) -> None: - """ - :keyword url: The url of the image. Required. - :paramtype url: str - """ - super().__init__(**kwargs) - self.url = url - - -class KnowledgeAgentMessageTextContent(KnowledgeAgentMessageContent): - """Text message type. - - All required parameters must be populated in order to send to server. - - :ivar type: The type of the message. Required. Known values are: "text" and "image". - :vartype type: str or ~azure.search.documents.agent.models.KnowledgeAgentMessageContentType - :ivar text: Required. - :vartype text: str - """ - - _validation = { - "type": {"required": True}, - "text": {"required": True}, - } - - _attribute_map = { - "type": {"key": "type", "type": "str"}, - "text": {"key": "text", "type": "str"}, - } - - def __init__(self, *, text: str, **kwargs: Any) -> None: - """ - :keyword text: Required. - :paramtype text: str - """ - super().__init__(**kwargs) - self.type: str = "text" - self.text = text - - -class KnowledgeAgentModelAnswerSynthesisActivityRecord(KnowledgeAgentActivityRecord): # pylint: disable=name-too-long - """Represents an LLM answer synthesis activity record. - - All required parameters must be populated in order to send to server. - - :ivar id: The ID of the activity record. Required. - :vartype id: int - :ivar type: The type of the activity record. Required. - :vartype type: str - :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. - :vartype elapsed_ms: int - :ivar input_tokens: The number of input tokens for the LLM answer synthesis activity. - :vartype input_tokens: int - :ivar output_tokens: The number of output tokens for the LLM answer synthesis activity. - :vartype output_tokens: int - """ - - _validation = { - "id": {"required": True}, - "type": {"required": True}, - } - - _attribute_map = { - "id": {"key": "id", "type": "int"}, - "type": {"key": "type", "type": "str"}, - "elapsed_ms": {"key": "elapsedMs", "type": "int"}, - "input_tokens": {"key": "inputTokens", "type": "int"}, - "output_tokens": {"key": "outputTokens", "type": "int"}, - } - - def __init__( - self, - *, - id: int, # pylint: disable=redefined-builtin - elapsed_ms: Optional[int] = None, - input_tokens: Optional[int] = None, - output_tokens: Optional[int] = None, - **kwargs: Any - ) -> None: - """ - :keyword id: The ID of the activity record. Required. - :paramtype id: int - :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity. - :paramtype elapsed_ms: int - :keyword input_tokens: The number of input tokens for the LLM answer synthesis activity. - :paramtype input_tokens: int - :keyword output_tokens: The number of output tokens for the LLM answer synthesis activity. - :paramtype output_tokens: int - """ - super().__init__(id=id, elapsed_ms=elapsed_ms, **kwargs) - self.type: str = "modelAnswerSynthesis" - self.input_tokens = input_tokens - self.output_tokens = output_tokens - - -class KnowledgeAgentModelQueryPlanningActivityRecord(KnowledgeAgentActivityRecord): # pylint: disable=name-too-long - """Represents an LLM query planning activity record. - - All required parameters must be populated in order to send to server. - - :ivar id: The ID of the activity record. Required. - :vartype id: int - :ivar type: The type of the activity record. Required. 
- :vartype type: str - :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. - :vartype elapsed_ms: int - :ivar input_tokens: The number of input tokens for the LLM query planning activity. - :vartype input_tokens: int - :ivar output_tokens: The number of output tokens for the LLM query planning activity. - :vartype output_tokens: int - """ - - _validation = { - "id": {"required": True}, - "type": {"required": True}, - } - - _attribute_map = { - "id": {"key": "id", "type": "int"}, - "type": {"key": "type", "type": "str"}, - "elapsed_ms": {"key": "elapsedMs", "type": "int"}, - "input_tokens": {"key": "inputTokens", "type": "int"}, - "output_tokens": {"key": "outputTokens", "type": "int"}, - } - - def __init__( - self, - *, - id: int, # pylint: disable=redefined-builtin - elapsed_ms: Optional[int] = None, - input_tokens: Optional[int] = None, - output_tokens: Optional[int] = None, - **kwargs: Any - ) -> None: - """ - :keyword id: The ID of the activity record. Required. - :paramtype id: int - :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity. - :paramtype elapsed_ms: int - :keyword input_tokens: The number of input tokens for the LLM query planning activity. - :paramtype input_tokens: int - :keyword output_tokens: The number of output tokens for the LLM query planning activity. - :paramtype output_tokens: int - """ - super().__init__(id=id, elapsed_ms=elapsed_ms, **kwargs) - self.type: str = "modelQueryPlanning" - self.input_tokens = input_tokens - self.output_tokens = output_tokens - - -class KnowledgeAgentRetrievalRequest(_serialization.Model): - """The input contract for the retrieval request. - - All required parameters must be populated in order to send to server. - - :ivar messages: Required. - :vartype messages: list[~azure.search.documents.agent.models.KnowledgeAgentMessage] - :ivar knowledge_source_params: - :vartype knowledge_source_params: - list[~azure.search.documents.agent.models.KnowledgeSourceParams] - """ - - _validation = { - "messages": {"required": True}, - } - - _attribute_map = { - "messages": {"key": "messages", "type": "[KnowledgeAgentMessage]"}, - "knowledge_source_params": {"key": "knowledgeSourceParams", "type": "[KnowledgeSourceParams]"}, - } - - def __init__( - self, - *, - messages: list["_models.KnowledgeAgentMessage"], - knowledge_source_params: Optional[list["_models.KnowledgeSourceParams"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword messages: Required. - :paramtype messages: list[~azure.search.documents.agent.models.KnowledgeAgentMessage] - :keyword knowledge_source_params: - :paramtype knowledge_source_params: - list[~azure.search.documents.agent.models.KnowledgeSourceParams] - """ - super().__init__(**kwargs) - self.messages = messages - self.knowledge_source_params = knowledge_source_params - - -class KnowledgeAgentRetrievalResponse(_serialization.Model): - """The output contract for the retrieval response. - - :ivar response: - :vartype response: list[~azure.search.documents.agent.models.KnowledgeAgentMessage] - :ivar activity: The activity records for tracking progress and billing implications. - :vartype activity: list[~azure.search.documents.agent.models.KnowledgeAgentActivityRecord] - :ivar references: The references for the retrieval data used in the response. 
- :vartype references: list[~azure.search.documents.agent.models.KnowledgeAgentReference] - """ - - _attribute_map = { - "response": {"key": "response", "type": "[KnowledgeAgentMessage]"}, - "activity": {"key": "activity", "type": "[KnowledgeAgentActivityRecord]"}, - "references": {"key": "references", "type": "[KnowledgeAgentReference]"}, - } - - def __init__( - self, - *, - response: Optional[list["_models.KnowledgeAgentMessage"]] = None, - activity: Optional[list["_models.KnowledgeAgentActivityRecord"]] = None, - references: Optional[list["_models.KnowledgeAgentReference"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword response: - :paramtype response: list[~azure.search.documents.agent.models.KnowledgeAgentMessage] - :keyword activity: The activity records for tracking progress and billing implications. - :paramtype activity: list[~azure.search.documents.agent.models.KnowledgeAgentActivityRecord] - :keyword references: The references for the retrieval data used in the response. - :paramtype references: list[~azure.search.documents.agent.models.KnowledgeAgentReference] - """ - super().__init__(**kwargs) - self.response = response - self.activity = activity - self.references = references - - -class KnowledgeAgentSearchIndexActivityArguments(_serialization.Model): # pylint: disable=name-too-long - """Represents the arguments the search index retrieval activity was run with. - - :ivar search: The search string used to query the search index. - :vartype search: str - :ivar filter: The filter string. - :vartype filter: str - """ - - _attribute_map = { - "search": {"key": "search", "type": "str"}, - "filter": {"key": "filter", "type": "str"}, - } - - def __init__( - self, - *, - search: Optional[str] = None, - filter: Optional[str] = None, # pylint: disable=redefined-builtin - **kwargs: Any - ) -> None: - """ - :keyword search: The search string used to query the search index. - :paramtype search: str - :keyword filter: The filter string. - :paramtype filter: str - """ - super().__init__(**kwargs) - self.search = search - self.filter = filter - - -class KnowledgeAgentSearchIndexActivityRecord(KnowledgeAgentRetrievalActivityRecord): - """Represents a search index retrieval activity record. - - All required parameters must be populated in order to send to server. - - :ivar id: The ID of the activity record. Required. - :vartype id: int - :ivar type: The type of the activity record. Required. - :vartype type: str - :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. - :vartype elapsed_ms: int - :ivar knowledge_source_name: The knowledge source for the retrieval activity. - :vartype knowledge_source_name: str - :ivar query_time: The query time for this retrieval activity. - :vartype query_time: ~datetime.datetime - :ivar count: The count of documents retrieved that were sufficiently relevant to pass the - reranker threshold. - :vartype count: int - :ivar search_index_arguments: The search index arguments for the retrieval activity. 
- :vartype search_index_arguments: - ~azure.search.documents.agent.models.KnowledgeAgentSearchIndexActivityArguments - """ - - _validation = { - "id": {"required": True}, - "type": {"required": True}, - } - - _attribute_map = { - "id": {"key": "id", "type": "int"}, - "type": {"key": "type", "type": "str"}, - "elapsed_ms": {"key": "elapsedMs", "type": "int"}, - "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, - "query_time": {"key": "queryTime", "type": "iso-8601"}, - "count": {"key": "count", "type": "int"}, - "search_index_arguments": {"key": "searchIndexArguments", "type": "KnowledgeAgentSearchIndexActivityArguments"}, - } - - def __init__( - self, - *, - id: int, # pylint: disable=redefined-builtin - elapsed_ms: Optional[int] = None, - knowledge_source_name: Optional[str] = None, - query_time: Optional[datetime.datetime] = None, - count: Optional[int] = None, - search_index_arguments: Optional["_models.KnowledgeAgentSearchIndexActivityArguments"] = None, - **kwargs: Any - ) -> None: - """ - :keyword id: The ID of the activity record. Required. - :paramtype id: int - :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity. - :paramtype elapsed_ms: int - :keyword knowledge_source_name: The knowledge source for the retrieval activity. - :paramtype knowledge_source_name: str - :keyword query_time: The query time for this retrieval activity. - :paramtype query_time: ~datetime.datetime - :keyword count: The count of documents retrieved that were sufficiently relevant to pass the - reranker threshold. - :paramtype count: int - :keyword search_index_arguments: The search index arguments for the retrieval activity. - :paramtype search_index_arguments: - ~azure.search.documents.agent.models.KnowledgeAgentSearchIndexActivityArguments - """ - super().__init__( - id=id, - elapsed_ms=elapsed_ms, - knowledge_source_name=knowledge_source_name, - query_time=query_time, - count=count, - **kwargs - ) - self.type: str = "searchIndex" - self.search_index_arguments = search_index_arguments - - -class KnowledgeAgentSearchIndexReference(KnowledgeAgentReference): - """Represents an Azure Search document reference. - - All required parameters must be populated in order to send to server. - - :ivar type: The type of the reference. Required. - :vartype type: str - :ivar id: The ID of the reference. Required. - :vartype id: str - :ivar activity_source: The source activity ID for the reference. Required. - :vartype activity_source: int - :ivar source_data: Dictionary of :code:``. - :vartype source_data: dict[str, any] - :ivar reranker_score: The reranker score for the document reference. - :vartype reranker_score: float - :ivar doc_key: The document key for the reference. - :vartype doc_key: str - """ - - _validation = { - "type": {"required": True}, - "id": {"required": True}, - "activity_source": {"required": True}, - } - - _attribute_map = { - "type": {"key": "type", "type": "str"}, - "id": {"key": "id", "type": "str"}, - "activity_source": {"key": "activitySource", "type": "int"}, - "source_data": {"key": "sourceData", "type": "{object}"}, - "reranker_score": {"key": "rerankerScore", "type": "float"}, - "doc_key": {"key": "docKey", "type": "str"}, - } - - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - activity_source: int, - source_data: Optional[dict[str, Any]] = None, - reranker_score: Optional[float] = None, - doc_key: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword id: The ID of the reference. Required. 
- :paramtype id: str - :keyword activity_source: The source activity ID for the reference. Required. - :paramtype activity_source: int - :keyword source_data: Dictionary of :code:``. - :paramtype source_data: dict[str, any] - :keyword reranker_score: The reranker score for the document reference. - :paramtype reranker_score: float - :keyword doc_key: The document key for the reference. - :paramtype doc_key: str - """ - super().__init__( - id=id, activity_source=activity_source, source_data=source_data, reranker_score=reranker_score, **kwargs - ) - self.type: str = "searchIndex" - self.doc_key = doc_key - - -class KnowledgeAgentSemanticRerankerActivityRecord(KnowledgeAgentActivityRecord): # pylint: disable=name-too-long - """Represents a semantic ranker activity record. - - All required parameters must be populated in order to send to server. - - :ivar id: The ID of the activity record. Required. - :vartype id: int - :ivar type: The type of the activity record. Required. - :vartype type: str - :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. - :vartype elapsed_ms: int - :ivar input_tokens: The number of input tokens for the semantic ranker activity. - :vartype input_tokens: int - """ - - _validation = { - "id": {"required": True}, - "type": {"required": True}, - } - - _attribute_map = { - "id": {"key": "id", "type": "int"}, - "type": {"key": "type", "type": "str"}, - "elapsed_ms": {"key": "elapsedMs", "type": "int"}, - "input_tokens": {"key": "inputTokens", "type": "int"}, - } - - def __init__( - self, - *, - id: int, # pylint: disable=redefined-builtin - elapsed_ms: Optional[int] = None, - input_tokens: Optional[int] = None, - **kwargs: Any - ) -> None: - """ - :keyword id: The ID of the activity record. Required. - :paramtype id: int - :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity. - :paramtype elapsed_ms: int - :keyword input_tokens: The number of input tokens for the semantic ranker activity. - :paramtype input_tokens: int - """ - super().__init__(id=id, elapsed_ms=elapsed_ms, **kwargs) - self.type: str = "semanticReranker" - self.input_tokens = input_tokens - - -class KnowledgeSourceParams(_serialization.Model): - """KnowledgeSourceParams. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - SearchIndexKnowledgeSourceParams - - All required parameters must be populated in order to send to server. - - :ivar knowledge_source_name: The name of the index the params apply to. Required. - :vartype knowledge_source_name: str - :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex" and - "azureBlob". - :vartype kind: str or ~azure.search.documents.agent.models.KnowledgeSourceKind - """ - - _validation = { - "knowledge_source_name": {"required": True}, - "kind": {"required": True}, - } - - _attribute_map = { - "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, - "kind": {"key": "kind", "type": "str"}, - } - - _subtype_map = {"kind": {"searchIndex": "SearchIndexKnowledgeSourceParams"}} - - def __init__(self, *, knowledge_source_name: str, **kwargs: Any) -> None: - """ - :keyword knowledge_source_name: The name of the index the params apply to. Required. - :paramtype knowledge_source_name: str - """ - super().__init__(**kwargs) - self.knowledge_source_name = knowledge_source_name - self.kind: Optional[str] = None - - -class RequestOptions(_serialization.Model): - """Parameter group. 
- - :ivar x_ms_client_request_id: The tracking ID sent with the request to help with debugging. - :vartype x_ms_client_request_id: str - """ - - _attribute_map = { - "x_ms_client_request_id": {"key": "x-ms-client-request-id", "type": "str"}, - } - - def __init__(self, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> None: - """ - :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging. - :paramtype x_ms_client_request_id: str - """ - super().__init__(**kwargs) - self.x_ms_client_request_id = x_ms_client_request_id - - -class SearchIndexKnowledgeSourceParams(KnowledgeSourceParams): - """Specifies runtime parameters for a search index knowledge source. - - All required parameters must be populated in order to send to server. - - :ivar knowledge_source_name: The name of the index the params apply to. Required. - :vartype knowledge_source_name: str - :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex" and - "azureBlob". - :vartype kind: str or ~azure.search.documents.agent.models.KnowledgeSourceKind - :ivar filter_add_on: A filter condition applied to the index (e.g., 'State eq VA'). - :vartype filter_add_on: str - """ - - _validation = { - "knowledge_source_name": {"required": True}, - "kind": {"required": True}, - } - - _attribute_map = { - "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, - "kind": {"key": "kind", "type": "str"}, - "filter_add_on": {"key": "filterAddOn", "type": "str"}, - } - - def __init__(self, *, knowledge_source_name: str, filter_add_on: Optional[str] = None, **kwargs: Any) -> None: - """ - :keyword knowledge_source_name: The name of the index the params apply to. Required. - :paramtype knowledge_source_name: str - :keyword filter_add_on: A filter condition applied to the index (e.g., 'State eq VA'). - :paramtype filter_add_on: str - """ - super().__init__(knowledge_source_name=knowledge_source_name, **kwargs) - self.kind: str = "searchIndex" - self.filter_add_on = filter_add_on diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/agent/models/__init__.py deleted file mode 100644 index 50cfc2635a57..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/models/__init__.py +++ /dev/null @@ -1,78 +0,0 @@ -# -------------------------------------------------------------------------- -# -# Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# -# -------------------------------------------------------------------------- - -from .._generated.models import ( - KnowledgeAgentActivityRecord, - KnowledgeAgentAzureBlobActivityArguments, - KnowledgeAgentAzureBlobActivityRecord, - KnowledgeAgentAzureBlobReference, - KnowledgeAgentMessage, - KnowledgeAgentMessageContent, - KnowledgeAgentMessageContentType, - KnowledgeAgentMessageImageContent, - KnowledgeAgentMessageImageContentImage, - KnowledgeAgentMessageTextContent, - KnowledgeAgentModelAnswerSynthesisActivityRecord, - KnowledgeAgentModelQueryPlanningActivityRecord, - KnowledgeAgentReference, - KnowledgeAgentRetrievalActivityRecord, - KnowledgeAgentRetrievalRequest, - KnowledgeAgentRetrievalResponse, - KnowledgeAgentSearchIndexActivityArguments, - KnowledgeAgentSearchIndexActivityRecord, - KnowledgeAgentSearchIndexReference, - KnowledgeAgentSemanticRerankerActivityRecord, - KnowledgeSourceParams, - RequestOptions, - SearchIndexKnowledgeSourceParams, -) - - -__all__ = ( - "KnowledgeAgentActivityRecord", - "KnowledgeAgentAzureBlobActivityArguments", - "KnowledgeAgentAzureBlobActivityRecord", - "KnowledgeAgentAzureBlobReference", - "KnowledgeAgentMessage", - "KnowledgeAgentMessageContent", - "KnowledgeAgentMessageContentType", - "KnowledgeAgentMessageImageContent", - "KnowledgeAgentMessageImageContentImage", - "KnowledgeAgentMessageTextContent", - "KnowledgeAgentModelAnswerSynthesisActivityRecord", - "KnowledgeAgentModelQueryPlanningActivityRecord", - "KnowledgeAgentReference", - "KnowledgeAgentRetrievalActivityRecord", - "KnowledgeAgentRetrievalRequest", - "KnowledgeAgentRetrievalResponse", - "KnowledgeAgentSearchIndexActivityArguments", - "KnowledgeAgentSearchIndexActivityRecord", - "KnowledgeAgentSearchIndexReference", - "KnowledgeAgentSemanticRerankerActivityRecord", - "KnowledgeSourceParams", - "RequestOptions", - "SearchIndexKnowledgeSourceParams", -) diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py index 01a9541ae995..a8d4125fb9e2 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py @@ -185,6 +185,7 @@ async def search( debug: Optional[Union[str, QueryDebugMode]] = None, hybrid_search: Optional[HybridSearch] = None, x_ms_query_source_authorization: Optional[str] = None, + x_ms_enable_elevated_read: Optional[bool] = None, **kwargs ) -> AsyncSearchItemPaged[Dict]: # pylint:disable=too-many-locals, disable=redefined-builtin @@ -317,6 +318,9 @@ async def search( executed. This token is used to enforce security restrictions on documents. Default value is None. :paramtype x_ms_query_source_authorization: str + :keyword x_ms_enable_elevated_read: A value that enables elevated read that bypasses document-level + permission checks for the query operation. Default value is None. + :paramtype x_ms_enable_elevated_read: bool :return: A list of documents (dicts) matching the specified search criteria. :return: List of search results.
:rtype: AsyncSearchItemPaged[dict] @@ -401,6 +405,7 @@ async def search( query.order_by(order_by) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) kwargs["x_ms_query_source_authorization"] = x_ms_query_source_authorization + kwargs["x_ms_enable_elevated_read"] = x_ms_enable_elevated_read kwargs["api_version"] = self._api_version return AsyncSearchItemPaged(self._client, query, kwargs, page_iterator_class=AsyncSearchPageIterator) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/__init__.py index 7b3c14c7b1b6..202320be3a96 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- # pylint: disable=wrong-import-position diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_configuration.py index e900f7a996e1..c4deaa4cc736 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_configuration.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -19,13 +19,13 @@ class SearchServiceClientConfiguration: # pylint: disable=too-many-instance-att :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str - :keyword api_version: Api Version. Default value is "2025-08-01-preview". Note that overriding + :keyword api_version: Api Version. Default value is "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ def __init__(self, endpoint: str, **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2025-08-01-preview") + api_version: str = kwargs.pop("api_version", "2025-11-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_search_service_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_search_service_client.py index 7fccc649dc7f..148199307d85 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_search_service_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_search_service_client.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -20,7 +20,7 @@ DataSourcesOperations, IndexersOperations, IndexesOperations, - KnowledgeAgentsOperations, + KnowledgeBasesOperations, KnowledgeSourcesOperations, SkillsetsOperations, SynonymMapsOperations, @@ -32,8 +32,8 @@ class SearchServiceClient(_SearchServiceClientOperationsMixin): # pylint: disab """Client that can be used to manage and query indexes and documents, as well as manage other resources, on a search service. - :ivar knowledge_agents: KnowledgeAgentsOperations operations - :vartype knowledge_agents: azure.search.documents.indexes.operations.KnowledgeAgentsOperations + :ivar knowledge_bases: KnowledgeBasesOperations operations + :vartype knowledge_bases: azure.search.documents.indexes.operations.KnowledgeBasesOperations :ivar knowledge_sources: KnowledgeSourcesOperations operations :vartype knowledge_sources: azure.search.documents.indexes.operations.KnowledgeSourcesOperations @@ -51,7 +51,7 @@ class SearchServiceClient(_SearchServiceClientOperationsMixin): # pylint: disab :vartype aliases: azure.search.documents.indexes.operations.AliasesOperations :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str - :keyword api_version: Api Version. Default value is "2025-08-01-preview". Note that overriding + :keyword api_version: Api Version. Default value is "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ @@ -85,9 +85,7 @@ def __init__( # pylint: disable=missing-client-constructor-parameter-credential self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) self._serialize.client_side_validation = False - self.knowledge_agents = KnowledgeAgentsOperations( - self._client, self._config, self._serialize, self._deserialize - ) + self.knowledge_bases = KnowledgeBasesOperations(self._client, self._config, self._serialize, self._deserialize) self.knowledge_sources = KnowledgeSourcesOperations( self._client, self._config, self._serialize, self._deserialize ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_utils/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_utils/__init__.py index f986b371549e..4473821c4ebe 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_utils/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_utils/__init__.py @@ -1,4 +1,4 @@ # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_utils/serialization.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_utils/serialization.py index 003e1c89fb35..9b8154c91dc2 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_utils/serialization.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_utils/serialization.py @@ -1,7 +1,7 @@ # pylint: disable=line-too-long,useless-suppression,too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- @@ -785,7 +785,7 @@ def serialize_data(self, data, data_type, **kwargs): # If dependencies is empty, try with current data class # It has to be a subclass of Enum anyway - enum_type = self.dependencies.get(data_type, data.__class__) + enum_type = self.dependencies.get(data_type, cast(type, data.__class__)) if issubclass(enum_type, Enum): return Serializer.serialize_enum(data, enum_obj=enum_type) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_utils/utils.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_utils/utils.py index 0d508719ab3c..a9d001691686 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_utils/utils.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_utils/utils.py @@ -1,5 +1,5 @@ # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/__init__.py index 7b3c14c7b1b6..202320be3a96 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- # pylint: disable=wrong-import-position diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_configuration.py index ef3305669e7a..3d51f5a4964f 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_configuration.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -19,13 +19,13 @@ class SearchServiceClientConfiguration: # pylint: disable=too-many-instance-att :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str - :keyword api_version: Api Version. 
Default value is "2025-08-01-preview". Note that overriding + :keyword api_version: Api Version. Default value is "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ def __init__(self, endpoint: str, **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2025-08-01-preview") + api_version: str = kwargs.pop("api_version", "2025-11-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_search_service_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_search_service_client.py index d0f1b99796c9..a02642ae0fa0 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_search_service_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_search_service_client.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -20,7 +20,7 @@ DataSourcesOperations, IndexersOperations, IndexesOperations, - KnowledgeAgentsOperations, + KnowledgeBasesOperations, KnowledgeSourcesOperations, SkillsetsOperations, SynonymMapsOperations, @@ -32,9 +32,9 @@ class SearchServiceClient(_SearchServiceClientOperationsMixin): # pylint: disab """Client that can be used to manage and query indexes and documents, as well as manage other resources, on a search service. - :ivar knowledge_agents: KnowledgeAgentsOperations operations - :vartype knowledge_agents: - azure.search.documents.indexes.aio.operations.KnowledgeAgentsOperations + :ivar knowledge_bases: KnowledgeBasesOperations operations + :vartype knowledge_bases: + azure.search.documents.indexes.aio.operations.KnowledgeBasesOperations :ivar knowledge_sources: KnowledgeSourcesOperations operations :vartype knowledge_sources: azure.search.documents.indexes.aio.operations.KnowledgeSourcesOperations @@ -52,7 +52,7 @@ class SearchServiceClient(_SearchServiceClientOperationsMixin): # pylint: disab :vartype aliases: azure.search.documents.indexes.aio.operations.AliasesOperations :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str - :keyword api_version: Api Version. Default value is "2025-08-01-preview". Note that overriding + :keyword api_version: Api Version. Default value is "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ @@ -86,9 +86,7 @@ def __init__( # pylint: disable=missing-client-constructor-parameter-credential self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) self._serialize.client_side_validation = False - self.knowledge_agents = KnowledgeAgentsOperations( - self._client, self._config, self._serialize, self._deserialize - ) + self.knowledge_bases = KnowledgeBasesOperations(self._client, self._config, self._serialize, self._deserialize) self.knowledge_sources = KnowledgeSourcesOperations( self._client, self._config, self._serialize, self._deserialize ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/__init__.py index ff48959532fe..83f75c712c54 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/__init__.py @@ -1,7 +1,7 @@ # pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- # pylint: disable=wrong-import-position @@ -11,7 +11,7 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._knowledge_agents_operations import KnowledgeAgentsOperations # type: ignore +from ._knowledge_bases_operations import KnowledgeBasesOperations # type: ignore from ._knowledge_sources_operations import KnowledgeSourcesOperations # type: ignore from ._data_sources_operations import DataSourcesOperations # type: ignore from ._indexers_operations import IndexersOperations # type: ignore @@ -26,7 +26,7 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "KnowledgeAgentsOperations", + "KnowledgeBasesOperations", "KnowledgeSourcesOperations", "DataSourcesOperations", "IndexersOperations", diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_aliases_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_aliases_operations.py index 5a865916bad7..626f0452b62d 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_aliases_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_aliases_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -35,9 +35,9 @@ ) from .._configuration import SearchServiceClientConfiguration -List = list T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class AliasesOperations: @@ -181,7 +181,10 @@ async def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchAlias", pipeline_response.http_response) @@ -280,7 +283,10 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -458,7 +464,10 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchAlias", pipeline_response.http_response) @@ -538,7 +547,10 @@ async def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -600,7 +612,10 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchAlias", pipeline_response.http_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py index 7afca4d03c25..a8b3c2cfaeb6 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator 
(autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -32,9 +32,9 @@ ) from .._configuration import SearchServiceClientConfiguration -List = list T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class DataSourcesOperations: @@ -240,7 +240,10 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexerDataSource", pipeline_response.http_response) @@ -319,7 +322,10 @@ async def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -381,7 +387,10 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexerDataSource", pipeline_response.http_response) @@ -449,7 +458,10 @@ async def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("ListDataSourcesResult", pipeline_response.http_response) @@ -581,7 +593,10 @@ async def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexerDataSource", pipeline_response.http_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py index 322e8ff264c7..0a3823b7d167 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py @@ -1,7 +1,7 @@ # pylint: disable=too-many-lines # coding=utf-8 # 
-------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -38,9 +38,9 @@ ) from .._configuration import SearchServiceClientConfiguration -List = list T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class IndexersOperations: @@ -118,7 +118,10 @@ async def reset( if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -273,7 +276,10 @@ async def reset_docs( if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -410,7 +416,10 @@ async def resync( if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -472,7 +481,10 @@ async def run( if response.status_code not in [202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -675,7 +687,10 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexer", pipeline_response.http_response) @@ -754,7 +769,10 @@ async def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -816,7 +834,10 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexer", pipeline_response.http_response) @@ -884,7 +905,10 @@ async def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("ListIndexersResult", pipeline_response.http_response) @@ -1016,7 +1040,10 @@ async def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexer", pipeline_response.http_response) @@ -1082,7 +1109,10 @@ async def get_status( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexerStatus", pipeline_response.http_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py index 4ae89238f423..7ca6a1309afc 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -37,9 +37,9 @@ ) from .._configuration import SearchServiceClientConfiguration -List = list T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class IndexesOperations: @@ -183,7 +183,10 @@ async def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndex", pipeline_response.http_response) @@ -287,7 +290,10 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -487,7 +493,10 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndex", pipeline_response.http_response) @@ -568,7 +577,10 @@ async def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -630,7 +642,10 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndex", pipeline_response.http_response) @@ -696,7 +711,10 @@ async def get_statistics( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("GetIndexStatisticsResult", pipeline_response.http_response) @@ -838,7 +856,10 @@ async def analyze( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + 
_models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("AnalyzeResult", pipeline_response.http_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_knowledge_agents_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_knowledge_bases_operations.py similarity index 77% rename from sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_knowledge_agents_operations.py rename to sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_knowledge_bases_operations.py index 9f6db1526f1b..a03399e532e5 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_knowledge_agents_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_knowledge_bases_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -26,7 +26,7 @@ from ... import models as _models from ..._utils.serialization import Deserializer, Serializer -from ...operations._knowledge_agents_operations import ( +from ...operations._knowledge_bases_operations import ( build_create_or_update_request, build_create_request, build_delete_request, @@ -35,19 +35,19 @@ ) from .._configuration import SearchServiceClientConfiguration -List = list T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list -class KnowledgeAgentsOperations: +class KnowledgeBasesOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.search.documents.indexes.aio.SearchServiceClient`'s - :attr:`knowledge_agents` attribute. + :attr:`knowledge_bases` attribute. """ models = _models @@ -62,25 +62,25 @@ def __init__(self, *args, **kwargs) -> None: @overload async def create_or_update( self, - agent_name: str, + knowledge_base_name: str, prefer: Union[str, _models.Enum0], - knowledge_agent: _models.KnowledgeAgent, + knowledge_base: _models.KnowledgeBase, if_match: Optional[str] = None, if_none_match: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.KnowledgeAgent: - """Creates a new agent or updates an agent if it already exists. + ) -> _models.KnowledgeBase: + """Creates a new knowledge base or updates a knowledge base if it already exists. - :param agent_name: The name of the agent to create or update. Required. - :type agent_name: str + :param knowledge_base_name: The name of the knowledge base to create or update. Required. + :type knowledge_base_name: str :param prefer: For HTTP PUT requests, instructs the service to return the created/updated resource on success. "return=representation" Required.
:type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param knowledge_agent: The definition of the agent to create or update. Required. - :type knowledge_agent: ~azure.search.documents.indexes.models.KnowledgeAgent + :param knowledge_base: The definition of the knowledge base to create or update. Required. + :type knowledge_base: ~azure.search.documents.indexes.models.KnowledgeBase :param if_match: Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. Default value is None. :type if_match: str @@ -92,33 +92,33 @@ async def create_or_update( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: KnowledgeAgent or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :return: KnowledgeBase or the result of cls(response) + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: """ @overload async def create_or_update( self, - agent_name: str, + knowledge_base_name: str, prefer: Union[str, _models.Enum0], - knowledge_agent: IO[bytes], + knowledge_base: IO[bytes], if_match: Optional[str] = None, if_none_match: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.KnowledgeAgent: - """Creates a new agent or updates an agent if it already exists. + ) -> _models.KnowledgeBase: + """Creates a new knowledge base or updates a knowledge base if it already exists. - :param agent_name: The name of the agent to create or update. Required. - :type agent_name: str + :param knowledge_base_name: The name of the knowledge base to create or update. Required. + :type knowledge_base_name: str :param prefer: For HTTP PUT requests, instructs the service to return the created/updated resource on success. "return=representation" Required. :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param knowledge_agent: The definition of the agent to create or update. Required. - :type knowledge_agent: IO[bytes] + :param knowledge_base: The definition of the knowledge base to create or update. Required. + :type knowledge_base: IO[bytes] :param if_match: Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. Default value is None. :type if_match: str @@ -130,32 +130,32 @@ async def create_or_update( :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: KnowledgeAgent or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :return: KnowledgeBase or the result of cls(response) + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async async def create_or_update( self, - agent_name: str, + knowledge_base_name: str, prefer: Union[str, _models.Enum0], - knowledge_agent: Union[_models.KnowledgeAgent, IO[bytes]], + knowledge_base: Union[_models.KnowledgeBase, IO[bytes]], if_match: Optional[str] = None, if_none_match: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.KnowledgeAgent: - """Creates a new agent or updates an agent if it already exists.
+ ) -> _models.KnowledgeBase: + """Creates a new knowledge base or updates a knowledge base if it already exists. - :param agent_name: The name of the agent to create or update. Required. - :type agent_name: str + :param knowledge_base_name: The name of the knowledge base to create or update. Required. + :type knowledge_base_name: str :param prefer: For HTTP PUT requests, instructs the service to return the created/updated resource on success. "return=representation" Required. :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param knowledge_agent: The definition of the agent to create or update. Is either a - KnowledgeAgent type or a IO[bytes] type. Required. - :type knowledge_agent: ~azure.search.documents.indexes.models.KnowledgeAgent or IO[bytes] + :param knowledge_base: The definition of the knowledge base to create or update. Is either a + KnowledgeBase type or a IO[bytes] type. Required. + :type knowledge_base: ~azure.search.documents.indexes.models.KnowledgeBase or IO[bytes] :param if_match: Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. Default value is None. :type if_match: str @@ -164,8 +164,8 @@ async def create_or_update( :type if_none_match: str :param request_options: Parameter group. Default value is None. :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: KnowledgeAgent or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :return: KnowledgeBase or the result of cls(response) + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -181,7 +181,7 @@ async def create_or_update( api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.KnowledgeAgent] = kwargs.pop("cls", None) + cls: ClsType[_models.KnowledgeBase] = kwargs.pop("cls", None) _x_ms_client_request_id = None if request_options is not None: @@ -189,13 +189,13 @@ async def create_or_update( content_type = content_type or "application/json" _json = None _content = None - if isinstance(knowledge_agent, (IOBase, bytes)): - _content = knowledge_agent + if isinstance(knowledge_base, (IOBase, bytes)): + _content = knowledge_base else: - _json = self._serialize.body(knowledge_agent, "KnowledgeAgent") + _json = self._serialize.body(knowledge_base, "KnowledgeBase") _request = build_create_or_update_request( - agent_name=agent_name, + knowledge_base_name=knowledge_base_name, prefer=prefer, x_ms_client_request_id=_x_ms_client_request_id, if_match=if_match, @@ -221,10 +221,13 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize("KnowledgeAgent", pipeline_response.http_response) + deserialized = self._deserialize("KnowledgeBase", pipeline_response.http_response) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -234,16 +237,16 @@ async def create_or_update( @distributed_trace_async async def delete( self, -
agent_name: str, + knowledge_base_name: str, if_match: Optional[str] = None, if_none_match: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any ) -> None: - """Deletes an existing agent. + """Deletes an existing knowledge base. - :param agent_name: The name of the agent to delete. Required. - :type agent_name: str + :param knowledge_base_name: The name of the knowledge base to delete. Required. + :type knowledge_base_name: str :param if_match: Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. Default value is None. :type if_match: str @@ -275,7 +278,7 @@ async def delete( _x_ms_client_request_id = request_options.x_ms_client_request_id _request = build_delete_request( - agent_name=agent_name, + knowledge_base_name=knowledge_base_name, x_ms_client_request_id=_x_ms_client_request_id, if_match=if_match, if_none_match=if_none_match, @@ -297,7 +300,10 @@ async def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -305,16 +311,16 @@ async def delete( @distributed_trace_async async def get( - self, agent_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.KnowledgeAgent: - """Retrieves an agent definition. + self, knowledge_base_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any + ) -> _models.KnowledgeBase: + """Retrieves a knowledge base definition. - :param agent_name: The name of the agent to retrieve. Required. - :type agent_name: str + :param knowledge_base_name: The name of the knowledge base to retrieve. Required. + :type knowledge_base_name: str :param request_options: Parameter group. Default value is None.
:type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: KnowledgeAgent or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :return: KnowledgeBase or the result of cls(response) + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -329,14 +335,14 @@ async def get( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.KnowledgeAgent] = kwargs.pop("cls", None) + cls: ClsType[_models.KnowledgeBase] = kwargs.pop("cls", None) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id _request = build_get_request( - agent_name=agent_name, + knowledge_base_name=knowledge_base_name, x_ms_client_request_id=_x_ms_client_request_id, api_version=api_version, headers=_headers, @@ -356,10 +362,13 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize("KnowledgeAgent", pipeline_response.http_response) + deserialized = self._deserialize("KnowledgeBase", pipeline_response.http_response) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -369,21 +378,21 @@ async def get( @distributed_trace def list( self, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> AsyncItemPaged["_models.KnowledgeAgent"]: - """Lists all agents available for a search service. + ) -> AsyncItemPaged["_models.KnowledgeBase"]: + """Lists all knowledge bases available for a search service. :param request_options: Parameter group. Default value is None. 
:type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: An iterator like instance of either KnowledgeAgent or the result of cls(response) + :return: An iterator like instance of either KnowledgeBase or the result of cls(response) :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.search.documents.indexes.models.KnowledgeAgent] + ~azure.core.async_paging.AsyncItemPaged[~azure.search.documents.indexes.models.KnowledgeBase] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.ListKnowledgeAgentsResult] = kwargs.pop("cls", None) + cls: ClsType[_models.ListKnowledgeBasesResult] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -435,8 +444,8 @@ def prepare_request(next_link=None): return _request async def extract_data(pipeline_response): - deserialized = self._deserialize("ListKnowledgeAgentsResult", pipeline_response) - list_of_elem = deserialized.knowledge_agents + deserialized = self._deserialize("ListKnowledgeBasesResult", pipeline_response) + list_of_elem = deserialized.knowledge_bases if cls: list_of_elem = cls(list_of_elem) # type: ignore return None, AsyncList(list_of_elem) @@ -452,7 +461,10 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -462,65 +474,65 @@ async def get_next(next_link=None): @overload async def create( self, - knowledge_agent: _models.KnowledgeAgent, + knowledge_base: _models.KnowledgeBase, request_options: Optional[_models.RequestOptions] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.KnowledgeAgent: - """Creates a new agent. + ) -> _models.KnowledgeBase: + """Creates a new knowledge base. - :param knowledge_agent: The definition of the agent to create. Required. - :type knowledge_agent: ~azure.search.documents.indexes.models.KnowledgeAgent + :param knowledge_base: The definition of the knowledge base to create. Required. + :type knowledge_base: ~azure.search.documents.indexes.models.KnowledgeBase :param request_options: Parameter group. Default value is None. :type request_options: ~azure.search.documents.indexes.models.RequestOptions :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: KnowledgeAgent or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :return: KnowledgeBase or the result of cls(response) + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: """ @overload async def create( self, - knowledge_agent: IO[bytes], + knowledge_base: IO[bytes], request_options: Optional[_models.RequestOptions] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.KnowledgeAgent: - """Creates a new agent. + ) -> _models.KnowledgeBase: + """Creates a new knowledge base. 
- :param knowledge_agent: The definition of the agent to create. Required. - :type knowledge_agent: IO[bytes] + :param knowledge_base: The definition of the knowledge base to create. Required. + :type knowledge_base: IO[bytes] :param request_options: Parameter group. Default value is None. :type request_options: ~azure.search.documents.indexes.models.RequestOptions :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: KnowledgeAgent or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :return: KnowledgeBase or the result of cls(response) + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async async def create( self, - knowledge_agent: Union[_models.KnowledgeAgent, IO[bytes]], + knowledge_base: Union[_models.KnowledgeBase, IO[bytes]], request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.KnowledgeAgent: - """Creates a new agent. + ) -> _models.KnowledgeBase: + """Creates a new knowledge base. - :param knowledge_agent: The definition of the agent to create. Is either a KnowledgeAgent type - or a IO[bytes] type. Required. - :type knowledge_agent: ~azure.search.documents.indexes.models.KnowledgeAgent or IO[bytes] + :param knowledge_base: The definition of the knowledge base to create. Is either a + KnowledgeBase type or a IO[bytes] type. Required. + :type knowledge_base: ~azure.search.documents.indexes.models.KnowledgeBase or IO[bytes] :param request_options: Parameter group. Default value is None. :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: KnowledgeAgent or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :return: KnowledgeBase or the result of cls(response) + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -536,7 +548,7 @@ async def create( api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.KnowledgeAgent] = kwargs.pop("cls", None) + cls: ClsType[_models.KnowledgeBase] = kwargs.pop("cls", None) _x_ms_client_request_id = None if request_options is not None: @@ -544,10 +556,10 @@ async def create( content_type = content_type or "application/json" _json = None _content = None - if isinstance(knowledge_agent, (IOBase, bytes)): - _content = knowledge_agent + if isinstance(knowledge_base, (IOBase, bytes)): + _content = knowledge_base else: - _json = self._serialize.body(knowledge_agent, "KnowledgeAgent") + _json = self._serialize.body(knowledge_base, "KnowledgeBase") _request = build_create_request( x_ms_client_request_id=_x_ms_client_request_id, @@ -572,10 +584,13 @@ async def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize("KnowledgeAgent", pipeline_response.http_response) + deserialized = 
self._deserialize("KnowledgeBase", pipeline_response.http_response) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_knowledge_sources_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_knowledge_sources_operations.py index 7ffab10d66d4..c99490808ea2 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_knowledge_sources_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_knowledge_sources_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -31,13 +31,14 @@ build_create_request, build_delete_request, build_get_request, + build_get_status_request, build_list_request, ) from .._configuration import SearchServiceClientConfiguration -List = list T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class KnowledgeSourcesOperations: @@ -221,7 +222,10 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("KnowledgeSource", pipeline_response.http_response) @@ -297,7 +301,10 @@ async def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -356,7 +363,10 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("KnowledgeSource", pipeline_response.http_response) @@ -452,7 +462,10 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -572,7 +585,10 @@ async def create( if response.status_code not 
in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("KnowledgeSource", pipeline_response.http_response) @@ -581,3 +597,69 @@ async def create( return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + @distributed_trace_async + async def get_status( + self, source_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any + ) -> _models.KnowledgeSourceStatus: + """Returns the current status and synchronization history of a knowledge source. + + :param source_name: The name of the knowledge source for which to retrieve status. Required. + :type source_name: str + :param request_options: Parameter group. Default value is None. + :type request_options: ~azure.search.documents.indexes.models.RequestOptions + :return: KnowledgeSourceStatus or the result of cls(response) + :rtype: ~azure.search.documents.indexes.models.KnowledgeSourceStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + cls: ClsType[_models.KnowledgeSourceStatus] = kwargs.pop("cls", None) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + + _request = build_get_status_request( + source_name=source_name, + x_ms_client_request_id=_x_ms_client_request_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize("KnowledgeSourceStatus", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_service_client_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_service_client_operations.py index 3fc8641520c8..864c9d7d7cce 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_service_client_operations.py +++ 
b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_service_client_operations.py @@ -1,7 +1,7 @@ # pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -34,6 +34,7 @@ T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class _SearchServiceClientOperationsMixin( @@ -90,7 +91,10 @@ async def get_service_statistics( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchServiceStatistics", pipeline_response.http_response) @@ -187,7 +191,10 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) return pipeline_response diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py index eabcb91378bf..c644d5b710be 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
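Editorial aside (not part of the patch): the regenerated async operation groups above rename the knowledge-agent surface to knowledge bases and add a get_status operation on knowledge sources. The sketch below is purely illustrative; it assumes an already-constructed generated async SearchServiceClient whose operation groups are exposed as `knowledge_bases` and `knowledge_sources` (those attribute names and the helper function name are assumptions, not shown in this diff), and it only calls signatures visible in the hunks above.

from azure.search.documents.indexes._generated import models as _models


async def show_knowledge_resources(client, source_name: str) -> None:
    # Assumed attribute names: `knowledge_bases` / `knowledge_sources` map to the
    # regenerated operation groups shown above.
    # list() now pages KnowledgeBase objects instead of KnowledgeAgent.
    async for base in client.knowledge_bases.list():
        print("knowledge base:", base.name)

    # get_status is the operation added in this patch; it returns a
    # KnowledgeSourceStatus describing the source's synchronization history.
    status: _models.KnowledgeSourceStatus = await client.knowledge_sources.get_status(source_name)
    print("source status:", status.as_dict())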
# -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -33,9 +33,9 @@ ) from .._configuration import SearchServiceClientConfiguration -List = list T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class SkillsetsOperations: @@ -256,7 +256,10 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexerSkillset", pipeline_response.http_response) @@ -335,7 +338,10 @@ async def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -397,7 +403,10 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexerSkillset", pipeline_response.http_response) @@ -465,7 +474,10 @@ async def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("ListSkillsetsResult", pipeline_response.http_response) @@ -599,7 +611,10 @@ async def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexerSkillset", pipeline_response.http_response) @@ -741,7 +756,10 @@ async def reset_skills( if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py index c226c1d06f96..6d7b65b1a5b5 100644 --- 
a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -32,9 +32,9 @@ ) from .._configuration import SearchServiceClientConfiguration -List = list T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class SynonymMapsOperations: @@ -227,7 +227,10 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SynonymMap", pipeline_response.http_response) @@ -306,7 +309,10 @@ async def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -368,7 +374,10 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SynonymMap", pipeline_response.http_response) @@ -436,7 +445,10 @@ async def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("ListSynonymMapsResult", pipeline_response.http_response) @@ -568,7 +580,10 @@ async def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SynonymMap", pipeline_response.http_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/__init__.py 
b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/__init__.py index a376f02c866a..fd05db95b410 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- # pylint: disable=wrong-import-position @@ -12,6 +12,7 @@ from ._models_py3 import ( # type: ignore + AIServices, AIServicesAccountIdentity, AIServicesAccountKey, AIServicesVisionParameters, @@ -44,7 +45,10 @@ CognitiveServicesAccountKey, CommonGramTokenFilter, CommonModelParameters, + CompletedSynchronizationState, ConditionalSkill, + ContentUnderstandingSkill, + ContentUnderstandingSkillChunkingProperties, CorsOptions, CustomAnalyzer, CustomEntity, @@ -84,9 +88,14 @@ HnswParameters, ImageAnalysisSkill, IndexStatisticsSummary, + IndexedOneLakeKnowledgeSource, + IndexedOneLakeKnowledgeSourceParameters, + IndexedSharePointKnowledgeSource, + IndexedSharePointKnowledgeSourceParameters, IndexerCurrentState, IndexerExecutionResult, IndexerResyncBody, + IndexerRuntime, IndexingParameters, IndexingParametersConfiguration, IndexingSchedule, @@ -96,13 +105,20 @@ KeywordMarkerTokenFilter, KeywordTokenizer, KeywordTokenizerV2, - KnowledgeAgent, - KnowledgeAgentAzureOpenAIModel, - KnowledgeAgentModel, - KnowledgeAgentOutputConfiguration, - KnowledgeAgentRequestLimits, + KnowledgeBase, + KnowledgeBaseAzureOpenAIModel, + KnowledgeBaseModel, + KnowledgeRetrievalLowReasoningEffort, + KnowledgeRetrievalMediumReasoningEffort, + KnowledgeRetrievalMinimalReasoningEffort, + KnowledgeRetrievalReasoningEffort, KnowledgeSource, + KnowledgeSourceAzureOpenAIVectorizer, + KnowledgeSourceIngestionParameters, KnowledgeSourceReference, + KnowledgeSourceStatistics, + KnowledgeSourceStatus, + KnowledgeSourceVectorizer, LanguageDetectionSkill, LengthTokenFilter, LexicalAnalyzer, @@ -114,7 +130,7 @@ ListIndexStatsSummary, ListIndexersResult, ListIndexesResult, - ListKnowledgeAgentsResult, + ListKnowledgeBasesResult, ListKnowledgeSourcesResult, ListSkillsetsResult, ListSynonymMapsResult, @@ -141,6 +157,8 @@ PatternReplaceTokenFilter, PatternTokenizer, PhoneticTokenFilter, + RemoteSharePointKnowledgeSource, + RemoteSharePointKnowledgeSourceParameters, RequestOptions, RescoringOptions, ResourceCounter, @@ -151,6 +169,7 @@ SearchAlias, SearchField, SearchIndex, + SearchIndexFieldReference, SearchIndexKnowledgeSource, SearchIndexKnowledgeSourceParameters, SearchIndexer, @@ -188,6 +207,7 @@ SemanticSearch, SentimentSkill, SentimentSkillV3, + ServiceIndexersRuntime, ShaperSkill, ShingleTokenFilter, SimilarityAlgorithm, @@ -200,6 +220,7 @@ StemmerTokenFilter, StopAnalyzer, StopwordsTokenFilter, + SynchronizationState, SynonymMap, SynonymTokenFilter, TagScoringFunction, @@ -219,6 +240,10 @@ WebApiSkill, WebApiVectorizer, WebApiVectorizerParameters, + WebKnowledgeSource, + WebKnowledgeSourceDomain, + WebKnowledgeSourceDomains, + WebKnowledgeSourceParameters, WordDelimiterTokenFilter, ) @@ -233,6 +258,8 @@ 
ChatCompletionExtraParametersBehavior, ChatCompletionResponseFormatType, CjkBigramTokenFilterScripts, + ContentUnderstandingSkillChunkingUnit, + ContentUnderstandingSkillExtractionOptions, CustomEntityLookupSkillLanguage, DocumentIntelligenceLayoutSkillChunkingUnit, DocumentIntelligenceLayoutSkillExtractionOptions, @@ -246,6 +273,7 @@ ImageAnalysisSkillLanguage, ImageDetail, IndexProjectionMode, + IndexedSharePointContainerName, IndexerExecutionEnvironment, IndexerExecutionStatus, IndexerExecutionStatusDetail, @@ -254,9 +282,13 @@ IndexerStatus, IndexingMode, KeyPhraseExtractionSkillLanguage, - KnowledgeAgentModelKind, - KnowledgeAgentOutputConfigurationModality, + KnowledgeBaseModelKind, + KnowledgeRetrievalOutputMode, + KnowledgeRetrievalReasoningEffortKind, + KnowledgeSourceContentExtractionMode, + KnowledgeSourceIngestionPermissionOption, KnowledgeSourceKind, + KnowledgeSourceSynchronizationStatus, LexicalAnalyzerName, LexicalNormalizerName, LexicalTokenizerName, @@ -301,6 +333,7 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ + "AIServices", "AIServicesAccountIdentity", "AIServicesAccountKey", "AIServicesVisionParameters", @@ -333,7 +366,10 @@ "CognitiveServicesAccountKey", "CommonGramTokenFilter", "CommonModelParameters", + "CompletedSynchronizationState", "ConditionalSkill", + "ContentUnderstandingSkill", + "ContentUnderstandingSkillChunkingProperties", "CorsOptions", "CustomAnalyzer", "CustomEntity", @@ -373,9 +409,14 @@ "HnswParameters", "ImageAnalysisSkill", "IndexStatisticsSummary", + "IndexedOneLakeKnowledgeSource", + "IndexedOneLakeKnowledgeSourceParameters", + "IndexedSharePointKnowledgeSource", + "IndexedSharePointKnowledgeSourceParameters", "IndexerCurrentState", "IndexerExecutionResult", "IndexerResyncBody", + "IndexerRuntime", "IndexingParameters", "IndexingParametersConfiguration", "IndexingSchedule", @@ -385,13 +426,20 @@ "KeywordMarkerTokenFilter", "KeywordTokenizer", "KeywordTokenizerV2", - "KnowledgeAgent", - "KnowledgeAgentAzureOpenAIModel", - "KnowledgeAgentModel", - "KnowledgeAgentOutputConfiguration", - "KnowledgeAgentRequestLimits", + "KnowledgeBase", + "KnowledgeBaseAzureOpenAIModel", + "KnowledgeBaseModel", + "KnowledgeRetrievalLowReasoningEffort", + "KnowledgeRetrievalMediumReasoningEffort", + "KnowledgeRetrievalMinimalReasoningEffort", + "KnowledgeRetrievalReasoningEffort", "KnowledgeSource", + "KnowledgeSourceAzureOpenAIVectorizer", + "KnowledgeSourceIngestionParameters", "KnowledgeSourceReference", + "KnowledgeSourceStatistics", + "KnowledgeSourceStatus", + "KnowledgeSourceVectorizer", "LanguageDetectionSkill", "LengthTokenFilter", "LexicalAnalyzer", @@ -403,7 +451,7 @@ "ListIndexStatsSummary", "ListIndexersResult", "ListIndexesResult", - "ListKnowledgeAgentsResult", + "ListKnowledgeBasesResult", "ListKnowledgeSourcesResult", "ListSkillsetsResult", "ListSynonymMapsResult", @@ -430,6 +478,8 @@ "PatternReplaceTokenFilter", "PatternTokenizer", "PhoneticTokenFilter", + "RemoteSharePointKnowledgeSource", + "RemoteSharePointKnowledgeSourceParameters", "RequestOptions", "RescoringOptions", "ResourceCounter", @@ -440,6 +490,7 @@ "SearchAlias", "SearchField", "SearchIndex", + "SearchIndexFieldReference", "SearchIndexKnowledgeSource", "SearchIndexKnowledgeSourceParameters", "SearchIndexer", @@ -477,6 +528,7 @@ "SemanticSearch", "SentimentSkill", "SentimentSkillV3", + "ServiceIndexersRuntime", "ShaperSkill", "ShingleTokenFilter", "SimilarityAlgorithm", @@ -489,6 +541,7 @@ "StemmerTokenFilter", "StopAnalyzer", "StopwordsTokenFilter", + 
"SynchronizationState", "SynonymMap", "SynonymTokenFilter", "TagScoringFunction", @@ -508,6 +561,10 @@ "WebApiSkill", "WebApiVectorizer", "WebApiVectorizerParameters", + "WebKnowledgeSource", + "WebKnowledgeSourceDomain", + "WebKnowledgeSourceDomains", + "WebKnowledgeSourceParameters", "WordDelimiterTokenFilter", "AIFoundryModelCatalogName", "AzureOpenAIModelName", @@ -519,6 +576,8 @@ "ChatCompletionExtraParametersBehavior", "ChatCompletionResponseFormatType", "CjkBigramTokenFilterScripts", + "ContentUnderstandingSkillChunkingUnit", + "ContentUnderstandingSkillExtractionOptions", "CustomEntityLookupSkillLanguage", "DocumentIntelligenceLayoutSkillChunkingUnit", "DocumentIntelligenceLayoutSkillExtractionOptions", @@ -532,6 +591,7 @@ "ImageAnalysisSkillLanguage", "ImageDetail", "IndexProjectionMode", + "IndexedSharePointContainerName", "IndexerExecutionEnvironment", "IndexerExecutionStatus", "IndexerExecutionStatusDetail", @@ -540,9 +600,13 @@ "IndexerStatus", "IndexingMode", "KeyPhraseExtractionSkillLanguage", - "KnowledgeAgentModelKind", - "KnowledgeAgentOutputConfigurationModality", + "KnowledgeBaseModelKind", + "KnowledgeRetrievalOutputMode", + "KnowledgeRetrievalReasoningEffortKind", + "KnowledgeSourceContentExtractionMode", + "KnowledgeSourceIngestionPermissionOption", "KnowledgeSourceKind", + "KnowledgeSourceSynchronizationStatus", "LexicalAnalyzerName", "LexicalNormalizerName", "LexicalTokenizerName", diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py index fb0e8ee558b2..3e3e973cc671 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py @@ -1,19 +1,51 @@ # pylint: disable=line-too-long,useless-suppression,too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping import datetime -from typing import Any, Optional, TYPE_CHECKING, Union, MutableMapping +from typing import Any, Optional, TYPE_CHECKING, Union from .._utils import serialization as _serialization -JSON = MutableMapping[str, Any] - if TYPE_CHECKING: from .. import models as _models +JSON = MutableMapping[str, Any] + + +class AIServices(_serialization.Model): + """Parameters for Azure Blob Storage knowledge source. + + All required parameters must be populated in order to send to server. + + :ivar uri: The URI of the AI Services endpoint. Required. + :vartype uri: str + :ivar api_key: The API key for accessing AI Services. + :vartype api_key: str + """ + + _validation = { + "uri": {"required": True}, + } + + _attribute_map = { + "uri": {"key": "uri", "type": "str"}, + "api_key": {"key": "apiKey", "type": "str"}, + } + + def __init__(self, *, uri: str, api_key: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword uri: The URI of the AI Services endpoint. Required. 
+ :paramtype uri: str + :keyword api_key: The API key for accessing AI Services. + :paramtype api_key: str + """ + super().__init__(**kwargs) + self.uri = uri + self.api_key = api_key class CognitiveServicesAccount(_serialization.Model): @@ -667,7 +699,8 @@ class KnowledgeSource(_serialization.Model): """Represents a knowledge source definition. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - AzureBlobKnowledgeSource, SearchIndexKnowledgeSource + AzureBlobKnowledgeSource, IndexedOneLakeKnowledgeSource, IndexedSharePointKnowledgeSource, + RemoteSharePointKnowledgeSource, SearchIndexKnowledgeSource, WebKnowledgeSource All required parameters must be populated in order to send to server. @@ -675,19 +708,19 @@ class KnowledgeSource(_serialization.Model): :vartype name: str :ivar description: Optional user-defined description. :vartype description: str - :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex" and - "azureBlob". + :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex", + "azureBlob", "web", "remoteSharePoint", "indexedSharePoint", and "indexedOneLake". :vartype kind: str or ~azure.search.documents.indexes.models.KnowledgeSourceKind - :ivar e_tag: The ETag of the agent. + :ivar e_tag: The ETag of the knowledge base. :vartype e_tag: str :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your agent definition - when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have - encrypted your agent definition, it will always remain encrypted. The search service will - ignore attempts to set this property to null. You can change this property as needed if you - want to rotate your encryption key; Your agent definition will be unaffected. Encryption with - customer-managed keys is not available for free search services, and is only available for paid - services created on or after January 1, 2019. + This key is used to provide an additional level of encryption-at-rest for your knowledge base + definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once + you have encrypted your knowledge base definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your knowledge base definition will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. 
:vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey """ @@ -704,7 +737,16 @@ class KnowledgeSource(_serialization.Model): "encryption_key": {"key": "encryptionKey", "type": "SearchResourceEncryptionKey"}, } - _subtype_map = {"kind": {"azureBlob": "AzureBlobKnowledgeSource", "searchIndex": "SearchIndexKnowledgeSource"}} + _subtype_map = { + "kind": { + "azureBlob": "AzureBlobKnowledgeSource", + "indexedOneLake": "IndexedOneLakeKnowledgeSource", + "indexedSharePoint": "IndexedSharePointKnowledgeSource", + "remoteSharePoint": "RemoteSharePointKnowledgeSource", + "searchIndex": "SearchIndexKnowledgeSource", + "web": "WebKnowledgeSource", + } + } def __init__( self, @@ -720,16 +762,16 @@ def __init__( :paramtype name: str :keyword description: Optional user-defined description. :paramtype description: str - :keyword e_tag: The ETag of the agent. + :keyword e_tag: The ETag of the knowledge base. :paramtype e_tag: str :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your agent definition - when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have - encrypted your agent definition, it will always remain encrypted. The search service will - ignore attempts to set this property to null. You can change this property as needed if you - want to rotate your encryption key; Your agent definition will be unaffected. Encryption with - customer-managed keys is not available for free search services, and is only available for paid - services created on or after January 1, 2019. + This key is used to provide an additional level of encryption-at-rest for your knowledge base + definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once + you have encrypted your knowledge base definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your knowledge base definition will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey """ super().__init__(**kwargs) @@ -749,19 +791,19 @@ class AzureBlobKnowledgeSource(KnowledgeSource): :vartype name: str :ivar description: Optional user-defined description. :vartype description: str - :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex" and - "azureBlob". + :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex", + "azureBlob", "web", "remoteSharePoint", "indexedSharePoint", and "indexedOneLake". :vartype kind: str or ~azure.search.documents.indexes.models.KnowledgeSourceKind - :ivar e_tag: The ETag of the agent. + :ivar e_tag: The ETag of the knowledge base. :vartype e_tag: str :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your agent definition - when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have - encrypted your agent definition, it will always remain encrypted. The search service will - ignore attempts to set this property to null. 
You can change this property as needed if you - want to rotate your encryption key; Your agent definition will be unaffected. Encryption with - customer-managed keys is not available for free search services, and is only available for paid - services created on or after January 1, 2019. + This key is used to provide an additional level of encryption-at-rest for your knowledge base + definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once + you have encrypted your knowledge base definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your knowledge base definition will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey :ivar azure_blob_parameters: The type of the knowledge source. Required. :vartype azure_blob_parameters: @@ -798,16 +840,16 @@ def __init__( :paramtype name: str :keyword description: Optional user-defined description. :paramtype description: str - :keyword e_tag: The ETag of the agent. + :keyword e_tag: The ETag of the knowledge base. :paramtype e_tag: str :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your agent definition - when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have - encrypted your agent definition, it will always remain encrypted. The search service will - ignore attempts to set this property to null. You can change this property as needed if you - want to rotate your encryption key; Your agent definition will be unaffected. Encryption with - customer-managed keys is not available for free search services, and is only available for paid - services created on or after January 1, 2019. + This key is used to provide an additional level of encryption-at-rest for your knowledge base + definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once + you have encrypted your knowledge base definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your knowledge base definition will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey :keyword azure_blob_parameters: The type of the knowledge source. Required. :paramtype azure_blob_parameters: @@ -825,8 +867,6 @@ class AzureBlobKnowledgeSourceParameters(_serialization.Model): All required parameters must be populated in order to send to server. - :ivar identity: An explicit identity to use for this knowledge source. - :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity :ivar connection_string: Key-based connection string or the ResourceId format if using a managed identity. Required. 
:vartype connection_string: str @@ -834,17 +874,14 @@ class AzureBlobKnowledgeSourceParameters(_serialization.Model): :vartype container_name: str :ivar folder_path: Optional folder path within the container. :vartype folder_path: str - :ivar embedding_model: Optional vectorizer configuration for vectorizing content. - :vartype embedding_model: ~azure.search.documents.indexes.models.VectorSearchVectorizer - :ivar chat_completion_model: Optional chat completion model for image verbalization or context - extraction. - :vartype chat_completion_model: ~azure.search.documents.indexes.models.KnowledgeAgentModel - :ivar ingestion_schedule: Optional schedule for data ingestion. - :vartype ingestion_schedule: ~azure.search.documents.indexes.models.IndexingSchedule + :ivar is_adls_gen2: Set to true if connecting to an ADLS Gen2 storage account. Default is + false. + :vartype is_adls_gen2: bool + :ivar ingestion_parameters: Consolidates all general ingestion settings. + :vartype ingestion_parameters: + ~azure.search.documents.indexes.models.KnowledgeSourceIngestionParameters :ivar created_resources: Resources created by the knowledge source. :vartype created_resources: dict[str, str] - :ivar disable_image_verbalization: Indicates whether image verbalization should be disabled. - :vartype disable_image_verbalization: bool """ _validation = { @@ -854,15 +891,12 @@ class AzureBlobKnowledgeSourceParameters(_serialization.Model): } _attribute_map = { - "identity": {"key": "identity", "type": "SearchIndexerDataIdentity"}, "connection_string": {"key": "connectionString", "type": "str"}, "container_name": {"key": "containerName", "type": "str"}, "folder_path": {"key": "folderPath", "type": "str"}, - "embedding_model": {"key": "embeddingModel", "type": "VectorSearchVectorizer"}, - "chat_completion_model": {"key": "chatCompletionModel", "type": "KnowledgeAgentModel"}, - "ingestion_schedule": {"key": "ingestionSchedule", "type": "IndexingSchedule"}, + "is_adls_gen2": {"key": "isADLSGen2", "type": "bool"}, + "ingestion_parameters": {"key": "ingestionParameters", "type": "KnowledgeSourceIngestionParameters"}, "created_resources": {"key": "createdResources", "type": "{str}"}, - "disable_image_verbalization": {"key": "disableImageVerbalization", "type": "bool"}, } def __init__( @@ -870,17 +904,12 @@ def __init__( *, connection_string: str, container_name: str, - identity: Optional["_models.SearchIndexerDataIdentity"] = None, folder_path: Optional[str] = None, - embedding_model: Optional["_models.VectorSearchVectorizer"] = None, - chat_completion_model: Optional["_models.KnowledgeAgentModel"] = None, - ingestion_schedule: Optional["_models.IndexingSchedule"] = None, - disable_image_verbalization: Optional[bool] = None, + is_adls_gen2: bool = False, + ingestion_parameters: Optional["_models.KnowledgeSourceIngestionParameters"] = None, **kwargs: Any ) -> None: """ - :keyword identity: An explicit identity to use for this knowledge source. - :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity :keyword connection_string: Key-based connection string or the ResourceId format if using a managed identity. Required. :paramtype connection_string: str @@ -888,26 +917,20 @@ def __init__( :paramtype container_name: str :keyword folder_path: Optional folder path within the container. :paramtype folder_path: str - :keyword embedding_model: Optional vectorizer configuration for vectorizing content. 
- :paramtype embedding_model: ~azure.search.documents.indexes.models.VectorSearchVectorizer - :keyword chat_completion_model: Optional chat completion model for image verbalization or - context extraction. - :paramtype chat_completion_model: ~azure.search.documents.indexes.models.KnowledgeAgentModel - :keyword ingestion_schedule: Optional schedule for data ingestion. - :paramtype ingestion_schedule: ~azure.search.documents.indexes.models.IndexingSchedule - :keyword disable_image_verbalization: Indicates whether image verbalization should be disabled. - :paramtype disable_image_verbalization: bool + :keyword is_adls_gen2: Set to true if connecting to an ADLS Gen2 storage account. Default is + false. + :paramtype is_adls_gen2: bool + :keyword ingestion_parameters: Consolidates all general ingestion settings. + :paramtype ingestion_parameters: + ~azure.search.documents.indexes.models.KnowledgeSourceIngestionParameters """ super().__init__(**kwargs) - self.identity = identity self.connection_string = connection_string self.container_name = container_name self.folder_path = folder_path - self.embedding_model = embedding_model - self.chat_completion_model = chat_completion_model - self.ingestion_schedule = ingestion_schedule + self.is_adls_gen2 = is_adls_gen2 + self.ingestion_parameters = ingestion_parameters self.created_resources: Optional[dict[str, str]] = None - self.disable_image_verbalization = disable_image_verbalization class AzureMachineLearningParameters(_serialization.Model): @@ -1004,9 +1027,9 @@ class SearchIndexerSkill(_serialization.Model): AzureMachineLearningSkill, WebApiSkill, AzureOpenAIEmbeddingSkill, CustomEntityLookupSkill, EntityRecognitionSkill, KeyPhraseExtractionSkill, LanguageDetectionSkill, MergeSkill, PIIDetectionSkill, SentimentSkill, SplitSkill, TextTranslationSkill, EntityLinkingSkill, - EntityRecognitionSkillV3, SentimentSkillV3, ConditionalSkill, DocumentExtractionSkill, - DocumentIntelligenceLayoutSkill, ShaperSkill, ImageAnalysisSkill, OcrSkill, - VisionVectorizeSkill + EntityRecognitionSkillV3, SentimentSkillV3, ConditionalSkill, ContentUnderstandingSkill, + DocumentExtractionSkill, DocumentIntelligenceLayoutSkill, ShaperSkill, ImageAnalysisSkill, + OcrSkill, VisionVectorizeSkill All required parameters must be populated in order to send to server. @@ -1063,6 +1086,7 @@ class SearchIndexerSkill(_serialization.Model): "#Microsoft.Skills.Text.V3.EntityRecognitionSkill": "EntityRecognitionSkillV3", "#Microsoft.Skills.Text.V3.SentimentSkill": "SentimentSkillV3", "#Microsoft.Skills.Util.ConditionalSkill": "ConditionalSkill", + "#Microsoft.Skills.Util.ContentUnderstandingSkill": "ContentUnderstandingSkill", "#Microsoft.Skills.Util.DocumentExtractionSkill": "DocumentExtractionSkill", "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill": "DocumentIntelligenceLayoutSkill", "#Microsoft.Skills.Util.ShaperSkill": "ShaperSkill", @@ -1307,8 +1331,8 @@ class AzureOpenAIVectorizerParameters(_serialization.Model): :vartype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity :ivar model_name: The name of the embedding model that is deployed at the provided deploymentId path. Known values are: "text-embedding-ada-002", "text-embedding-3-large", - "text-embedding-3-small", "gpt-4o", "gpt-4o-mini", "gpt-4.1", "gpt-4.1-mini", and - "gpt-4.1-nano". + "text-embedding-3-small", "gpt-4o", "gpt-4o-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", + "gpt-5", "gpt-5-mini", and "gpt-5-nano". 
:vartype model_name: str or ~azure.search.documents.indexes.models.AzureOpenAIModelName """ @@ -1341,8 +1365,8 @@ def __init__( :paramtype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity :keyword model_name: The name of the embedding model that is deployed at the provided deploymentId path. Known values are: "text-embedding-ada-002", "text-embedding-3-large", - "text-embedding-3-small", "gpt-4o", "gpt-4o-mini", "gpt-4.1", "gpt-4.1-mini", and - "gpt-4.1-nano". + "text-embedding-3-small", "gpt-4o", "gpt-4o-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", + "gpt-5", "gpt-5-mini", and "gpt-5-nano". :paramtype model_name: str or ~azure.search.documents.indexes.models.AzureOpenAIModelName """ super().__init__(**kwargs) @@ -1369,8 +1393,8 @@ class AzureOpenAIEmbeddingSkill(SearchIndexerSkill, AzureOpenAIVectorizerParamet :vartype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity :ivar model_name: The name of the embedding model that is deployed at the provided deploymentId path. Known values are: "text-embedding-ada-002", "text-embedding-3-large", - "text-embedding-3-small", "gpt-4o", "gpt-4o-mini", "gpt-4.1", "gpt-4.1-mini", and - "gpt-4.1-nano". + "text-embedding-3-small", "gpt-4o", "gpt-4o-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", + "gpt-5", "gpt-5-mini", and "gpt-5-nano". :vartype model_name: str or ~azure.search.documents.indexes.models.AzureOpenAIModelName :ivar odata_type: A URI fragment specifying the type of skill. Required. :vartype odata_type: str @@ -1443,8 +1467,8 @@ def __init__( :paramtype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity :keyword model_name: The name of the embedding model that is deployed at the provided deploymentId path. Known values are: "text-embedding-ada-002", "text-embedding-3-large", - "text-embedding-3-small", "gpt-4o", "gpt-4o-mini", "gpt-4.1", "gpt-4.1-mini", and - "gpt-4.1-nano". + "text-embedding-3-small", "gpt-4o", "gpt-4o-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", + "gpt-5", "gpt-5-mini", and "gpt-5-nano". :paramtype model_name: str or ~azure.search.documents.indexes.models.AzureOpenAIModelName :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, @@ -1596,16 +1620,6 @@ class VectorSearchCompression(_serialization.Model): :ivar kind: The name of the kind of compression method being configured for use with vector search. Required. Known values are: "scalarQuantization" and "binaryQuantization". :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchCompressionKind - :ivar rerank_with_original_vectors: If set to true, once the ordered set of results calculated - using compressed vectors are obtained, they will be reranked again by recalculating the - full-precision similarity scores. This will improve recall at the expense of latency. - :vartype rerank_with_original_vectors: bool - :ivar default_oversampling: Default oversampling factor. Oversampling will internally request - more documents (specified by this multiplier) in the initial search. This increases the set of - results that will be reranked using recomputed similarity scores from full-precision vectors. - Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when - rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. 
- :vartype default_oversampling: float :ivar rescoring_options: Contains the options for rescoring. :vartype rescoring_options: ~azure.search.documents.indexes.models.RescoringOptions :ivar truncation_dimension: The number of dimensions to truncate the vectors to. Truncating the @@ -1625,8 +1639,6 @@ class VectorSearchCompression(_serialization.Model): _attribute_map = { "compression_name": {"key": "name", "type": "str"}, "kind": {"key": "kind", "type": "str"}, - "rerank_with_original_vectors": {"key": "rerankWithOriginalVectors", "type": "bool"}, - "default_oversampling": {"key": "defaultOversampling", "type": "float"}, "rescoring_options": {"key": "rescoringOptions", "type": "RescoringOptions"}, "truncation_dimension": {"key": "truncationDimension", "type": "int"}, } @@ -1642,8 +1654,6 @@ def __init__( self, *, compression_name: str, - rerank_with_original_vectors: Optional[bool] = None, - default_oversampling: Optional[float] = None, rescoring_options: Optional["_models.RescoringOptions"] = None, truncation_dimension: Optional[int] = None, **kwargs: Any @@ -1651,16 +1661,6 @@ def __init__( """ :keyword compression_name: The name to associate with this particular configuration. Required. :paramtype compression_name: str - :keyword rerank_with_original_vectors: If set to true, once the ordered set of results - calculated using compressed vectors are obtained, they will be reranked again by recalculating - the full-precision similarity scores. This will improve recall at the expense of latency. - :paramtype rerank_with_original_vectors: bool - :keyword default_oversampling: Default oversampling factor. Oversampling will internally - request more documents (specified by this multiplier) in the initial search. This increases the - set of results that will be reranked using recomputed similarity scores from full-precision - vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when - rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. - :paramtype default_oversampling: float :keyword rescoring_options: Contains the options for rescoring. :paramtype rescoring_options: ~azure.search.documents.indexes.models.RescoringOptions :keyword truncation_dimension: The number of dimensions to truncate the vectors to. Truncating @@ -1674,8 +1674,6 @@ def __init__( super().__init__(**kwargs) self.compression_name = compression_name self.kind: Optional[str] = None - self.rerank_with_original_vectors = rerank_with_original_vectors - self.default_oversampling = default_oversampling self.rescoring_options = rescoring_options self.truncation_dimension = truncation_dimension @@ -1691,16 +1689,6 @@ class BinaryQuantizationCompression(VectorSearchCompression): :ivar kind: The name of the kind of compression method being configured for use with vector search. Required. Known values are: "scalarQuantization" and "binaryQuantization". :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchCompressionKind - :ivar rerank_with_original_vectors: If set to true, once the ordered set of results calculated - using compressed vectors are obtained, they will be reranked again by recalculating the - full-precision similarity scores. This will improve recall at the expense of latency. - :vartype rerank_with_original_vectors: bool - :ivar default_oversampling: Default oversampling factor. Oversampling will internally request - more documents (specified by this multiplier) in the initial search. 
This increases the set of - results that will be reranked using recomputed similarity scores from full-precision vectors. - Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when - rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. - :vartype default_oversampling: float :ivar rescoring_options: Contains the options for rescoring. :vartype rescoring_options: ~azure.search.documents.indexes.models.RescoringOptions :ivar truncation_dimension: The number of dimensions to truncate the vectors to. Truncating the @@ -1720,8 +1708,6 @@ class BinaryQuantizationCompression(VectorSearchCompression): _attribute_map = { "compression_name": {"key": "name", "type": "str"}, "kind": {"key": "kind", "type": "str"}, - "rerank_with_original_vectors": {"key": "rerankWithOriginalVectors", "type": "bool"}, - "default_oversampling": {"key": "defaultOversampling", "type": "float"}, "rescoring_options": {"key": "rescoringOptions", "type": "RescoringOptions"}, "truncation_dimension": {"key": "truncationDimension", "type": "int"}, } @@ -1730,8 +1716,6 @@ def __init__( self, *, compression_name: str, - rerank_with_original_vectors: Optional[bool] = None, - default_oversampling: Optional[float] = None, rescoring_options: Optional["_models.RescoringOptions"] = None, truncation_dimension: Optional[int] = None, **kwargs: Any @@ -1739,16 +1723,6 @@ def __init__( """ :keyword compression_name: The name to associate with this particular configuration. Required. :paramtype compression_name: str - :keyword rerank_with_original_vectors: If set to true, once the ordered set of results - calculated using compressed vectors are obtained, they will be reranked again by recalculating - the full-precision similarity scores. This will improve recall at the expense of latency. - :paramtype rerank_with_original_vectors: bool - :keyword default_oversampling: Default oversampling factor. Oversampling will internally - request more documents (specified by this multiplier) in the initial search. This increases the - set of results that will be reranked using recomputed similarity scores from full-precision - vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when - rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. - :paramtype default_oversampling: float :keyword rescoring_options: Contains the options for rescoring. :paramtype rescoring_options: ~azure.search.documents.indexes.models.RescoringOptions :keyword truncation_dimension: The number of dimensions to truncate the vectors to. Truncating @@ -1761,8 +1735,6 @@ def __init__( """ super().__init__( compression_name=compression_name, - rerank_with_original_vectors=rerank_with_original_vectors, - default_oversampling=default_oversampling, rescoring_options=rescoring_options, truncation_dimension=truncation_dimension, **kwargs @@ -2763,6 +2735,73 @@ def __init__( self.stop = stop +class CompletedSynchronizationState(_serialization.Model): + """Represents the completed state of the last synchronization. + + All required parameters must be populated in order to send to server. + + :ivar start_time: The start time of the last completed synchronization. Required. + :vartype start_time: ~datetime.datetime + :ivar end_time: The end time of the last completed synchronization. Required. + :vartype end_time: ~datetime.datetime + :ivar items_updates_processed: The number of item updates successfully processed in the last + synchronization. Required. 
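# --- Editorial aside (not part of the patch): the VectorSearchCompression /
# BinaryQuantizationCompression hunks above drop rerank_with_original_vectors and
# default_oversampling; rescoring is now configured through rescoring_options instead.
# A minimal sketch, assuming the classes are exported from
# azure.search.documents.indexes.models as the docstrings state:
from azure.search.documents.indexes.models import BinaryQuantizationCompression

compression = BinaryQuantizationCompression(
    compression_name="my-binary-compression",
    truncation_dimension=1024,  # optional: store vectors truncated to 1024 dimensions
    # rescoring_options=RescoringOptions(...),  # replaces the removed rerank/oversampling knobs
)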
+ :vartype items_updates_processed: int + :ivar items_updates_failed: The number of item updates that failed in the last synchronization. + Required. + :vartype items_updates_failed: int + :ivar items_skipped: The number of items skipped in the last synchronization. Required. + :vartype items_skipped: int + """ + + _validation = { + "start_time": {"required": True}, + "end_time": {"required": True}, + "items_updates_processed": {"required": True}, + "items_updates_failed": {"required": True}, + "items_skipped": {"required": True}, + } + + _attribute_map = { + "start_time": {"key": "startTime", "type": "iso-8601"}, + "end_time": {"key": "endTime", "type": "iso-8601"}, + "items_updates_processed": {"key": "itemsUpdatesProcessed", "type": "int"}, + "items_updates_failed": {"key": "itemsUpdatesFailed", "type": "int"}, + "items_skipped": {"key": "itemsSkipped", "type": "int"}, + } + + def __init__( + self, + *, + start_time: datetime.datetime, + end_time: datetime.datetime, + items_updates_processed: int, + items_updates_failed: int, + items_skipped: int, + **kwargs: Any + ) -> None: + """ + :keyword start_time: The start time of the last completed synchronization. Required. + :paramtype start_time: ~datetime.datetime + :keyword end_time: The end time of the last completed synchronization. Required. + :paramtype end_time: ~datetime.datetime + :keyword items_updates_processed: The number of item updates successfully processed in the last + synchronization. Required. + :paramtype items_updates_processed: int + :keyword items_updates_failed: The number of item updates that failed in the last + synchronization. Required. + :paramtype items_updates_failed: int + :keyword items_skipped: The number of items skipped in the last synchronization. Required. + :paramtype items_skipped: int + """ + super().__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.items_updates_processed = items_updates_processed + self.items_updates_failed = items_updates_failed + self.items_skipped = items_skipped + + class ConditionalSkill(SearchIndexerSkill): """A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. @@ -2837,6 +2876,142 @@ def __init__( self.odata_type: str = "#Microsoft.Skills.Util.ConditionalSkill" +class ContentUnderstandingSkill(SearchIndexerSkill): + """A skill that leverages Azure AI Content Understanding to process and extract structured + insights from documents, enabling enriched, searchable content for enhanced document indexing + and retrieval. + + All required parameters must be populated in order to send to server. + + :ivar odata_type: A URI fragment specifying the type of skill. Required. + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. 
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar extraction_options: Controls the cardinality of the content extracted from the document + by the skill. + :vartype extraction_options: list[str or + ~azure.search.documents.indexes.models.ContentUnderstandingSkillExtractionOptions] + :ivar chunking_properties: Controls the cardinality for chunking the content. + :vartype chunking_properties: + ~azure.search.documents.indexes.models.ContentUnderstandingSkillChunkingProperties + """ + + _validation = { + "odata_type": {"required": True}, + "inputs": {"required": True}, + "outputs": {"required": True}, + } + + _attribute_map = { + "odata_type": {"key": "@odata\\.type", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "description": {"key": "description", "type": "str"}, + "context": {"key": "context", "type": "str"}, + "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, + "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, + "extraction_options": {"key": "extractionOptions", "type": "[str]"}, + "chunking_properties": {"key": "chunkingProperties", "type": "ContentUnderstandingSkillChunkingProperties"}, + } + + def __init__( + self, + *, + inputs: list["_models.InputFieldMappingEntry"], + outputs: list["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + extraction_options: Optional[list[Union[str, "_models.ContentUnderstandingSkillExtractionOptions"]]] = None, + chunking_properties: Optional["_models.ContentUnderstandingSkillChunkingProperties"] = None, + **kwargs: Any + ) -> None: + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Inputs of the skills could be a column in the source data set, or the output + of an upstream skill. Required. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: The output of a skill is either a field in a search index, or a value that + can be consumed as an input by another skill. Required. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword extraction_options: Controls the cardinality of the content extracted from the + document by the skill. + :paramtype extraction_options: list[str or + ~azure.search.documents.indexes.models.ContentUnderstandingSkillExtractionOptions] + :keyword chunking_properties: Controls the cardinality for chunking the content. 
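# --- Editorial aside (not part of the patch): a sketch of the new ContentUnderstandingSkill
# defined above, assuming it and its helper types are exported from
# azure.search.documents.indexes.models; the field paths and names are illustrative only.
from azure.search.documents.indexes.models import (
    ContentUnderstandingSkill,
    ContentUnderstandingSkillChunkingProperties,
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
)

skill = ContentUnderstandingSkill(
    name="content-understanding",
    context="/document",
    inputs=[InputFieldMappingEntry(name="file_data", source="/document/file_data")],
    outputs=[OutputFieldMappingEntry(name="content", target_name="extracted_content")],
    chunking_properties=ContentUnderstandingSkillChunkingProperties(
        unit="characters",
        maximum_length=500,  # default per the docstring above
        overlap_length=50,
    ),
)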
+ :paramtype chunking_properties: + ~azure.search.documents.indexes.models.ContentUnderstandingSkillChunkingProperties + """ + super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) + self.odata_type: str = "#Microsoft.Skills.Util.ContentUnderstandingSkill" + self.extraction_options = extraction_options + self.chunking_properties = chunking_properties + + +class ContentUnderstandingSkillChunkingProperties(_serialization.Model): # pylint: disable=name-too-long + """Controls the cardinality for chunking the content. + + :ivar unit: The unit of the chunk. "characters" + :vartype unit: str or + ~azure.search.documents.indexes.models.ContentUnderstandingSkillChunkingUnit + :ivar maximum_length: The maximum chunk length in characters. Default is 500. + :vartype maximum_length: int + :ivar overlap_length: The length of overlap provided between two text chunks. Default is 0. + :vartype overlap_length: int + """ + + _attribute_map = { + "unit": {"key": "unit", "type": "str"}, + "maximum_length": {"key": "maximumLength", "type": "int"}, + "overlap_length": {"key": "overlapLength", "type": "int"}, + } + + def __init__( + self, + *, + unit: Union[str, "_models.ContentUnderstandingSkillChunkingUnit"] = "characters", + maximum_length: Optional[int] = None, + overlap_length: Optional[int] = None, + **kwargs: Any + ) -> None: + """ + :keyword unit: The unit of the chunk. "characters" + :paramtype unit: str or + ~azure.search.documents.indexes.models.ContentUnderstandingSkillChunkingUnit + :keyword maximum_length: The maximum chunk length in characters. Default is 500. + :paramtype maximum_length: int + :keyword overlap_length: The length of overlap provided between two text chunks. Default is 0. + :paramtype overlap_length: int + """ + super().__init__(**kwargs) + self.unit = unit + self.maximum_length = maximum_length + self.overlap_length = overlap_length + + class CorsOptions(_serialization.Model): """Defines options to control Cross-Origin Resource Sharing (CORS) for an index. @@ -5351,43 +5526,342 @@ def __init__( self.details = details -class IndexerCurrentState(_serialization.Model): - """Represents all of the state that defines and dictates the indexer's current execution. - - Variables are only populated by the server, and will be ignored when sending a request. +class IndexedOneLakeKnowledgeSource(KnowledgeSource): + """Configuration for OneLake knowledge source. - :ivar mode: The mode the indexer is running in. Known values are: "indexingAllDocs", - "indexingResetDocs", and "indexingResync". - :vartype mode: str or ~azure.search.documents.indexes.models.IndexingMode - :ivar all_docs_initial_tracking_state: Change tracking state used when indexing starts on all - documents in the datasource. - :vartype all_docs_initial_tracking_state: str - :ivar all_docs_final_tracking_state: Change tracking state value when indexing finishes on all - documents in the datasource. - :vartype all_docs_final_tracking_state: str - :ivar reset_docs_initial_tracking_state: Change tracking state used when indexing starts on - select, reset documents in the datasource. - :vartype reset_docs_initial_tracking_state: str - :ivar reset_docs_final_tracking_state: Change tracking state value when indexing finishes on - select, reset documents in the datasource. - :vartype reset_docs_final_tracking_state: str - :ivar reset_document_keys: The list of document keys that have been reset. 
The document key is - the document's unique identifier for the data in the search index. The indexer will prioritize - selectively re-ingesting these keys. - :vartype reset_document_keys: list[str] - :ivar reset_datasource_document_ids: The list of datasource document ids that have been reset. - The datasource document id is the unique identifier for the data in the datasource. The indexer - will prioritize selectively re-ingesting these ids. - :vartype reset_datasource_document_ids: list[str] - :ivar resync_initial_tracking_state: Change tracking state used when indexing starts on - selective options from the datasource. - :vartype resync_initial_tracking_state: str - :ivar resync_final_tracking_state: Change tracking state value when indexing finishes on - selective options from the datasource. - :vartype resync_final_tracking_state: str - """ + All required parameters must be populated in order to send to server. - _validation = { + :ivar name: The name of the knowledge source. Required. + :vartype name: str + :ivar description: Optional user-defined description. + :vartype description: str + :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex", + "azureBlob", "web", "remoteSharePoint", "indexedSharePoint", and "indexedOneLake". + :vartype kind: str or ~azure.search.documents.indexes.models.KnowledgeSourceKind + :ivar e_tag: The ETag of the knowledge base. + :vartype e_tag: str + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your knowledge base + definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once + you have encrypted your knowledge base definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your knowledge base definition will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. + :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :ivar indexed_one_lake_parameters: The parameters for the OneLake knowledge source. Required. + :vartype indexed_one_lake_parameters: + ~azure.search.documents.indexes.models.IndexedOneLakeKnowledgeSourceParameters + """ + + _validation = { + "name": {"required": True}, + "kind": {"required": True}, + "indexed_one_lake_parameters": {"required": True}, + } + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "description": {"key": "description", "type": "str"}, + "kind": {"key": "kind", "type": "str"}, + "e_tag": {"key": "@odata\\.etag", "type": "str"}, + "encryption_key": {"key": "encryptionKey", "type": "SearchResourceEncryptionKey"}, + "indexed_one_lake_parameters": { + "key": "indexedOneLakeParameters", + "type": "IndexedOneLakeKnowledgeSourceParameters", + }, + } + + def __init__( + self, + *, + name: str, + indexed_one_lake_parameters: "_models.IndexedOneLakeKnowledgeSourceParameters", + description: Optional[str] = None, + e_tag: Optional[str] = None, + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, + **kwargs: Any + ) -> None: + """ + :keyword name: The name of the knowledge source. Required. + :paramtype name: str + :keyword description: Optional user-defined description. 
+ :paramtype description: str + :keyword e_tag: The ETag of the knowledge base. + :paramtype e_tag: str + :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your knowledge base + definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once + you have encrypted your knowledge base definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your knowledge base definition will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. + :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :keyword indexed_one_lake_parameters: The parameters for the OneLake knowledge source. + Required. + :paramtype indexed_one_lake_parameters: + ~azure.search.documents.indexes.models.IndexedOneLakeKnowledgeSourceParameters + """ + super().__init__(name=name, description=description, e_tag=e_tag, encryption_key=encryption_key, **kwargs) + self.kind: str = "indexedOneLake" + self.indexed_one_lake_parameters = indexed_one_lake_parameters + + +class IndexedOneLakeKnowledgeSourceParameters(_serialization.Model): + """Parameters for OneLake knowledge source. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar fabric_workspace_id: OneLake workspace ID. Required. + :vartype fabric_workspace_id: str + :ivar lakehouse_id: Specifies which OneLake lakehouse to access. Required. + :vartype lakehouse_id: str + :ivar target_path: Optional OneLakehouse folder or shortcut to filter OneLake content. + :vartype target_path: str + :ivar ingestion_parameters: Consolidates all general ingestion settings. + :vartype ingestion_parameters: + ~azure.search.documents.indexes.models.KnowledgeSourceIngestionParameters + :ivar created_resources: Resources created by the knowledge source. + :vartype created_resources: dict[str, str] + """ + + _validation = { + "fabric_workspace_id": {"required": True}, + "lakehouse_id": {"required": True}, + "created_resources": {"readonly": True}, + } + + _attribute_map = { + "fabric_workspace_id": {"key": "fabricWorkspaceId", "type": "str"}, + "lakehouse_id": {"key": "lakehouseId", "type": "str"}, + "target_path": {"key": "targetPath", "type": "str"}, + "ingestion_parameters": {"key": "ingestionParameters", "type": "KnowledgeSourceIngestionParameters"}, + "created_resources": {"key": "createdResources", "type": "{str}"}, + } + + def __init__( + self, + *, + fabric_workspace_id: str, + lakehouse_id: str, + target_path: Optional[str] = None, + ingestion_parameters: Optional["_models.KnowledgeSourceIngestionParameters"] = None, + **kwargs: Any + ) -> None: + """ + :keyword fabric_workspace_id: OneLake workspace ID. Required. + :paramtype fabric_workspace_id: str + :keyword lakehouse_id: Specifies which OneLake lakehouse to access. Required. + :paramtype lakehouse_id: str + :keyword target_path: Optional OneLakehouse folder or shortcut to filter OneLake content. + :paramtype target_path: str + :keyword ingestion_parameters: Consolidates all general ingestion settings. 
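# --- Editorial aside (not part of the patch): constructing the OneLake knowledge source
# described above; the workspace and lakehouse IDs are placeholders.
from azure.search.documents.indexes.models import (
    IndexedOneLakeKnowledgeSource,
    IndexedOneLakeKnowledgeSourceParameters,
)

onelake_source = IndexedOneLakeKnowledgeSource(
    name="onelake-ks",
    description="Documents stored in a Fabric lakehouse",
    indexed_one_lake_parameters=IndexedOneLakeKnowledgeSourceParameters(
        fabric_workspace_id="<fabric-workspace-guid>",
        lakehouse_id="<lakehouse-guid>",
        target_path="docs/",  # optional folder or shortcut filter
    ),
)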
+ :paramtype ingestion_parameters: + ~azure.search.documents.indexes.models.KnowledgeSourceIngestionParameters + """ + super().__init__(**kwargs) + self.fabric_workspace_id = fabric_workspace_id + self.lakehouse_id = lakehouse_id + self.target_path = target_path + self.ingestion_parameters = ingestion_parameters + self.created_resources: Optional[dict[str, str]] = None + + +class IndexedSharePointKnowledgeSource(KnowledgeSource): + """Configuration for SharePoint knowledge source. + + All required parameters must be populated in order to send to server. + + :ivar name: The name of the knowledge source. Required. + :vartype name: str + :ivar description: Optional user-defined description. + :vartype description: str + :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex", + "azureBlob", "web", "remoteSharePoint", "indexedSharePoint", and "indexedOneLake". + :vartype kind: str or ~azure.search.documents.indexes.models.KnowledgeSourceKind + :ivar e_tag: The ETag of the knowledge base. + :vartype e_tag: str + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your knowledge base + definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once + you have encrypted your knowledge base definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your knowledge base definition will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. + :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :ivar indexed_share_point_parameters: The parameters for the SharePoint knowledge source. + Required. + :vartype indexed_share_point_parameters: + ~azure.search.documents.indexes.models.IndexedSharePointKnowledgeSourceParameters + """ + + _validation = { + "name": {"required": True}, + "kind": {"required": True}, + "indexed_share_point_parameters": {"required": True}, + } + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "description": {"key": "description", "type": "str"}, + "kind": {"key": "kind", "type": "str"}, + "e_tag": {"key": "@odata\\.etag", "type": "str"}, + "encryption_key": {"key": "encryptionKey", "type": "SearchResourceEncryptionKey"}, + "indexed_share_point_parameters": { + "key": "indexedSharePointParameters", + "type": "IndexedSharePointKnowledgeSourceParameters", + }, + } + + def __init__( + self, + *, + name: str, + indexed_share_point_parameters: "_models.IndexedSharePointKnowledgeSourceParameters", + description: Optional[str] = None, + e_tag: Optional[str] = None, + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, + **kwargs: Any + ) -> None: + """ + :keyword name: The name of the knowledge source. Required. + :paramtype name: str + :keyword description: Optional user-defined description. + :paramtype description: str + :keyword e_tag: The ETag of the knowledge base. + :paramtype e_tag: str + :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your knowledge base + definition when you want full assurance that no one, not even Microsoft, can decrypt them. 
Once + you have encrypted your knowledge base definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your knowledge base definition will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. + :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :keyword indexed_share_point_parameters: The parameters for the SharePoint knowledge source. + Required. + :paramtype indexed_share_point_parameters: + ~azure.search.documents.indexes.models.IndexedSharePointKnowledgeSourceParameters + """ + super().__init__(name=name, description=description, e_tag=e_tag, encryption_key=encryption_key, **kwargs) + self.kind: str = "indexedSharePoint" + self.indexed_share_point_parameters = indexed_share_point_parameters + + +class IndexedSharePointKnowledgeSourceParameters(_serialization.Model): # pylint: disable=name-too-long + """Parameters for SharePoint knowledge source. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar connection_string: SharePoint connection string with format: + SharePointOnlineEndpoint=[SharePoint site url];ApplicationId=[Azure AD App + ID];ApplicationSecret=[Azure AD App client secret];TenantId=[SharePoint site tenant id]. + Required. + :vartype connection_string: str + :ivar container_name: Specifies which SharePoint libraries to access. Required. Known values + are: "defaultSiteLibrary", "allSiteLibraries", and "useQuery". + :vartype container_name: str or + ~azure.search.documents.indexes.models.IndexedSharePointContainerName + :ivar query: Optional query to filter SharePoint content. + :vartype query: str + :ivar ingestion_parameters: Consolidates all general ingestion settings. + :vartype ingestion_parameters: + ~azure.search.documents.indexes.models.KnowledgeSourceIngestionParameters + :ivar created_resources: Resources created by the knowledge source. + :vartype created_resources: dict[str, str] + """ + + _validation = { + "connection_string": {"required": True}, + "container_name": {"required": True}, + "created_resources": {"readonly": True}, + } + + _attribute_map = { + "connection_string": {"key": "connectionString", "type": "str"}, + "container_name": {"key": "containerName", "type": "str"}, + "query": {"key": "query", "type": "str"}, + "ingestion_parameters": {"key": "ingestionParameters", "type": "KnowledgeSourceIngestionParameters"}, + "created_resources": {"key": "createdResources", "type": "{str}"}, + } + + def __init__( + self, + *, + connection_string: str, + container_name: Union[str, "_models.IndexedSharePointContainerName"], + query: Optional[str] = None, + ingestion_parameters: Optional["_models.KnowledgeSourceIngestionParameters"] = None, + **kwargs: Any + ) -> None: + """ + :keyword connection_string: SharePoint connection string with format: + SharePointOnlineEndpoint=[SharePoint site url];ApplicationId=[Azure AD App + ID];ApplicationSecret=[Azure AD App client secret];TenantId=[SharePoint site tenant id]. + Required. + :paramtype connection_string: str + :keyword container_name: Specifies which SharePoint libraries to access. Required. Known values + are: "defaultSiteLibrary", "allSiteLibraries", and "useQuery". 
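# --- Editorial aside (not part of the patch): the SharePoint knowledge source defined above;
# the connection string follows the format given in the docstring, with placeholder values.
from azure.search.documents.indexes.models import (
    IndexedSharePointKnowledgeSource,
    IndexedSharePointKnowledgeSourceParameters,
)

sharepoint_source = IndexedSharePointKnowledgeSource(
    name="sharepoint-ks",
    indexed_share_point_parameters=IndexedSharePointKnowledgeSourceParameters(
        connection_string=(
            "SharePointOnlineEndpoint=https://contoso.sharepoint.com/sites/docs;"
            "ApplicationId=<app-id>;ApplicationSecret=<secret>;TenantId=<tenant-id>"
        ),
        container_name="defaultSiteLibrary",  # or "allSiteLibraries" / "useQuery" with a query
    ),
)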
+ :paramtype container_name: str or + ~azure.search.documents.indexes.models.IndexedSharePointContainerName + :keyword query: Optional query to filter SharePoint content. + :paramtype query: str + :keyword ingestion_parameters: Consolidates all general ingestion settings. + :paramtype ingestion_parameters: + ~azure.search.documents.indexes.models.KnowledgeSourceIngestionParameters + """ + super().__init__(**kwargs) + self.connection_string = connection_string + self.container_name = container_name + self.query = query + self.ingestion_parameters = ingestion_parameters + self.created_resources: Optional[dict[str, str]] = None + + +class IndexerCurrentState(_serialization.Model): + """Represents all of the state that defines and dictates the indexer's current execution. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar mode: The mode the indexer is running in. Known values are: "indexingAllDocs", + "indexingResetDocs", and "indexingResync". + :vartype mode: str or ~azure.search.documents.indexes.models.IndexingMode + :ivar all_docs_initial_tracking_state: Change tracking state used when indexing starts on all + documents in the datasource. + :vartype all_docs_initial_tracking_state: str + :ivar all_docs_final_tracking_state: Change tracking state value when indexing finishes on all + documents in the datasource. + :vartype all_docs_final_tracking_state: str + :ivar reset_docs_initial_tracking_state: Change tracking state used when indexing starts on + select, reset documents in the datasource. + :vartype reset_docs_initial_tracking_state: str + :ivar reset_docs_final_tracking_state: Change tracking state value when indexing finishes on + select, reset documents in the datasource. + :vartype reset_docs_final_tracking_state: str + :ivar reset_document_keys: The list of document keys that have been reset. The document key is + the document's unique identifier for the data in the search index. The indexer will prioritize + selectively re-ingesting these keys. + :vartype reset_document_keys: list[str] + :ivar reset_datasource_document_ids: The list of datasource document ids that have been reset. + The datasource document id is the unique identifier for the data in the datasource. The indexer + will prioritize selectively re-ingesting these ids. + :vartype reset_datasource_document_ids: list[str] + :ivar resync_initial_tracking_state: Change tracking state used when indexing starts on + selective options from the datasource. + :vartype resync_initial_tracking_state: str + :ivar resync_final_tracking_state: Change tracking state value when indexing finishes on + selective options from the datasource. + :vartype resync_final_tracking_state: str + """ + + _validation = { "mode": {"readonly": True}, "all_docs_initial_tracking_state": {"readonly": True}, "all_docs_final_tracking_state": {"readonly": True}, @@ -5534,6 +6008,68 @@ def __init__( self.options = options +class IndexerRuntime(_serialization.Model): + """Represents the indexer's cumulative runtime consumption in the service. + + All required parameters must be populated in order to send to server. + + :ivar used_seconds: Cumulative runtime of the indexer from the beginningTime to endingTime, in + seconds. Required. + :vartype used_seconds: int + :ivar remaining_seconds: Cumulative runtime remaining for all indexers in the service from the + beginningTime to endingTime, in seconds. 
+ :vartype remaining_seconds: int + :ivar beginning_time: Beginning UTC time of the 24-hour period considered for indexer runtime + usage (inclusive). Required. + :vartype beginning_time: ~datetime.datetime + :ivar ending_time: End UTC time of the 24-hour period considered for indexer runtime usage + (inclusive). Required. + :vartype ending_time: ~datetime.datetime + """ + + _validation = { + "used_seconds": {"required": True}, + "beginning_time": {"required": True}, + "ending_time": {"required": True}, + } + + _attribute_map = { + "used_seconds": {"key": "usedSeconds", "type": "int"}, + "remaining_seconds": {"key": "remainingSeconds", "type": "int"}, + "beginning_time": {"key": "beginningTime", "type": "iso-8601"}, + "ending_time": {"key": "endingTime", "type": "iso-8601"}, + } + + def __init__( + self, + *, + used_seconds: int, + beginning_time: datetime.datetime, + ending_time: datetime.datetime, + remaining_seconds: Optional[int] = None, + **kwargs: Any + ) -> None: + """ + :keyword used_seconds: Cumulative runtime of the indexer from the beginningTime to endingTime, + in seconds. Required. + :paramtype used_seconds: int + :keyword remaining_seconds: Cumulative runtime remaining for all indexers in the service from + the beginningTime to endingTime, in seconds. + :paramtype remaining_seconds: int + :keyword beginning_time: Beginning UTC time of the 24-hour period considered for indexer + runtime usage (inclusive). Required. + :paramtype beginning_time: ~datetime.datetime + :keyword ending_time: End UTC time of the 24-hour period considered for indexer runtime usage + (inclusive). Required. + :paramtype ending_time: ~datetime.datetime + """ + super().__init__(**kwargs) + self.used_seconds = used_seconds + self.remaining_seconds = remaining_seconds + self.beginning_time = beginning_time + self.ending_time = ending_time + + class IndexingParameters(_serialization.Model): """Represents parameters for indexer execution. @@ -6241,127 +6777,137 @@ def __init__(self, *, name: str, max_token_length: int = 256, **kwargs: Any) -> self.max_token_length = max_token_length -class KnowledgeAgent(_serialization.Model): - """KnowledgeAgent. +class KnowledgeBase(_serialization.Model): + """KnowledgeBase. All required parameters must be populated in order to send to server. - :ivar name: The name of the knowledge agent. Required. + :ivar name: The name of the knowledge base. Required. :vartype name: str - :ivar models: Contains configuration options on how to connect to AI models. Required. - :vartype models: list[~azure.search.documents.indexes.models.KnowledgeAgentModel] :ivar knowledge_sources: Required. :vartype knowledge_sources: list[~azure.search.documents.indexes.models.KnowledgeSourceReference] - :ivar output_configuration: - :vartype output_configuration: - ~azure.search.documents.indexes.models.KnowledgeAgentOutputConfiguration - :ivar request_limits: Guardrails to limit how much resources are utilized for a single agent - retrieval request. - :vartype request_limits: ~azure.search.documents.indexes.models.KnowledgeAgentRequestLimits - :ivar retrieval_instructions: Instructions considered by the knowledge agent when developing - query plan. - :vartype retrieval_instructions: str - :ivar e_tag: The ETag of the agent. + :ivar models: Contains configuration options on how to connect to AI models.
+ :vartype models: list[~azure.search.documents.indexes.models.KnowledgeBaseModel] + :ivar retrieval_reasoning_effort: + :vartype retrieval_reasoning_effort: + ~azure.search.documents.indexes.models.KnowledgeRetrievalReasoningEffort + :ivar output_mode: The output configuration for this retrieval. Known values are: + "extractiveData" and "answerSynthesis". + :vartype output_mode: str or + ~azure.search.documents.indexes.models.KnowledgeRetrievalOutputMode + :ivar e_tag: The ETag of the knowledge base. :vartype e_tag: str :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your agent definition - when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have - encrypted your agent definition, it will always remain encrypted. The search service will - ignore attempts to set this property to null. You can change this property as needed if you - want to rotate your encryption key; Your agent definition will be unaffected. Encryption with - customer-managed keys is not available for free search services, and is only available for paid - services created on or after January 1, 2019. + This key is used to provide an additional level of encryption-at-rest for your knowledge base + definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once + you have encrypted your knowledge base definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your knowledge base definition will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :ivar description: The description of the agent. + :ivar description: The description of the knowledge base. :vartype description: str + :ivar retrieval_instructions: Instructions considered by the knowledge base when + developing query plan. + :vartype retrieval_instructions: str + :ivar answer_instructions: Instructions considered by the knowledge base when + generating answers.
+ :vartype answer_instructions: str """ _validation = { "name": {"required": True}, - "models": {"required": True}, "knowledge_sources": {"required": True}, } _attribute_map = { "name": {"key": "name", "type": "str"}, - "models": {"key": "models", "type": "[KnowledgeAgentModel]"}, "knowledge_sources": {"key": "knowledgeSources", "type": "[KnowledgeSourceReference]"}, - "output_configuration": {"key": "outputConfiguration", "type": "KnowledgeAgentOutputConfiguration"}, - "request_limits": {"key": "requestLimits", "type": "KnowledgeAgentRequestLimits"}, - "retrieval_instructions": {"key": "retrievalInstructions", "type": "str"}, + "models": {"key": "models", "type": "[KnowledgeBaseModel]"}, + "retrieval_reasoning_effort": {"key": "retrievalReasoningEffort", "type": "KnowledgeRetrievalReasoningEffort"}, + "output_mode": {"key": "outputMode", "type": "str"}, "e_tag": {"key": "@odata\\.etag", "type": "str"}, "encryption_key": {"key": "encryptionKey", "type": "SearchResourceEncryptionKey"}, "description": {"key": "description", "type": "str"}, + "retrieval_instructions": {"key": "retrievalInstructions", "type": "str"}, + "answer_instructions": {"key": "answerInstructions", "type": "str"}, } def __init__( self, *, name: str, - models: list["_models.KnowledgeAgentModel"], knowledge_sources: list["_models.KnowledgeSourceReference"], - output_configuration: Optional["_models.KnowledgeAgentOutputConfiguration"] = None, - request_limits: Optional["_models.KnowledgeAgentRequestLimits"] = None, - retrieval_instructions: Optional[str] = None, + models: Optional[list["_models.KnowledgeBaseModel"]] = None, + retrieval_reasoning_effort: Optional["_models.KnowledgeRetrievalReasoningEffort"] = None, + output_mode: Optional[Union[str, "_models.KnowledgeRetrievalOutputMode"]] = None, e_tag: Optional[str] = None, encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, description: Optional[str] = None, + retrieval_instructions: Optional[str] = None, + answer_instructions: Optional[str] = None, **kwargs: Any ) -> None: """ - :keyword name: The name of the knowledge agent. Required. + :keyword name: The name of the knowledge base. Required. :paramtype name: str - :keyword models: Contains configuration options on how to connect to AI models. Required. - :paramtype models: list[~azure.search.documents.indexes.models.KnowledgeAgentModel] :keyword knowledge_sources: Required. :paramtype knowledge_sources: list[~azure.search.documents.indexes.models.KnowledgeSourceReference] - :keyword output_configuration: - :paramtype output_configuration: - ~azure.search.documents.indexes.models.KnowledgeAgentOutputConfiguration - :keyword request_limits: Guardrails to limit how much resources are utilized for a single agent - retrieval request. - :paramtype request_limits: ~azure.search.documents.indexes.models.KnowledgeAgentRequestLimits - :keyword retrieval_instructions: Instructions considered by the knowledge agent when developing - query plan. - :paramtype retrieval_instructions: str - :keyword e_tag: The ETag of the agent. + :keyword models: Contains configuration options on how to connect to AI models. + :paramtype models: list[~azure.search.documents.indexes.models.KnowledgeBaseModel] + :keyword retrieval_reasoning_effort: + :paramtype retrieval_reasoning_effort: + ~azure.search.documents.indexes.models.KnowledgeRetrievalReasoningEffort + :keyword output_mode: The output configuration for this retrieval. Known values are: + "extractiveData" and "answerSynthesis".
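# --- Editorial aside (not part of the patch): a minimal KnowledgeBase built with the
# constructor shown in this hunk; `models` is omitted because it is now optional, and the
# knowledge source name is a placeholder.
from azure.search.documents.indexes.models import KnowledgeBase, KnowledgeSourceReference

knowledge_base = KnowledgeBase(
    name="docs-kb",
    knowledge_sources=[KnowledgeSourceReference(name="sharepoint-ks")],
    output_mode="answerSynthesis",  # or "extractiveData"
    retrieval_instructions="Prefer the most recently updated engineering documents.",
    answer_instructions="Answer concisely and cite the supporting documents.",
)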
+ :paramtype output_mode: str or + ~azure.search.documents.indexes.models.KnowledgeRetrievalOutputMode + :keyword e_tag: The ETag of the knowledge base. :paramtype e_tag: str :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your agent definition - when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have - encrypted your agent definition, it will always remain encrypted. The search service will - ignore attempts to set this property to null. You can change this property as needed if you - want to rotate your encryption key; Your agent definition will be unaffected. Encryption with - customer-managed keys is not available for free search services, and is only available for paid - services created on or after January 1, 2019. + This key is used to provide an additional level of encryption-at-rest for your knowledge base + definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once + you have encrypted your knowledge base definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your knowledge base definition will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :keyword description: The description of the agent. + :keyword description: The description of the knowledge base. :paramtype description: str + :keyword retrieval_instructions: Instructions considered by the knowledge base when + developing query plan. + :paramtype retrieval_instructions: str + :keyword answer_instructions: Instructions considered by the knowledge base when + generating answers. + :paramtype answer_instructions: str """ super().__init__(**kwargs) self.name = name - self.models = models self.knowledge_sources = knowledge_sources - self.output_configuration = output_configuration - self.request_limits = request_limits - self.retrieval_instructions = retrieval_instructions + self.models = models + self.retrieval_reasoning_effort = retrieval_reasoning_effort + self.output_mode = output_mode self.e_tag = e_tag self.encryption_key = encryption_key self.description = description + self.retrieval_instructions = retrieval_instructions + self.answer_instructions = answer_instructions -class KnowledgeAgentModel(_serialization.Model): +class KnowledgeBaseModel(_serialization.Model): """Specifies the connection parameters for the model to use for query planning. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - KnowledgeAgentAzureOpenAIModel + KnowledgeBaseAzureOpenAIModel All required parameters must be populated in order to send to server. :ivar kind: The type of AI model. Required.
"azureOpenAI" - :vartype kind: str or ~azure.search.documents.indexes.models.KnowledgeAgentModelKind + :vartype kind: str or ~azure.search.documents.indexes.models.KnowledgeBaseModelKind """ _validation = { @@ -6372,7 +6918,7 @@ class KnowledgeAgentModel(_serialization.Model): "kind": {"key": "kind", "type": "str"}, } - _subtype_map = {"kind": {"azureOpenAI": "KnowledgeAgentAzureOpenAIModel"}} + _subtype_map = {"kind": {"azureOpenAI": "KnowledgeBaseAzureOpenAIModel"}} def __init__(self, **kwargs: Any) -> None: """ """ @@ -6380,13 +6926,13 @@ def __init__(self, **kwargs: Any) -> None: self.kind: Optional[str] = None -class KnowledgeAgentAzureOpenAIModel(KnowledgeAgentModel): +class KnowledgeBaseAzureOpenAIModel(KnowledgeBaseModel): """Specifies the Azure OpenAI resource used to do query planning. All required parameters must be populated in order to send to server. :ivar kind: The type of AI model. Required. "azureOpenAI" - :vartype kind: str or ~azure.search.documents.indexes.models.KnowledgeAgentModelKind + :vartype kind: str or ~azure.search.documents.indexes.models.KnowledgeBaseModelKind :ivar azure_open_ai_parameters: Contains the parameters specific to Azure OpenAI model endpoint. Required. :vartype azure_open_ai_parameters: @@ -6415,162 +6961,429 @@ def __init__(self, *, azure_open_ai_parameters: "_models.AzureOpenAIVectorizerPa self.azure_open_ai_parameters = azure_open_ai_parameters -class KnowledgeAgentOutputConfiguration(_serialization.Model): - """KnowledgeAgentOutputConfiguration. +class KnowledgeRetrievalReasoningEffort(_serialization.Model): + """KnowledgeRetrievalReasoningEffort. - :ivar modality: The output configuration for the agent. Known values are: "answerSynthesis" and - "extractiveData". - :vartype modality: str or - ~azure.search.documents.indexes.models.KnowledgeAgentOutputConfigurationModality - :ivar answer_instructions: Instructions considered by the knowledge agent when generating - answers. - :vartype answer_instructions: str - :ivar attempt_fast_path: Indicates whether the agent should attempt to issue the most recent - chat message as a direct query to the knowledge sources, bypassing the model calls. - :vartype attempt_fast_path: bool - :ivar include_activity: Indicates retrieval results should include activity information. - :vartype include_activity: bool + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + KnowledgeRetrievalLowReasoningEffort, KnowledgeRetrievalMediumReasoningEffort, + KnowledgeRetrievalMinimalReasoningEffort + + All required parameters must be populated in order to send to server. + + :ivar kind: The kind of reasoning effort. Required. Known values are: "minimal", "low", and + "medium". 
+ :vartype kind: str or + ~azure.search.documents.indexes.models.KnowledgeRetrievalReasoningEffortKind """ + _validation = { + "kind": {"required": True}, + } + _attribute_map = { - "modality": {"key": "modality", "type": "str"}, - "answer_instructions": {"key": "answerInstructions", "type": "str"}, - "attempt_fast_path": {"key": "attemptFastPath", "type": "bool"}, - "include_activity": {"key": "includeActivity", "type": "bool"}, + "kind": {"key": "kind", "type": "str"}, + } + + _subtype_map = { + "kind": { + "low": "KnowledgeRetrievalLowReasoningEffort", + "medium": "KnowledgeRetrievalMediumReasoningEffort", + "minimal": "KnowledgeRetrievalMinimalReasoningEffort", + } + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.kind: Optional[str] = None + + +class KnowledgeRetrievalLowReasoningEffort(KnowledgeRetrievalReasoningEffort): + """Run knowledge retrieval with low reasoning effort. + + All required parameters must be populated in order to send to server. + + :ivar kind: The kind of reasoning effort. Required. Known values are: "minimal", "low", and + "medium". + :vartype kind: str or + ~azure.search.documents.indexes.models.KnowledgeRetrievalReasoningEffortKind + """ + + _validation = { + "kind": {"required": True}, + } + + _attribute_map = { + "kind": {"key": "kind", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.kind: str = "low" + + +class KnowledgeRetrievalMediumReasoningEffort(KnowledgeRetrievalReasoningEffort): + """Run knowledge retrieval with medium reasoning effort. + + All required parameters must be populated in order to send to server. + + :ivar kind: The kind of reasoning effort. Required. Known values are: "minimal", "low", and + "medium". + :vartype kind: str or + ~azure.search.documents.indexes.models.KnowledgeRetrievalReasoningEffortKind + """ + + _validation = { + "kind": {"required": True}, + } + + _attribute_map = { + "kind": {"key": "kind", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.kind: str = "medium" + + +class KnowledgeRetrievalMinimalReasoningEffort(KnowledgeRetrievalReasoningEffort): + """Run knowledge retrieval with minimal reasoning effort. + + All required parameters must be populated in order to send to server. + + :ivar kind: The kind of reasoning effort. Required. Known values are: "minimal", "low", and + "medium". + :vartype kind: str or + ~azure.search.documents.indexes.models.KnowledgeRetrievalReasoningEffortKind + """ + + _validation = { + "kind": {"required": True}, + } + + _attribute_map = { + "kind": {"key": "kind", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.kind: str = "minimal" + + +class KnowledgeSourceVectorizer(_serialization.Model): + """Specifies the vectorization method to be used for knowledge source embedding model, with + optional name. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + KnowledgeSourceAzureOpenAIVectorizer + + All required parameters must be populated in order to send to server. + + :ivar kind: The name of the kind of vectorization method being configured for use with vector + search. Required. Known values are: "azureOpenAI", "customWebApi", "aiServicesVision", and + "aml". 
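# --- Editorial aside (not part of the patch): the reasoning-effort models above are simple
# kind-discriminated objects; pick one and attach it to a KnowledgeBase, assuming these names
# are exported from azure.search.documents.indexes.models as the docstrings indicate.
from azure.search.documents.indexes.models import (
    KnowledgeBase,
    KnowledgeRetrievalMinimalReasoningEffort,
    KnowledgeSourceReference,
)

fast_kb = KnowledgeBase(
    name="fast-kb",
    knowledge_sources=[KnowledgeSourceReference(name="onelake-ks")],
    retrieval_reasoning_effort=KnowledgeRetrievalMinimalReasoningEffort(),  # kind == "minimal"
)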
+ :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchVectorizerKind + """ + + _validation = { + "kind": {"required": True}, + } + + _attribute_map = { + "kind": {"key": "kind", "type": "str"}, + } + + _subtype_map = {"kind": {"azureOpenAI": "KnowledgeSourceAzureOpenAIVectorizer"}} + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.kind: Optional[str] = None + + +class KnowledgeSourceAzureOpenAIVectorizer(KnowledgeSourceVectorizer): + """Specifies the Azure OpenAI resource used to vectorize a query string. + + All required parameters must be populated in order to send to server. + + :ivar kind: The name of the kind of vectorization method being configured for use with vector + search. Required. Known values are: "azureOpenAI", "customWebApi", "aiServicesVision", and + "aml". + :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchVectorizerKind + :ivar azure_open_ai_parameters: Contains the parameters specific to Azure OpenAI embedding + vectorization. + :vartype azure_open_ai_parameters: + ~azure.search.documents.indexes.models.AzureOpenAIVectorizerParameters + """ + + _validation = { + "kind": {"required": True}, + } + + _attribute_map = { + "kind": {"key": "kind", "type": "str"}, + "azure_open_ai_parameters": {"key": "azureOpenAIParameters", "type": "AzureOpenAIVectorizerParameters"}, + } + + def __init__( + self, *, azure_open_ai_parameters: Optional["_models.AzureOpenAIVectorizerParameters"] = None, **kwargs: Any + ) -> None: + """ + :keyword azure_open_ai_parameters: Contains the parameters specific to Azure OpenAI embedding + vectorization. + :paramtype azure_open_ai_parameters: + ~azure.search.documents.indexes.models.AzureOpenAIVectorizerParameters + """ + super().__init__(**kwargs) + self.kind: str = "azureOpenAI" + self.azure_open_ai_parameters = azure_open_ai_parameters + + +class KnowledgeSourceIngestionParameters(_serialization.Model): + """Consolidates all general ingestion settings for knowledge sources. + + :ivar identity: An explicit identity to use for this knowledge source. + :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity + :ivar embedding_model: Optional vectorizer configuration for vectorizing content. + :vartype embedding_model: ~azure.search.documents.indexes.models.KnowledgeSourceVectorizer + :ivar chat_completion_model: Optional chat completion model for image verbalization or context + extraction. + :vartype chat_completion_model: ~azure.search.documents.indexes.models.KnowledgeBaseModel + :ivar disable_image_verbalization: Indicates whether image verbalization should be disabled. + Default is false. + :vartype disable_image_verbalization: bool + :ivar ingestion_schedule: Optional schedule for data ingestion. + :vartype ingestion_schedule: ~azure.search.documents.indexes.models.IndexingSchedule + :ivar ingestion_permission_options: Optional list of permission types to ingest together with + document content. If specified, it will set the indexer permission options for the data source. + :vartype ingestion_permission_options: list[str or + ~azure.search.documents.indexes.models.KnowledgeSourceIngestionPermissionOption] + :ivar content_extraction_mode: Optional content extraction mode. Default is 'minimal'. Known + values are: "minimal" and "standard". 
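# --- Editorial aside (not part of the patch): ingestion settings shared by the indexed
# knowledge sources above. The vectorizer is constructed without connection details here; a
# real source would also pass azure_open_ai_parameters (AzureOpenAIVectorizerParameters).
import datetime

from azure.search.documents.indexes.models import (
    IndexingSchedule,
    KnowledgeSourceAzureOpenAIVectorizer,
    KnowledgeSourceIngestionParameters,
)

ingestion = KnowledgeSourceIngestionParameters(
    embedding_model=KnowledgeSourceAzureOpenAIVectorizer(),
    disable_image_verbalization=True,
    content_extraction_mode="standard",  # default is "minimal"
    ingestion_schedule=IndexingSchedule(interval=datetime.timedelta(days=1)),
)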
+ :vartype content_extraction_mode: str or + ~azure.search.documents.indexes.models.KnowledgeSourceContentExtractionMode + :ivar ai_services: Optional AI Services configuration for content processing. + :vartype ai_services: ~azure.search.documents.indexes.models.AIServices + """ + + _attribute_map = { + "identity": {"key": "identity", "type": "SearchIndexerDataIdentity"}, + "embedding_model": {"key": "embeddingModel", "type": "KnowledgeSourceVectorizer"}, + "chat_completion_model": {"key": "chatCompletionModel", "type": "KnowledgeBaseModel"}, + "disable_image_verbalization": {"key": "disableImageVerbalization", "type": "bool"}, + "ingestion_schedule": {"key": "ingestionSchedule", "type": "IndexingSchedule"}, + "ingestion_permission_options": {"key": "ingestionPermissionOptions", "type": "[str]"}, + "content_extraction_mode": {"key": "contentExtractionMode", "type": "str"}, + "ai_services": {"key": "aiServices", "type": "AIServices"}, } def __init__( self, *, - modality: Optional[Union[str, "_models.KnowledgeAgentOutputConfigurationModality"]] = None, - answer_instructions: Optional[str] = None, - attempt_fast_path: Optional[bool] = None, - include_activity: Optional[bool] = None, + identity: Optional["_models.SearchIndexerDataIdentity"] = None, + embedding_model: Optional["_models.KnowledgeSourceVectorizer"] = None, + chat_completion_model: Optional["_models.KnowledgeBaseModel"] = None, + disable_image_verbalization: bool = False, + ingestion_schedule: Optional["_models.IndexingSchedule"] = None, + ingestion_permission_options: Optional[ + list[Union[str, "_models.KnowledgeSourceIngestionPermissionOption"]] + ] = None, + content_extraction_mode: Union[str, "_models.KnowledgeSourceContentExtractionMode"] = "minimal", + ai_services: Optional["_models.AIServices"] = None, **kwargs: Any ) -> None: """ - :keyword modality: The output configuration for the agent. Known values are: "answerSynthesis" - and "extractiveData". - :paramtype modality: str or - ~azure.search.documents.indexes.models.KnowledgeAgentOutputConfigurationModality - :keyword answer_instructions: Instructions considered by the knowledge agent when generating - answers. - :paramtype answer_instructions: str - :keyword attempt_fast_path: Indicates whether the agent should attempt to issue the most recent - chat message as a direct query to the knowledge sources, bypassing the model calls. - :paramtype attempt_fast_path: bool - :keyword include_activity: Indicates retrieval results should include activity information. - :paramtype include_activity: bool + :keyword identity: An explicit identity to use for this knowledge source. + :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity + :keyword embedding_model: Optional vectorizer configuration for vectorizing content. + :paramtype embedding_model: ~azure.search.documents.indexes.models.KnowledgeSourceVectorizer + :keyword chat_completion_model: Optional chat completion model for image verbalization or + context extraction. + :paramtype chat_completion_model: ~azure.search.documents.indexes.models.KnowledgeBaseModel + :keyword disable_image_verbalization: Indicates whether image verbalization should be disabled. + Default is false. + :paramtype disable_image_verbalization: bool + :keyword ingestion_schedule: Optional schedule for data ingestion. + :paramtype ingestion_schedule: ~azure.search.documents.indexes.models.IndexingSchedule + :keyword ingestion_permission_options: Optional list of permission types to ingest together + with document content. 
If specified, it will set the indexer permission options for the data + source. + :paramtype ingestion_permission_options: list[str or + ~azure.search.documents.indexes.models.KnowledgeSourceIngestionPermissionOption] + :keyword content_extraction_mode: Optional content extraction mode. Default is 'minimal'. Known + values are: "minimal" and "standard". + :paramtype content_extraction_mode: str or + ~azure.search.documents.indexes.models.KnowledgeSourceContentExtractionMode + :keyword ai_services: Optional AI Services configuration for content processing. + :paramtype ai_services: ~azure.search.documents.indexes.models.AIServices + """ + super().__init__(**kwargs) + self.identity = identity + self.embedding_model = embedding_model + self.chat_completion_model = chat_completion_model + self.disable_image_verbalization = disable_image_verbalization + self.ingestion_schedule = ingestion_schedule + self.ingestion_permission_options = ingestion_permission_options + self.content_extraction_mode = content_extraction_mode + self.ai_services = ai_services + + +class KnowledgeSourceReference(_serialization.Model): + """KnowledgeSourceReference. + + All required parameters must be populated in order to send to server. + + :ivar name: The name of the knowledge source. Required. + :vartype name: str + """ + + _validation = { + "name": {"required": True}, + } + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + } + + def __init__(self, *, name: str, **kwargs: Any) -> None: + """ + :keyword name: The name of the knowledge source. Required. + :paramtype name: str """ super().__init__(**kwargs) - self.modality = modality - self.answer_instructions = answer_instructions - self.attempt_fast_path = attempt_fast_path - self.include_activity = include_activity + self.name = name -class KnowledgeAgentRequestLimits(_serialization.Model): - """Guardrails to limit how much resources are utilized for a single agent retrieval request. +class KnowledgeSourceStatistics(_serialization.Model): + """Statistical information about knowledge source synchronization history. + + All required parameters must be populated in order to send to server. - :ivar max_runtime_in_seconds: The maximum runtime in seconds. - :vartype max_runtime_in_seconds: int - :ivar max_output_size: Limits the maximum size of the content in the output. - :vartype max_output_size: int + :ivar total_synchronization: The total number of synchronizations completed. Required. + :vartype total_synchronization: int + :ivar average_synchronization_duration: The average duration of synchronizations in HH:MM:SS + format. Required. + :vartype average_synchronization_duration: str + :ivar average_items_processed_per_synchronization: The average number of items processed per + synchronization. Required. 
+ :vartype average_items_processed_per_synchronization: int """ + _validation = { + "total_synchronization": {"required": True}, + "average_synchronization_duration": {"required": True}, + "average_items_processed_per_synchronization": {"required": True}, + } + _attribute_map = { - "max_runtime_in_seconds": {"key": "maxRuntimeInSeconds", "type": "int"}, - "max_output_size": {"key": "maxOutputSize", "type": "int"}, + "total_synchronization": {"key": "totalSynchronization", "type": "int"}, + "average_synchronization_duration": {"key": "averageSynchronizationDuration", "type": "str"}, + "average_items_processed_per_synchronization": { + "key": "averageItemsProcessedPerSynchronization", + "type": "int", + }, } def __init__( - self, *, max_runtime_in_seconds: Optional[int] = None, max_output_size: Optional[int] = None, **kwargs: Any + self, + *, + total_synchronization: int, + average_synchronization_duration: str, + average_items_processed_per_synchronization: int, + **kwargs: Any ) -> None: """ - :keyword max_runtime_in_seconds: The maximum runtime in seconds. - :paramtype max_runtime_in_seconds: int - :keyword max_output_size: Limits the maximum size of the content in the output. - :paramtype max_output_size: int + :keyword total_synchronization: The total number of synchronizations completed. Required. + :paramtype total_synchronization: int + :keyword average_synchronization_duration: The average duration of synchronizations in HH:MM:SS + format. Required. + :paramtype average_synchronization_duration: str + :keyword average_items_processed_per_synchronization: The average number of items processed per + synchronization. Required. + :paramtype average_items_processed_per_synchronization: int """ super().__init__(**kwargs) - self.max_runtime_in_seconds = max_runtime_in_seconds - self.max_output_size = max_output_size + self.total_synchronization = total_synchronization + self.average_synchronization_duration = average_synchronization_duration + self.average_items_processed_per_synchronization = average_items_processed_per_synchronization -class KnowledgeSourceReference(_serialization.Model): - """KnowledgeSourceReference. +class KnowledgeSourceStatus(_serialization.Model): + """Represents the status and synchronization history of a knowledge source. All required parameters must be populated in order to send to server. - :ivar name: The name of the knowledge source. Required. - :vartype name: str - :ivar include_references: Indicates whether references should be included for data retrieved - from this source. - :vartype include_references: bool - :ivar include_reference_source_data: Indicates whether references should include the structured - data obtained during retrieval in their payload. - :vartype include_reference_source_data: bool - :ivar always_query_source: Indicates that this knowledge source should bypass source selection - and always be queried at retrieval time. - :vartype always_query_source: bool - :ivar max_sub_queries: The maximum number of queries that can be issued at a time when - retrieving data from this source. - :vartype max_sub_queries: int - :ivar reranker_threshold: The reranker threshold all retrieved documents must meet to be - included in the response. - :vartype reranker_threshold: float + :ivar synchronization_status: The current synchronization status of the knowledge source. + Required. Known values are: "creating", "active", and "deleting". 
+ :vartype synchronization_status: str or + ~azure.search.documents.indexes.models.KnowledgeSourceSynchronizationStatus + :ivar synchronization_interval: The synchronization interval (e.g., '1d' for daily). Null if no + schedule is configured. + :vartype synchronization_interval: str + :ivar current_synchronization_state: Current synchronization state that spans multiple indexer + runs. + :vartype current_synchronization_state: + ~azure.search.documents.indexes.models.SynchronizationState + :ivar last_synchronization_state: Details of the last completed synchronization. Null on first + sync. + :vartype last_synchronization_state: + ~azure.search.documents.indexes.models.CompletedSynchronizationState + :ivar statistics: Statistical information about the knowledge source synchronization history. + Null on first sync. + :vartype statistics: ~azure.search.documents.indexes.models.KnowledgeSourceStatistics """ _validation = { - "name": {"required": True}, + "synchronization_status": {"required": True}, } _attribute_map = { - "name": {"key": "name", "type": "str"}, - "include_references": {"key": "includeReferences", "type": "bool"}, - "include_reference_source_data": {"key": "includeReferenceSourceData", "type": "bool"}, - "always_query_source": {"key": "alwaysQuerySource", "type": "bool"}, - "max_sub_queries": {"key": "maxSubQueries", "type": "int"}, - "reranker_threshold": {"key": "rerankerThreshold", "type": "float"}, + "synchronization_status": {"key": "synchronizationStatus", "type": "str"}, + "synchronization_interval": {"key": "synchronizationInterval", "type": "str"}, + "current_synchronization_state": {"key": "currentSynchronizationState", "type": "SynchronizationState"}, + "last_synchronization_state": {"key": "lastSynchronizationState", "type": "CompletedSynchronizationState"}, + "statistics": {"key": "statistics", "type": "KnowledgeSourceStatistics"}, } def __init__( self, *, - name: str, - include_references: Optional[bool] = None, - include_reference_source_data: Optional[bool] = None, - always_query_source: Optional[bool] = None, - max_sub_queries: Optional[int] = None, - reranker_threshold: Optional[float] = None, + synchronization_status: Union[str, "_models.KnowledgeSourceSynchronizationStatus"], + synchronization_interval: Optional[str] = None, + current_synchronization_state: Optional["_models.SynchronizationState"] = None, + last_synchronization_state: Optional["_models.CompletedSynchronizationState"] = None, + statistics: Optional["_models.KnowledgeSourceStatistics"] = None, **kwargs: Any ) -> None: """ - :keyword name: The name of the knowledge source. Required. - :paramtype name: str - :keyword include_references: Indicates whether references should be included for data retrieved - from this source. - :paramtype include_references: bool - :keyword include_reference_source_data: Indicates whether references should include the - structured data obtained during retrieval in their payload. - :paramtype include_reference_source_data: bool - :keyword always_query_source: Indicates that this knowledge source should bypass source - selection and always be queried at retrieval time. - :paramtype always_query_source: bool - :keyword max_sub_queries: The maximum number of queries that can be issued at a time when - retrieving data from this source. - :paramtype max_sub_queries: int - :keyword reranker_threshold: The reranker threshold all retrieved documents must meet to be - included in the response. 
- :paramtype reranker_threshold: float + :keyword synchronization_status: The current synchronization status of the knowledge source. + Required. Known values are: "creating", "active", and "deleting". + :paramtype synchronization_status: str or + ~azure.search.documents.indexes.models.KnowledgeSourceSynchronizationStatus + :keyword synchronization_interval: The synchronization interval (e.g., '1d' for daily). Null if + no schedule is configured. + :paramtype synchronization_interval: str + :keyword current_synchronization_state: Current synchronization state that spans multiple + indexer runs. + :paramtype current_synchronization_state: + ~azure.search.documents.indexes.models.SynchronizationState + :keyword last_synchronization_state: Details of the last completed synchronization. Null on + first sync. + :paramtype last_synchronization_state: + ~azure.search.documents.indexes.models.CompletedSynchronizationState + :keyword statistics: Statistical information about the knowledge source synchronization + history. Null on first sync. + :paramtype statistics: ~azure.search.documents.indexes.models.KnowledgeSourceStatistics """ super().__init__(**kwargs) - self.name = name - self.include_references = include_references - self.include_reference_source_data = include_reference_source_data - self.always_query_source = always_query_source - self.max_sub_queries = max_sub_queries - self.reranker_threshold = reranker_threshold + self.synchronization_status = synchronization_status + self.synchronization_interval = synchronization_interval + self.current_synchronization_state = current_synchronization_state + self.last_synchronization_state = last_synchronization_state + self.statistics = statistics class LanguageDetectionSkill(SearchIndexerSkill): @@ -6900,30 +7713,30 @@ def __init__(self, **kwargs: Any) -> None: self.indexes_statistics: Optional[list["_models.IndexStatisticsSummary"]] = None -class ListKnowledgeAgentsResult(_serialization.Model): - """ListKnowledgeAgentsResult. +class ListKnowledgeBasesResult(_serialization.Model): + """ListKnowledgeBasesResult. All required parameters must be populated in order to send to server. - :ivar knowledge_agents: Required. - :vartype knowledge_agents: list[~azure.search.documents.indexes.models.KnowledgeAgent] + :ivar knowledge_bases: Required. + :vartype knowledge_bases: list[~azure.search.documents.indexes.models.KnowledgeBase] """ _validation = { - "knowledge_agents": {"required": True}, + "knowledge_bases": {"required": True}, } _attribute_map = { - "knowledge_agents": {"key": "value", "type": "[KnowledgeAgent]"}, + "knowledge_bases": {"key": "value", "type": "[KnowledgeBase]"}, } - def __init__(self, *, knowledge_agents: list["_models.KnowledgeAgent"], **kwargs: Any) -> None: + def __init__(self, *, knowledge_bases: list["_models.KnowledgeBase"], **kwargs: Any) -> None: """ - :keyword knowledge_agents: Required. - :paramtype knowledge_agents: list[~azure.search.documents.indexes.models.KnowledgeAgent] + :keyword knowledge_bases: Required. + :paramtype knowledge_bases: list[~azure.search.documents.indexes.models.KnowledgeBase] """ super().__init__(**kwargs) - self.knowledge_agents = knowledge_agents + self.knowledge_bases = knowledge_bases class ListKnowledgeSourcesResult(_serialization.Model): @@ -8479,6 +9292,135 @@ def __init__( self.mask = mask +class RemoteSharePointKnowledgeSource(KnowledgeSource): + """Configuration for remote SharePoint knowledge source. + + All required parameters must be populated in order to send to server. 
+ + :ivar name: The name of the knowledge source. Required. + :vartype name: str + :ivar description: Optional user-defined description. + :vartype description: str + :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex", + "azureBlob", "web", "remoteSharePoint", "indexedSharePoint", and "indexedOneLake". + :vartype kind: str or ~azure.search.documents.indexes.models.KnowledgeSourceKind + :ivar e_tag: The ETag of the knowledge base. + :vartype e_tag: str + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your knowledge base + definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once + you have encrypted your knowledge base definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your knowledge base definition will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. + :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :ivar remote_share_point_parameters: The parameters for the knowledge source. + :vartype remote_share_point_parameters: + ~azure.search.documents.indexes.models.RemoteSharePointKnowledgeSourceParameters + """ + + _validation = { + "name": {"required": True}, + "kind": {"required": True}, + } + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "description": {"key": "description", "type": "str"}, + "kind": {"key": "kind", "type": "str"}, + "e_tag": {"key": "@odata\\.etag", "type": "str"}, + "encryption_key": {"key": "encryptionKey", "type": "SearchResourceEncryptionKey"}, + "remote_share_point_parameters": { + "key": "remoteSharePointParameters", + "type": "RemoteSharePointKnowledgeSourceParameters", + }, + } + + def __init__( + self, + *, + name: str, + description: Optional[str] = None, + e_tag: Optional[str] = None, + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, + remote_share_point_parameters: Optional["_models.RemoteSharePointKnowledgeSourceParameters"] = None, + **kwargs: Any + ) -> None: + """ + :keyword name: The name of the knowledge source. Required. + :paramtype name: str + :keyword description: Optional user-defined description. + :paramtype description: str + :keyword e_tag: The ETag of the knowledge base. + :paramtype e_tag: str + :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your knowledge base + definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once + you have encrypted your knowledge base definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your knowledge base definition will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. 
+ :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :keyword remote_share_point_parameters: The parameters for the knowledge source. + :paramtype remote_share_point_parameters: + ~azure.search.documents.indexes.models.RemoteSharePointKnowledgeSourceParameters + """ + super().__init__(name=name, description=description, e_tag=e_tag, encryption_key=encryption_key, **kwargs) + self.kind: str = "remoteSharePoint" + self.remote_share_point_parameters = remote_share_point_parameters + + +class RemoteSharePointKnowledgeSourceParameters(_serialization.Model): # pylint: disable=name-too-long + """Parameters for remote SharePoint knowledge source. + + :ivar filter_expression: Keyword Query Language (KQL) expression with queryable SharePoint + properties and attributes to scope the retrieval before the query runs. See documentation: + https://learn.microsoft.com/en-us/sharepoint/dev/general-development/keyword-query-language-kql-syntax-reference. + :vartype filter_expression: str + :ivar resource_metadata: A list of metadata fields to be returned for each item in the + response. Only retrievable metadata properties can be included in this list. By default, no + metadata is returned. Optional. + :vartype resource_metadata: list[str] + :ivar container_type_id: Container ID for SharePoint Embedded connection. When this is null, it + will use SharePoint Online. + :vartype container_type_id: str + """ + + _attribute_map = { + "filter_expression": {"key": "filterExpression", "type": "str"}, + "resource_metadata": {"key": "resourceMetadata", "type": "[str]"}, + "container_type_id": {"key": "containerTypeId", "type": "str"}, + } + + def __init__( + self, + *, + filter_expression: Optional[str] = None, + resource_metadata: Optional[list[str]] = None, + container_type_id: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword filter_expression: Keyword Query Language (KQL) expression with queryable SharePoint + properties and attributes to scope the retrieval before the query runs. See documentation: + https://learn.microsoft.com/en-us/sharepoint/dev/general-development/keyword-query-language-kql-syntax-reference. + :paramtype filter_expression: str + :keyword resource_metadata: A list of metadata fields to be returned for each item in the + response. Only retrievable metadata properties can be included in this list. By default, no + metadata is returned. Optional. + :paramtype resource_metadata: list[str] + :keyword container_type_id: Container ID for SharePoint Embedded connection. When this is null, + it will use SharePoint Online. + :paramtype container_type_id: str + """ + super().__init__(**kwargs) + self.filter_expression = filter_expression + self.resource_metadata = resource_metadata + self.container_type_id = container_type_id + + class RequestOptions(_serialization.Model): """Parameter group. @@ -8597,16 +9539,6 @@ class ScalarQuantizationCompression(VectorSearchCompression): :ivar kind: The name of the kind of compression method being configured for use with vector search. Required. Known values are: "scalarQuantization" and "binaryQuantization". :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchCompressionKind - :ivar rerank_with_original_vectors: If set to true, once the ordered set of results calculated - using compressed vectors are obtained, they will be reranked again by recalculating the - full-precision similarity scores. This will improve recall at the expense of latency. 
- :vartype rerank_with_original_vectors: bool - :ivar default_oversampling: Default oversampling factor. Oversampling will internally request - more documents (specified by this multiplier) in the initial search. This increases the set of - results that will be reranked using recomputed similarity scores from full-precision vectors. - Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when - rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. - :vartype default_oversampling: float :ivar rescoring_options: Contains the options for rescoring. :vartype rescoring_options: ~azure.search.documents.indexes.models.RescoringOptions :ivar truncation_dimension: The number of dimensions to truncate the vectors to. Truncating the @@ -8628,8 +9560,6 @@ class ScalarQuantizationCompression(VectorSearchCompression): _attribute_map = { "compression_name": {"key": "name", "type": "str"}, "kind": {"key": "kind", "type": "str"}, - "rerank_with_original_vectors": {"key": "rerankWithOriginalVectors", "type": "bool"}, - "default_oversampling": {"key": "defaultOversampling", "type": "float"}, "rescoring_options": {"key": "rescoringOptions", "type": "RescoringOptions"}, "truncation_dimension": {"key": "truncationDimension", "type": "int"}, "parameters": {"key": "scalarQuantizationParameters", "type": "ScalarQuantizationParameters"}, @@ -8639,8 +9569,6 @@ def __init__( self, *, compression_name: str, - rerank_with_original_vectors: Optional[bool] = None, - default_oversampling: Optional[float] = None, rescoring_options: Optional["_models.RescoringOptions"] = None, truncation_dimension: Optional[int] = None, parameters: Optional["_models.ScalarQuantizationParameters"] = None, @@ -8649,16 +9577,6 @@ def __init__( """ :keyword compression_name: The name to associate with this particular configuration. Required. :paramtype compression_name: str - :keyword rerank_with_original_vectors: If set to true, once the ordered set of results - calculated using compressed vectors are obtained, they will be reranked again by recalculating - the full-precision similarity scores. This will improve recall at the expense of latency. - :paramtype rerank_with_original_vectors: bool - :keyword default_oversampling: Default oversampling factor. Oversampling will internally - request more documents (specified by this multiplier) in the initial search. This increases the - set of results that will be reranked using recomputed similarity scores from full-precision - vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when - rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. - :paramtype default_oversampling: float :keyword rescoring_options: Contains the options for rescoring. :paramtype rescoring_options: ~azure.search.documents.indexes.models.RescoringOptions :keyword truncation_dimension: The number of dimensions to truncate the vectors to. Truncating @@ -8673,8 +9591,6 @@ def __init__( """ super().__init__( compression_name=compression_name, - rerank_with_original_vectors=rerank_with_original_vectors, - default_oversampling=default_oversampling, rescoring_options=rescoring_options, truncation_dimension=truncation_dimension, **kwargs @@ -8724,7 +9640,7 @@ class ScoringProfile(_serialization.Model): :vartype functions: list[~azure.search.documents.indexes.models.ScoringFunction] :ivar function_aggregation: A value indicating how the results of individual scoring functions should be combined. 
Defaults to "Sum". Ignored if there are no scoring functions. Known values - are: "sum", "average", "minimum", "maximum", and "firstMatching". + are: "sum", "average", "minimum", "maximum", "firstMatching", and "product". :vartype function_aggregation: str or ~azure.search.documents.indexes.models.ScoringFunctionAggregation """ @@ -8759,7 +9675,7 @@ def __init__( :paramtype functions: list[~azure.search.documents.indexes.models.ScoringFunction] :keyword function_aggregation: A value indicating how the results of individual scoring functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions. - Known values are: "sum", "average", "minimum", "maximum", and "firstMatching". + Known values are: "sum", "average", "minimum", "maximum", "firstMatching", and "product". :paramtype function_aggregation: str or ~azure.search.documents.indexes.models.ScoringFunctionAggregation """ @@ -8885,6 +9801,10 @@ class SearchField(_serialization.Model): :ivar permission_filter: A value indicating whether the field should be used as a permission filter. Known values are: "userIds", "groupIds", and "rbacScope". :vartype permission_filter: str or ~azure.search.documents.indexes.models.PermissionFilter + :ivar sensitivity_label: A value indicating whether the field should be used for sensitivity + label filtering. This enables document-level filtering based on Microsoft Purview sensitivity + labels. + :vartype sensitivity_label: bool :ivar analyzer: The name of the analyzer to use for the field. This option can be used only with searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null @@ -8993,6 +9913,7 @@ class SearchField(_serialization.Model): "sortable": {"key": "sortable", "type": "bool"}, "facetable": {"key": "facetable", "type": "bool"}, "permission_filter": {"key": "permissionFilter", "type": "str"}, + "sensitivity_label": {"key": "sensitivityLabel", "type": "bool"}, "analyzer": {"key": "analyzer", "type": "str"}, "search_analyzer": {"key": "searchAnalyzer", "type": "str"}, "index_analyzer": {"key": "indexAnalyzer", "type": "str"}, @@ -9017,6 +9938,7 @@ def __init__( sortable: Optional[bool] = None, facetable: Optional[bool] = None, permission_filter: Optional[Union[str, "_models.PermissionFilter"]] = None, + sensitivity_label: Optional[bool] = None, analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = None, search_analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = None, index_analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = None, @@ -9097,6 +10019,10 @@ def __init__( :keyword permission_filter: A value indicating whether the field should be used as a permission filter. Known values are: "userIds", "groupIds", and "rbacScope". :paramtype permission_filter: str or ~azure.search.documents.indexes.models.PermissionFilter + :keyword sensitivity_label: A value indicating whether the field should be used for sensitivity + label filtering. This enables document-level filtering based on Microsoft Purview sensitivity + labels. + :paramtype sensitivity_label: bool :keyword analyzer: The name of the analyzer to use for the field. This option can be used only with searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. 
Must be null @@ -9199,6 +10125,7 @@ def __init__( self.sortable = sortable self.facetable = facetable self.permission_filter = permission_filter + self.sensitivity_label = sensitivity_label self.analyzer = analyzer self.search_analyzer = search_analyzer self.index_analyzer = index_analyzer @@ -9265,6 +10192,9 @@ class SearchIndex(_serialization.Model): the index. Known values are: "enabled" and "disabled". :vartype permission_filter_option: str or ~azure.search.documents.indexes.models.SearchIndexPermissionFilterOption + :ivar purview_enabled: A value indicating whether the index is leveraging Purview-specific + features. This property defaults to false and cannot be changed after index creation. + :vartype purview_enabled: bool :ivar e_tag: The ETag of the index. :vartype e_tag: str """ @@ -9292,6 +10222,7 @@ class SearchIndex(_serialization.Model): "semantic_search": {"key": "semantic", "type": "SemanticSearch"}, "vector_search": {"key": "vectorSearch", "type": "VectorSearch"}, "permission_filter_option": {"key": "permissionFilterOption", "type": "str"}, + "purview_enabled": {"key": "purviewEnabled", "type": "bool"}, "e_tag": {"key": "@odata\\.etag", "type": "str"}, } @@ -9315,6 +10246,7 @@ def __init__( semantic_search: Optional["_models.SemanticSearch"] = None, vector_search: Optional["_models.VectorSearch"] = None, permission_filter_option: Optional[Union[str, "_models.SearchIndexPermissionFilterOption"]] = None, + purview_enabled: Optional[bool] = None, e_tag: Optional[str] = None, **kwargs: Any ) -> None: @@ -9368,6 +10300,9 @@ def __init__( for the index. Known values are: "enabled" and "disabled". :paramtype permission_filter_option: str or ~azure.search.documents.indexes.models.SearchIndexPermissionFilterOption + :keyword purview_enabled: A value indicating whether the index is leveraging Purview-specific + features. This property defaults to false and cannot be changed after index creation. + :paramtype purview_enabled: bool :keyword e_tag: The ETag of the index. :paramtype e_tag: str """ @@ -9389,6 +10324,7 @@ def __init__( self.semantic_search = semantic_search self.vector_search = vector_search self.permission_filter_option = permission_filter_option + self.purview_enabled = purview_enabled self.e_tag = e_tag @@ -9694,7 +10630,7 @@ class SearchIndexerDataSource(_serialization.Model): :ivar description: The description of the datasource. :vartype description: str :ivar type: The type of the datasource. Required. Known values are: "azuresql", "cosmosdb", - "azureblob", "azuretable", "mysql", "adlsgen2", and "onelake". + "azureblob", "azuretable", "mysql", "adlsgen2", "onelake", and "sharepoint". :vartype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType :ivar sub_type: A specific type of the data source, in case the resource is capable of different modalities. For example, 'MongoDb' for certain 'cosmosDb' accounts. @@ -9775,7 +10711,7 @@ def __init__( :keyword description: The description of the datasource. :paramtype description: str :keyword type: The type of the datasource. Required. Known values are: "azuresql", "cosmosdb", - "azureblob", "azuretable", "mysql", "adlsgen2", and "onelake". + "azureblob", "azuretable", "mysql", "adlsgen2", "onelake", and "sharepoint". :paramtype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType :keyword credentials: Credentials for the datasource. Required. 
:paramtype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials @@ -10601,6 +11537,9 @@ class SearchIndexerStatus(_serialization.Model): :ivar status: Overall indexer status. Required. Known values are: "unknown", "error", and "running". :vartype status: str or ~azure.search.documents.indexes.models.IndexerStatus + :ivar runtime: Snapshot of the indexer’s cumulative runtime consumption for the service over + the current UTC period. Required. + :vartype runtime: ~azure.search.documents.indexes.models.IndexerRuntime :ivar last_result: The result of the most recent or an in-progress indexer execution. :vartype last_result: ~azure.search.documents.indexes.models.IndexerExecutionResult :ivar execution_history: History of the recent indexer executions, sorted in reverse @@ -10616,6 +11555,7 @@ class SearchIndexerStatus(_serialization.Model): _validation = { "name": {"required": True, "readonly": True}, "status": {"required": True, "readonly": True}, + "runtime": {"required": True, "readonly": True}, "last_result": {"readonly": True}, "execution_history": {"required": True, "readonly": True}, "limits": {"required": True, "readonly": True}, @@ -10625,6 +11565,7 @@ class SearchIndexerStatus(_serialization.Model): _attribute_map = { "name": {"key": "name", "type": "str"}, "status": {"key": "status", "type": "str"}, + "runtime": {"key": "runtime", "type": "IndexerRuntime"}, "last_result": {"key": "lastResult", "type": "IndexerExecutionResult"}, "execution_history": {"key": "executionHistory", "type": "[IndexerExecutionResult]"}, "limits": {"key": "limits", "type": "SearchIndexerLimits"}, @@ -10636,6 +11577,7 @@ def __init__(self, **kwargs: Any) -> None: super().__init__(**kwargs) self.name: Optional[str] = None self.status: Optional[Union[str, "_models.IndexerStatus"]] = None + self.runtime: Optional["_models.IndexerRuntime"] = None self.last_result: Optional["_models.IndexerExecutionResult"] = None self.execution_history: Optional[list["_models.IndexerExecutionResult"]] = None self.limits: Optional["_models.SearchIndexerLimits"] = None @@ -10691,6 +11633,32 @@ def __init__(self, **kwargs: Any) -> None: self.documentation_link: Optional[str] = None +class SearchIndexFieldReference(_serialization.Model): + """SearchIndexFieldReference. + + All required parameters must be populated in order to send to server. + + :ivar name: Required. + :vartype name: str + """ + + _validation = { + "name": {"required": True}, + } + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + } + + def __init__(self, *, name: str, **kwargs: Any) -> None: + """ + :keyword name: Required. + :paramtype name: str + """ + super().__init__(**kwargs) + self.name = name + + class SearchIndexKnowledgeSource(KnowledgeSource): """Knowledge Source targeting a search index. @@ -10700,19 +11668,19 @@ class SearchIndexKnowledgeSource(KnowledgeSource): :vartype name: str :ivar description: Optional user-defined description. :vartype description: str - :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex" and - "azureBlob". + :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex", + "azureBlob", "web", "remoteSharePoint", "indexedSharePoint", and "indexedOneLake". :vartype kind: str or ~azure.search.documents.indexes.models.KnowledgeSourceKind - :ivar e_tag: The ETag of the agent. + :ivar e_tag: The ETag of the knowledge base. :vartype e_tag: str :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. 
- This key is used to provide an additional level of encryption-at-rest for your agent definition - when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have - encrypted your agent definition, it will always remain encrypted. The search service will - ignore attempts to set this property to null. You can change this property as needed if you - want to rotate your encryption key; Your agent definition will be unaffected. Encryption with - customer-managed keys is not available for free search services, and is only available for paid - services created on or after January 1, 2019. + This key is used to provide an additional level of encryption-at-rest for your knowledge base + definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once + you have encrypted your knowledge base definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your knowledge base definition will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey :ivar search_index_parameters: The parameters for the knowledge source. Required. :vartype search_index_parameters: @@ -10749,16 +11717,16 @@ def __init__( :paramtype name: str :keyword description: Optional user-defined description. :paramtype description: str - :keyword e_tag: The ETag of the agent. + :keyword e_tag: The ETag of the knowledge base. :paramtype e_tag: str :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your agent definition - when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have - encrypted your agent definition, it will always remain encrypted. The search service will - ignore attempts to set this property to null. You can change this property as needed if you - want to rotate your encryption key; Your agent definition will be unaffected. Encryption with - customer-managed keys is not available for free search services, and is only available for paid - services created on or after January 1, 2019. + This key is used to provide an additional level of encryption-at-rest for your knowledge base + definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once + you have encrypted your knowledge base definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your knowledge base definition will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey :keyword search_index_parameters: The parameters for the knowledge source. Required. :paramtype search_index_parameters: @@ -10776,8 +11744,14 @@ class SearchIndexKnowledgeSourceParameters(_serialization.Model): :ivar search_index_name: The name of the Search index. Required. 
:vartype search_index_name: str - :ivar source_data_select: Used to request additional fields for referenced source data. - :vartype source_data_select: str + :ivar source_data_fields: Used to request additional fields for referenced source data. + :vartype source_data_fields: + list[~azure.search.documents.indexes.models.SearchIndexFieldReference] + :ivar search_fields: Used to restrict which fields to search on the search index. + :vartype search_fields: list[~azure.search.documents.indexes.models.SearchIndexFieldReference] + :ivar semantic_configuration_name: Used to specify a different semantic configuration on the + target search index other than the default one. + :vartype semantic_configuration_name: str """ _validation = { @@ -10786,19 +11760,38 @@ class SearchIndexKnowledgeSourceParameters(_serialization.Model): _attribute_map = { "search_index_name": {"key": "searchIndexName", "type": "str"}, - "source_data_select": {"key": "sourceDataSelect", "type": "str"}, + "source_data_fields": {"key": "sourceDataFields", "type": "[SearchIndexFieldReference]"}, + "search_fields": {"key": "searchFields", "type": "[SearchIndexFieldReference]"}, + "semantic_configuration_name": {"key": "semanticConfigurationName", "type": "str"}, } - def __init__(self, *, search_index_name: str, source_data_select: Optional[str] = None, **kwargs: Any) -> None: + def __init__( + self, + *, + search_index_name: str, + source_data_fields: Optional[list["_models.SearchIndexFieldReference"]] = None, + search_fields: Optional[list["_models.SearchIndexFieldReference"]] = None, + semantic_configuration_name: Optional[str] = None, + **kwargs: Any + ) -> None: """ :keyword search_index_name: The name of the Search index. Required. :paramtype search_index_name: str - :keyword source_data_select: Used to request additional fields for referenced source data. - :paramtype source_data_select: str + :keyword source_data_fields: Used to request additional fields for referenced source data. + :paramtype source_data_fields: + list[~azure.search.documents.indexes.models.SearchIndexFieldReference] + :keyword search_fields: Used to restrict which fields to search on the search index. + :paramtype search_fields: + list[~azure.search.documents.indexes.models.SearchIndexFieldReference] + :keyword semantic_configuration_name: Used to specify a different semantic configuration on the + target search index other than the default one. + :paramtype semantic_configuration_name: str """ super().__init__(**kwargs) self.search_index_name = search_index_name - self.source_data_select = source_data_select + self.source_data_fields = source_data_fields + self.search_fields = search_fields + self.semantic_configuration_name = semantic_configuration_name class SearchResourceEncryptionKey(_serialization.Model): @@ -10994,6 +11987,9 @@ class SearchServiceLimits(_serialization.Model): :vartype max_complex_objects_in_collections_per_document: int :ivar max_storage_per_index_in_bytes: The maximum amount of storage in bytes allowed per index. :vartype max_storage_per_index_in_bytes: int + :ivar max_cumulative_indexer_runtime_seconds: The maximum cumulative runtime in seconds allowed + for all indexers in the service over the current UTC period. 
+ :vartype max_cumulative_indexer_runtime_seconds: int """ _attribute_map = { @@ -11005,6 +12001,7 @@ class SearchServiceLimits(_serialization.Model): "type": "int", }, "max_storage_per_index_in_bytes": {"key": "maxStoragePerIndex", "type": "int"}, + "max_cumulative_indexer_runtime_seconds": {"key": "maxCumulativeIndexerRuntimeSeconds", "type": "int"}, } def __init__( @@ -11015,6 +12012,7 @@ def __init__( max_complex_collection_fields_per_index: Optional[int] = None, max_complex_objects_in_collections_per_document: Optional[int] = None, max_storage_per_index_in_bytes: Optional[int] = None, + max_cumulative_indexer_runtime_seconds: Optional[int] = None, **kwargs: Any ) -> None: """ @@ -11032,6 +12030,9 @@ def __init__( :keyword max_storage_per_index_in_bytes: The maximum amount of storage in bytes allowed per index. :paramtype max_storage_per_index_in_bytes: int + :keyword max_cumulative_indexer_runtime_seconds: The maximum cumulative runtime in seconds + allowed for all indexers in the service over the current UTC period. + :paramtype max_cumulative_indexer_runtime_seconds: int """ super().__init__(**kwargs) self.max_fields_per_index = max_fields_per_index @@ -11039,41 +12040,54 @@ def __init__( self.max_complex_collection_fields_per_index = max_complex_collection_fields_per_index self.max_complex_objects_in_collections_per_document = max_complex_objects_in_collections_per_document self.max_storage_per_index_in_bytes = max_storage_per_index_in_bytes + self.max_cumulative_indexer_runtime_seconds = max_cumulative_indexer_runtime_seconds class SearchServiceStatistics(_serialization.Model): """Response from a get service statistics request. If successful, it includes service level - counters and limits. + counters, indexer runtime information, and limits. All required parameters must be populated in order to send to server. :ivar counters: Service level resource counters. Required. :vartype counters: ~azure.search.documents.indexes.models.SearchServiceCounters + :ivar indexers_runtime: Service level indexers runtime information. Required. + :vartype indexers_runtime: ~azure.search.documents.indexes.models.ServiceIndexersRuntime :ivar limits: Service level general limits. Required. :vartype limits: ~azure.search.documents.indexes.models.SearchServiceLimits """ _validation = { "counters": {"required": True}, + "indexers_runtime": {"required": True}, "limits": {"required": True}, } _attribute_map = { "counters": {"key": "counters", "type": "SearchServiceCounters"}, + "indexers_runtime": {"key": "indexersRuntime", "type": "ServiceIndexersRuntime"}, "limits": {"key": "limits", "type": "SearchServiceLimits"}, } def __init__( - self, *, counters: "_models.SearchServiceCounters", limits: "_models.SearchServiceLimits", **kwargs: Any + self, + *, + counters: "_models.SearchServiceCounters", + indexers_runtime: "_models.ServiceIndexersRuntime", + limits: "_models.SearchServiceLimits", + **kwargs: Any ) -> None: """ :keyword counters: Service level resource counters. Required. :paramtype counters: ~azure.search.documents.indexes.models.SearchServiceCounters + :keyword indexers_runtime: Service level indexers runtime information. Required. + :paramtype indexers_runtime: ~azure.search.documents.indexes.models.ServiceIndexersRuntime :keyword limits: Service level general limits. Required. 
:paramtype limits: ~azure.search.documents.indexes.models.SearchServiceLimits """ super().__init__(**kwargs) self.counters = counters + self.indexers_runtime = indexers_runtime self.limits = limits @@ -11487,6 +12501,68 @@ def __init__( self.model_version = model_version +class ServiceIndexersRuntime(_serialization.Model): + """Represents service level indexers runtime information. + + All required parameters must be populated in order to send to server. + + :ivar used_seconds: Cumulative runtime of all indexers in the service from the beginningTime to + endingTime, in seconds. Required. + :vartype used_seconds: int + :ivar remaining_seconds: Cumulative runtime remaining for all indexers in the service from the + beginningTime to endingTime, in seconds. + :vartype remaining_seconds: int + :ivar beginning_time: Beginning UTC time of the 24-hour period considered for indexer runtime + usage (inclusive). Required. + :vartype beginning_time: ~datetime.datetime + :ivar ending_time: End UTC time of the 24-hour period considered for indexer runtime usage + (inclusive). Required. + :vartype ending_time: ~datetime.datetime + """ + + _validation = { + "used_seconds": {"required": True}, + "beginning_time": {"required": True}, + "ending_time": {"required": True}, + } + + _attribute_map = { + "used_seconds": {"key": "usedSeconds", "type": "int"}, + "remaining_seconds": {"key": "remainingSeconds", "type": "int"}, + "beginning_time": {"key": "beginningTime", "type": "iso-8601"}, + "ending_time": {"key": "endingTime", "type": "iso-8601"}, + } + + def __init__( + self, + *, + used_seconds: int, + beginning_time: datetime.datetime, + ending_time: datetime.datetime, + remaining_seconds: Optional[int] = None, + **kwargs: Any + ) -> None: + """ + :keyword used_seconds: Cumulative runtime of all indexers in the service from the beginningTime + to endingTime, in seconds. Required. + :paramtype used_seconds: int + :keyword remaining_seconds: Cumulative runtime remaining for all indexers in the service from + the beginningTime to endingTime, in seconds. + :paramtype remaining_seconds: int + :keyword beginning_time: Beginning UTC time of the 24-hour period considered for indexer + runtime usage (inclusive). Required. + :paramtype beginning_time: ~datetime.datetime + :keyword ending_time: End UTC time of the 24-hour period considered for indexer runtime usage + (inclusive). Required. + :paramtype ending_time: ~datetime.datetime + """ + super().__init__(**kwargs) + self.used_seconds = used_seconds + self.remaining_seconds = remaining_seconds + self.beginning_time = beginning_time + self.ending_time = ending_time + + class ShaperSkill(SearchIndexerSkill): """A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). @@ -12174,6 +13250,65 @@ def __init__( self.remove_trailing_stop_words = remove_trailing_stop_words +class SynchronizationState(_serialization.Model): + """Represents the current state of an ongoing synchronization that spans multiple indexer runs. + + All required parameters must be populated in order to send to server. + + :ivar start_time: The start time of the current synchronization. Required. + :vartype start_time: ~datetime.datetime + :ivar items_updates_processed: The number of item updates successfully processed in the current + synchronization. Required. + :vartype items_updates_processed: int + :ivar items_updates_failed: The number of item updates that failed in the current + synchronization. Required. 
+ :vartype items_updates_failed: int + :ivar items_skipped: The number of items skipped in the current synchronization. Required. + :vartype items_skipped: int + """ + + _validation = { + "start_time": {"required": True}, + "items_updates_processed": {"required": True}, + "items_updates_failed": {"required": True}, + "items_skipped": {"required": True}, + } + + _attribute_map = { + "start_time": {"key": "startTime", "type": "iso-8601"}, + "items_updates_processed": {"key": "itemsUpdatesProcessed", "type": "int"}, + "items_updates_failed": {"key": "itemsUpdatesFailed", "type": "int"}, + "items_skipped": {"key": "itemsSkipped", "type": "int"}, + } + + def __init__( + self, + *, + start_time: datetime.datetime, + items_updates_processed: int, + items_updates_failed: int, + items_skipped: int, + **kwargs: Any + ) -> None: + """ + :keyword start_time: The start time of the current synchronization. Required. + :paramtype start_time: ~datetime.datetime + :keyword items_updates_processed: The number of item updates successfully processed in the + current synchronization. Required. + :paramtype items_updates_processed: int + :keyword items_updates_failed: The number of item updates that failed in the current + synchronization. Required. + :paramtype items_updates_failed: int + :keyword items_skipped: The number of items skipped in the current synchronization. Required. + :paramtype items_skipped: int + """ + super().__init__(**kwargs) + self.start_time = start_time + self.items_updates_processed = items_updates_processed + self.items_updates_failed = items_updates_failed + self.items_skipped = items_skipped + + class SynonymMap(_serialization.Model): """Represents a synonym map definition. @@ -13040,6 +14175,167 @@ def __init__( self.auth_identity = auth_identity +class WebKnowledgeSource(KnowledgeSource): + """Knowledge Source targeting web results. + + All required parameters must be populated in order to send to server. + + :ivar name: The name of the knowledge source. Required. + :vartype name: str + :ivar description: Optional user-defined description. + :vartype description: str + :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex", + "azureBlob", "web", "remoteSharePoint", "indexedSharePoint", and "indexedOneLake". + :vartype kind: str or ~azure.search.documents.indexes.models.KnowledgeSourceKind + :ivar e_tag: The ETag of the knowledge base. + :vartype e_tag: str + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your knowledge base + definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once + you have encrypted your knowledge base definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your knowledge base definition will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. + :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :ivar web_parameters: The parameters for the web knowledge source. 
+ :vartype web_parameters: ~azure.search.documents.indexes.models.WebKnowledgeSourceParameters + """ + + _validation = { + "name": {"required": True}, + "kind": {"required": True}, + } + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "description": {"key": "description", "type": "str"}, + "kind": {"key": "kind", "type": "str"}, + "e_tag": {"key": "@odata\\.etag", "type": "str"}, + "encryption_key": {"key": "encryptionKey", "type": "SearchResourceEncryptionKey"}, + "web_parameters": {"key": "webParameters", "type": "WebKnowledgeSourceParameters"}, + } + + def __init__( + self, + *, + name: str, + description: Optional[str] = None, + e_tag: Optional[str] = None, + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, + web_parameters: Optional["_models.WebKnowledgeSourceParameters"] = None, + **kwargs: Any + ) -> None: + """ + :keyword name: The name of the knowledge source. Required. + :paramtype name: str + :keyword description: Optional user-defined description. + :paramtype description: str + :keyword e_tag: The ETag of the knowledge base. + :paramtype e_tag: str + :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your knowledge base + definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once + you have encrypted your knowledge base definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your knowledge base definition will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. + :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :keyword web_parameters: The parameters for the web knowledge source. + :paramtype web_parameters: ~azure.search.documents.indexes.models.WebKnowledgeSourceParameters + """ + super().__init__(name=name, description=description, e_tag=e_tag, encryption_key=encryption_key, **kwargs) + self.kind: str = "web" + self.web_parameters = web_parameters + + +class WebKnowledgeSourceDomain(_serialization.Model): + """Configuration for web knowledge source domain. + + All required parameters must be populated in order to send to server. + + :ivar address: The address of the domain. Required. + :vartype address: str + :ivar include_subpages: Whether or not to include subpages from this domain. + :vartype include_subpages: bool + """ + + _validation = { + "address": {"required": True}, + } + + _attribute_map = { + "address": {"key": "address", "type": "str"}, + "include_subpages": {"key": "includeSubpages", "type": "bool"}, + } + + def __init__(self, *, address: str, include_subpages: Optional[bool] = None, **kwargs: Any) -> None: + """ + :keyword address: The address of the domain. Required. + :paramtype address: str + :keyword include_subpages: Whether or not to include subpages from this domain. + :paramtype include_subpages: bool + """ + super().__init__(**kwargs) + self.address = address + self.include_subpages = include_subpages + + +class WebKnowledgeSourceDomains(_serialization.Model): + """Domain allow/block configuration for web knowledge source. + + :ivar allowed_domains: Domains that are allowed for web results. 
+ :vartype allowed_domains: list[~azure.search.documents.indexes.models.WebKnowledgeSourceDomain] + :ivar blocked_domains: Domains that are blocked from web results. + :vartype blocked_domains: list[~azure.search.documents.indexes.models.WebKnowledgeSourceDomain] + """ + + _attribute_map = { + "allowed_domains": {"key": "allowedDomains", "type": "[WebKnowledgeSourceDomain]"}, + "blocked_domains": {"key": "blockedDomains", "type": "[WebKnowledgeSourceDomain]"}, + } + + def __init__( + self, + *, + allowed_domains: Optional[list["_models.WebKnowledgeSourceDomain"]] = None, + blocked_domains: Optional[list["_models.WebKnowledgeSourceDomain"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword allowed_domains: Domains that are allowed for web results. + :paramtype allowed_domains: + list[~azure.search.documents.indexes.models.WebKnowledgeSourceDomain] + :keyword blocked_domains: Domains that are blocked from web results. + :paramtype blocked_domains: + list[~azure.search.documents.indexes.models.WebKnowledgeSourceDomain] + """ + super().__init__(**kwargs) + self.allowed_domains = allowed_domains + self.blocked_domains = blocked_domains + + +class WebKnowledgeSourceParameters(_serialization.Model): + """Parameters for web knowledge source. + + :ivar domains: Domain allow/block configuration for web results. + :vartype domains: ~azure.search.documents.indexes.models.WebKnowledgeSourceDomains + """ + + _attribute_map = { + "domains": {"key": "domains", "type": "WebKnowledgeSourceDomains"}, + } + + def __init__(self, *, domains: Optional["_models.WebKnowledgeSourceDomains"] = None, **kwargs: Any) -> None: + """ + :keyword domains: Domain allow/block configuration for web results. + :paramtype domains: ~azure.search.documents.indexes.models.WebKnowledgeSourceDomains + """ + super().__init__(**kwargs) + self.domains = domains + + class WordDelimiterTokenFilter(TokenFilter): """Splits words into subwords and performs optional transformations on subword groups. This token filter is implemented using Apache Lucene. diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_search_service_client_enums.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_search_service_client_enums.py index 04ac6e62e675..00bdf6b7b406 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_search_service_client_enums.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_search_service_client_enums.py @@ -1,7 +1,7 @@ # pylint: disable=line-too-long,useless-suppression,too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- @@ -35,6 +35,9 @@ class AzureOpenAIModelName(str, Enum, metaclass=CaseInsensitiveEnumMeta): GPT41 = "gpt-4.1" GPT41_MINI = "gpt-4.1-mini" GPT41_NANO = "gpt-4.1-nano" + GPT5 = "gpt-5" + GPT5_MINI = "gpt-5-mini" + GPT5_NANO = "gpt-5-nano" class BlobIndexerDataToExtract(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -148,6 +151,22 @@ class CjkBigramTokenFilterScripts(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Ignore Hangul script when forming bigrams of CJK terms.""" +class ContentUnderstandingSkillChunkingUnit(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Controls the cardinality of the chunk unit. Default is 'characters'.""" + + CHARACTERS = "characters" + """Specifies chunk by characters.""" + + +class ContentUnderstandingSkillExtractionOptions(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Controls the cardinality of the content extracted from the document by the skill.""" + + IMAGES = "images" + """Specify that image content should be extracted from the document.""" + LOCATION_METADATA = "locationMetadata" + """Specify that location metadata should be extracted from the document.""" + + class CustomEntityLookupSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The language codes supported for input text by CustomEntityLookupSkill.""" @@ -423,6 +442,17 @@ class ImageDetail(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Details recognized as landmarks.""" +class IndexedSharePointContainerName(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specifies which SharePoint libraries to access.""" + + DEFAULT_SITE_LIBRARY = "defaultSiteLibrary" + """Index content from the site's default document library.""" + ALL_SITE_LIBRARIES = "allSiteLibraries" + """Index content from every document library in the site.""" + USE_QUERY = "useQuery" + """Index only content that matches the query specified in the knowledge source.""" + + class IndexerExecutionEnvironment(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Specifies the environment in which the indexer should execute.""" @@ -547,29 +577,79 @@ class KeyPhraseExtractionSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumM """Swedish""" -class KnowledgeAgentModelKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): +class KnowledgeBaseModelKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The AI model to be used for query planning.""" AZURE_OPEN_AI = "azureOpenAI" """Use Azure Open AI models for query planning.""" -class KnowledgeAgentOutputConfigurationModality(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The output configuration for the agent.""" +class KnowledgeRetrievalOutputMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The output configuration for this retrieval.""" - ANSWER_SYNTHESIS = "answerSynthesis" - """Synthesize an answer for the response payload.""" EXTRACTIVE_DATA = "extractiveData" """Return data from the knowledge sources directly without generative alteration.""" + ANSWER_SYNTHESIS = "answerSynthesis" + """Synthesize an answer for the response payload.""" + + +class KnowledgeRetrievalReasoningEffortKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The amount of effort to use during retrieval.""" + + MINIMAL = "minimal" + """Does not perform any source selections, query planning, or iterative search.""" + LOW = "low" + """Use low reasoning during retrieval.""" + MEDIUM = "medium" + """Use a moderate amount of reasoning during retrieval.""" + + +class 
KnowledgeSourceContentExtractionMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Optional content extraction mode. Default is 'minimal'.""" + + MINIMAL = "minimal" + """Extracts only essential metadata while deferring most content processing.""" + STANDARD = "standard" + """Performs the full default content extraction pipeline.""" + + +class KnowledgeSourceIngestionPermissionOption(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """KnowledgeSourceIngestionPermissionOption.""" + + USER_IDS = "userIds" + """Ingest explicit user identifiers alongside document content.""" + GROUP_IDS = "groupIds" + """Ingest group identifiers alongside document content.""" + RBAC_SCOPE = "rbacScope" + """Ingest RBAC scope information alongside document content.""" class KnowledgeSourceKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The kind of the knowledge source.""" SEARCH_INDEX = "searchIndex" - """A knowledge source that reads data from a Search Index.""" + """A knowledge source that retrieves data from a Search Index.""" AZURE_BLOB = "azureBlob" - """A knowledge source that read and ingest data from Azure Blob Storage to a Search Index.""" + """A knowledge source that retrieves and ingests data from Azure Blob Storage to a Search Index.""" + WEB = "web" + """A knowledge source that retrieves data from the web.""" + REMOTE_SHARE_POINT = "remoteSharePoint" + """A knowledge source that retrieves data from a remote SharePoint endpoint.""" + INDEXED_SHARE_POINT = "indexedSharePoint" + """A knowledge source that retrieves and ingests data from SharePoint to a Search Index.""" + INDEXED_ONE_LAKE = "indexedOneLake" + """A knowledge source that retrieves and ingests data from OneLake to a Search Index.""" + + +class KnowledgeSourceSynchronizationStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The current synchronization status of the knowledge source.""" + + CREATING = "creating" + """The knowledge source is being provisioned.""" + ACTIVE = "active" + """The knowledge source is active and synchronization runs are occurring.""" + DELETING = "deleting" + """The knowledge source is being deleted and synchronization is paused.""" class LexicalAnalyzerName(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -1512,6 +1592,8 @@ class ScoringFunctionAggregation(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Boost scores by the maximum of all scoring function results.""" FIRST_MATCHING = "firstMatching" """Boost scores using the first applicable scoring function in the scoring profile.""" + PRODUCT = "product" + """Boost scores by the product of all scoring function results.""" class ScoringFunctionInterpolation(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -1586,6 +1668,8 @@ class SearchIndexerDataSourceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Indicates an ADLS Gen2 datasource.""" ONE_LAKE = "onelake" """Indicates a Microsoft Fabric OneLake datasource.""" + SHARE_POINT = "sharepoint" + """Indicates a SharePoint datasource.""" class SearchIndexPermissionFilterOption(str, Enum, metaclass=CaseInsensitiveEnumMeta): diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/__init__.py index ff48959532fe..83f75c712c54 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/__init__.py @@ -1,7 
+1,7 @@ # pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- # pylint: disable=wrong-import-position @@ -11,7 +11,7 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._knowledge_agents_operations import KnowledgeAgentsOperations # type: ignore +from ._knowledge_bases_operations import KnowledgeBasesOperations # type: ignore from ._knowledge_sources_operations import KnowledgeSourcesOperations # type: ignore from ._data_sources_operations import DataSourcesOperations # type: ignore from ._indexers_operations import IndexersOperations # type: ignore @@ -26,7 +26,7 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "KnowledgeAgentsOperations", + "KnowledgeBasesOperations", "KnowledgeSourcesOperations", "DataSourcesOperations", "IndexersOperations", diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_aliases_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_aliases_operations.py index a174ad9aa35b..d11a50479c03 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_aliases_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_aliases_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -27,9 +27,9 @@ from .._configuration import SearchServiceClientConfiguration from .._utils.serialization import Deserializer, Serializer -List = list T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -39,7 +39,7 @@ def build_create_request(*, x_ms_client_request_id: Optional[str] = None, **kwar _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -63,7 +63,7 @@ def build_list_request(*, x_ms_client_request_id: Optional[str] = None, **kwargs _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -92,7 +92,7 @@ def build_create_or_update_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -133,7 +133,7 @@ def build_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -163,7 +163,7 @@ def build_get_request(alias_name: str, *, x_ms_client_request_id: Optional[str] _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -326,7 +326,10 @@ def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchAlias", pipeline_response.http_response) @@ -424,7 +427,10 @@ def get_next(next_link=None): if response.status_code not in 
[200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -602,7 +608,10 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchAlias", pipeline_response.http_response) @@ -682,7 +691,10 @@ def delete( # pylint: disable=inconsistent-return-statements if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -744,7 +756,10 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchAlias", pipeline_response.http_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py index ccfcee79d175..05684a56756c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -25,9 +25,9 @@ from .._configuration import SearchServiceClientConfiguration from .._utils.serialization import Deserializer, Serializer -List = list T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -46,7 +46,7 @@ def build_create_or_update_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -91,7 +91,7 @@ def build_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -123,7 +123,7 @@ def build_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -151,7 +151,7 @@ def build_list_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -174,7 +174,7 @@ def build_create_request(*, x_ms_client_request_id: Optional[str] = None, **kwar _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -397,7 +397,10 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexerDataSource", pipeline_response.http_response) @@ -476,7 +479,10 @@ def delete( # pylint: disable=inconsistent-return-statements if response.status_code not in [204, 404]: map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -538,7 +544,10 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexerDataSource", pipeline_response.http_response) @@ -606,7 +615,10 @@ def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("ListDataSourcesResult", pipeline_response.http_response) @@ -738,7 +750,10 @@ def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexerDataSource", pipeline_response.http_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py index 1cf75a9c4b9b..d1244a9da49a 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py @@ -1,7 +1,7 @@ # pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -26,9 +26,9 @@ from .._configuration import SearchServiceClientConfiguration from .._utils.serialization import Deserializer, Serializer -List = list T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -40,7 +40,7 @@ def build_reset_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -68,7 +68,7 @@ def build_reset_docs_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -101,7 +101,7 @@ def build_resync_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -130,7 +130,7 @@ def build_run_request(indexer_name: str, *, x_ms_client_request_id: Optional[str _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -166,7 +166,7 @@ def build_create_or_update_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -215,7 +215,7 @@ def build_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -245,7 +245,7 @@ def build_get_request(indexer_name: str, *, x_ms_client_request_id: Optional[str _headers = case_insensitive_dict(kwargs.pop("headers", 
{}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -273,7 +273,7 @@ def build_list_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -296,7 +296,7 @@ def build_create_request(*, x_ms_client_request_id: Optional[str] = None, **kwar _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -322,7 +322,7 @@ def build_get_status_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -419,7 +419,10 @@ def reset( # pylint: disable=inconsistent-return-statements if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -574,7 +577,10 @@ def reset_docs( # pylint: disable=inconsistent-return-statements if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -711,7 +717,10 @@ def resync( # pylint: disable=inconsistent-return-statements if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -773,7 +782,10 @@ def run( # pylint: disable=inconsistent-return-statements if response.status_code not in [202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + 
) raise HttpResponseError(response=response, model=error) if cls: @@ -976,7 +988,10 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexer", pipeline_response.http_response) @@ -1055,7 +1070,10 @@ def delete( # pylint: disable=inconsistent-return-statements if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -1117,7 +1135,10 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexer", pipeline_response.http_response) @@ -1185,7 +1206,10 @@ def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("ListIndexersResult", pipeline_response.http_response) @@ -1317,7 +1341,10 @@ def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexer", pipeline_response.http_response) @@ -1383,7 +1410,10 @@ def get_status( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexerStatus", pipeline_response.http_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py index 31eb1e38c0b5..da86d3b8d39c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py @@ -1,7 +1,7 @@ # pylint: disable=too-many-lines # 
coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -28,9 +28,9 @@ from .._configuration import SearchServiceClientConfiguration from .._utils.serialization import Deserializer, Serializer -List = list T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -40,7 +40,7 @@ def build_create_request(*, x_ms_client_request_id: Optional[str] = None, **kwar _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -66,7 +66,7 @@ def build_list_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -98,7 +98,7 @@ def build_create_or_update_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -141,7 +141,7 @@ def build_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -171,7 +171,7 @@ def build_get_request(index_name: str, *, x_ms_client_request_id: Optional[str] _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -199,7 +199,7 @@ def build_get_statistics_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = 
kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -227,7 +227,7 @@ def build_analyze_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -393,7 +393,10 @@ def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndex", pipeline_response.http_response) @@ -496,7 +499,10 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -696,7 +702,10 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndex", pipeline_response.http_response) @@ -777,7 +786,10 @@ def delete( # pylint: disable=inconsistent-return-statements if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -839,7 +851,10 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndex", pipeline_response.http_response) @@ -905,7 +920,10 @@ def get_statistics( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = 
self._deserialize("GetIndexStatisticsResult", pipeline_response.http_response) @@ -1047,7 +1065,10 @@ def analyze( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("AnalyzeResult", pipeline_response.http_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_knowledge_agents_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_knowledge_bases_operations.py similarity index 79% rename from sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_knowledge_agents_operations.py rename to sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_knowledge_bases_operations.py index 9bdca072bfa6..6ec7039362ad 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_knowledge_agents_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_knowledge_bases_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -27,16 +27,16 @@ from .._configuration import SearchServiceClientConfiguration from .._utils.serialization import Deserializer, Serializer -List = list T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False def build_create_or_update_request( - agent_name: str, + knowledge_base_name: str, *, prefer: Union[str, _models.Enum0], x_ms_client_request_id: Optional[str] = None, @@ -47,14 +47,14 @@ def build_create_or_update_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = kwargs.pop("template_url", "/agents('{agentName}')") + _url = kwargs.pop("template_url", "/knowledgebases('{knowledgeBaseName}')") path_format_arguments = { - "agentName": _SERIALIZER.url("agent_name", agent_name, "str"), + "knowledgeBaseName": _SERIALIZER.url("knowledge_base_name", knowledge_base_name, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -78,7 +78,7 @@ def build_create_or_update_request( def build_delete_request( - agent_name: str, + knowledge_base_name: str, *, x_ms_client_request_id: Optional[str] = None, if_match: Optional[str] = None, @@ -88,13 +88,13 @@ def build_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = kwargs.pop("template_url", "/agents('{agentName}')") + _url = kwargs.pop("template_url", "/knowledgebases('{knowledgeBaseName}')") path_format_arguments = { - "agentName": _SERIALIZER.url("agent_name", agent_name, "str"), + "knowledgeBaseName": _SERIALIZER.url("knowledge_base_name", knowledge_base_name, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -114,17 +114,19 @@ def build_delete_request( return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) -def build_get_request(agent_name: str, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: +def build_get_request( + knowledge_base_name: str, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any +) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = kwargs.pop("template_url", "/agents('{agentName}')") + _url = kwargs.pop("template_url", "/knowledgebases('{knowledgeBaseName}')") path_format_arguments = { 
- "agentName": _SERIALIZER.url("agent_name", agent_name, "str"), + "knowledgeBaseName": _SERIALIZER.url("knowledge_base_name", knowledge_base_name, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -144,11 +146,11 @@ def build_list_request(*, x_ms_client_request_id: Optional[str] = None, **kwargs _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = kwargs.pop("template_url", "/agents") + _url = kwargs.pop("template_url", "/knowledgebases") # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") @@ -165,12 +167,12 @@ def build_create_request(*, x_ms_client_request_id: Optional[str] = None, **kwar _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = kwargs.pop("template_url", "/agents") + _url = kwargs.pop("template_url", "/knowledgebases") # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") @@ -185,14 +187,14 @@ def build_create_request(*, x_ms_client_request_id: Optional[str] = None, **kwar return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -class KnowledgeAgentsOperations: +class KnowledgeBasesOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.search.documents.indexes.SearchServiceClient`'s - :attr:`knowledge_agents` attribute. + :attr:`knowledge_bases` attribute. """ models = _models @@ -207,25 +209,25 @@ def __init__(self, *args, **kwargs) -> None: @overload def create_or_update( self, - agent_name: str, + knowledge_base_name: str, prefer: Union[str, _models.Enum0], - knowledge_agent: _models.KnowledgeAgent, + knowledge_base: _models.KnowledgeBase, if_match: Optional[str] = None, if_none_match: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.KnowledgeAgent: - """Creates a new agent or updates an agent if it already exists. + ) -> _models.KnowledgeBase: + """Creates a new knowledge base or updates an knowledge base if it already exists. - :param agent_name: The name of the agent to create or update. Required. - :type agent_name: str + :param knowledge_base_name: The name of the knowledge base to create or update. Required. + :type knowledge_base_name: str :param prefer: For HTTP PUT requests, instructs the service to return the created/updated resource on success. "return=representation" Required. :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param knowledge_agent: The definition of the agent to create or update. Required. 
- :type knowledge_agent: ~azure.search.documents.indexes.models.KnowledgeAgent + :param knowledge_base: The definition of the knowledge base to create or update. Required. + :type knowledge_base: ~azure.search.documents.indexes.models.KnowledgeBase :param if_match: Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. Default value is None. :type if_match: str @@ -237,33 +239,33 @@ def create_or_update( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: KnowledgeAgent or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :return: KnowledgeBase or the result of cls(response) + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: """ @overload def create_or_update( self, - agent_name: str, + knowledge_base_name: str, prefer: Union[str, _models.Enum0], - knowledge_agent: IO[bytes], + knowledge_base: IO[bytes], if_match: Optional[str] = None, if_none_match: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.KnowledgeAgent: - """Creates a new agent or updates an agent if it already exists. + ) -> _models.KnowledgeBase: + """Creates a new knowledge base or updates a knowledge base if it already exists. - :param agent_name: The name of the agent to create or update. Required. - :type agent_name: str + :param knowledge_base_name: The name of the knowledge base to create or update. Required. + :type knowledge_base_name: str :param prefer: For HTTP PUT requests, instructs the service to return the created/updated resource on success. "return=representation" Required. :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param knowledge_agent: The definition of the agent to create or update. Required. - :type knowledge_agent: IO[bytes] + :param knowledge_base: The definition of the knowledge base to create or update. Required. + :type knowledge_base: IO[bytes] :param if_match: Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. Default value is None. :type if_match: str @@ -275,32 +277,32 @@ def create_or_update( :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: KnowledgeAgent or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :return: KnowledgeBase or the result of cls(response) + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace def create_or_update( self, - agent_name: str, + knowledge_base_name: str, prefer: Union[str, _models.Enum0], - knowledge_agent: Union[_models.KnowledgeAgent, IO[bytes]], + knowledge_base: Union[_models.KnowledgeBase, IO[bytes]], if_match: Optional[str] = None, if_none_match: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.KnowledgeAgent: - """Creates a new agent or updates an agent if it already exists. + ) -> _models.KnowledgeBase: + """Creates a new knowledge base or updates a knowledge base if it already exists. - :param agent_name: The name of the agent to create or update. Required.
- :type agent_name: str + :param knowledge_base_name: The name of the knowledge base to create or update. Required. + :type knowledge_base_name: str :param prefer: For HTTP PUT requests, instructs the service to return the created/updated resource on success. "return=representation" Required. :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param knowledge_agent: The definition of the agent to create or update. Is either a - KnowledgeAgent type or a IO[bytes] type. Required. - :type knowledge_agent: ~azure.search.documents.indexes.models.KnowledgeAgent or IO[bytes] + :param knowledge_base: The definition of the knowledge base to create or update. Is either a + KnowledgeBase type or a IO[bytes] type. Required. + :type knowledge_base: ~azure.search.documents.indexes.models.KnowledgeBase or IO[bytes] :param if_match: Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. Default value is None. :type if_match: str @@ -309,8 +311,8 @@ def create_or_update( :type if_none_match: str :param request_options: Parameter group. Default value is None. :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: KnowledgeAgent or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :return: KnowledgeBase or the result of cls(response) + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -326,7 +328,7 @@ def create_or_update( api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.KnowledgeAgent] = kwargs.pop("cls", None) + cls: ClsType[_models.KnowledgeBase] = kwargs.pop("cls", None) _x_ms_client_request_id = None if request_options is not None: @@ -334,13 +336,13 @@ def create_or_update( content_type = content_type or "application/json" _json = None _content = None - if isinstance(knowledge_agent, (IOBase, bytes)): - _content = knowledge_agent + if isinstance(knowledge_base, (IOBase, bytes)): + _content = knowledge_base else: - _json = self._serialize.body(knowledge_agent, "KnowledgeAgent") + _json = self._serialize.body(knowledge_base, "KnowledgeBase") _request = build_create_or_update_request( - agent_name=agent_name, + knowledge_base_name=knowledge_base_name, prefer=prefer, x_ms_client_request_id=_x_ms_client_request_id, if_match=if_match, @@ -366,10 +368,13 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize("KnowledgeAgent", pipeline_response.http_response) + deserialized = self._deserialize("KnowledgeBase", pipeline_response.http_response) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -379,16 +384,16 @@ def create_or_update( @distributed_trace def delete( # pylint: disable=inconsistent-return-statements self, - agent_name: str, + knowledge_base_name: str, if_match: Optional[str] = None, if_none_match: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any ) -> 
None: - """Deletes an existing agent. + """Deletes an existing knowledge base. - :param agent_name: The name of the agent to delete. Required. - :type agent_name: str + :param knowledge_base_name: The name of the knowledge base to delete. Required. + :type knowledge_base_name: str :param if_match: Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. Default value is None. :type if_match: str @@ -420,7 +425,7 @@ def delete( # pylint: disable=inconsistent-return-statements _x_ms_client_request_id = request_options.x_ms_client_request_id _request = build_delete_request( - agent_name=agent_name, + knowledge_base_name=knowledge_base_name, x_ms_client_request_id=_x_ms_client_request_id, if_match=if_match, if_none_match=if_none_match, @@ -442,7 +447,10 @@ def delete( # pylint: disable=inconsistent-return-statements if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -450,16 +458,16 @@ def delete( # pylint: disable=inconsistent-return-statements @distributed_trace def get( - self, agent_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.KnowledgeAgent: - """Retrieves an agent definition. + self, knowledge_base_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any + ) -> _models.KnowledgeBase: + """Retrieves an knowledge base definition. - :param agent_name: The name of the agent to retrieve. Required. - :type agent_name: str + :param knowledge_base_name: The name of the knowledge base to retrieve. Required. + :type knowledge_base_name: str :param request_options: Parameter group. Default value is None. 
:type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: KnowledgeAgent or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :return: KnowledgeBase or the result of cls(response) + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -474,14 +482,14 @@ def get( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.KnowledgeAgent] = kwargs.pop("cls", None) + cls: ClsType[_models.KnowledgeBase] = kwargs.pop("cls", None) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id _request = build_get_request( - agent_name=agent_name, + knowledge_base_name=knowledge_base_name, x_ms_client_request_id=_x_ms_client_request_id, api_version=api_version, headers=_headers, @@ -501,10 +509,13 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize("KnowledgeAgent", pipeline_response.http_response) + deserialized = self._deserialize("KnowledgeBase", pipeline_response.http_response) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -514,20 +525,20 @@ def get( @distributed_trace def list( self, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> ItemPaged["_models.KnowledgeAgent"]: - """Lists all agents available for a search service. + ) -> ItemPaged["_models.KnowledgeBase"]: + """Lists all knowledge bases available for a search service. :param request_options: Parameter group. Default value is None. 
:type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: An iterator like instance of either KnowledgeAgent or the result of cls(response) - :rtype: ~azure.core.paging.ItemPaged[~azure.search.documents.indexes.models.KnowledgeAgent] + :return: An iterator like instance of either KnowledgeBase or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~azure.search.documents.indexes.models.KnowledgeBase] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.ListKnowledgeAgentsResult] = kwargs.pop("cls", None) + cls: ClsType[_models.ListKnowledgeBasesResult] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -579,8 +590,8 @@ def prepare_request(next_link=None): return _request def extract_data(pipeline_response): - deserialized = self._deserialize("ListKnowledgeAgentsResult", pipeline_response) - list_of_elem = deserialized.knowledge_agents + deserialized = self._deserialize("ListKnowledgeBasesResult", pipeline_response) + list_of_elem = deserialized.knowledge_bases if cls: list_of_elem = cls(list_of_elem) # type: ignore return None, iter(list_of_elem) @@ -596,7 +607,10 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -606,65 +620,65 @@ def get_next(next_link=None): @overload def create( self, - knowledge_agent: _models.KnowledgeAgent, + knowledge_base: _models.KnowledgeBase, request_options: Optional[_models.RequestOptions] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.KnowledgeAgent: - """Creates a new agent. + ) -> _models.KnowledgeBase: + """Creates a new knowledge base. - :param knowledge_agent: The definition of the agent to create. Required. - :type knowledge_agent: ~azure.search.documents.indexes.models.KnowledgeAgent + :param knowledge_base: The definition of the knowledge base to create. Required. + :type knowledge_base: ~azure.search.documents.indexes.models.KnowledgeBase :param request_options: Parameter group. Default value is None. :type request_options: ~azure.search.documents.indexes.models.RequestOptions :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: KnowledgeAgent or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :return: KnowledgeBase or the result of cls(response) + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: """ @overload def create( self, - knowledge_agent: IO[bytes], + knowledge_base: IO[bytes], request_options: Optional[_models.RequestOptions] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.KnowledgeAgent: - """Creates a new agent. + ) -> _models.KnowledgeBase: + """Creates a new knowledge base. - :param knowledge_agent: The definition of the agent to create. Required. 
- :type knowledge_agent: IO[bytes] + :param knowledge_base: The definition of the knowledge base to create. Required. + :type knowledge_base: IO[bytes] :param request_options: Parameter group. Default value is None. :type request_options: ~azure.search.documents.indexes.models.RequestOptions :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: KnowledgeAgent or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :return: KnowledgeBase or the result of cls(response) + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace def create( self, - knowledge_agent: Union[_models.KnowledgeAgent, IO[bytes]], + knowledge_base: Union[_models.KnowledgeBase, IO[bytes]], request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.KnowledgeAgent: - """Creates a new agent. + ) -> _models.KnowledgeBase: + """Creates a new knowledge base. - :param knowledge_agent: The definition of the agent to create. Is either a KnowledgeAgent type - or a IO[bytes] type. Required. - :type knowledge_agent: ~azure.search.documents.indexes.models.KnowledgeAgent or IO[bytes] + :param knowledge_base: The definition of the knowledge base to create. Is either a + KnowledgeBase type or a IO[bytes] type. Required. + :type knowledge_base: ~azure.search.documents.indexes.models.KnowledgeBase or IO[bytes] :param request_options: Parameter group. Default value is None. :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: KnowledgeAgent or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :return: KnowledgeBase or the result of cls(response) + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -680,7 +694,7 @@ def create( api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.KnowledgeAgent] = kwargs.pop("cls", None) + cls: ClsType[_models.KnowledgeBase] = kwargs.pop("cls", None) _x_ms_client_request_id = None if request_options is not None: @@ -688,10 +702,10 @@ def create( content_type = content_type or "application/json" _json = None _content = None - if isinstance(knowledge_agent, (IOBase, bytes)): - _content = knowledge_agent + if isinstance(knowledge_base, (IOBase, bytes)): + _content = knowledge_base else: - _json = self._serialize.body(knowledge_agent, "KnowledgeAgent") + _json = self._serialize.body(knowledge_base, "KnowledgeBase") _request = build_create_request( x_ms_client_request_id=_x_ms_client_request_id, @@ -716,10 +730,13 @@ def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize("KnowledgeAgent", pipeline_response.http_response) + deserialized = self._deserialize("KnowledgeBase", pipeline_response.http_response) if cls: return cls(pipeline_response, deserialized, {}) # type: 
ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_knowledge_sources_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_knowledge_sources_operations.py index 606c4d50e437..df41b5184714 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_knowledge_sources_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_knowledge_sources_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -27,9 +27,9 @@ from .._configuration import SearchServiceClientConfiguration from .._utils.serialization import Deserializer, Serializer -List = list T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -47,7 +47,7 @@ def build_create_or_update_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -88,7 +88,7 @@ def build_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -118,7 +118,7 @@ def build_get_request(source_name: str, *, x_ms_client_request_id: Optional[str] _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -144,7 +144,7 @@ def build_list_request(*, x_ms_client_request_id: Optional[str] = None, **kwargs _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -165,7 +165,7 @@ def build_create_request(*, x_ms_client_request_id: Optional[str] = None, **kwar _headers = case_insensitive_dict(kwargs.pop("headers", {}) or 
{}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -185,6 +185,34 @@ def build_create_request(*, x_ms_client_request_id: Optional[str] = None, **kwar return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) +def build_get_status_request( + source_name: str, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop("template_url", "/knowledgesources('{sourceName}')/status") + path_format_arguments = { + "sourceName": _SERIALIZER.url("source_name", source_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if x_ms_client_request_id is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + class KnowledgeSourcesOperations: """ .. warning:: @@ -366,7 +394,10 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("KnowledgeSource", pipeline_response.http_response) @@ -442,7 +473,10 @@ def delete( # pylint: disable=inconsistent-return-statements if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -501,7 +535,10 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("KnowledgeSource", pipeline_response.http_response) @@ -596,7 +633,10 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + 
_models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -716,7 +756,10 @@ def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("KnowledgeSource", pipeline_response.http_response) @@ -725,3 +768,69 @@ def create( return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + @distributed_trace + def get_status( + self, source_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any + ) -> _models.KnowledgeSourceStatus: + """Returns the current status and synchronization history of a knowledge source. + + :param source_name: The name of the knowledge source for which to retrieve status. Required. + :type source_name: str + :param request_options: Parameter group. Default value is None. + :type request_options: ~azure.search.documents.indexes.models.RequestOptions + :return: KnowledgeSourceStatus or the result of cls(response) + :rtype: ~azure.search.documents.indexes.models.KnowledgeSourceStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + cls: ClsType[_models.KnowledgeSourceStatus] = kwargs.pop("cls", None) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + + _request = build_get_status_request( + source_name=source_name, + x_ms_client_request_id=_x_ms_client_request_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize("KnowledgeSourceStatus", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_service_client_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_service_client_operations.py index 55b5dc6364ec..e5df723a945b 100644 --- 
a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_service_client_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_service_client_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -29,6 +29,7 @@ T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -38,7 +39,7 @@ def build_get_service_statistics_request(*, x_ms_client_request_id: Optional[str _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -61,7 +62,7 @@ def build_get_index_stats_summary_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -132,7 +133,10 @@ def get_service_statistics( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchServiceStatistics", pipeline_response.http_response) @@ -229,7 +233,10 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) return pipeline_response diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py index 460e2c091eee..f70f1dcda9a8 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated 
by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -25,9 +25,9 @@ from .._configuration import SearchServiceClientConfiguration from .._utils.serialization import Deserializer, Serializer -List = list T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -47,7 +47,7 @@ def build_create_or_update_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -96,7 +96,7 @@ def build_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -128,7 +128,7 @@ def build_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -156,7 +156,7 @@ def build_list_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -179,7 +179,7 @@ def build_create_request(*, x_ms_client_request_id: Optional[str] = None, **kwar _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -205,7 +205,7 @@ def build_reset_skills_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) 
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -448,7 +448,10 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexerSkillset", pipeline_response.http_response) @@ -527,7 +530,10 @@ def delete( # pylint: disable=inconsistent-return-statements if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -589,7 +595,10 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexerSkillset", pipeline_response.http_response) @@ -657,7 +666,10 @@ def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("ListSkillsetsResult", pipeline_response.http_response) @@ -791,7 +803,10 @@ def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SearchIndexerSkillset", pipeline_response.http_response) @@ -933,7 +948,10 @@ def reset_skills( # pylint: disable=inconsistent-return-statements if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py index 556ba4b9ac58..d573a54ae765 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py +++ 
b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -25,9 +25,9 @@ from .._configuration import SearchServiceClientConfiguration from .._utils.serialization import Deserializer, Serializer -List = list T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -45,7 +45,7 @@ def build_create_or_update_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -86,7 +86,7 @@ def build_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -118,7 +118,7 @@ def build_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -146,7 +146,7 @@ def build_list_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -169,7 +169,7 @@ def build_create_request(*, x_ms_client_request_id: Optional[str] = None, **kwar _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -379,7 +379,10 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SynonymMap", pipeline_response.http_response) @@ -458,7 +461,10 @@ def delete( # pylint: disable=inconsistent-return-statements if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) if cls: @@ -520,7 +526,10 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SynonymMap", pipeline_response.http_response) @@ -588,7 +597,10 @@ def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("ListSynonymMapsResult", pipeline_response.http_response) @@ -720,7 +732,10 @@ def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize("SynonymMap", pipeline_response.http_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py index 8d622df44258..416614659f05 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py @@ -28,8 +28,9 @@ AnalyzeTextOptions, AnalyzeResult, IndexStatisticsSummary, - KnowledgeAgent, + KnowledgeBase, KnowledgeSource, + KnowledgeSourceStatus, ) @@ -653,99 +654,105 @@ def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs) return self._client._send_request(request, stream=stream, **kwargs) # pylint:disable=protected-access @distributed_trace - def delete_agent( + def delete_knowledge_base( self, - agent: Union[str, KnowledgeAgent], + knowledge_base: Union[str, KnowledgeBase], *, match_condition: MatchConditions = MatchConditions.Unconditionally, **kwargs: Any ) -> None: - """Deletes an existing agent. + """Deletes an existing knowledge base. - :param agent: The agent name or object to delete. 
- :type agent: str or ~azure.search.documents.indexes.models.KnowledgeAgent + :param knowledge_base: The knowledge base name or object to delete. + :type knowledge_base: str or ~azure.search.documents.indexes.models.KnowledgeBase :keyword match_condition: The match condition to use upon the etag :paramtype match_condition: ~azure.core.MatchConditions :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(agent, match_condition) + error_map, access_condition = get_access_conditions(knowledge_base, match_condition) kwargs.update(access_condition) try: - agent_name = agent.name # type: ignore + knowledge_base_name = knowledge_base.name # type: ignore except AttributeError: - agent_name = agent - self._client.knowledge_agents.delete(agent_name=agent_name, error_map=error_map, **kwargs) + knowledge_base_name = knowledge_base + self._client.knowledge_bases.delete( + knowledge_base_name=knowledge_base_name, error_map=error_map, **kwargs + ) @distributed_trace - def create_agent(self, agent: KnowledgeAgent, **kwargs: Any) -> KnowledgeAgent: - """Creates a new knowledge agent. + def create_knowledge_base(self, knowledge_base: KnowledgeBase, **kwargs: Any) -> KnowledgeBase: + """Creates a new knowledge base. - :param agent: The agent object. - :type agent: ~azure.search.documents.indexes.models.KnowledgeAgent - :return: The agent created - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :param knowledge_base: The knowledge base object. + :type knowledge_base: ~azure.search.documents.indexes.models.KnowledgeBase + :return: The knowledge base created + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.knowledge_agents.create(agent, **kwargs) + result = self._client.knowledge_bases.create(knowledge_base, **kwargs) return result @distributed_trace - def create_or_update_agent( + def create_or_update_knowledge_base( self, - agent: KnowledgeAgent, + knowledge_base: KnowledgeBase, *, match_condition: MatchConditions = MatchConditions.Unconditionally, **kwargs: Any - ) -> KnowledgeAgent: - """Creates a new knowledge agent or updates an agent if it already exists. + ) -> KnowledgeBase: + """Creates a new knowledge base or updates one if it already exists. - :param agent: The agent object. - :type agent: ~azure.search.documents.indexes.models.KnowledgeAgent + :param knowledge_base: The knowledge base object. + :type knowledge_base: ~azure.search.documents.indexes.models.KnowledgeBase :keyword match_condition: The match condition to use upon the etag :paramtype match_condition: ~azure.core.MatchConditions - :return: The index created or updated - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent - :raises ~azure.core.exceptions.ResourceNotFoundError: If the index doesn't exist. - :raises ~azure.core.exceptions.ResourceModifiedError: If the index has been modified in the server. - :raises ~azure.core.exceptions.ResourceNotModifiedError: If the index hasn't been modified in the server. - :raises ~azure.core.exceptions.ResourceNotFoundError: If the index doesn't exist. - :raises ~azure.core.exceptions.ResourceExistsError: If the index already exists. 
+ :return: The knowledge base created or updated + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase + :raises ~azure.core.exceptions.ResourceNotFoundError: If the knowledge base doesn't exist. + :raises ~azure.core.exceptions.ResourceModifiedError: If the knowledge base has been modified on the server. + :raises ~azure.core.exceptions.ResourceNotModifiedError: If the knowledge base hasn't been + modified on the server. + :raises ~azure.core.exceptions.ResourceExistsError: If the knowledge base already exists. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(agent, match_condition) + error_map, access_condition = get_access_conditions(knowledge_base, match_condition) kwargs.update(access_condition) - result = self._client.knowledge_agents.create_or_update( - agent_name=agent.name, knowledge_agent=agent, prefer="return=representation", error_map=error_map, **kwargs + result = self._client.knowledge_bases.create_or_update( + knowledge_base_name=knowledge_base.name, + knowledge_base=knowledge_base, + prefer="return=representation", + error_map=error_map, + **kwargs ) return result @distributed_trace - def get_agent(self, name: str, **kwargs: Any) -> KnowledgeAgent: - """ + def get_knowledge_base(self, name: str, **kwargs: Any) -> KnowledgeBase: + """Gets a knowledge base definition. - :param name: The name of the agent to retrieve. + :param name: The name of the knowledge base to retrieve. :type name: str - :return: KnowledgeAgent object - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :return: KnowledgeBase object + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.knowledge_agents.get(name, **kwargs) + result = self._client.knowledge_bases.get(knowledge_base_name=name, **kwargs) return result @distributed_trace - def list_agents(self, **kwargs: Any) -> ItemPaged[KnowledgeAgent]: - """List the agents in an Azure Search service. + def list_knowledge_bases(self, **kwargs: Any) -> ItemPaged[KnowledgeBase]: + """List the knowledge bases in an Azure Search service. - :return: List of Knowledge Agents - :rtype: ~azure.core.paging.ItemPaged[~azure.search.documents.indexes.models.KnowledgeAgent] + :return: List of knowledge bases + :rtype: ~azure.core.paging.ItemPaged[~azure.search.documents.indexes.models.KnowledgeBase] :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) # pylint:disable=protected-access - return cast(ItemPaged[KnowledgeAgent], self._client.knowledge_agents.list(**kwargs)) + return cast(ItemPaged[KnowledgeBase], self._client.knowledge_bases.list(**kwargs)) @distributed_trace def delete_knowledge_source( @@ -834,6 +841,20 @@ def get_knowledge_source(self, name: str, **kwargs: Any) -> KnowledgeSource: result = self._client.knowledge_sources.get(name, **kwargs) return result + @distributed_trace + def get_knowledge_source_status(self, name: str, **kwargs: Any) -> KnowledgeSourceStatus: + """Returns the current status and synchronization history of a knowledge source. + + :param name: The name of the knowledge source for which to retrieve status. 
+ :type name: str + :return: KnowledgeSourceStatus object + :rtype: ~azure.search.documents.indexes.models.KnowledgeSourceStatus + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. + """ + kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) + result = self._client.knowledge_sources.get_status(source_name=name, **kwargs) + return result + @distributed_trace def list_knowledge_sources(self, **kwargs: Any) -> ItemPaged[KnowledgeSource]: """List the knowledge sources in an Azure Search service. diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py index db82fab940b0..e33e978a6fd6 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py @@ -29,8 +29,9 @@ AnalyzeResult, AnalyzeTextOptions, IndexStatisticsSummary, - KnowledgeAgent, + KnowledgeBase, KnowledgeSource, + KnowledgeSourceStatus, ) @@ -647,99 +648,105 @@ async def send_request(self, request: HttpRequest, *, stream: bool = False, **kw return await self._client._send_request(request, stream=stream, **kwargs) # pylint:disable=protected-access @distributed_trace_async - async def delete_agent( + async def delete_knowledge_base( self, - agent: Union[str, KnowledgeAgent], + knowledge_base: Union[str, KnowledgeBase], *, match_condition: MatchConditions = MatchConditions.Unconditionally, **kwargs: Any ) -> None: - """Deletes an existing agent. + """Deletes an existing knowledge base. - :param agent: The agent name or object to delete. - :type agent: str or ~azure.search.documents.indexes.models.KnowledgeAgent + :param knowledge_base: The knowledge base name or object to delete. + :type knowledge_base: str or ~azure.search.documents.indexes.models.KnowledgeBase :keyword match_condition: The match condition to use upon the etag :paramtype match_condition: ~azure.core.MatchConditions :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(agent, match_condition) + error_map, access_condition = get_access_conditions(knowledge_base, match_condition) kwargs.update(access_condition) try: - agent_name = agent.name # type: ignore + knowledge_base_name = knowledge_base.name # type: ignore except AttributeError: - agent_name = agent - await self._client.knowledge_agents.delete(agent_name=agent_name, error_map=error_map, **kwargs) + knowledge_base_name = knowledge_base + await self._client.knowledge_bases.delete( + knowledge_base_name=knowledge_base_name, error_map=error_map, **kwargs + ) @distributed_trace_async - async def create_agent(self, agent: KnowledgeAgent, **kwargs: Any) -> KnowledgeAgent: - """Creates a new knowledge agent. + async def create_knowledge_base(self, knowledge_base: KnowledgeBase, **kwargs: Any) -> KnowledgeBase: + """Creates a new knowledge base. - :param agent: The agent object. - :type agent: ~azure.search.documents.indexes.models.KnowledgeAgent - :return: The agent created - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :param knowledge_base: The knowledge base object. 
+ :type knowledge_base: ~azure.search.documents.indexes.models.KnowledgeBase + :return: The knowledge base created + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.knowledge_agents.create(agent, **kwargs) + result = await self._client.knowledge_bases.create(knowledge_base, **kwargs) return result @distributed_trace_async - async def create_or_update_agent( + async def create_or_update_knowledge_base( self, - agent: KnowledgeAgent, + knowledge_base: KnowledgeBase, *, match_condition: MatchConditions = MatchConditions.Unconditionally, **kwargs: Any - ) -> KnowledgeAgent: - """Creates a new knowledge agent or updates an agent if it already exists. + ) -> KnowledgeBase: + """Creates a new knowledge base or updates one if it already exists. - :param agent: The agent object. - :type agent: ~azure.search.documents.indexes.models.KnowledgeAgent + :param knowledge_base: The knowledge base object. + :type knowledge_base: ~azure.search.documents.indexes.models.KnowledgeBase :keyword match_condition: The match condition to use upon the etag :paramtype match_condition: ~azure.core.MatchConditions - :return: The index created or updated - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent - :raises ~azure.core.exceptions.ResourceNotFoundError: If the index doesn't exist. - :raises ~azure.core.exceptions.ResourceModifiedError: If the index has been modified in the server. - :raises ~azure.core.exceptions.ResourceNotModifiedError: If the index hasn't been modified in the server. - :raises ~azure.core.exceptions.ResourceNotFoundError: If the index doesn't exist. - :raises ~azure.core.exceptions.ResourceExistsError: If the index already exists. + :return: The knowledge base created or updated + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase + :raises ~azure.core.exceptions.ResourceNotFoundError: If the knowledge base doesn't exist. + :raises ~azure.core.exceptions.ResourceModifiedError: If the knowledge base has been modified on the server. + :raises ~azure.core.exceptions.ResourceNotModifiedError: If the knowledge base hasn't been + modified on the server. + :raises ~azure.core.exceptions.ResourceExistsError: If the knowledge base already exists. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(agent, match_condition) + error_map, access_condition = get_access_conditions(knowledge_base, match_condition) kwargs.update(access_condition) - result = await self._client.knowledge_agents.create_or_update( - agent_name=agent.name, knowledge_agent=agent, prefer="return=representation", error_map=error_map, **kwargs + result = await self._client.knowledge_bases.create_or_update( + knowledge_base_name=knowledge_base.name, + knowledge_base=knowledge_base, + prefer="return=representation", + error_map=error_map, + **kwargs ) return result @distributed_trace_async - async def get_agent(self, name: str, **kwargs: Any) -> KnowledgeAgent: - """ + async def get_knowledge_base(self, name: str, **kwargs: Any) -> KnowledgeBase: + """Gets a knowledge base definition. - :param name: The name of the agent to retrieve. + :param name: The name of the knowledge base to retrieve. 
:type name: str - :return: KnowledgeAgent object - :rtype: ~azure.search.documents.indexes.models.KnowledgeAgent + :return: KnowledgeBase object + :rtype: ~azure.search.documents.indexes.models.KnowledgeBase :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.knowledge_agents.get(name, **kwargs) + result = await self._client.knowledge_bases.get(knowledge_base_name=name, **kwargs) return result @distributed_trace - def list_agents(self, **kwargs) -> AsyncItemPaged[KnowledgeAgent]: - """List the agents in an Azure Search service. + def list_knowledge_bases(self, **kwargs: Any) -> AsyncItemPaged[KnowledgeBase]: + """List the knowledge bases in an Azure Search service. - :return: List of Knowledge Agents - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.search.documents.indexes.models.KnowledgeAgent] + :return: List of knowledge bases + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.search.documents.indexes.models.KnowledgeBase] :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) # pylint:disable=protected-access - return cast(AsyncItemPaged[KnowledgeAgent], self._client.knowledge_agents.list(**kwargs)) + return cast(AsyncItemPaged[KnowledgeBase], self._client.knowledge_bases.list(**kwargs)) @distributed_trace_async async def delete_knowledge_source( @@ -828,6 +835,20 @@ async def get_knowledge_source(self, name: str, **kwargs: Any) -> KnowledgeSourc result = await self._client.knowledge_sources.get(name, **kwargs) return result + @distributed_trace_async + async def get_knowledge_source_status(self, name: str, **kwargs: Any) -> KnowledgeSourceStatus: + """Returns the current status and synchronization history of a knowledge source. + + :param name: The name of the knowledge source for which to retrieve status. + :type name: str + :return: KnowledgeSourceStatus object + :rtype: ~azure.search.documents.indexes.models.KnowledgeSourceStatus + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. + """ + kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) + result = await self._client.knowledge_sources.get_status(source_name=name, **kwargs) + return result + @distributed_trace def list_knowledge_sources(self, **kwargs: Any) -> AsyncItemPaged[KnowledgeSource]: """List the knowledge sources in an Azure Search service. 
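Illustrative sketch (assumed usage, not generated code): the hunks above rename the knowledge base convenience methods on both the synchronous and asynchronous SearchIndexClient and add get_knowledge_source_status. The sketch below shows the synchronous form; only the method and model names (get_knowledge_base, list_knowledge_bases, delete_knowledge_base, get_knowledge_source_status, KnowledgeBase) come from this patch, while the endpoint, admin key, and resource names are placeholder assumptions. The aio client changed just above mirrors the same surface with coroutines and AsyncItemPaged.

from azure.core.credentials import AzureKeyCredential
from azure.search.documents.indexes import SearchIndexClient

# Placeholder endpoint and admin key (assumptions, not values from this patch).
client = SearchIndexClient("https://<service>.search.windows.net", AzureKeyCredential("<admin-key>"))

# Knowledge base helpers renamed from the old *_agent methods.
kb = client.get_knowledge_base("<knowledge-base-name>")
names = [b.name for b in client.list_knowledge_bases()]

# New in this patch: current status and synchronization history of a knowledge source.
status = client.get_knowledge_source_status("<knowledge-source-name>")

# delete_knowledge_base accepts either a name or a KnowledgeBase object.
client.delete_knowledge_base(kb)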
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py index 7340e4ec6214..0b979e44658b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py @@ -35,6 +35,7 @@ from ..._generated.models import SuggestOptions from .._generated.models import ( SearchAlias, + AIServices, AIServicesAccountIdentity, AIServicesAccountKey, AIServicesVisionParameters, @@ -69,13 +70,18 @@ ChatCompletionSkill, CjkBigramTokenFilter, CjkBigramTokenFilterScripts, + ContentUnderstandingSkillChunkingUnit, + ContentUnderstandingSkillExtractionOptions, ClassicSimilarityAlgorithm, ClassicTokenizer, CognitiveServicesAccount, CognitiveServicesAccountKey, CommonGramTokenFilter, CommonModelParameters, + CompletedSynchronizationState, ConditionalSkill, + ContentUnderstandingSkill, + ContentUnderstandingSkillChunkingProperties, CorsOptions, CustomEntity, CustomEntityAlias, @@ -122,6 +128,7 @@ IndexerExecutionEnvironment, IndexerExecutionResult, IndexerResyncBody, + IndexerRuntime, IndexerExecutionStatus, IndexerPermissionOption, IndexerResyncOption, @@ -131,23 +138,39 @@ IndexingParametersConfiguration, IndexingSchedule, IndexProjectionMode, + IndexedSharePointContainerName, IndexStatisticsSummary, + IndexedOneLakeKnowledgeSource, + IndexedOneLakeKnowledgeSourceParameters, + IndexedSharePointKnowledgeSource, + IndexedSharePointKnowledgeSourceParameters, InputFieldMappingEntry, KeepTokenFilter, KeyPhraseExtractionSkill, KeyPhraseExtractionSkillLanguage, KeywordMarkerTokenFilter, KeywordTokenizerV2, - KnowledgeAgent, - KnowledgeAgentAzureOpenAIModel, - KnowledgeAgentModel, - KnowledgeAgentOutputConfiguration, - KnowledgeAgentModelKind, - KnowledgeAgentOutputConfigurationModality, - KnowledgeAgentRequestLimits, + KnowledgeBase, + KnowledgeBaseAzureOpenAIModel, + KnowledgeBaseModel, + KnowledgeBaseModelKind, + KnowledgeRetrievalLowReasoningEffort, + KnowledgeRetrievalMediumReasoningEffort, + KnowledgeRetrievalMinimalReasoningEffort, + KnowledgeRetrievalReasoningEffort, + KnowledgeRetrievalOutputMode, + KnowledgeRetrievalReasoningEffortKind, KnowledgeSource, + KnowledgeSourceAzureOpenAIVectorizer, + KnowledgeSourceIngestionParameters, + KnowledgeSourceContentExtractionMode, + KnowledgeSourceIngestionPermissionOption, KnowledgeSourceKind, KnowledgeSourceReference, + KnowledgeSourceStatistics, + KnowledgeSourceStatus, + KnowledgeSourceSynchronizationStatus, + KnowledgeSourceVectorizer, LanguageDetectionSkill, LengthTokenFilter, LexicalAnalyzer, @@ -182,6 +205,8 @@ PatternReplaceTokenFilter, PhoneticEncoder, PhoneticTokenFilter, + RemoteSharePointKnowledgeSource, + RemoteSharePointKnowledgeSourceParameters, PIIDetectionSkill, PIIDetectionSkillMaskingMode, PermissionFilter, @@ -195,6 +220,7 @@ ScoringFunctionAggregation, ScoringFunctionInterpolation, ScoringProfile, + SearchIndexFieldReference, SearchIndexKnowledgeSource, SearchIndexKnowledgeSourceParameters, SearchIndexerCache, @@ -228,6 +254,7 @@ SemanticPrioritizedFields, SemanticSearch, SentimentSkillLanguage, + ServiceIndexersRuntime, ShaperSkill, ShingleTokenFilter, SimilarityAlgorithm, @@ -246,6 +273,7 @@ StopAnalyzer, StopwordsList, StopwordsTokenFilter, + SynchronizationState, SynonymTokenFilter, TagScoringFunction, TagScoringParameters, @@ -276,6 +304,10 @@ WebApiSkill, WebApiVectorizer, WebApiVectorizerParameters, + 
WebKnowledgeSource, + WebKnowledgeSourceDomain, + WebKnowledgeSourceDomains, + WebKnowledgeSourceParameters, WordDelimiterTokenFilter, ) from ._models import ( @@ -306,6 +338,7 @@ class PathHierarchyTokenizer(PathHierarchyTokenizerV2): __all__ = ( + "AIServices", "AIServicesAccountIdentity", "AIServicesAccountKey", "AIServicesVisionParameters", @@ -347,8 +380,13 @@ class PathHierarchyTokenizer(PathHierarchyTokenizerV2): "CognitiveServicesAccountKey", "CommonGramTokenFilter", "CommonModelParameters", + "CompletedSynchronizationState", "ComplexField", "ConditionalSkill", + "ContentUnderstandingSkill", + "ContentUnderstandingSkillChunkingProperties", + "ContentUnderstandingSkillChunkingUnit", + "ContentUnderstandingSkillExtractionOptions", "CorsOptions", "CustomAnalyzer", "CustomEntity", @@ -409,6 +447,12 @@ class PathHierarchyTokenizer(PathHierarchyTokenizerV2): "IndexingParametersConfiguration", "IndexingSchedule", "IndexProjectionMode", + "IndexedOneLakeKnowledgeSource", + "IndexedOneLakeKnowledgeSourceParameters", + "IndexedSharePointKnowledgeSource", + "IndexedSharePointKnowledgeSourceParameters", + "IndexedSharePointContainerName", + "IndexerRuntime", "InputFieldMappingEntry", "KeepTokenFilter", "KeyPhraseExtractionSkill", @@ -416,16 +460,27 @@ class PathHierarchyTokenizer(PathHierarchyTokenizerV2): "KeywordMarkerTokenFilter", "KeywordTokenizer", "LanguageDetectionSkill", - "KnowledgeAgent", - "KnowledgeAgentAzureOpenAIModel", - "KnowledgeAgentModel", - "KnowledgeAgentModelKind", - "KnowledgeAgentOutputConfiguration", - "KnowledgeAgentOutputConfigurationModality", - "KnowledgeAgentRequestLimits", + "KnowledgeBase", + "KnowledgeBaseAzureOpenAIModel", + "KnowledgeBaseModel", + "KnowledgeBaseModelKind", + "KnowledgeRetrievalLowReasoningEffort", + "KnowledgeRetrievalMediumReasoningEffort", + "KnowledgeRetrievalMinimalReasoningEffort", + "KnowledgeRetrievalReasoningEffort", + "KnowledgeRetrievalOutputMode", + "KnowledgeRetrievalReasoningEffortKind", "KnowledgeSource", + "KnowledgeSourceAzureOpenAIVectorizer", + "KnowledgeSourceContentExtractionMode", + "KnowledgeSourceIngestionParameters", + "KnowledgeSourceIngestionPermissionOption", "KnowledgeSourceKind", "KnowledgeSourceReference", + "KnowledgeSourceStatistics", + "KnowledgeSourceStatus", + "KnowledgeSourceSynchronizationStatus", + "KnowledgeSourceVectorizer", "LengthTokenFilter", "LexicalAnalyzer", "LexicalAnalyzerName", @@ -462,6 +517,8 @@ class PathHierarchyTokenizer(PathHierarchyTokenizerV2): "PatternTokenizer", "PermissionFilter", "PIIDetectionSkill", + "RemoteSharePointKnowledgeSource", + "RemoteSharePointKnowledgeSourceParameters", "PIIDetectionSkillMaskingMode", "PhoneticEncoder", "PhoneticTokenFilter", @@ -479,6 +536,7 @@ class PathHierarchyTokenizer(PathHierarchyTokenizerV2): "SearchableField", "SearchField", "SearchIndex", + "SearchIndexFieldReference", "SearchIndexKnowledgeSource", "SearchIndexKnowledgeSourceParameters", "SearchIndexer", @@ -517,6 +575,7 @@ class PathHierarchyTokenizer(PathHierarchyTokenizerV2): "SemanticSearch", "SentimentSkill", "SentimentSkillLanguage", + "ServiceIndexersRuntime", "SentimentSkillVersion", "ShaperSkill", "ShingleTokenFilter", @@ -538,6 +597,7 @@ class PathHierarchyTokenizer(PathHierarchyTokenizerV2): "StopwordsList", "StopwordsTokenFilter", "SuggestOptions", + "SynchronizationState", "SynonymMap", "SynonymTokenFilter", "TagScoringFunction", @@ -567,6 +627,10 @@ class PathHierarchyTokenizer(PathHierarchyTokenizerV2): "VisualFeature", "VisionVectorizeSkill", "WebApiSkill", + 
"WebKnowledgeSource", + "WebKnowledgeSourceDomain", + "WebKnowledgeSourceDomains", + "WebKnowledgeSourceParameters", "WordDelimiterTokenFilter", "SearchFieldDataType", ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_index.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_index.py index 3ed8dfa370f6..140cc95e0d51 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_index.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_index.py @@ -41,9 +41,10 @@ class SearchField(_serialization.Model): # pylint: disable=too-many-instance-attributes - """Represents a field in an index definition, which describes the name, data type, and search behavior of a field. + """Represents a field in an index definition, which describes the name, data type, and search + behavior of a field. - All required parameters must be populated in order to send to Azure. + All required parameters must be populated in order to send to server. :ivar name: The name of the field, which must be unique within the fields collection of the index or parent field. Required. @@ -57,58 +58,63 @@ class SearchField(_serialization.Model): type Edm.String. Key fields can be used to look up documents directly and update or delete specific documents. Default is false for simple fields and null for complex fields. :vartype key: bool + :ivar hidden: Convenience property that mirrors the generated ``retrievable`` flag. Set this to + true to prevent the field from being returned in search results. Defaults to false for simple + fields, true for vector fields, and null for complex fields. + :vartype hidden: bool :ivar stored: An immutable value indicating whether the field will be persisted separately on - disk to be returned in a search result. You can disable this option if you don't plan to return - the field contents in a search response to save on storage overhead. This can only be set - during index creation and only for vector fields. This property cannot be changed for existing - fields or set as false for new fields. If this property is set as false, the property - 'hidden' must be set to true. This property must be true or unset for key fields, - for new fields, and for non-vector fields, and it must be null for complex fields. Disabling - this property will reduce index storage requirements. The default is true for vector fields. + disk to be returned in a search result. You can disable this option if you don't plan to return + the field contents in a search response to save on storage overhead. This can only be set + during index creation and only for vector fields. This property cannot be changed for existing + fields or set as false for new fields. If this property is set to false, ``hidden`` must be set + to true. This property must be true or unset for key fields, for new fields, and for + non-vector fields, and it must be null for complex fields. Disabling this property will reduce + index storage requirements. The default is true for vector fields. :vartype stored: bool - :ivar searchable: A value indicating whether the field is full-text searchable. This means it - will undergo analysis such as word-breaking during indexing. If you set a searchable field to a - value like "sunny day", internally it will be split into the individual tokens "sunny" and - "day". This enables full-text searches for these terms. Fields of type Edm.String or - Collection(Edm.String) are searchable by default. 
This property must be false for simple fields - of other non-string data types, and it must be null for complex fields. Note: searchable fields - consume extra space in your index since Azure Cognitive Search will store an additional - tokenized version of the field value for full-text searches. If you want to save space in your - index and you don't need a field to be included in searches, set searchable to false. + :ivar searchable: A value indicating whether the field is full-text searchable. This means it will + undergo analysis such as word-breaking during indexing. If you set a searchable field to a value + like "sunny day", internally it will be split into the individual tokens "sunny" and "day". + This enables full-text searches for these terms. Fields of type Edm.String or Collection(Edm.String) + are searchable by default. This property must be false for simple fields of other non-string data + types, and it must be null for complex fields. Note: searchable fields consume extra space in your + index to accommodate additional tokenized versions of the field value for full-text searches. If + you want to save space in your index and you don't need a field to be included in searches, set + searchable to false. :vartype searchable: bool :ivar filterable: A value indicating whether to enable the field to be referenced in $filter queries. filterable differs from searchable in how strings are handled. Fields of type Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are for exact matches only. For example, if you set such a field f to "sunny day", - $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property - must be null for complex fields. Default is true for simple fields and null for complex fields. + $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property must + be null for complex fields. Default is true for simple fields and null for complex fields. :vartype filterable: bool :ivar sortable: A value indicating whether to enable the field to be referenced in $orderby - expressions. By default Azure Cognitive Search sorts results by score, but in many experiences - users will want to sort by fields in the documents. A simple field can be sortable only if it - is single-valued (it has a single value in the scope of the parent document). Simple collection - fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex - collections are also multi-valued, and therefore cannot be sortable. This is true whether it's - an immediate parent field, or an ancestor field, that's the complex collection. Complex fields - cannot be sortable and the sortable property must be null for such fields. The default for - sortable is true for single-valued simple fields, false for multi-valued simple fields, and - null for complex fields. + expressions. By default, the search engine sorts results by score, but in many experiences users + will want to sort by fields in the documents. A simple field can be sortable only if it is + single-valued (it has a single value in the scope of the parent document). Simple collection + fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex collections + are also multi-valued, and therefore cannot be sortable. This is true whether it's an immediate + parent field, or an ancestor field, that's the complex collection. 
Complex fields cannot be + sortable and the sortable property must be null for such fields. The default for sortable is true + for single-valued simple fields, false for multi-valued simple fields, and null for complex fields. :vartype sortable: bool - :ivar facetable: A value indicating whether to enable the field to be referenced in facet - queries. Typically used in a presentation of search results that includes hit count by category - (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so - on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or - Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple - fields. + :ivar facetable: A value indicating whether to enable the field to be referenced in facet queries. + Typically used in a presentation of search results that includes hit count by category (for + example, search for digital cameras and see hits by brand, by megapixels, by price, and so on). + This property must be null for complex fields. Fields of type Edm.GeographyPoint or + Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple fields. :vartype facetable: bool - :ivar permission_filter: A value indicating whether the field should be used as a permission - filter. Known values are: "userIds", "groupIds", and "rbacScope". + :ivar permission_filter: A value indicating whether the field should be used as a permission filter. + Known values are: "userIds", "groupIds", and "rbacScope". :vartype permission_filter: str or ~azure.search.documents.indexes.models.PermissionFilter - :ivar analyzer_name: The name of the analyzer to use for the field. This option can be used only - with searchable fields and it can't be set together with either searchAnalyzer or - indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null - for complex fields. Known values are: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", - "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", + :ivar sensitivity_label: A value indicating whether the field should be used for sensitivity label + filtering. This enables document-level filtering based on Microsoft Purview sensitivity labels. + :vartype sensitivity_label: bool + :ivar analyzer_name: The name of the analyzer to use for the field. This option can be used only with + searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once + the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. Known + values are: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", + "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", @@ -126,11 +132,34 @@ class SearchField(_serialization.Model): "whitespace". :vartype analyzer_name: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName :ivar search_analyzer_name: The name of the analyzer used at search time for the field. This option - can be used only with searchable fields. It must be set together with indexAnalyzer and it - cannot be set together with the analyzer option. 
This property cannot be set to the name of a - language analyzer; use the analyzer property instead if you need a language analyzer. This - analyzer can be updated on an existing field. Must be null for complex fields. Known values - are: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", + can be used only with searchable fields. It must be set together with indexAnalyzer and it cannot + be set together with the analyzer option. This property cannot be set to the name of a language + analyzer; use the analyzer property instead if you need a language analyzer. This analyzer can be + updated on an existing field. Must be null for complex fields. Known values are: "ar.microsoft", + "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", + "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", + "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", + "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", + "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", + "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", + "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", + "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", + "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", + "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", + "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", "pt-PT.lucene", "pa.microsoft", + "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", "sr-cyrillic.microsoft", + "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", "es.lucene", + "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", "th.lucene", + "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", and + "whitespace". + :vartype search_analyzer_name: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :ivar index_analyzer_name: The name of the analyzer used at indexing time for the field. This option + can be used only with searchable fields. It must be set together with searchAnalyzer and it cannot + be set together with the analyzer option. This property cannot be set to the name of a language + analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer is + chosen, it cannot be changed for the field. Must be null for complex fields. Known values are: + "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene", @@ -147,51 +176,28 @@ class SearchField(_serialization.Model): "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", and "whitespace". 
- :vartype search_analyzer_name: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :ivar index_analyzer_name: The name of the analyzer used at indexing time for the field. This option - can be used only with searchable fields. It must be set together with searchAnalyzer and it - cannot be set together with the analyzer option. This property cannot be set to the name of a - language analyzer; use the analyzer property instead if you need a language analyzer. Once the - analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. Known - values are: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", - "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", - "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", - "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", - "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", - "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", - "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", - "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", - "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", - "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", - "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", - "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", - "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", - "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", - "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", - "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", and - "whitespace". :vartype index_analyzer_name: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName :ivar normalizer_name: The name of the normalizer to use for the field. This option can be used only with fields with filterable, sortable, or facetable enabled. Once the normalizer is chosen, it - cannot be changed for the field. Must be null for complex fields. Known values are: - "asciifolding", "elision", "lowercase", "standard", and "uppercase". + cannot be changed for the field. Must be null for complex fields. Known values are: "asciifolding", + "elision", "lowercase", "standard", and "uppercase". :vartype normalizer_name: str or ~azure.search.documents.indexes.models.LexicalNormalizerName :ivar vector_search_dimensions: The dimensionality of the vector field. :vartype vector_search_dimensions: int :ivar vector_search_profile_name: The name of the vector search profile that specifies the algorithm - to use when searching the vector field. + and vectorizer to use when searching the vector field. :vartype vector_search_profile_name: str - :ivar synonym_map_names: A list of the names of synonym maps to associate with this field. This - option can be used only with searchable fields. Currently only one synonym map per field is - supported. Assigning a synonym map to a field ensures that query terms targeting that field are - expanded at query-time using the rules in the synonym map. This attribute can be changed on - existing fields. 
Must be null or an empty collection for complex fields. + :ivar vector_encoding_format: The encoding format to interpret the field contents. "packedBit" + :vartype vector_encoding_format: str or ~azure.search.documents.indexes.models.VectorEncodingFormat + :ivar synonym_map_names: A list of the names of synonym maps to associate with this field. This option + can be used only with searchable fields. Currently only one synonym map per field is supported. + Assigning a synonym map to a field ensures that query terms targeting that field are expanded at + query-time using the rules in the synonym map. This attribute can be changed on existing fields. + Must be null or an empty collection for complex fields. :vartype synonym_map_names: list[str] :ivar fields: A list of sub-fields if this is a field of type Edm.ComplexType or Collection(Edm.ComplexType). Must be null or empty for simple fields. :vartype fields: list[~azure.search.documents.indexes.models.SearchField] - :ivar vector_encoding_format: The encoding format to interpret the field contents. "packedBit" - :vartype vector_encoding_format: str or ~azure.search.documents.indexes.models.VectorEncodingFormat """ def __init__( @@ -207,6 +213,7 @@ def __init__( sortable: Optional[bool] = None, facetable: Optional[bool] = None, permission_filter: Optional[Union[str, PermissionFilter]] = None, + sensitivity_label: Optional[bool] = None, analyzer_name: Optional[Union[str, LexicalAnalyzerName]] = None, search_analyzer_name: Optional[Union[str, LexicalAnalyzerName]] = None, index_analyzer_name: Optional[Union[str, LexicalAnalyzerName]] = None, @@ -229,6 +236,7 @@ def __init__( self.sortable = sortable self.facetable = facetable self.permission_filter = permission_filter + self.sensitivity_label = sensitivity_label self.analyzer_name = analyzer_name self.search_analyzer_name = search_analyzer_name self.index_analyzer_name = index_analyzer_name @@ -253,6 +261,7 @@ def _to_generated(self) -> _SearchField: sortable=self.sortable, facetable=self.facetable, permission_filter=self.permission_filter, + sensitivity_label=self.sensitivity_label, analyzer=self.analyzer_name, search_analyzer=self.search_analyzer_name, index_analyzer=self.index_analyzer_name, @@ -290,6 +299,7 @@ def _from_generated(cls, search_field) -> Optional[Self]: sortable=search_field.sortable, facetable=search_field.facetable, permission_filter=search_field.permission_filter, + sensitivity_label=search_field.sensitivity_label, analyzer_name=search_field.analyzer, search_analyzer_name=search_field.search_analyzer, index_analyzer_name=search_field.index_analyzer, @@ -615,15 +625,16 @@ def ComplexField( class SearchIndex(_serialization.Model): # pylint: disable=too-many-instance-attributes - """Represents a search index definition, which describes the fields and search behavior of an index. + """Represents a search index definition, which describes the fields and search behavior of an + index. - All required parameters must be populated in order to send to Azure. + All required parameters must be populated in order to send to server. - :ivar name: Required. The name of the index. + :ivar name: The name of the index. Required. :vartype name: str :ivar description: The description of the index. :vartype description: str - :ivar fields: Required. The fields of the index. + :ivar fields: The fields of the index. Required. :vartype fields: list[~azure.search.documents.indexes.models.SearchField] :ivar scoring_profiles: The scoring profiles for the index. 
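For reference, a minimal sketch of how the field options above combine when defining an index, assuming the public helpers exported from azure.search.documents.indexes.models; the permission_filter and sensitivity_label keywords follow the additions in this patch and are expected to require the matching preview API version.

from azure.search.documents.indexes.models import (
    SearchField,
    SearchFieldDataType,
    SearchIndex,
    SearchableField,
    SimpleField,
)

fields = [
    SimpleField(name="hotelId", type=SearchFieldDataType.String, key=True),
    SearchableField(name="description", type=SearchFieldDataType.String, analyzer_name="en.lucene"),
    SimpleField(name="rating", type=SearchFieldDataType.Double, filterable=True, sortable=True, facetable=True),
    # Vector fields default to hidden=True; stored=False must be paired with hidden=True.
    SearchField(
        name="descriptionVector",
        type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
        searchable=True,
        vector_search_dimensions=1536,
        vector_search_profile_name="my-vector-profile",
        hidden=True,
        stored=False,
    ),
    # New in this patch: mark a field as a permission filter for document-level security trimming.
    SearchField(
        name="groupIds",
        type=SearchFieldDataType.Collection(SearchFieldDataType.String),
        filterable=True,
        permission_filter="groupIds",
    ),
]

index = SearchIndex(name="hotels-sample", fields=fields)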
:vartype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile] @@ -661,12 +672,15 @@ class SearchIndex(_serialization.Model): :vartype similarity: ~azure.search.documents.indexes.models.SimilarityAlgorithm :ivar semantic_search: Defines parameters for a search index that influence semantic capabilities. :vartype semantic_search: ~azure.search.documents.indexes.models.SemanticSearch - :ivar vector_search: Defines parameters for a search index that influence scoring in a vector space. + :ivar vector_search: Contains configuration options related to vector search. :vartype vector_search: ~azure.search.documents.indexes.models.VectorSearch :ivar permission_filter_option: A value indicating whether permission filtering is enabled for the index. Known values are: "enabled" and "disabled". :vartype permission_filter_option: str or ~azure.search.documents.indexes.models.SearchIndexPermissionFilterOption + :ivar purview_enabled: A value indicating whether the index is leveraging Purview-specific + features. This property defaults to false and cannot be changed after index creation. + :vartype purview_enabled: bool :ivar e_tag: The ETag of the index. :vartype e_tag: str """ @@ -691,6 +705,7 @@ def __init__( semantic_search: Optional[SemanticSearch] = None, vector_search: Optional[VectorSearch] = None, permission_filter_option: Optional[Union[str, SearchIndexPermissionFilterOption]] = None, + purview_enabled: Optional[bool] = None, e_tag: Optional[str] = None, **kwargs ): @@ -712,6 +727,7 @@ def __init__( self.semantic_search = semantic_search self.vector_search = vector_search self.permission_filter_option = permission_filter_option + self.purview_enabled = purview_enabled self.e_tag = e_tag def _to_generated(self) -> _SearchIndex: @@ -750,6 +766,7 @@ def _to_generated(self) -> _SearchIndex: e_tag=self.e_tag, vector_search=self.vector_search, permission_filter_option=self.permission_filter_option, + purview_enabled=self.purview_enabled, ) @classmethod @@ -800,6 +817,7 @@ def _from_generated(cls, search_index) -> Optional[Self]: e_tag=search_index.e_tag, vector_search=search_index.vector_search, permission_filter_option=search_index.permission_filter_option, + purview_enabled=search_index.purview_enabled, ) def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: @@ -878,7 +896,9 @@ def pack_search_field(search_field: SearchField) -> _SearchField: filterable = search_field.get("filterable") sortable = search_field.get("sortable") facetable = search_field.get("facetable") + permission_filter = search_field.get("permission_filter") analyzer_name = search_field.get("analyzer_name") + sensitivity_label = search_field.get("sensitivity_label") search_analyzer_name = search_field.get("search_analyzer_name") index_analyzer_name = search_field.get("index_analyzer_name") normalizer = search_field.get("normalizer") @@ -896,6 +916,8 @@ def pack_search_field(search_field: SearchField) -> _SearchField: filterable=filterable, sortable=sortable, facetable=facetable, + permission_filter=permission_filter, + sensitivity_label=sensitivity_label, analyzer=analyzer_name, search_analyzer=search_analyzer_name, index_analyzer=index_analyzer_name, diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py index 7a04efd01c69..31128c0a672d 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py +++ 
b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py @@ -47,7 +47,7 @@ class SearchIndexerSkillset(_serialization.Model): """A list of skills. - All required parameters must be populated in order to send to Azure. + All required parameters must be populated in order to send to server. :ivar name: The name of the skillset. Required. :vartype name: str @@ -57,7 +57,8 @@ class SearchIndexerSkillset(_serialization.Model): :vartype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill] :ivar cognitive_services_account: Details about the Azure AI service to be used when running skills. - :vartype cognitive_services_account: ~azure.search.documents.indexes.models.CognitiveServicesAccount + :vartype cognitive_services_account: + ~azure.search.documents.indexes.models.CognitiveServicesAccount :ivar knowledge_store: Definition of additional projections to Azure blob, table, or files, of enriched data. :vartype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore @@ -215,12 +216,11 @@ class EntityRecognitionSkillVersion(str, Enum, metaclass=CaseInsensitiveEnumMeta class EntityRecognitionSkill(SearchIndexerSkill): - """Using the Text Analytics API, extracts entities of different types from text. + """This skill is deprecated. Use the V3.EntityRecognitionSkill instead. - All required parameters must be populated in order to send to Azure. + All required parameters must be populated in order to send to server. - :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by - server. + :ivar odata_type: A URI fragment specifying the type of skill. Required. :vartype odata_type: str :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, @@ -232,23 +232,23 @@ class EntityRecognitionSkill(SearchIndexerSkill): :ivar context: Represents the level at which operations take place, such as the document root or document content (for example, /document or /document/content). The default is /document. :vartype context: str - :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the - output of an upstream skill. + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: Required. The output of a skill is either a field in a search index, or a value - that can be consumed as an input by another skill. + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] :ivar categories: A list of entity categories that should be extracted. - :vartype categories: list[str] or list[~azure.search.documents.indexes.models.EntityCategory] - :ivar default_language_code: A value indicating which language code to use. Default is en. - Possible values include: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", - "el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", "tr". + :vartype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory] + :ivar default_language_code: A value indicating which language code to use. Default is ``en``. 
+ Known values are: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", "el", + "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", and "tr". :vartype default_language_code: str or ~azure.search.documents.indexes.models.EntityRecognitionSkillLanguage :ivar include_typeless_entities: Determines whether or not to include entities which are well known but don't conform to a pre-defined type. If this configuration is not set (default), set to null or set to false, entities which don't conform to one of the pre-defined types will not - be surfaced. Only valid for skill version 1. + be surfaced. :vartype include_typeless_entities: bool :ivar minimum_precision: A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly @@ -256,7 +256,7 @@ class EntityRecognitionSkill(SearchIndexerSkill): :vartype minimum_precision: float :ivar model_version: The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify - this value unless absolutely necessary. Only valid from skill version 3. + this value unless absolutely necessary. :vartype model_version: str :ivar skill_version: The version of the skill to use when calling the Text Analytics service. It will default to V1 when not specified. @@ -338,16 +338,11 @@ class SentimentSkillVersion(str, Enum, metaclass=CaseInsensitiveEnumMeta): class SentimentSkill(SearchIndexerSkill): - """V1: Text analytics positive-negative sentiment analysis, scored as a floating point value in a range of zero - to 1. - V3: Using the Text Analytics API, evaluates unstructured text and for each record, provides sentiment labels - (such as "negative", "neutral" and "positive") based on the highest confidence score found by the service at - a sentence and document-level. + """This skill is deprecated. Use the V3.SentimentSkill instead. - All required parameters must be populated in order to send to Azure. + All required parameters must be populated in order to send to server. - :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by - server. + :ivar odata_type: A URI fragment specifying the type of skill. Required. :vartype odata_type: str :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, @@ -359,28 +354,28 @@ class SentimentSkill(SearchIndexerSkill): :ivar context: Represents the level at which operations take place, such as the document root or document content (for example, /document or /document/content). The default is /document. :vartype context: str - :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the - output of an upstream skill. + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: Required. The output of a skill is either a field in a search index, or a value - that can be consumed as an input by another skill. + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. 
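Since the V1 text-analytics skills above are deprecated, new skillsets would typically pin the V3 variant. A hedged sketch, assuming the skill and mapping-entry classes exported from azure.search.documents.indexes.models; the field paths and the "Organization" category are example values.

from azure.search.documents.indexes.models import (
    EntityRecognitionSkill,
    EntityRecognitionSkillVersion,
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
    SearchIndexerSkillset,
)

entity_skill = EntityRecognitionSkill(
    skill_version=EntityRecognitionSkillVersion.V3,
    categories=["Organization"],
    default_language_code="en",
    inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
    outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")],
)

skillset = SearchIndexerSkillset(
    name="org-extraction-skillset",
    description="Extracts organization entities from document content",
    skills=[entity_skill],
)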
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar default_language_code: A value indicating which language code to use. Default is en. - Possible values include: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT", - "ru", "es", "sv", "tr". + :ivar default_language_code: A value indicating which language code to use. Default is ``en``. + Known values are: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT", "ru", + "es", "sv", and "tr". :vartype default_language_code: str or ~azure.search.documents.indexes.models.SentimentSkillLanguage + :ivar skill_version: The version of the skill to use when calling the Text Analytics service. + It will default to V1 when not specified. + :vartype skill_version: ~azure.search.documents.indexes.models.SentimentSkillVersion :ivar include_opinion_mining: If set to true, the skill output will include information from Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated - assessment (adjective) in the text. Default is false. + assessment (adjective) in the text. Default is false. Only used when ``skill_version`` is V3. :vartype include_opinion_mining: bool :ivar model_version: The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. :vartype model_version: str - :ivar skill_version: The version of the skill to use when calling the Text Analytics service. - It will default to V1 when not specified. - :vartype skill_version: ~azure.search.documents.indexes.models.SentimentSkillVersion """ _validation = { @@ -586,34 +581,33 @@ def from_dict( # type: ignore class CustomAnalyzer(LexicalAnalyzer): - """Allows you to take control over the process of converting text into indexable/searchable tokens. - It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. - The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens - emitted by the tokenizer. + """Allows you to take control over the process of converting text into indexable/searchable + tokens. It's a user-defined configuration consisting of a single predefined tokenizer and one + or more filters. The tokenizer is responsible for breaking text into tokens, and the filters + for modifying tokens emitted by the tokenizer. - All required parameters must be populated in order to send to Azure. + All required parameters must be populated in order to send to server. - :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by - server. + :ivar odata_type: A URI fragment specifying the type of analyzer. Required. :vartype odata_type: str - :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. + :ivar name: The name of the analyzer. It must only contain letters, digits, spaces, dashes or + underscores, can only start and end with alphanumeric characters, and is limited to 128 + characters. Required. :vartype name: str - :ivar tokenizer_name: Required. The name of the tokenizer to use to divide continuous text into a - sequence of tokens, such as breaking a sentence into words. 
Possible values include: "classic", + :ivar tokenizer_name: The name of the tokenizer to use to divide continuous text into a sequence + of tokens, such as breaking a sentence into words. Required. Known values are: "classic", "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern", - "standard_v2", "uax_url_email", "whitespace". + "standard_v2", "uax_url_email", and "whitespace". :vartype tokenizer_name: str or ~azure.search.documents.indexes.models.LexicalTokenizerName :ivar token_filters: A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. - :vartype token_filters: list[str] or list[~azure.search.documents.indexes.models.TokenFilterName] + :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] :ivar char_filters: A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. - :vartype char_filters: list[str] + :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] """ def __init__(self, **kwargs): @@ -646,24 +640,26 @@ def _from_generated(cls, custom_analyzer): class PatternAnalyzer(LexicalAnalyzer): - """Flexibly separates text into terms via a regular expression. - This analyzer is implemented using Apache Lucene. + """Flexibly separates text into terms via a regular expression pattern. This analyzer is + implemented using Apache Lucene. - All required parameters must be populated in order to send to Azure. + All required parameters must be populated in order to send to server. - :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. + :ivar odata_type: A URI fragment specifying the type of analyzer. Required. + :vartype odata_type: str + :ivar name: The name of the analyzer. It must only contain letters, digits, spaces, dashes or + underscores, can only start and end with alphanumeric characters, and is limited to 128 + characters. Required. :vartype name: str :ivar lower_case_terms: A value indicating whether terms should be lower-cased. Default is true. :vartype lower_case_terms: bool - :ivar pattern: A regular expression to match token separators. Default is an - expression that matches one or more white space characters. + :ivar pattern: A regular expression pattern to match token separators. Default is an expression + that matches one or more non-word characters. :vartype pattern: str - :ivar flags: List of regular expression flags. Possible values of each flag include: 'CANON_EQ', - 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. - :vartype flags: list[str] or list[~azure.search.documents.indexes.models.RegexFlags] + :ivar flags: Regular expression flags. Known values are: "CANON_EQ", "CASE_INSENSITIVE", + "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", and "UNIX_LINES". + :vartype flags: list[str or ~azure.search.documents.indexes.models.RegexFlags] :ivar stopwords: A list of stopwords. 
:vartype stopwords: list[str] """ @@ -718,22 +714,24 @@ def _from_generated(cls, pattern_analyzer): class PatternTokenizer(LexicalTokenizer): - """Tokenizer that uses regex pattern matching to construct distinct tokens. - This tokenizer is implemented using Apache Lucene. + """Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is + implemented using Apache Lucene. - All required parameters must be populated in order to send to Azure. + All required parameters must be populated in order to send to server. - :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. + :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. + :vartype odata_type: str + :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes or + underscores, can only start and end with alphanumeric characters, and is limited to 128 + characters. Required. :vartype name: str - :ivar pattern: A regular expression to match token separators. Default is an - expression that matches one or more white space characters. + :ivar pattern: A regular expression pattern to match token separators. Default is an expression + that matches one or more non-word characters. :vartype pattern: str - :ivar flags: List of regular expression flags. Possible values of each flag include: 'CANON_EQ', - 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. - :vartype flags: list[str] or list[~azure.search.documents.indexes.models.RegexFlags] - :ivar group: The zero-based ordinal of the matching group in the regular expression to + :ivar flags: Regular expression flags. Known values are: "CANON_EQ", "CASE_INSENSITIVE", + "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", and "UNIX_LINES". + :vartype flags: list[str or ~azure.search.documents.indexes.models.RegexFlags] + :ivar group: The zero-based ordinal of the matching group in the regular expression pattern to extract into tokens. Use -1 if you want to use the entire pattern to split the input into tokens, irrespective of matching groups. Default is -1. :vartype group: int @@ -786,25 +784,25 @@ def _from_generated(cls, pattern_tokenizer): class SearchResourceEncryptionKey(_serialization.Model): """A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be - used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps. + used to encrypt or decrypt data-at-rest, such as indexes and synonym maps. - All required parameters must be populated in order to send to Azure. + All required parameters must be populated in order to send to server. - :ivar key_name: Required. The name of your Azure Key Vault key to be used to encrypt your data - at rest. + :ivar key_name: The name of your Azure Key Vault key to be used to encrypt your data at rest. + Required. :vartype key_name: str - :ivar key_version: Required. The version of your Azure Key Vault key to be used to encrypt - your data at rest. + :ivar key_version: The version of your Azure Key Vault key to be used to encrypt your data at + rest. :vartype key_version: str - :ivar vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, that - contains the key to be used to encrypt your data at rest. An example URI might be https://my- - keyvault-name.vault.azure.net. 
+ :ivar vault_uri: The URI of your Azure Key Vault, also referred to as DNS name, that contains + the key to be used to encrypt your data at rest. An example URI might be + ``https://my-keyvault-name.vault.azure.net``. Required. :vartype vault_uri: str - :ivar application_id: Required. An AAD Application ID that was granted the required access - permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The - Application ID should not be confused with the Object ID for your AAD Application. + :ivar application_id: Optional Azure Active Directory application ID used to construct access + credentials. Supply this together with ``application_secret`` instead of ``access_credentials``. :vartype application_id: str - :ivar application_secret: The authentication key of the specified AAD application. + :ivar application_secret: The authentication key of the specified AAD application. Used with + ``application_id`` to populate access credentials. :vartype application_secret: str :ivar identity: An explicit managed identity to use for this encryption key. If not specified and the access credentials property is null, the system-assigned managed identity is used. On @@ -936,24 +934,24 @@ class SynonymMap(_serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. - All required parameters must be populated in order to send to Azure. + All required parameters must be populated in order to send to server. - :ivar name: Required. The name of the synonym map. + :ivar name: The name of the synonym map. Required. :vartype name: str - :ivar format: Required. The format of the synonym map. Only the 'solr' format is currently - supported. Default value: "solr". + :ivar format: The format of the synonym map. Only the 'solr' format is currently supported. + Required. Default value is "solr". :vartype format: str - :ivar synonyms: Required. A series of synonym rules in the specified synonym map format. The - rules must be separated by newlines. + :ivar synonyms: A series of synonym rules in the specified synonym map format. The rules must + be separated by newlines. Required. :vartype synonyms: list[str] :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you - want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive - Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive - Search will ignore attempts to set this property to null. You can change this property as - needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with - customer-managed keys is not available for free search services, and is only available for paid - services created on or after January 1, 2019. + want full assurance that no one, not even Microsoft, can decrypt your data. Once you have + encrypted your data, it will always remain encrypted. The search service will ignore attempts + to set this property to null. You can change this property as needed if you want to rotate your + encryption key; Your data will be unaffected. Encryption with customer-managed keys is not + available for free search services, and is only available for paid services created on or after + January 1, 2019. :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey :ivar e_tag: The ETag of the synonym map. 
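A hedged sketch of a synonym map protected with a customer-managed key, using the properties described above; the key vault values and synonym rules are placeholders.

from azure.search.documents.indexes.models import SearchResourceEncryptionKey, SynonymMap

encryption_key = SearchResourceEncryptionKey(
    key_name="my-key",
    key_version="<key-version>",
    vault_uri="https://my-keyvault-name.vault.azure.net",
    # application_id/application_secret are only needed when access credentials
    # are used instead of a managed identity.
)

synonym_map = SynonymMap(
    name="my-synonyms",
    synonyms=[
        "USA, United States, United States of America",
        "WA, Wash. => Washington",
    ],
    encryption_key=encryption_key,
)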
:vartype e_tag: str @@ -1071,7 +1069,7 @@ class SearchIndexerDataSourceConnection(_serialization.Model): # pylint: disabl :ivar description: The description of the datasource connection. :vartype description: str :ivar type: Required. The type of the datasource connection. Possible values include: "azuresql", - "cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2". + "cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2", "onelake", "sharepoint". :vartype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType :ivar connection_string: The connection string for the datasource connection. :vartype connection_string: str diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/__init__.py similarity index 92% rename from sdk/search/azure-search-documents/azure/search/documents/agent/__init__.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/__init__.py index e6760b0b795a..321f317a4aae 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/__init__.py @@ -24,6 +24,6 @@ # # -------------------------------------------------------------------------- -from ._agent_client import KnowledgeAgentRetrievalClient +from ._knowledgebase_client import KnowledgeBaseRetrievalClient -__all__ = ("KnowledgeAgentRetrievalClient",) +__all__ = ("KnowledgeBaseRetrievalClient",) diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/__init__.py similarity index 80% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/__init__.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/__init__.py index 79e8ad096465..fcd722b7102c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
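The data source connection type above now also accepts the "onelake" and "sharepoint" kinds. A hedged sketch of creating such a connection, assuming the public models; the connection string and container name are placeholders whose exact format depends on the data source kind.

from azure.search.documents.indexes.models import (
    SearchIndexerDataContainer,
    SearchIndexerDataSourceConnection,
)

data_source = SearchIndexerDataSourceConnection(
    name="my-onelake-datasource",
    type="onelake",
    connection_string="<connection-string>",
    container=SearchIndexerDataContainer(name="<lakehouse-or-container>"),
)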
# -------------------------------------------------------------------------- # pylint: disable=wrong-import-position @@ -10,7 +10,7 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._knowledge_agent_retrieval_client import KnowledgeAgentRetrievalClient # type: ignore +from ._knowledge_base_retrieval_client import KnowledgeBaseRetrievalClient # type: ignore try: from ._patch import __all__ as _patch_all @@ -20,7 +20,7 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "KnowledgeAgentRetrievalClient", + "KnowledgeBaseRetrievalClient", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/_configuration.py similarity index 70% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/_configuration.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/_configuration.py index 2c38780be15c..fbe84162cd8c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/_configuration.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -11,33 +11,33 @@ VERSION = "unknown" -class KnowledgeAgentRetrievalClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long - """Configuration for KnowledgeAgentRetrievalClient. +class KnowledgeBaseRetrievalClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long + """Configuration for KnowledgeBaseRetrievalClient. Note that all parameters used to create this instance are saved as instance attributes. :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str - :param agent_name: The name of the agent. Required. - :type agent_name: str - :keyword api_version: Api Version. Default value is "2025-08-01-preview". Note that overriding + :param knowledge_base_name: The name of the knowledge base. Required. + :type knowledge_base_name: str + :keyword api_version: Api Version. Default value is "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ - def __init__(self, endpoint: str, agent_name: str, **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2025-08-01-preview") + def __init__(self, endpoint: str, knowledge_base_name: str, **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "2025-11-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") - if agent_name is None: - raise ValueError("Parameter 'agent_name' must not be None.") + if knowledge_base_name is None: + raise ValueError("Parameter 'knowledge_base_name' must not be None.") self.endpoint = endpoint - self.agent_name = agent_name + self.knowledge_base_name = knowledge_base_name self.api_version = api_version - kwargs.setdefault("sdk_moniker", "knowledgeagentretrievalclient/{}".format(VERSION)) + kwargs.setdefault("sdk_moniker", "knowledgebaseretrievalclient/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/_knowledge_agent_retrieval_client.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/_knowledge_base_retrieval_client.py similarity index 81% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/_knowledge_agent_retrieval_client.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/_knowledge_base_retrieval_client.py index 799ecddcb875..4e93613f3d16 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/_knowledge_agent_retrieval_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/_knowledge_base_retrieval_client.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -13,31 +13,33 @@ from azure.core.rest import HttpRequest, HttpResponse from . import models as _models -from ._configuration import KnowledgeAgentRetrievalClientConfiguration +from ._configuration import KnowledgeBaseRetrievalClientConfiguration from ._utils.serialization import Deserializer, Serializer from .operations import KnowledgeRetrievalOperations -class KnowledgeAgentRetrievalClient: - """Client that can be used to query an agent. +class KnowledgeBaseRetrievalClient: + """Client that can be used to query an knowledge base. :ivar knowledge_retrieval: KnowledgeRetrievalOperations operations :vartype knowledge_retrieval: - azure.search.documents.agent.operations.KnowledgeRetrievalOperations + azure.search.documents.knowledgebases.operations.KnowledgeRetrievalOperations :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str - :param agent_name: The name of the agent. Required. - :type agent_name: str - :keyword api_version: Api Version. Default value is "2025-08-01-preview". Note that overriding + :param knowledge_base_name: The name of the knowledge base. Required. + :type knowledge_base_name: str + :keyword api_version: Api Version. Default value is "2025-11-01-preview". 
Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ def __init__( # pylint: disable=missing-client-constructor-parameter-credential - self, endpoint: str, agent_name: str, **kwargs: Any + self, endpoint: str, knowledge_base_name: str, **kwargs: Any ) -> None: - _endpoint = "{endpoint}/agents('{agentName}')" - self._config = KnowledgeAgentRetrievalClientConfiguration(endpoint=endpoint, agent_name=agent_name, **kwargs) + _endpoint = "{endpoint}/knowledgebases('{knowledgeBaseName}')" + self._config = KnowledgeBaseRetrievalClientConfiguration( + endpoint=endpoint, knowledge_base_name=knowledge_base_name, **kwargs + ) _policies = kwargs.pop("policies", None) if _policies is None: @@ -87,7 +89,9 @@ def _send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: request_copy = deepcopy(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "agentName": self._serialize.url("self._config.agent_name", self._config.agent_name, "str"), + "knowledgeBaseName": self._serialize.url( + "self._config.knowledge_base_name", self._config.knowledge_base_name, "str" + ), } request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/_patch.py similarity index 100% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/_patch.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/_patch.py diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/_utils/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/_utils/__init__.py similarity index 86% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/_utils/__init__.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/_utils/__init__.py index f986b371549e..4473821c4ebe 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/_utils/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/_utils/__init__.py @@ -1,4 +1,4 @@ # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
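A hypothetical usage sketch of the renamed sync client: the knowledge_base_name parameter and the knowledgebases('...') URL format follow the generated client in this patch, while the credential keyword of the public wrapper is assumed.

from azure.core.credentials import AzureKeyCredential
from azure.search.documents.knowledgebases import KnowledgeBaseRetrievalClient

client = KnowledgeBaseRetrievalClient(
    endpoint="https://<service>.search.windows.net",
    knowledge_base_name="my-knowledge-base",
    credential=AzureKeyCredential("<api-key>"),  # assumed: the public wrapper accepts a credential
)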
# -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/_utils/serialization.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/_utils/serialization.py similarity index 99% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/_utils/serialization.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/_utils/serialization.py index 003e1c89fb35..9b8154c91dc2 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/_utils/serialization.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/_utils/serialization.py @@ -1,7 +1,7 @@ # pylint: disable=line-too-long,useless-suppression,too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -785,7 +785,7 @@ def serialize_data(self, data, data_type, **kwargs): # If dependencies is empty, try with current data class # It has to be a subclass of Enum anyway - enum_type = self.dependencies.get(data_type, data.__class__) + enum_type = self.dependencies.get(data_type, cast(type, data.__class__)) if issubclass(enum_type, Enum): return Serializer.serialize_enum(data, enum_obj=enum_type) diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/__init__.py similarity index 80% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/__init__.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/__init__.py index 79e8ad096465..fcd722b7102c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- # pylint: disable=wrong-import-position @@ -10,7 +10,7 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._knowledge_agent_retrieval_client import KnowledgeAgentRetrievalClient # type: ignore +from ._knowledge_base_retrieval_client import KnowledgeBaseRetrievalClient # type: ignore try: from ._patch import __all__ as _patch_all @@ -20,7 +20,7 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "KnowledgeAgentRetrievalClient", + "KnowledgeBaseRetrievalClient", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/_configuration.py similarity index 70% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/_configuration.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/_configuration.py index 4891889e5941..a96675a3c2ad 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/_configuration.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -11,33 +11,33 @@ VERSION = "unknown" -class KnowledgeAgentRetrievalClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long - """Configuration for KnowledgeAgentRetrievalClient. +class KnowledgeBaseRetrievalClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long + """Configuration for KnowledgeBaseRetrievalClient. Note that all parameters used to create this instance are saved as instance attributes. :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str - :param agent_name: The name of the agent. Required. - :type agent_name: str - :keyword api_version: Api Version. Default value is "2025-08-01-preview". Note that overriding + :param knowledge_base_name: The name of the knowledge base. Required. + :type knowledge_base_name: str + :keyword api_version: Api Version. Default value is "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ - def __init__(self, endpoint: str, agent_name: str, **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2025-08-01-preview") + def __init__(self, endpoint: str, knowledge_base_name: str, **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "2025-11-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") - if agent_name is None: - raise ValueError("Parameter 'agent_name' must not be None.") + if knowledge_base_name is None: + raise ValueError("Parameter 'knowledge_base_name' must not be None.") self.endpoint = endpoint - self.agent_name = agent_name + self.knowledge_base_name = knowledge_base_name self.api_version = api_version - kwargs.setdefault("sdk_moniker", "knowledgeagentretrievalclient/{}".format(VERSION)) + kwargs.setdefault("sdk_moniker", "knowledgebaseretrievalclient/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/_knowledge_agent_retrieval_client.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/_knowledge_base_retrieval_client.py similarity index 81% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/_knowledge_agent_retrieval_client.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/_knowledge_base_retrieval_client.py index 40d68fb6e571..59d9df34245f 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/_knowledge_agent_retrieval_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/_knowledge_base_retrieval_client.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -14,30 +14,32 @@ from .. import models as _models from .._utils.serialization import Deserializer, Serializer -from ._configuration import KnowledgeAgentRetrievalClientConfiguration +from ._configuration import KnowledgeBaseRetrievalClientConfiguration from .operations import KnowledgeRetrievalOperations -class KnowledgeAgentRetrievalClient: - """Client that can be used to query an agent. +class KnowledgeBaseRetrievalClient: + """Client that can be used to query a knowledge base. :ivar knowledge_retrieval: KnowledgeRetrievalOperations operations :vartype knowledge_retrieval: - azure.search.documents.agent.aio.operations.KnowledgeRetrievalOperations + azure.search.documents.knowledgebases.aio.operations.KnowledgeRetrievalOperations :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str - :param agent_name: The name of the agent. Required. - :type agent_name: str - :keyword api_version: Api Version. Default value is "2025-08-01-preview". Note that overriding + :param knowledge_base_name: The name of the knowledge base. Required. + :type knowledge_base_name: str + :keyword api_version: Api Version. Default value is "2025-11-01-preview".
Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ def __init__( # pylint: disable=missing-client-constructor-parameter-credential - self, endpoint: str, agent_name: str, **kwargs: Any + self, endpoint: str, knowledge_base_name: str, **kwargs: Any ) -> None: - _endpoint = "{endpoint}/agents('{agentName}')" - self._config = KnowledgeAgentRetrievalClientConfiguration(endpoint=endpoint, agent_name=agent_name, **kwargs) + _endpoint = "{endpoint}/knowledgebases('{knowledgeBaseName}')" + self._config = KnowledgeBaseRetrievalClientConfiguration( + endpoint=endpoint, knowledge_base_name=knowledge_base_name, **kwargs + ) _policies = kwargs.pop("policies", None) if _policies is None: @@ -89,7 +91,9 @@ def _send_request( request_copy = deepcopy(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "agentName": self._serialize.url("self._config.agent_name", self._config.agent_name, "str"), + "knowledgeBaseName": self._serialize.url( + "self._config.knowledge_base_name", self._config.knowledge_base_name, "str" + ), } request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/_patch.py similarity index 100% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/_patch.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/_patch.py diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/operations/__init__.py similarity index 94% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/operations/__init__.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/operations/__init__.py index ee2a3f9ac8bb..60ca88c85f3c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/operations/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/operations/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- # pylint: disable=wrong-import-position diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/operations/_knowledge_retrieval_operations.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/operations/_knowledge_retrieval_operations.py similarity index 73% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/operations/_knowledge_retrieval_operations.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/operations/_knowledge_retrieval_operations.py index adb4b09ccb69..73d0b76d7b23 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/operations/_knowledge_retrieval_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/operations/_knowledge_retrieval_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -24,7 +24,7 @@ from ... import models as _models from ..._utils.serialization import Deserializer, Serializer from ...operations._knowledge_retrieval_operations import build_retrieve_request -from .._configuration import KnowledgeAgentRetrievalClientConfiguration +from .._configuration import KnowledgeBaseRetrievalClientConfiguration T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] @@ -36,7 +36,7 @@ class KnowledgeRetrievalOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.search.documents.agent.aio.KnowledgeAgentRetrievalClient`'s + :class:`~azure.search.documents.knowledgebases.aio.KnowledgeBaseRetrievalClient`'s :attr:`knowledge_retrieval` attribute. """ @@ -45,7 +45,7 @@ class KnowledgeRetrievalOperations: def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config: KnowledgeAgentRetrievalClientConfiguration = ( + self._config: KnowledgeBaseRetrievalClientConfiguration = ( input_args.pop(0) if input_args else kwargs.pop("config") ) self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") @@ -54,28 +54,29 @@ def __init__(self, *args, **kwargs) -> None: @overload async def retrieve( self, - retrieval_request: _models.KnowledgeAgentRetrievalRequest, + retrieval_request: _models.KnowledgeBaseRetrievalRequest, x_ms_query_source_authorization: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.KnowledgeAgentRetrievalResponse: - """KnowledgeAgent retrieves relevant data from backing stores. + ) -> _models.KnowledgeBaseRetrievalResponse: + """KnowledgeBase retrieves relevant data from backing stores. :param retrieval_request: The retrieval request to process. Required. 
- :type retrieval_request: ~azure.search.documents.agent.models.KnowledgeAgentRetrievalRequest + :type retrieval_request: + ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalRequest :param x_ms_query_source_authorization: Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. Default value is None. :type x_ms_query_source_authorization: str :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.agent.models.RequestOptions + :type request_options: ~azure.search.documents.knowledgebases.models.RequestOptions :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: KnowledgeAgentRetrievalResponse or the result of cls(response) - :rtype: ~azure.search.documents.agent.models.KnowledgeAgentRetrievalResponse + :return: KnowledgeBaseRetrievalResponse or the result of cls(response) + :rtype: ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalResponse :raises ~azure.core.exceptions.HttpResponseError: """ @@ -88,8 +89,8 @@ async def retrieve( *, content_type: str = "application/json", **kwargs: Any - ) -> _models.KnowledgeAgentRetrievalResponse: - """KnowledgeAgent retrieves relevant data from backing stores. + ) -> _models.KnowledgeBaseRetrievalResponse: + """KnowledgeBase retrieves relevant data from backing stores. :param retrieval_request: The retrieval request to process. Required. :type retrieval_request: IO[bytes] @@ -98,37 +99,37 @@ async def retrieve( None. :type x_ms_query_source_authorization: str :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.agent.models.RequestOptions + :type request_options: ~azure.search.documents.knowledgebases.models.RequestOptions :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: KnowledgeAgentRetrievalResponse or the result of cls(response) - :rtype: ~azure.search.documents.agent.models.KnowledgeAgentRetrievalResponse + :return: KnowledgeBaseRetrievalResponse or the result of cls(response) + :rtype: ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalResponse :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async async def retrieve( self, - retrieval_request: Union[_models.KnowledgeAgentRetrievalRequest, IO[bytes]], + retrieval_request: Union[_models.KnowledgeBaseRetrievalRequest, IO[bytes]], x_ms_query_source_authorization: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.KnowledgeAgentRetrievalResponse: - """KnowledgeAgent retrieves relevant data from backing stores. + ) -> _models.KnowledgeBaseRetrievalResponse: + """KnowledgeBase retrieves relevant data from backing stores. :param retrieval_request: The retrieval request to process. Is either a - KnowledgeAgentRetrievalRequest type or a IO[bytes] type. Required. - :type retrieval_request: ~azure.search.documents.agent.models.KnowledgeAgentRetrievalRequest or - IO[bytes] + KnowledgeBaseRetrievalRequest type or a IO[bytes] type. Required. + :type retrieval_request: + ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalRequest or IO[bytes] :param x_ms_query_source_authorization: Token identifying the user for which the query is being executed. 
This token is used to enforce security restrictions on documents. Default value is None. :type x_ms_query_source_authorization: str :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.agent.models.RequestOptions - :return: KnowledgeAgentRetrievalResponse or the result of cls(response) - :rtype: ~azure.search.documents.agent.models.KnowledgeAgentRetrievalResponse + :type request_options: ~azure.search.documents.knowledgebases.models.RequestOptions + :return: KnowledgeBaseRetrievalResponse or the result of cls(response) + :rtype: ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalResponse :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -144,7 +145,7 @@ async def retrieve( api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.KnowledgeAgentRetrievalResponse] = kwargs.pop("cls", None) + cls: ClsType[_models.KnowledgeBaseRetrievalResponse] = kwargs.pop("cls", None) _x_ms_client_request_id = None if request_options is not None: @@ -155,7 +156,7 @@ async def retrieve( if isinstance(retrieval_request, (IOBase, bytes)): _content = retrieval_request else: - _json = self._serialize.body(retrieval_request, "KnowledgeAgentRetrievalRequest") + _json = self._serialize.body(retrieval_request, "KnowledgeBaseRetrievalRequest") _request = build_retrieve_request( x_ms_client_request_id=_x_ms_client_request_id, @@ -169,7 +170,9 @@ async def retrieve( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "agentName": self._serialize.url("self._config.agent_name", self._config.agent_name, "str"), + "knowledgeBaseName": self._serialize.url( + "self._config.knowledge_base_name", self._config.knowledge_base_name, "str" + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -180,12 +183,15 @@ async def retrieve( response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [200, 206]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize("KnowledgeAgentRetrievalResponse", pipeline_response.http_response) + deserialized = self._deserialize("KnowledgeBaseRetrievalResponse", pipeline_response.http_response) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/operations/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/operations/_patch.py similarity index 100% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/operations/_patch.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/aio/operations/_patch.py diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/models/__init__.py new file mode 100644 
index 000000000000..6f5c31b6dfde --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/models/__init__.py @@ -0,0 +1,140 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + + +from ._models_py3 import ( # type: ignore + AzureBlobKnowledgeSourceParams, + ErrorAdditionalInfo, + ErrorDetail, + ErrorResponse, + IndexedOneLakeKnowledgeSourceParams, + IndexedSharePointKnowledgeSourceParams, + KnowledgeBaseActivityRecord, + KnowledgeBaseAgenticReasoningActivityRecord, + KnowledgeBaseAzureBlobActivityArguments, + KnowledgeBaseAzureBlobActivityRecord, + KnowledgeBaseAzureBlobReference, + KnowledgeBaseErrorAdditionalInfo, + KnowledgeBaseErrorDetail, + KnowledgeBaseIndexedOneLakeActivityArguments, + KnowledgeBaseIndexedOneLakeActivityRecord, + KnowledgeBaseIndexedOneLakeReference, + KnowledgeBaseIndexedSharePointActivityArguments, + KnowledgeBaseIndexedSharePointActivityRecord, + KnowledgeBaseIndexedSharePointReference, + KnowledgeBaseMessage, + KnowledgeBaseMessageContent, + KnowledgeBaseMessageImageContent, + KnowledgeBaseMessageImageContentImage, + KnowledgeBaseMessageTextContent, + KnowledgeBaseModelAnswerSynthesisActivityRecord, + KnowledgeBaseModelQueryPlanningActivityRecord, + KnowledgeBaseReference, + KnowledgeBaseRemoteSharePointActivityArguments, + KnowledgeBaseRemoteSharePointActivityRecord, + KnowledgeBaseRemoteSharePointReference, + KnowledgeBaseRetrievalActivityRecord, + KnowledgeBaseRetrievalRequest, + KnowledgeBaseRetrievalResponse, + KnowledgeBaseSearchIndexActivityArguments, + KnowledgeBaseSearchIndexActivityRecord, + KnowledgeBaseSearchIndexReference, + KnowledgeBaseWebActivityArguments, + KnowledgeBaseWebActivityRecord, + KnowledgeBaseWebReference, + KnowledgeRetrievalIntent, + KnowledgeRetrievalLowReasoningEffort, + KnowledgeRetrievalMediumReasoningEffort, + KnowledgeRetrievalMinimalReasoningEffort, + KnowledgeRetrievalReasoningEffort, + KnowledgeRetrievalSemanticIntent, + KnowledgeSourceParams, + RemoteSharePointKnowledgeSourceParams, + RequestOptions, + SearchIndexFieldReference, + SearchIndexKnowledgeSourceParams, + SharePointSensitivityLabelInfo, + WebKnowledgeSourceParams, +) + +from ._knowledge_base_retrieval_client_enums import ( # type: ignore + KnowledgeBaseMessageContentType, + KnowledgeRetrievalIntentType, + KnowledgeRetrievalOutputMode, + KnowledgeRetrievalReasoningEffortKind, + KnowledgeSourceKind, +) +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AzureBlobKnowledgeSourceParams", + "ErrorAdditionalInfo", + "ErrorDetail", + "ErrorResponse", + "IndexedOneLakeKnowledgeSourceParams", + "IndexedSharePointKnowledgeSourceParams", + "KnowledgeBaseActivityRecord", + "KnowledgeBaseAgenticReasoningActivityRecord", + "KnowledgeBaseAzureBlobActivityArguments", + "KnowledgeBaseAzureBlobActivityRecord", + "KnowledgeBaseAzureBlobReference", + "KnowledgeBaseErrorAdditionalInfo", + "KnowledgeBaseErrorDetail", + "KnowledgeBaseIndexedOneLakeActivityArguments", + 
"KnowledgeBaseIndexedOneLakeActivityRecord", + "KnowledgeBaseIndexedOneLakeReference", + "KnowledgeBaseIndexedSharePointActivityArguments", + "KnowledgeBaseIndexedSharePointActivityRecord", + "KnowledgeBaseIndexedSharePointReference", + "KnowledgeBaseMessage", + "KnowledgeBaseMessageContent", + "KnowledgeBaseMessageImageContent", + "KnowledgeBaseMessageImageContentImage", + "KnowledgeBaseMessageTextContent", + "KnowledgeBaseModelAnswerSynthesisActivityRecord", + "KnowledgeBaseModelQueryPlanningActivityRecord", + "KnowledgeBaseReference", + "KnowledgeBaseRemoteSharePointActivityArguments", + "KnowledgeBaseRemoteSharePointActivityRecord", + "KnowledgeBaseRemoteSharePointReference", + "KnowledgeBaseRetrievalActivityRecord", + "KnowledgeBaseRetrievalRequest", + "KnowledgeBaseRetrievalResponse", + "KnowledgeBaseSearchIndexActivityArguments", + "KnowledgeBaseSearchIndexActivityRecord", + "KnowledgeBaseSearchIndexReference", + "KnowledgeBaseWebActivityArguments", + "KnowledgeBaseWebActivityRecord", + "KnowledgeBaseWebReference", + "KnowledgeRetrievalIntent", + "KnowledgeRetrievalLowReasoningEffort", + "KnowledgeRetrievalMediumReasoningEffort", + "KnowledgeRetrievalMinimalReasoningEffort", + "KnowledgeRetrievalReasoningEffort", + "KnowledgeRetrievalSemanticIntent", + "KnowledgeSourceParams", + "RemoteSharePointKnowledgeSourceParams", + "RequestOptions", + "SearchIndexFieldReference", + "SearchIndexKnowledgeSourceParams", + "SharePointSensitivityLabelInfo", + "WebKnowledgeSourceParams", + "KnowledgeBaseMessageContentType", + "KnowledgeRetrievalIntentType", + "KnowledgeRetrievalOutputMode", + "KnowledgeRetrievalReasoningEffortKind", + "KnowledgeSourceKind", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/models/_knowledge_base_retrieval_client_enums.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/models/_knowledge_base_retrieval_client_enums.py new file mode 100644 index 000000000000..e16ce913c7a3 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/models/_knowledge_base_retrieval_client_enums.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from enum import Enum +from azure.core import CaseInsensitiveEnumMeta + + +class KnowledgeBaseMessageContentType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The type of message content.""" + + TEXT = "text" + """Text message content kind.""" + IMAGE = "image" + """Image message content kind.""" + + +class KnowledgeRetrievalIntentType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The kind of knowledge base configuration to use.""" + + SEMANTIC = "semantic" + """A natural language semantic query intent.""" + + +class KnowledgeRetrievalOutputMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The output configuration for this retrieval.""" + + EXTRACTIVE_DATA = "extractiveData" + """Return data from the knowledge sources directly without generative alteration.""" + ANSWER_SYNTHESIS = "answerSynthesis" + """Synthesize an answer for the response payload.""" + + +class KnowledgeRetrievalReasoningEffortKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The amount of effort to use during retrieval.""" + + MINIMAL = "minimal" + """Does not perform any source selections, query planning, or iterative search.""" + LOW = "low" + """Use low reasoning during retrieval.""" + MEDIUM = "medium" + """Use a moderate amount of reasoning during retrieval.""" + + +class KnowledgeSourceKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The kind of the knowledge source.""" + + SEARCH_INDEX = "searchIndex" + """A knowledge source that retrieves data from a Search Index.""" + AZURE_BLOB = "azureBlob" + """A knowledge source that retrieves and ingests data from Azure Blob Storage to a Search Index.""" + WEB = "web" + """A knowledge source that retrieves data from the web.""" + REMOTE_SHARE_POINT = "remoteSharePoint" + """A knowledge source that retrieves data from a remote SharePoint endpoint.""" + INDEXED_SHARE_POINT = "indexedSharePoint" + """A knowledge source that retrieves and ingests data from SharePoint to a Search Index.""" + INDEXED_ONE_LAKE = "indexedOneLake" + """A knowledge source that retrieves and ingests data from OneLake to a Search Index.""" diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/models/_models_py3.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/models/_models_py3.py new file mode 100644 index 000000000000..83ba940d15f6 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/models/_models_py3.py @@ -0,0 +1,2875 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from collections.abc import MutableMapping +import datetime +from typing import Any, Optional, TYPE_CHECKING, Union + +from .._utils import serialization as _serialization + +if TYPE_CHECKING: + from .. import models as _models +JSON = MutableMapping[str, Any] + + +class KnowledgeSourceParams(_serialization.Model): + """KnowledgeSourceParams. + + You probably want to use the sub-classes and not this class directly. 
Known sub-classes are: + AzureBlobKnowledgeSourceParams, IndexedOneLakeKnowledgeSourceParams, + IndexedSharePointKnowledgeSourceParams, RemoteSharePointKnowledgeSourceParams, + SearchIndexKnowledgeSourceParams, WebKnowledgeSourceParams + + All required parameters must be populated in order to send to server. + + :ivar knowledge_source_name: The name of the index the params apply to. Required. + :vartype knowledge_source_name: str + :ivar include_references: Indicates whether references should be included for data retrieved + from this source. + :vartype include_references: bool + :ivar include_reference_source_data: Indicates whether references should include the structured + data obtained during retrieval in their payload. + :vartype include_reference_source_data: bool + :ivar always_query_source: Indicates that this knowledge source should bypass source selection + and always be queried at retrieval time. + :vartype always_query_source: bool + :ivar reranker_threshold: The reranker threshold all retrieved documents must meet to be + included in the response. + :vartype reranker_threshold: float + :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex", + "azureBlob", "web", "remoteSharePoint", "indexedSharePoint", and "indexedOneLake". + :vartype kind: str or ~azure.search.documents.knowledgebases.models.KnowledgeSourceKind + """ + + _validation = { + "knowledge_source_name": {"required": True}, + "kind": {"required": True}, + } + + _attribute_map = { + "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, + "include_references": {"key": "includeReferences", "type": "bool"}, + "include_reference_source_data": {"key": "includeReferenceSourceData", "type": "bool"}, + "always_query_source": {"key": "alwaysQuerySource", "type": "bool"}, + "reranker_threshold": {"key": "rerankerThreshold", "type": "float"}, + "kind": {"key": "kind", "type": "str"}, + } + + _subtype_map = { + "kind": { + "azureBlob": "AzureBlobKnowledgeSourceParams", + "indexedOneLake": "IndexedOneLakeKnowledgeSourceParams", + "indexedSharePoint": "IndexedSharePointKnowledgeSourceParams", + "remoteSharePoint": "RemoteSharePointKnowledgeSourceParams", + "searchIndex": "SearchIndexKnowledgeSourceParams", + "web": "WebKnowledgeSourceParams", + } + } + + def __init__( + self, + *, + knowledge_source_name: str, + include_references: Optional[bool] = None, + include_reference_source_data: Optional[bool] = None, + always_query_source: Optional[bool] = None, + reranker_threshold: Optional[float] = None, + **kwargs: Any + ) -> None: + """ + :keyword knowledge_source_name: The name of the index the params apply to. Required. + :paramtype knowledge_source_name: str + :keyword include_references: Indicates whether references should be included for data retrieved + from this source. + :paramtype include_references: bool + :keyword include_reference_source_data: Indicates whether references should include the + structured data obtained during retrieval in their payload. + :paramtype include_reference_source_data: bool + :keyword always_query_source: Indicates that this knowledge source should bypass source + selection and always be queried at retrieval time. + :paramtype always_query_source: bool + :keyword reranker_threshold: The reranker threshold all retrieved documents must meet to be + included in the response. 
+ :paramtype reranker_threshold: float + """ + super().__init__(**kwargs) + self.knowledge_source_name = knowledge_source_name + self.include_references = include_references + self.include_reference_source_data = include_reference_source_data + self.always_query_source = always_query_source + self.reranker_threshold = reranker_threshold + self.kind: Optional[str] = None + + +class AzureBlobKnowledgeSourceParams(KnowledgeSourceParams): + """Specifies runtime parameters for an azure blob knowledge source. + + All required parameters must be populated in order to send to server. + + :ivar knowledge_source_name: The name of the index the params apply to. Required. + :vartype knowledge_source_name: str + :ivar include_references: Indicates whether references should be included for data retrieved + from this source. + :vartype include_references: bool + :ivar include_reference_source_data: Indicates whether references should include the structured + data obtained during retrieval in their payload. + :vartype include_reference_source_data: bool + :ivar always_query_source: Indicates that this knowledge source should bypass source selection + and always be queried at retrieval time. + :vartype always_query_source: bool + :ivar reranker_threshold: The reranker threshold all retrieved documents must meet to be + included in the response. + :vartype reranker_threshold: float + :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex", + "azureBlob", "web", "remoteSharePoint", "indexedSharePoint", and "indexedOneLake". + :vartype kind: str or ~azure.search.documents.knowledgebases.models.KnowledgeSourceKind + """ + + _validation = { + "knowledge_source_name": {"required": True}, + "kind": {"required": True}, + } + + _attribute_map = { + "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, + "include_references": {"key": "includeReferences", "type": "bool"}, + "include_reference_source_data": {"key": "includeReferenceSourceData", "type": "bool"}, + "always_query_source": {"key": "alwaysQuerySource", "type": "bool"}, + "reranker_threshold": {"key": "rerankerThreshold", "type": "float"}, + "kind": {"key": "kind", "type": "str"}, + } + + def __init__( + self, + *, + knowledge_source_name: str, + include_references: Optional[bool] = None, + include_reference_source_data: Optional[bool] = None, + always_query_source: Optional[bool] = None, + reranker_threshold: Optional[float] = None, + **kwargs: Any + ) -> None: + """ + :keyword knowledge_source_name: The name of the index the params apply to. Required. + :paramtype knowledge_source_name: str + :keyword include_references: Indicates whether references should be included for data retrieved + from this source. + :paramtype include_references: bool + :keyword include_reference_source_data: Indicates whether references should include the + structured data obtained during retrieval in their payload. + :paramtype include_reference_source_data: bool + :keyword always_query_source: Indicates that this knowledge source should bypass source + selection and always be queried at retrieval time. + :paramtype always_query_source: bool + :keyword reranker_threshold: The reranker threshold all retrieved documents must meet to be + included in the response.
+ :paramtype reranker_threshold: float + """ + super().__init__( + knowledge_source_name=knowledge_source_name, + include_references=include_references, + include_reference_source_data=include_reference_source_data, + always_query_source=always_query_source, + reranker_threshold=reranker_threshold, + **kwargs + ) + self.kind: str = "azureBlob" + + +class ErrorAdditionalInfo(_serialization.Model): + """The resource management error additional info. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar type: The additional info type. + :vartype type: str + :ivar info: The additional info. + :vartype info: JSON + """ + + _validation = { + "type": {"readonly": True}, + "info": {"readonly": True}, + } + + _attribute_map = { + "type": {"key": "type", "type": "str"}, + "info": {"key": "info", "type": "object"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.type: Optional[str] = None + self.info: Optional[JSON] = None + + +class ErrorDetail(_serialization.Model): + """The error detail. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar code: The error code. + :vartype code: str + :ivar message: The error message. + :vartype message: str + :ivar target: The error target. + :vartype target: str + :ivar details: The error details. + :vartype details: list[~azure.search.documents.knowledgebases.models.ErrorDetail] + :ivar additional_info: The error additional info. + :vartype additional_info: + list[~azure.search.documents.knowledgebases.models.ErrorAdditionalInfo] + """ + + _validation = { + "code": {"readonly": True}, + "message": {"readonly": True}, + "target": {"readonly": True}, + "details": {"readonly": True}, + "additional_info": {"readonly": True}, + } + + _attribute_map = { + "code": {"key": "code", "type": "str"}, + "message": {"key": "message", "type": "str"}, + "target": {"key": "target", "type": "str"}, + "details": {"key": "details", "type": "[ErrorDetail]"}, + "additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.code: Optional[str] = None + self.message: Optional[str] = None + self.target: Optional[str] = None + self.details: Optional[list["_models.ErrorDetail"]] = None + self.additional_info: Optional[list["_models.ErrorAdditionalInfo"]] = None + + +class ErrorResponse(_serialization.Model): + """Common error response for all Azure Resource Manager APIs to return error details for failed + operations. (This also follows the OData error response format.). + + :ivar error: The error object. + :vartype error: ~azure.search.documents.knowledgebases.models.ErrorDetail + """ + + _attribute_map = { + "error": {"key": "error", "type": "ErrorDetail"}, + } + + def __init__(self, *, error: Optional["_models.ErrorDetail"] = None, **kwargs: Any) -> None: + """ + :keyword error: The error object. + :paramtype error: ~azure.search.documents.knowledgebases.models.ErrorDetail + """ + super().__init__(**kwargs) + self.error = error + + +class IndexedOneLakeKnowledgeSourceParams(KnowledgeSourceParams): + """Specifies runtime parameters for a indexed OneLake knowledge source. + + All required parameters must be populated in order to send to server. + + :ivar knowledge_source_name: The name of the index the params apply to. Required. 
+ :vartype knowledge_source_name: str + :ivar include_references: Indicates whether references should be included for data retrieved + from this source. + :vartype include_references: bool + :ivar include_reference_source_data: Indicates whether references should include the structured + data obtained during retrieval in their payload. + :vartype include_reference_source_data: bool + :ivar always_query_source: Indicates that this knowledge source should bypass source selection + and always be queried at retrieval time. + :vartype always_query_source: bool + :ivar reranker_threshold: The reranker threshold all retrieved documents must meet to be + included in the response. + :vartype reranker_threshold: float + :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex", + "azureBlob", "web", "remoteSharePoint", "indexedSharePoint", and "indexedOneLake". + :vartype kind: str or ~azure.search.documents.knowledgebases.models.KnowledgeSourceKind + """ + + _validation = { + "knowledge_source_name": {"required": True}, + "kind": {"required": True}, + } + + _attribute_map = { + "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, + "include_references": {"key": "includeReferences", "type": "bool"}, + "include_reference_source_data": {"key": "includeReferenceSourceData", "type": "bool"}, + "always_query_source": {"key": "alwaysQuerySource", "type": "bool"}, + "reranker_threshold": {"key": "rerankerThreshold", "type": "float"}, + "kind": {"key": "kind", "type": "str"}, + } + + def __init__( + self, + *, + knowledge_source_name: str, + include_references: Optional[bool] = None, + include_reference_source_data: Optional[bool] = None, + always_query_source: Optional[bool] = None, + reranker_threshold: Optional[float] = None, + **kwargs: Any + ) -> None: + """ + :keyword knowledge_source_name: The name of the index the params apply to. Required. + :paramtype knowledge_source_name: str + :keyword include_references: Indicates whether references should be included for data retrieved + from this source. + :paramtype include_references: bool + :keyword include_reference_source_data: Indicates whether references should include the + structured data obtained during retrieval in their payload. + :paramtype include_reference_source_data: bool + :keyword always_query_source: Indicates that this knowledge source should bypass source + selection and always be queried at retrieval time. + :paramtype always_query_source: bool + :keyword reranker_threshold: The reranker threshold all retrieved documents must meet to be + included in the response. + :paramtype reranker_threshold: float + """ + super().__init__( + knowledge_source_name=knowledge_source_name, + include_references=include_references, + include_reference_source_data=include_reference_source_data, + always_query_source=always_query_source, + reranker_threshold=reranker_threshold, + **kwargs + ) + self.kind: str = "indexedOneLake" + + +class IndexedSharePointKnowledgeSourceParams(KnowledgeSourceParams): + """Specifies runtime parameters for a indexed SharePoint knowledge source. + + All required parameters must be populated in order to send to server. + + :ivar knowledge_source_name: The name of the index the params apply to. Required. + :vartype knowledge_source_name: str + :ivar include_references: Indicates whether references should be included for data retrieved + from this source. 
+ :vartype include_references: bool + :ivar include_reference_source_data: Indicates whether references should include the structured + data obtained during retrieval in their payload. + :vartype include_reference_source_data: bool + :ivar always_query_source: Indicates that this knowledge source should bypass source selection + and always be queried at retrieval time. + :vartype always_query_source: bool + :ivar reranker_threshold: The reranker threshold all retrieved documents must meet to be + included in the response. + :vartype reranker_threshold: float + :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex", + "azureBlob", "web", "remoteSharePoint", "indexedSharePoint", and "indexedOneLake". + :vartype kind: str or ~azure.search.documents.knowledgebases.models.KnowledgeSourceKind + """ + + _validation = { + "knowledge_source_name": {"required": True}, + "kind": {"required": True}, + } + + _attribute_map = { + "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, + "include_references": {"key": "includeReferences", "type": "bool"}, + "include_reference_source_data": {"key": "includeReferenceSourceData", "type": "bool"}, + "always_query_source": {"key": "alwaysQuerySource", "type": "bool"}, + "reranker_threshold": {"key": "rerankerThreshold", "type": "float"}, + "kind": {"key": "kind", "type": "str"}, + } + + def __init__( + self, + *, + knowledge_source_name: str, + include_references: Optional[bool] = None, + include_reference_source_data: Optional[bool] = None, + always_query_source: Optional[bool] = None, + reranker_threshold: Optional[float] = None, + **kwargs: Any + ) -> None: + """ + :keyword knowledge_source_name: The name of the index the params apply to. Required. + :paramtype knowledge_source_name: str + :keyword include_references: Indicates whether references should be included for data retrieved + from this source. + :paramtype include_references: bool + :keyword include_reference_source_data: Indicates whether references should include the + structured data obtained during retrieval in their payload. + :paramtype include_reference_source_data: bool + :keyword always_query_source: Indicates that this knowledge source should bypass source + selection and always be queried at retrieval time. + :paramtype always_query_source: bool + :keyword reranker_threshold: The reranker threshold all retrieved documents must meet to be + included in the response. + :paramtype reranker_threshold: float + """ + super().__init__( + knowledge_source_name=knowledge_source_name, + include_references=include_references, + include_reference_source_data=include_reference_source_data, + always_query_source=always_query_source, + reranker_threshold=reranker_threshold, + **kwargs + ) + self.kind: str = "indexedSharePoint" + + +class KnowledgeBaseActivityRecord(_serialization.Model): + """Base type for activity records. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + KnowledgeBaseRetrievalActivityRecord, KnowledgeBaseAgenticReasoningActivityRecord, + KnowledgeBaseModelAnswerSynthesisActivityRecord, KnowledgeBaseModelQueryPlanningActivityRecord + + All required parameters must be populated in order to send to server. + + :ivar id: The ID of the activity record. Required. + :vartype id: int + :ivar type: The type of the activity record. Required. + :vartype type: str + :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. 
+ :vartype elapsed_ms: int + :ivar error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. + :vartype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + """ + + _validation = { + "id": {"required": True}, + "type": {"required": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "int"}, + "type": {"key": "type", "type": "str"}, + "elapsed_ms": {"key": "elapsedMs", "type": "int"}, + "error": {"key": "error", "type": "KnowledgeBaseErrorDetail"}, + } + + _subtype_map = { + "type": { + "KnowledgeBaseRetrievalActivityRecord": "KnowledgeBaseRetrievalActivityRecord", + "agenticReasoning": "KnowledgeBaseAgenticReasoningActivityRecord", + "modelAnswerSynthesis": "KnowledgeBaseModelAnswerSynthesisActivityRecord", + "modelQueryPlanning": "KnowledgeBaseModelQueryPlanningActivityRecord", + } + } + + def __init__( + self, + *, + id: int, # pylint: disable=redefined-builtin + elapsed_ms: Optional[int] = None, + error: Optional["_models.KnowledgeBaseErrorDetail"] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: The ID of the activity record. Required. + :paramtype id: int + :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity. + :paramtype elapsed_ms: int + :keyword error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. + :paramtype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + """ + super().__init__(**kwargs) + self.id = id + self.type: Optional[str] = None + self.elapsed_ms = elapsed_ms + self.error = error + + +class KnowledgeBaseAgenticReasoningActivityRecord(KnowledgeBaseActivityRecord): # pylint: disable=name-too-long + """Represents an agentic reasoning activity record. + + All required parameters must be populated in order to send to server. + + :ivar id: The ID of the activity record. Required. + :vartype id: int + :ivar type: The type of the activity record. Required. + :vartype type: str + :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. + :vartype elapsed_ms: int + :ivar error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. + :vartype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + :ivar reasoning_tokens: The number of input tokens for agentic reasoning. 
+ :vartype reasoning_tokens: int + :ivar retrieval_reasoning_effort: + :vartype retrieval_reasoning_effort: + ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalReasoningEffort + """ + + _validation = { + "id": {"required": True}, + "type": {"required": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "int"}, + "type": {"key": "type", "type": "str"}, + "elapsed_ms": {"key": "elapsedMs", "type": "int"}, + "error": {"key": "error", "type": "KnowledgeBaseErrorDetail"}, + "reasoning_tokens": {"key": "reasoningTokens", "type": "int"}, + "retrieval_reasoning_effort": {"key": "retrievalReasoningEffort", "type": "KnowledgeRetrievalReasoningEffort"}, + } + + def __init__( + self, + *, + id: int, # pylint: disable=redefined-builtin + elapsed_ms: Optional[int] = None, + error: Optional["_models.KnowledgeBaseErrorDetail"] = None, + reasoning_tokens: Optional[int] = None, + retrieval_reasoning_effort: Optional["_models.KnowledgeRetrievalReasoningEffort"] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: The ID of the activity record. Required. + :paramtype id: int + :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity. + :paramtype elapsed_ms: int + :keyword error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. + :paramtype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + :keyword reasoning_tokens: The number of input tokens for agentic reasoning. + :paramtype reasoning_tokens: int + :keyword retrieval_reasoning_effort: + :paramtype retrieval_reasoning_effort: + ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalReasoningEffort + """ + super().__init__(id=id, elapsed_ms=elapsed_ms, error=error, **kwargs) + self.type: str = "agenticReasoning" + self.reasoning_tokens = reasoning_tokens + self.retrieval_reasoning_effort = retrieval_reasoning_effort + + +class KnowledgeBaseAzureBlobActivityArguments(_serialization.Model): + """Represents the arguments the azure blob retrieval activity was run with. + + :ivar search: The search string used to query blob contents. + :vartype search: str + """ + + _attribute_map = { + "search": {"key": "search", "type": "str"}, + } + + def __init__(self, *, search: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword search: The search string used to query blob contents. + :paramtype search: str + """ + super().__init__(**kwargs) + self.search = search + + +class KnowledgeBaseRetrievalActivityRecord(KnowledgeBaseActivityRecord): + """Represents a retrieval activity record. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + KnowledgeBaseAzureBlobActivityRecord, KnowledgeBaseIndexedOneLakeActivityRecord, + KnowledgeBaseIndexedSharePointActivityRecord, KnowledgeBaseRemoteSharePointActivityRecord, + KnowledgeBaseSearchIndexActivityRecord, KnowledgeBaseWebActivityRecord + + All required parameters must be populated in order to send to server. + + :ivar id: The ID of the activity record. Required. + :vartype id: int + :ivar type: The type of the activity record. Required. + :vartype type: str + :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. + :vartype elapsed_ms: int + :ivar error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. 
+ :vartype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + :ivar knowledge_source_name: The knowledge source for the retrieval activity. + :vartype knowledge_source_name: str + :ivar query_time: The query time for this retrieval activity. + :vartype query_time: ~datetime.datetime + :ivar count: The count of documents retrieved that were sufficiently relevant to pass the + reranker threshold. + :vartype count: int + """ + + _validation = { + "id": {"required": True}, + "type": {"required": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "int"}, + "type": {"key": "type", "type": "str"}, + "elapsed_ms": {"key": "elapsedMs", "type": "int"}, + "error": {"key": "error", "type": "KnowledgeBaseErrorDetail"}, + "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, + "query_time": {"key": "queryTime", "type": "iso-8601"}, + "count": {"key": "count", "type": "int"}, + } + + _subtype_map = { + "type": { + "azureBlob": "KnowledgeBaseAzureBlobActivityRecord", + "indexedOneLake": "KnowledgeBaseIndexedOneLakeActivityRecord", + "indexedSharePoint": "KnowledgeBaseIndexedSharePointActivityRecord", + "remoteSharePoint": "KnowledgeBaseRemoteSharePointActivityRecord", + "searchIndex": "KnowledgeBaseSearchIndexActivityRecord", + "web": "KnowledgeBaseWebActivityRecord", + } + } + + def __init__( + self, + *, + id: int, # pylint: disable=redefined-builtin + elapsed_ms: Optional[int] = None, + error: Optional["_models.KnowledgeBaseErrorDetail"] = None, + knowledge_source_name: Optional[str] = None, + query_time: Optional[datetime.datetime] = None, + count: Optional[int] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: The ID of the activity record. Required. + :paramtype id: int + :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity. + :paramtype elapsed_ms: int + :keyword error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. + :paramtype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + :keyword knowledge_source_name: The knowledge source for the retrieval activity. + :paramtype knowledge_source_name: str + :keyword query_time: The query time for this retrieval activity. + :paramtype query_time: ~datetime.datetime + :keyword count: The count of documents retrieved that were sufficiently relevant to pass the + reranker threshold. + :paramtype count: int + """ + super().__init__(id=id, elapsed_ms=elapsed_ms, error=error, **kwargs) + self.type: str = "KnowledgeBaseRetrievalActivityRecord" + self.knowledge_source_name = knowledge_source_name + self.query_time = query_time + self.count = count + + +class KnowledgeBaseAzureBlobActivityRecord(KnowledgeBaseRetrievalActivityRecord): + """Represents a azure blob retrieval activity record. + + All required parameters must be populated in order to send to server. + + :ivar id: The ID of the activity record. Required. + :vartype id: int + :ivar type: The type of the activity record. Required. + :vartype type: str + :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. + :vartype elapsed_ms: int + :ivar error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. + :vartype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + :ivar knowledge_source_name: The knowledge source for the retrieval activity. 
+ :vartype knowledge_source_name: str + :ivar query_time: The query time for this retrieval activity. + :vartype query_time: ~datetime.datetime + :ivar count: The count of documents retrieved that were sufficiently relevant to pass the + reranker threshold. + :vartype count: int + :ivar azure_blob_arguments: The azure blob arguments for the retrieval activity. + :vartype azure_blob_arguments: + ~azure.search.documents.knowledgebases.models.KnowledgeBaseAzureBlobActivityArguments + """ + + _validation = { + "id": {"required": True}, + "type": {"required": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "int"}, + "type": {"key": "type", "type": "str"}, + "elapsed_ms": {"key": "elapsedMs", "type": "int"}, + "error": {"key": "error", "type": "KnowledgeBaseErrorDetail"}, + "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, + "query_time": {"key": "queryTime", "type": "iso-8601"}, + "count": {"key": "count", "type": "int"}, + "azure_blob_arguments": {"key": "azureBlobArguments", "type": "KnowledgeBaseAzureBlobActivityArguments"}, + } + + def __init__( + self, + *, + id: int, # pylint: disable=redefined-builtin + elapsed_ms: Optional[int] = None, + error: Optional["_models.KnowledgeBaseErrorDetail"] = None, + knowledge_source_name: Optional[str] = None, + query_time: Optional[datetime.datetime] = None, + count: Optional[int] = None, + azure_blob_arguments: Optional["_models.KnowledgeBaseAzureBlobActivityArguments"] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: The ID of the activity record. Required. + :paramtype id: int + :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity. + :paramtype elapsed_ms: int + :keyword error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. + :paramtype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + :keyword knowledge_source_name: The knowledge source for the retrieval activity. + :paramtype knowledge_source_name: str + :keyword query_time: The query time for this retrieval activity. + :paramtype query_time: ~datetime.datetime + :keyword count: The count of documents retrieved that were sufficiently relevant to pass the + reranker threshold. + :paramtype count: int + :keyword azure_blob_arguments: The azure blob arguments for the retrieval activity. + :paramtype azure_blob_arguments: + ~azure.search.documents.knowledgebases.models.KnowledgeBaseAzureBlobActivityArguments + """ + super().__init__( + id=id, + elapsed_ms=elapsed_ms, + error=error, + knowledge_source_name=knowledge_source_name, + query_time=query_time, + count=count, + **kwargs + ) + self.type: str = "azureBlob" + self.azure_blob_arguments = azure_blob_arguments + + +class KnowledgeBaseReference(_serialization.Model): + """Base type for references. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + KnowledgeBaseAzureBlobReference, KnowledgeBaseIndexedOneLakeReference, + KnowledgeBaseIndexedSharePointReference, KnowledgeBaseRemoteSharePointReference, + KnowledgeBaseSearchIndexReference, KnowledgeBaseWebReference + + All required parameters must be populated in order to send to server. + + :ivar type: The type of the reference. Required. + :vartype type: str + :ivar id: The ID of the reference. Required. + :vartype id: str + :ivar activity_source: The source activity ID for the reference. Required. + :vartype activity_source: int + :ivar source_data: Dictionary of :code:``. 
+ :vartype source_data: dict[str, any] + :ivar reranker_score: The reranker score for the document reference. + :vartype reranker_score: float + """ + + _validation = { + "type": {"required": True}, + "id": {"required": True}, + "activity_source": {"required": True}, + } + + _attribute_map = { + "type": {"key": "type", "type": "str"}, + "id": {"key": "id", "type": "str"}, + "activity_source": {"key": "activitySource", "type": "int"}, + "source_data": {"key": "sourceData", "type": "{object}"}, + "reranker_score": {"key": "rerankerScore", "type": "float"}, + } + + _subtype_map = { + "type": { + "azureBlob": "KnowledgeBaseAzureBlobReference", + "indexedOneLake": "KnowledgeBaseIndexedOneLakeReference", + "indexedSharePoint": "KnowledgeBaseIndexedSharePointReference", + "remoteSharePoint": "KnowledgeBaseRemoteSharePointReference", + "searchIndex": "KnowledgeBaseSearchIndexReference", + "web": "KnowledgeBaseWebReference", + } + } + + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + activity_source: int, + source_data: Optional[dict[str, Any]] = None, + reranker_score: Optional[float] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: The ID of the reference. Required. + :paramtype id: str + :keyword activity_source: The source activity ID for the reference. Required. + :paramtype activity_source: int + :keyword source_data: Dictionary of :code:``. + :paramtype source_data: dict[str, any] + :keyword reranker_score: The reranker score for the document reference. + :paramtype reranker_score: float + """ + super().__init__(**kwargs) + self.type: Optional[str] = None + self.id = id + self.activity_source = activity_source + self.source_data = source_data + self.reranker_score = reranker_score + + +class KnowledgeBaseAzureBlobReference(KnowledgeBaseReference): + """Represents an Azure Blob Storage document reference. + + All required parameters must be populated in order to send to server. + + :ivar type: The type of the reference. Required. + :vartype type: str + :ivar id: The ID of the reference. Required. + :vartype id: str + :ivar activity_source: The source activity ID for the reference. Required. + :vartype activity_source: int + :ivar source_data: Dictionary of :code:``. + :vartype source_data: dict[str, any] + :ivar reranker_score: The reranker score for the document reference. + :vartype reranker_score: float + :ivar blob_url: The blob URL for the reference. + :vartype blob_url: str + """ + + _validation = { + "type": {"required": True}, + "id": {"required": True}, + "activity_source": {"required": True}, + } + + _attribute_map = { + "type": {"key": "type", "type": "str"}, + "id": {"key": "id", "type": "str"}, + "activity_source": {"key": "activitySource", "type": "int"}, + "source_data": {"key": "sourceData", "type": "{object}"}, + "reranker_score": {"key": "rerankerScore", "type": "float"}, + "blob_url": {"key": "blobUrl", "type": "str"}, + } + + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + activity_source: int, + source_data: Optional[dict[str, Any]] = None, + reranker_score: Optional[float] = None, + blob_url: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: The ID of the reference. Required. + :paramtype id: str + :keyword activity_source: The source activity ID for the reference. Required. + :paramtype activity_source: int + :keyword source_data: Dictionary of :code:``. + :paramtype source_data: dict[str, any] + :keyword reranker_score: The reranker score for the document reference. 
+ :paramtype reranker_score: float + :keyword blob_url: The blob URL for the reference. + :paramtype blob_url: str + """ + super().__init__( + id=id, activity_source=activity_source, source_data=source_data, reranker_score=reranker_score, **kwargs + ) + self.type: str = "azureBlob" + self.blob_url = blob_url + + +class KnowledgeBaseErrorAdditionalInfo(_serialization.Model): + """The resource management error additional info. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar type: The additional info type. + :vartype type: str + :ivar info: The additional info. + :vartype info: JSON + """ + + _validation = { + "type": {"readonly": True}, + "info": {"readonly": True}, + } + + _attribute_map = { + "type": {"key": "type", "type": "str"}, + "info": {"key": "info", "type": "object"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.type: Optional[str] = None + self.info: Optional[JSON] = None + + +class KnowledgeBaseErrorDetail(_serialization.Model): + """The error details. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar code: The error code. + :vartype code: str + :ivar message: The error message. + :vartype message: str + :ivar target: The error target. + :vartype target: str + :ivar details: The error details. + :vartype details: list[~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail] + :ivar additional_info: The error additional info. + :vartype additional_info: + list[~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorAdditionalInfo] + """ + + _validation = { + "code": {"readonly": True}, + "message": {"readonly": True}, + "target": {"readonly": True}, + "details": {"readonly": True}, + "additional_info": {"readonly": True}, + } + + _attribute_map = { + "code": {"key": "code", "type": "str"}, + "message": {"key": "message", "type": "str"}, + "target": {"key": "target", "type": "str"}, + "details": {"key": "details", "type": "[KnowledgeBaseErrorDetail]"}, + "additional_info": {"key": "additionalInfo", "type": "[KnowledgeBaseErrorAdditionalInfo]"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.code: Optional[str] = None + self.message: Optional[str] = None + self.target: Optional[str] = None + self.details: Optional[list["_models.KnowledgeBaseErrorDetail"]] = None + self.additional_info: Optional[list["_models.KnowledgeBaseErrorAdditionalInfo"]] = None + + +class KnowledgeBaseIndexedOneLakeActivityArguments(_serialization.Model): # pylint: disable=name-too-long + """Represents the arguments the indexed OneLake retrieval activity was run with. + + :ivar search: The search string used to query indexed OneLake contents. + :vartype search: str + """ + + _attribute_map = { + "search": {"key": "search", "type": "str"}, + } + + def __init__(self, *, search: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword search: The search string used to query indexed OneLake contents. + :paramtype search: str + """ + super().__init__(**kwargs) + self.search = search + + +class KnowledgeBaseIndexedOneLakeActivityRecord(KnowledgeBaseRetrievalActivityRecord): # pylint: disable=name-too-long + """Represents a indexed OneLake retrieval activity record. + + All required parameters must be populated in order to send to server. + + :ivar id: The ID of the activity record. Required. + :vartype id: int + :ivar type: The type of the activity record. Required. 
+    :vartype type: str
+    :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity.
+    :vartype elapsed_ms: int
+    :ivar error: The error detail explaining why the operation failed. This property is only
+     included when the activity does not succeed.
+    :vartype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail
+    :ivar knowledge_source_name: The knowledge source for the retrieval activity.
+    :vartype knowledge_source_name: str
+    :ivar query_time: The query time for this retrieval activity.
+    :vartype query_time: ~datetime.datetime
+    :ivar count: The count of documents retrieved that were sufficiently relevant to pass the
+     reranker threshold.
+    :vartype count: int
+    :ivar indexed_one_lake_arguments: The indexed OneLake arguments for the retrieval activity.
+    :vartype indexed_one_lake_arguments:
+     ~azure.search.documents.knowledgebases.models.KnowledgeBaseIndexedOneLakeActivityArguments
+    """
+
+    _validation = {
+        "id": {"required": True},
+        "type": {"required": True},
+    }
+
+    _attribute_map = {
+        "id": {"key": "id", "type": "int"},
+        "type": {"key": "type", "type": "str"},
+        "elapsed_ms": {"key": "elapsedMs", "type": "int"},
+        "error": {"key": "error", "type": "KnowledgeBaseErrorDetail"},
+        "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"},
+        "query_time": {"key": "queryTime", "type": "iso-8601"},
+        "count": {"key": "count", "type": "int"},
+        "indexed_one_lake_arguments": {
+            "key": "indexedOneLakeArguments",
+            "type": "KnowledgeBaseIndexedOneLakeActivityArguments",
+        },
+    }
+
+    def __init__(
+        self,
+        *,
+        id: int,  # pylint: disable=redefined-builtin
+        elapsed_ms: Optional[int] = None,
+        error: Optional["_models.KnowledgeBaseErrorDetail"] = None,
+        knowledge_source_name: Optional[str] = None,
+        query_time: Optional[datetime.datetime] = None,
+        count: Optional[int] = None,
+        indexed_one_lake_arguments: Optional["_models.KnowledgeBaseIndexedOneLakeActivityArguments"] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword id: The ID of the activity record. Required.
+        :paramtype id: int
+        :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity.
+        :paramtype elapsed_ms: int
+        :keyword error: The error detail explaining why the operation failed. This property is only
+         included when the activity does not succeed.
+        :paramtype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail
+        :keyword knowledge_source_name: The knowledge source for the retrieval activity.
+        :paramtype knowledge_source_name: str
+        :keyword query_time: The query time for this retrieval activity.
+        :paramtype query_time: ~datetime.datetime
+        :keyword count: The count of documents retrieved that were sufficiently relevant to pass the
+         reranker threshold.
+        :paramtype count: int
+        :keyword indexed_one_lake_arguments: The indexed OneLake arguments for the retrieval activity.
+        :paramtype indexed_one_lake_arguments:
+         ~azure.search.documents.knowledgebases.models.KnowledgeBaseIndexedOneLakeActivityArguments
+        """
+        super().__init__(
+            id=id,
+            elapsed_ms=elapsed_ms,
+            error=error,
+            knowledge_source_name=knowledge_source_name,
+            query_time=query_time,
+            count=count,
+            **kwargs
+        )
+        self.type: str = "indexedOneLake"
+        self.indexed_one_lake_arguments = indexed_one_lake_arguments
+
+
+class KnowledgeBaseIndexedOneLakeReference(KnowledgeBaseReference):
+    """Represents an indexed OneLake document reference.
+
+    All required parameters must be populated in order to send to server.
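+
+    A minimal construction sketch, with hypothetical values (in practice these references are
+    returned by the service in a retrieval response rather than built by callers; the import path
+    follows the ``azure.search.documents.knowledgebases.models`` cross-references used in these
+    docstrings)::
+
+        from azure.search.documents.knowledgebases.models import KnowledgeBaseIndexedOneLakeReference
+
+        reference = KnowledgeBaseIndexedOneLakeReference(
+            id="1",
+            activity_source=0,
+            doc_url="https://contoso.example/reports/summary.docx",
+        )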
+ + :ivar type: The type of the reference. Required. + :vartype type: str + :ivar id: The ID of the reference. Required. + :vartype id: str + :ivar activity_source: The source activity ID for the reference. Required. + :vartype activity_source: int + :ivar source_data: Dictionary of :code:``. + :vartype source_data: dict[str, any] + :ivar reranker_score: The reranker score for the document reference. + :vartype reranker_score: float + :ivar doc_url: The document URL for the reference. + :vartype doc_url: str + """ + + _validation = { + "type": {"required": True}, + "id": {"required": True}, + "activity_source": {"required": True}, + } + + _attribute_map = { + "type": {"key": "type", "type": "str"}, + "id": {"key": "id", "type": "str"}, + "activity_source": {"key": "activitySource", "type": "int"}, + "source_data": {"key": "sourceData", "type": "{object}"}, + "reranker_score": {"key": "rerankerScore", "type": "float"}, + "doc_url": {"key": "docUrl", "type": "str"}, + } + + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + activity_source: int, + source_data: Optional[dict[str, Any]] = None, + reranker_score: Optional[float] = None, + doc_url: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: The ID of the reference. Required. + :paramtype id: str + :keyword activity_source: The source activity ID for the reference. Required. + :paramtype activity_source: int + :keyword source_data: Dictionary of :code:``. + :paramtype source_data: dict[str, any] + :keyword reranker_score: The reranker score for the document reference. + :paramtype reranker_score: float + :keyword doc_url: The document URL for the reference. + :paramtype doc_url: str + """ + super().__init__( + id=id, activity_source=activity_source, source_data=source_data, reranker_score=reranker_score, **kwargs + ) + self.type: str = "indexedOneLake" + self.doc_url = doc_url + + +class KnowledgeBaseIndexedSharePointActivityArguments(_serialization.Model): # pylint: disable=name-too-long + """Represents the arguments the indexed SharePoint retrieval activity was run with. + + :ivar search: The search string used to query indexed SharePoint contents. + :vartype search: str + """ + + _attribute_map = { + "search": {"key": "search", "type": "str"}, + } + + def __init__(self, *, search: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword search: The search string used to query indexed SharePoint contents. + :paramtype search: str + """ + super().__init__(**kwargs) + self.search = search + + +class KnowledgeBaseIndexedSharePointActivityRecord( + KnowledgeBaseRetrievalActivityRecord +): # pylint: disable=name-too-long + """Represents a indexed SharePoint retrieval activity record. + + All required parameters must be populated in order to send to server. + + :ivar id: The ID of the activity record. Required. + :vartype id: int + :ivar type: The type of the activity record. Required. + :vartype type: str + :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. + :vartype elapsed_ms: int + :ivar error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. + :vartype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + :ivar knowledge_source_name: The knowledge source for the retrieval activity. + :vartype knowledge_source_name: str + :ivar query_time: The query time for this retrieval activity. 
+    :vartype query_time: ~datetime.datetime
+    :ivar count: The count of documents retrieved that were sufficiently relevant to pass the
+     reranker threshold.
+    :vartype count: int
+    :ivar indexed_share_point_arguments: The indexed SharePoint arguments for the retrieval
+     activity.
+    :vartype indexed_share_point_arguments:
+     ~azure.search.documents.knowledgebases.models.KnowledgeBaseIndexedSharePointActivityArguments
+    """
+
+    _validation = {
+        "id": {"required": True},
+        "type": {"required": True},
+    }
+
+    _attribute_map = {
+        "id": {"key": "id", "type": "int"},
+        "type": {"key": "type", "type": "str"},
+        "elapsed_ms": {"key": "elapsedMs", "type": "int"},
+        "error": {"key": "error", "type": "KnowledgeBaseErrorDetail"},
+        "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"},
+        "query_time": {"key": "queryTime", "type": "iso-8601"},
+        "count": {"key": "count", "type": "int"},
+        "indexed_share_point_arguments": {
+            "key": "indexedSharePointArguments",
+            "type": "KnowledgeBaseIndexedSharePointActivityArguments",
+        },
+    }
+
+    def __init__(
+        self,
+        *,
+        id: int,  # pylint: disable=redefined-builtin
+        elapsed_ms: Optional[int] = None,
+        error: Optional["_models.KnowledgeBaseErrorDetail"] = None,
+        knowledge_source_name: Optional[str] = None,
+        query_time: Optional[datetime.datetime] = None,
+        count: Optional[int] = None,
+        indexed_share_point_arguments: Optional["_models.KnowledgeBaseIndexedSharePointActivityArguments"] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword id: The ID of the activity record. Required.
+        :paramtype id: int
+        :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity.
+        :paramtype elapsed_ms: int
+        :keyword error: The error detail explaining why the operation failed. This property is only
+         included when the activity does not succeed.
+        :paramtype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail
+        :keyword knowledge_source_name: The knowledge source for the retrieval activity.
+        :paramtype knowledge_source_name: str
+        :keyword query_time: The query time for this retrieval activity.
+        :paramtype query_time: ~datetime.datetime
+        :keyword count: The count of documents retrieved that were sufficiently relevant to pass the
+         reranker threshold.
+        :paramtype count: int
+        :keyword indexed_share_point_arguments: The indexed SharePoint arguments for the retrieval
+         activity.
+        :paramtype indexed_share_point_arguments:
+         ~azure.search.documents.knowledgebases.models.KnowledgeBaseIndexedSharePointActivityArguments
+        """
+        super().__init__(
+            id=id,
+            elapsed_ms=elapsed_ms,
+            error=error,
+            knowledge_source_name=knowledge_source_name,
+            query_time=query_time,
+            count=count,
+            **kwargs
+        )
+        self.type: str = "indexedSharePoint"
+        self.indexed_share_point_arguments = indexed_share_point_arguments
+
+
+class KnowledgeBaseIndexedSharePointReference(KnowledgeBaseReference):
+    """Represents an indexed SharePoint document reference.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar type: The type of the reference. Required.
+    :vartype type: str
+    :ivar id: The ID of the reference. Required.
+    :vartype id: str
+    :ivar activity_source: The source activity ID for the reference. Required.
+    :vartype activity_source: int
+    :ivar source_data: Dictionary of :code:``.
+    :vartype source_data: dict[str, any]
+    :ivar reranker_score: The reranker score for the document reference.
+    :vartype reranker_score: float
+    :ivar doc_url: The document URL for the reference.
+    :vartype doc_url: str
+    """
+
+    _validation = {
+        "type": {"required": True},
+        "id": {"required": True},
+        "activity_source": {"required": True},
+    }
+
+    _attribute_map = {
+        "type": {"key": "type", "type": "str"},
+        "id": {"key": "id", "type": "str"},
+        "activity_source": {"key": "activitySource", "type": "int"},
+        "source_data": {"key": "sourceData", "type": "{object}"},
+        "reranker_score": {"key": "rerankerScore", "type": "float"},
+        "doc_url": {"key": "docUrl", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        activity_source: int,
+        source_data: Optional[dict[str, Any]] = None,
+        reranker_score: Optional[float] = None,
+        doc_url: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword id: The ID of the reference. Required.
+        :paramtype id: str
+        :keyword activity_source: The source activity ID for the reference. Required.
+        :paramtype activity_source: int
+        :keyword source_data: Dictionary of :code:``.
+        :paramtype source_data: dict[str, any]
+        :keyword reranker_score: The reranker score for the document reference.
+        :paramtype reranker_score: float
+        :keyword doc_url: The document URL for the reference.
+        :paramtype doc_url: str
+        """
+        super().__init__(
+            id=id, activity_source=activity_source, source_data=source_data, reranker_score=reranker_score, **kwargs
+        )
+        self.type: str = "indexedSharePoint"
+        self.doc_url = doc_url
+
+
+class KnowledgeBaseMessage(_serialization.Model):
+    """The natural language message style object.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar role: The role of the tool response.
+    :vartype role: str
+    :ivar content: Required.
+    :vartype content:
+     list[~azure.search.documents.knowledgebases.models.KnowledgeBaseMessageContent]
+    """
+
+    _validation = {
+        "content": {"required": True},
+    }
+
+    _attribute_map = {
+        "role": {"key": "role", "type": "str"},
+        "content": {"key": "content", "type": "[KnowledgeBaseMessageContent]"},
+    }
+
+    def __init__(
+        self, *, content: list["_models.KnowledgeBaseMessageContent"], role: Optional[str] = None, **kwargs: Any
+    ) -> None:
+        """
+        :keyword role: The role of the tool response.
+        :paramtype role: str
+        :keyword content: Required.
+        :paramtype content:
+         list[~azure.search.documents.knowledgebases.models.KnowledgeBaseMessageContent]
+        """
+        super().__init__(**kwargs)
+        self.role = role
+        self.content = content
+
+
+class KnowledgeBaseMessageContent(_serialization.Model):
+    """Specifies the type of the message content.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    KnowledgeBaseMessageImageContent, KnowledgeBaseMessageTextContent
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar type: The type of the message. Required. Known values are: "text" and "image".
+    :vartype type: str or
+     ~azure.search.documents.knowledgebases.models.KnowledgeBaseMessageContentType
+    """
+
+    _validation = {
+        "type": {"required": True},
+    }
+
+    _attribute_map = {
+        "type": {"key": "type", "type": "str"},
+    }
+
+    _subtype_map = {"type": {"image": "KnowledgeBaseMessageImageContent", "text": "KnowledgeBaseMessageTextContent"}}
+
+    def __init__(self, **kwargs: Any) -> None:
+        """ """
+        super().__init__(**kwargs)
+        self.type: Optional[str] = None
+
+
+class KnowledgeBaseMessageImageContent(KnowledgeBaseMessageContent):
+    """Image message type.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar type: The type of the message. Required. Known values are: "text" and "image".
+    :vartype type: str or
+     ~azure.search.documents.knowledgebases.models.KnowledgeBaseMessageContentType
+    :ivar image: Required.
+    :vartype image:
+     ~azure.search.documents.knowledgebases.models.KnowledgeBaseMessageImageContentImage
+    """
+
+    _validation = {
+        "type": {"required": True},
+        "image": {"required": True},
+    }
+
+    _attribute_map = {
+        "type": {"key": "type", "type": "str"},
+        "image": {"key": "image", "type": "KnowledgeBaseMessageImageContentImage"},
+    }
+
+    def __init__(self, *, image: "_models.KnowledgeBaseMessageImageContentImage", **kwargs: Any) -> None:
+        """
+        :keyword image: Required.
+        :paramtype image:
+         ~azure.search.documents.knowledgebases.models.KnowledgeBaseMessageImageContentImage
+        """
+        super().__init__(**kwargs)
+        self.type: str = "image"
+        self.image = image
+
+
+class KnowledgeBaseMessageImageContentImage(_serialization.Model):
+    """KnowledgeBaseMessageImageContentImage.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar url: The url of the image. Required.
+    :vartype url: str
+    """
+
+    _validation = {
+        "url": {"required": True},
+    }
+
+    _attribute_map = {
+        "url": {"key": "url", "type": "str"},
+    }
+
+    def __init__(self, *, url: str, **kwargs: Any) -> None:
+        """
+        :keyword url: The url of the image. Required.
+        :paramtype url: str
+        """
+        super().__init__(**kwargs)
+        self.url = url
+
+
+class KnowledgeBaseMessageTextContent(KnowledgeBaseMessageContent):
+    """Text message type.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar type: The type of the message. Required. Known values are: "text" and "image".
+    :vartype type: str or
+     ~azure.search.documents.knowledgebases.models.KnowledgeBaseMessageContentType
+    :ivar text: Required.
+    :vartype text: str
+    """
+
+    _validation = {
+        "type": {"required": True},
+        "text": {"required": True},
+    }
+
+    _attribute_map = {
+        "type": {"key": "type", "type": "str"},
+        "text": {"key": "text", "type": "str"},
+    }
+
+    def __init__(self, *, text: str, **kwargs: Any) -> None:
+        """
+        :keyword text: Required.
+        :paramtype text: str
+        """
+        super().__init__(**kwargs)
+        self.type: str = "text"
+        self.text = text
+
+
+class KnowledgeBaseModelAnswerSynthesisActivityRecord(KnowledgeBaseActivityRecord):  # pylint: disable=name-too-long
+    """Represents an LLM answer synthesis activity record.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar id: The ID of the activity record. Required.
+    :vartype id: int
+    :ivar type: The type of the activity record. Required.
+    :vartype type: str
+    :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity.
+    :vartype elapsed_ms: int
+    :ivar error: The error detail explaining why the operation failed. This property is only
+     included when the activity does not succeed.
+    :vartype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail
+    :ivar input_tokens: The number of input tokens for the LLM answer synthesis activity.
+    :vartype input_tokens: int
+    :ivar output_tokens: The number of output tokens for the LLM answer synthesis activity.
+ :vartype output_tokens: int + """ + + _validation = { + "id": {"required": True}, + "type": {"required": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "int"}, + "type": {"key": "type", "type": "str"}, + "elapsed_ms": {"key": "elapsedMs", "type": "int"}, + "error": {"key": "error", "type": "KnowledgeBaseErrorDetail"}, + "input_tokens": {"key": "inputTokens", "type": "int"}, + "output_tokens": {"key": "outputTokens", "type": "int"}, + } + + def __init__( + self, + *, + id: int, # pylint: disable=redefined-builtin + elapsed_ms: Optional[int] = None, + error: Optional["_models.KnowledgeBaseErrorDetail"] = None, + input_tokens: Optional[int] = None, + output_tokens: Optional[int] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: The ID of the activity record. Required. + :paramtype id: int + :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity. + :paramtype elapsed_ms: int + :keyword error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. + :paramtype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + :keyword input_tokens: The number of input tokens for the LLM answer synthesis activity. + :paramtype input_tokens: int + :keyword output_tokens: The number of output tokens for the LLM answer synthesis activity. + :paramtype output_tokens: int + """ + super().__init__(id=id, elapsed_ms=elapsed_ms, error=error, **kwargs) + self.type: str = "modelAnswerSynthesis" + self.input_tokens = input_tokens + self.output_tokens = output_tokens + + +class KnowledgeBaseModelQueryPlanningActivityRecord(KnowledgeBaseActivityRecord): # pylint: disable=name-too-long + """Represents an LLM query planning activity record. + + All required parameters must be populated in order to send to server. + + :ivar id: The ID of the activity record. Required. + :vartype id: int + :ivar type: The type of the activity record. Required. + :vartype type: str + :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. + :vartype elapsed_ms: int + :ivar error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. + :vartype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + :ivar input_tokens: The number of input tokens for the LLM query planning activity. + :vartype input_tokens: int + :ivar output_tokens: The number of output tokens for the LLM query planning activity. + :vartype output_tokens: int + """ + + _validation = { + "id": {"required": True}, + "type": {"required": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "int"}, + "type": {"key": "type", "type": "str"}, + "elapsed_ms": {"key": "elapsedMs", "type": "int"}, + "error": {"key": "error", "type": "KnowledgeBaseErrorDetail"}, + "input_tokens": {"key": "inputTokens", "type": "int"}, + "output_tokens": {"key": "outputTokens", "type": "int"}, + } + + def __init__( + self, + *, + id: int, # pylint: disable=redefined-builtin + elapsed_ms: Optional[int] = None, + error: Optional["_models.KnowledgeBaseErrorDetail"] = None, + input_tokens: Optional[int] = None, + output_tokens: Optional[int] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: The ID of the activity record. Required. + :paramtype id: int + :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity. + :paramtype elapsed_ms: int + :keyword error: The error detail explaining why the operation failed. 
This property is only + included when the activity does not succeed. + :paramtype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + :keyword input_tokens: The number of input tokens for the LLM query planning activity. + :paramtype input_tokens: int + :keyword output_tokens: The number of output tokens for the LLM query planning activity. + :paramtype output_tokens: int + """ + super().__init__(id=id, elapsed_ms=elapsed_ms, error=error, **kwargs) + self.type: str = "modelQueryPlanning" + self.input_tokens = input_tokens + self.output_tokens = output_tokens + + +class KnowledgeBaseRemoteSharePointActivityArguments(_serialization.Model): # pylint: disable=name-too-long + """Represents the arguments the remote SharePoint retrieval activity was run with. + + :ivar search: The search string used to query the remote SharePoint knowledge source. + :vartype search: str + :ivar filter_expression_add_on: The filter expression add-on for the retrieval activity. + :vartype filter_expression_add_on: str + """ + + _attribute_map = { + "search": {"key": "search", "type": "str"}, + "filter_expression_add_on": {"key": "filterExpressionAddOn", "type": "str"}, + } + + def __init__( + self, *, search: Optional[str] = None, filter_expression_add_on: Optional[str] = None, **kwargs: Any + ) -> None: + """ + :keyword search: The search string used to query the remote SharePoint knowledge source. + :paramtype search: str + :keyword filter_expression_add_on: The filter expression add-on for the retrieval activity. + :paramtype filter_expression_add_on: str + """ + super().__init__(**kwargs) + self.search = search + self.filter_expression_add_on = filter_expression_add_on + + +class KnowledgeBaseRemoteSharePointActivityRecord( + KnowledgeBaseRetrievalActivityRecord +): # pylint: disable=name-too-long + """Represents a remote SharePoint retrieval activity record. + + All required parameters must be populated in order to send to server. + + :ivar id: The ID of the activity record. Required. + :vartype id: int + :ivar type: The type of the activity record. Required. + :vartype type: str + :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. + :vartype elapsed_ms: int + :ivar error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. + :vartype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + :ivar knowledge_source_name: The knowledge source for the retrieval activity. + :vartype knowledge_source_name: str + :ivar query_time: The query time for this retrieval activity. + :vartype query_time: ~datetime.datetime + :ivar count: The count of documents retrieved that were sufficiently relevant to pass the + reranker threshold. + :vartype count: int + :ivar remote_share_point_arguments: The remote SharePoint arguments for the retrieval activity. 
+ :vartype remote_share_point_arguments: + ~azure.search.documents.knowledgebases.models.KnowledgeBaseRemoteSharePointActivityArguments + """ + + _validation = { + "id": {"required": True}, + "type": {"required": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "int"}, + "type": {"key": "type", "type": "str"}, + "elapsed_ms": {"key": "elapsedMs", "type": "int"}, + "error": {"key": "error", "type": "KnowledgeBaseErrorDetail"}, + "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, + "query_time": {"key": "queryTime", "type": "iso-8601"}, + "count": {"key": "count", "type": "int"}, + "remote_share_point_arguments": { + "key": "remoteSharePointArguments", + "type": "KnowledgeBaseRemoteSharePointActivityArguments", + }, + } + + def __init__( + self, + *, + id: int, # pylint: disable=redefined-builtin + elapsed_ms: Optional[int] = None, + error: Optional["_models.KnowledgeBaseErrorDetail"] = None, + knowledge_source_name: Optional[str] = None, + query_time: Optional[datetime.datetime] = None, + count: Optional[int] = None, + remote_share_point_arguments: Optional["_models.KnowledgeBaseRemoteSharePointActivityArguments"] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: The ID of the activity record. Required. + :paramtype id: int + :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity. + :paramtype elapsed_ms: int + :keyword error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. + :paramtype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + :keyword knowledge_source_name: The knowledge source for the retrieval activity. + :paramtype knowledge_source_name: str + :keyword query_time: The query time for this retrieval activity. + :paramtype query_time: ~datetime.datetime + :keyword count: The count of documents retrieved that were sufficiently relevant to pass the + reranker threshold. + :paramtype count: int + :keyword remote_share_point_arguments: The remote SharePoint arguments for the retrieval + activity. + :paramtype remote_share_point_arguments: + ~azure.search.documents.knowledgebases.models.KnowledgeBaseRemoteSharePointActivityArguments + """ + super().__init__( + id=id, + elapsed_ms=elapsed_ms, + error=error, + knowledge_source_name=knowledge_source_name, + query_time=query_time, + count=count, + **kwargs + ) + self.type: str = "remoteSharePoint" + self.remote_share_point_arguments = remote_share_point_arguments + + +class KnowledgeBaseRemoteSharePointReference(KnowledgeBaseReference): + """Represents a remote SharePoint document reference. + + All required parameters must be populated in order to send to server. + + :ivar type: The type of the reference. Required. + :vartype type: str + :ivar id: The ID of the reference. Required. + :vartype id: str + :ivar activity_source: The source activity ID for the reference. Required. + :vartype activity_source: int + :ivar source_data: Dictionary of :code:``. + :vartype source_data: dict[str, any] + :ivar reranker_score: The reranker score for the document reference. + :vartype reranker_score: float + :ivar web_url: The url the reference data originated from. + :vartype web_url: str + :ivar search_sensitivity_label_info: Information about the sensitivity label applied to a + SharePoint document. 
+ :vartype search_sensitivity_label_info: + ~azure.search.documents.knowledgebases.models.SharePointSensitivityLabelInfo + """ + + _validation = { + "type": {"required": True}, + "id": {"required": True}, + "activity_source": {"required": True}, + } + + _attribute_map = { + "type": {"key": "type", "type": "str"}, + "id": {"key": "id", "type": "str"}, + "activity_source": {"key": "activitySource", "type": "int"}, + "source_data": {"key": "sourceData", "type": "{object}"}, + "reranker_score": {"key": "rerankerScore", "type": "float"}, + "web_url": {"key": "webUrl", "type": "str"}, + "search_sensitivity_label_info": { + "key": "searchSensitivityLabelInfo", + "type": "SharePointSensitivityLabelInfo", + }, + } + + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + activity_source: int, + source_data: Optional[dict[str, Any]] = None, + reranker_score: Optional[float] = None, + web_url: Optional[str] = None, + search_sensitivity_label_info: Optional["_models.SharePointSensitivityLabelInfo"] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: The ID of the reference. Required. + :paramtype id: str + :keyword activity_source: The source activity ID for the reference. Required. + :paramtype activity_source: int + :keyword source_data: Dictionary of :code:``. + :paramtype source_data: dict[str, any] + :keyword reranker_score: The reranker score for the document reference. + :paramtype reranker_score: float + :keyword web_url: The url the reference data originated from. + :paramtype web_url: str + :keyword search_sensitivity_label_info: Information about the sensitivity label applied to a + SharePoint document. + :paramtype search_sensitivity_label_info: + ~azure.search.documents.knowledgebases.models.SharePointSensitivityLabelInfo + """ + super().__init__( + id=id, activity_source=activity_source, source_data=source_data, reranker_score=reranker_score, **kwargs + ) + self.type: str = "remoteSharePoint" + self.web_url = web_url + self.search_sensitivity_label_info = search_sensitivity_label_info + + +class KnowledgeBaseRetrievalRequest(_serialization.Model): + """The input contract for the retrieval request. + + :ivar messages: A list of chat message style input. + :vartype messages: list[~azure.search.documents.knowledgebases.models.KnowledgeBaseMessage] + :ivar intents: A list of intended queries to execute without model query planning. + :vartype intents: list[~azure.search.documents.knowledgebases.models.KnowledgeRetrievalIntent] + :ivar max_runtime_in_seconds: The maximum runtime in seconds. + :vartype max_runtime_in_seconds: int + :ivar max_output_size: Limits the maximum size of the content in the output. + :vartype max_output_size: int + :ivar retrieval_reasoning_effort: + :vartype retrieval_reasoning_effort: + ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalReasoningEffort + :ivar include_activity: Indicates retrieval results should include activity information. + :vartype include_activity: bool + :ivar output_mode: The output configuration for this retrieval. Known values are: + "extractiveData" and "answerSynthesis". + :vartype output_mode: str or + ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalOutputMode + :ivar knowledge_source_params: A list of runtime parameters for the knowledge sources. 
+ :vartype knowledge_source_params: + list[~azure.search.documents.knowledgebases.models.KnowledgeSourceParams] + """ + + _attribute_map = { + "messages": {"key": "messages", "type": "[KnowledgeBaseMessage]"}, + "intents": {"key": "intents", "type": "[KnowledgeRetrievalIntent]"}, + "max_runtime_in_seconds": {"key": "maxRuntimeInSeconds", "type": "int"}, + "max_output_size": {"key": "maxOutputSize", "type": "int"}, + "retrieval_reasoning_effort": {"key": "retrievalReasoningEffort", "type": "KnowledgeRetrievalReasoningEffort"}, + "include_activity": {"key": "includeActivity", "type": "bool"}, + "output_mode": {"key": "outputMode", "type": "str"}, + "knowledge_source_params": {"key": "knowledgeSourceParams", "type": "[KnowledgeSourceParams]"}, + } + + def __init__( + self, + *, + messages: Optional[list["_models.KnowledgeBaseMessage"]] = None, + intents: Optional[list["_models.KnowledgeRetrievalIntent"]] = None, + max_runtime_in_seconds: Optional[int] = None, + max_output_size: Optional[int] = None, + retrieval_reasoning_effort: Optional["_models.KnowledgeRetrievalReasoningEffort"] = None, + include_activity: Optional[bool] = None, + output_mode: Optional[Union[str, "_models.KnowledgeRetrievalOutputMode"]] = None, + knowledge_source_params: Optional[list["_models.KnowledgeSourceParams"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword messages: A list of chat message style input. + :paramtype messages: list[~azure.search.documents.knowledgebases.models.KnowledgeBaseMessage] + :keyword intents: A list of intended queries to execute without model query planning. + :paramtype intents: + list[~azure.search.documents.knowledgebases.models.KnowledgeRetrievalIntent] + :keyword max_runtime_in_seconds: The maximum runtime in seconds. + :paramtype max_runtime_in_seconds: int + :keyword max_output_size: Limits the maximum size of the content in the output. + :paramtype max_output_size: int + :keyword retrieval_reasoning_effort: + :paramtype retrieval_reasoning_effort: + ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalReasoningEffort + :keyword include_activity: Indicates retrieval results should include activity information. + :paramtype include_activity: bool + :keyword output_mode: The output configuration for this retrieval. Known values are: + "extractiveData" and "answerSynthesis". + :paramtype output_mode: str or + ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalOutputMode + :keyword knowledge_source_params: A list of runtime parameters for the knowledge sources. + :paramtype knowledge_source_params: + list[~azure.search.documents.knowledgebases.models.KnowledgeSourceParams] + """ + super().__init__(**kwargs) + self.messages = messages + self.intents = intents + self.max_runtime_in_seconds = max_runtime_in_seconds + self.max_output_size = max_output_size + self.retrieval_reasoning_effort = retrieval_reasoning_effort + self.include_activity = include_activity + self.output_mode = output_mode + self.knowledge_source_params = knowledge_source_params + + +class KnowledgeBaseRetrievalResponse(_serialization.Model): + """The output contract for the retrieval response. + + :ivar response: + :vartype response: list[~azure.search.documents.knowledgebases.models.KnowledgeBaseMessage] + :ivar activity: The activity records for tracking progress and billing implications. + :vartype activity: + list[~azure.search.documents.knowledgebases.models.KnowledgeBaseActivityRecord] + :ivar references: The references for the retrieval data used in the response. 
+ :vartype references: list[~azure.search.documents.knowledgebases.models.KnowledgeBaseReference] + """ + + _attribute_map = { + "response": {"key": "response", "type": "[KnowledgeBaseMessage]"}, + "activity": {"key": "activity", "type": "[KnowledgeBaseActivityRecord]"}, + "references": {"key": "references", "type": "[KnowledgeBaseReference]"}, + } + + def __init__( + self, + *, + response: Optional[list["_models.KnowledgeBaseMessage"]] = None, + activity: Optional[list["_models.KnowledgeBaseActivityRecord"]] = None, + references: Optional[list["_models.KnowledgeBaseReference"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword response: + :paramtype response: list[~azure.search.documents.knowledgebases.models.KnowledgeBaseMessage] + :keyword activity: The activity records for tracking progress and billing implications. + :paramtype activity: + list[~azure.search.documents.knowledgebases.models.KnowledgeBaseActivityRecord] + :keyword references: The references for the retrieval data used in the response. + :paramtype references: + list[~azure.search.documents.knowledgebases.models.KnowledgeBaseReference] + """ + super().__init__(**kwargs) + self.response = response + self.activity = activity + self.references = references + + +class KnowledgeBaseSearchIndexActivityArguments(_serialization.Model): # pylint: disable=name-too-long + """Represents the arguments the search index retrieval activity was run with. + + :ivar search: The search string used to query the search index. + :vartype search: str + :ivar filter: The filter string. + :vartype filter: str + :ivar source_data_fields: What fields were selected for search. + :vartype source_data_fields: + list[~azure.search.documents.knowledgebases.models.SearchIndexFieldReference] + :ivar search_fields: What fields were searched against. + :vartype search_fields: + list[~azure.search.documents.knowledgebases.models.SearchIndexFieldReference] + :ivar semantic_configuration_name: What semantic configuration was used from the search index. + :vartype semantic_configuration_name: str + """ + + _attribute_map = { + "search": {"key": "search", "type": "str"}, + "filter": {"key": "filter", "type": "str"}, + "source_data_fields": {"key": "sourceDataFields", "type": "[SearchIndexFieldReference]"}, + "search_fields": {"key": "searchFields", "type": "[SearchIndexFieldReference]"}, + "semantic_configuration_name": {"key": "semanticConfigurationName", "type": "str"}, + } + + def __init__( + self, + *, + search: Optional[str] = None, + filter: Optional[str] = None, # pylint: disable=redefined-builtin + source_data_fields: Optional[list["_models.SearchIndexFieldReference"]] = None, + search_fields: Optional[list["_models.SearchIndexFieldReference"]] = None, + semantic_configuration_name: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword search: The search string used to query the search index. + :paramtype search: str + :keyword filter: The filter string. + :paramtype filter: str + :keyword source_data_fields: What fields were selected for search. + :paramtype source_data_fields: + list[~azure.search.documents.knowledgebases.models.SearchIndexFieldReference] + :keyword search_fields: What fields were searched against. + :paramtype search_fields: + list[~azure.search.documents.knowledgebases.models.SearchIndexFieldReference] + :keyword semantic_configuration_name: What semantic configuration was used from the search + index. 
+ :paramtype semantic_configuration_name: str + """ + super().__init__(**kwargs) + self.search = search + self.filter = filter + self.source_data_fields = source_data_fields + self.search_fields = search_fields + self.semantic_configuration_name = semantic_configuration_name + + +class KnowledgeBaseSearchIndexActivityRecord(KnowledgeBaseRetrievalActivityRecord): + """Represents a search index retrieval activity record. + + All required parameters must be populated in order to send to server. + + :ivar id: The ID of the activity record. Required. + :vartype id: int + :ivar type: The type of the activity record. Required. + :vartype type: str + :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. + :vartype elapsed_ms: int + :ivar error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. + :vartype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + :ivar knowledge_source_name: The knowledge source for the retrieval activity. + :vartype knowledge_source_name: str + :ivar query_time: The query time for this retrieval activity. + :vartype query_time: ~datetime.datetime + :ivar count: The count of documents retrieved that were sufficiently relevant to pass the + reranker threshold. + :vartype count: int + :ivar search_index_arguments: The search index arguments for the retrieval activity. + :vartype search_index_arguments: + ~azure.search.documents.knowledgebases.models.KnowledgeBaseSearchIndexActivityArguments + """ + + _validation = { + "id": {"required": True}, + "type": {"required": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "int"}, + "type": {"key": "type", "type": "str"}, + "elapsed_ms": {"key": "elapsedMs", "type": "int"}, + "error": {"key": "error", "type": "KnowledgeBaseErrorDetail"}, + "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, + "query_time": {"key": "queryTime", "type": "iso-8601"}, + "count": {"key": "count", "type": "int"}, + "search_index_arguments": {"key": "searchIndexArguments", "type": "KnowledgeBaseSearchIndexActivityArguments"}, + } + + def __init__( + self, + *, + id: int, # pylint: disable=redefined-builtin + elapsed_ms: Optional[int] = None, + error: Optional["_models.KnowledgeBaseErrorDetail"] = None, + knowledge_source_name: Optional[str] = None, + query_time: Optional[datetime.datetime] = None, + count: Optional[int] = None, + search_index_arguments: Optional["_models.KnowledgeBaseSearchIndexActivityArguments"] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: The ID of the activity record. Required. + :paramtype id: int + :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity. + :paramtype elapsed_ms: int + :keyword error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. + :paramtype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + :keyword knowledge_source_name: The knowledge source for the retrieval activity. + :paramtype knowledge_source_name: str + :keyword query_time: The query time for this retrieval activity. + :paramtype query_time: ~datetime.datetime + :keyword count: The count of documents retrieved that were sufficiently relevant to pass the + reranker threshold. + :paramtype count: int + :keyword search_index_arguments: The search index arguments for the retrieval activity. 
+ :paramtype search_index_arguments: + ~azure.search.documents.knowledgebases.models.KnowledgeBaseSearchIndexActivityArguments + """ + super().__init__( + id=id, + elapsed_ms=elapsed_ms, + error=error, + knowledge_source_name=knowledge_source_name, + query_time=query_time, + count=count, + **kwargs + ) + self.type: str = "searchIndex" + self.search_index_arguments = search_index_arguments + + +class KnowledgeBaseSearchIndexReference(KnowledgeBaseReference): + """Represents an Azure Search document reference. + + All required parameters must be populated in order to send to server. + + :ivar type: The type of the reference. Required. + :vartype type: str + :ivar id: The ID of the reference. Required. + :vartype id: str + :ivar activity_source: The source activity ID for the reference. Required. + :vartype activity_source: int + :ivar source_data: Dictionary of :code:``. + :vartype source_data: dict[str, any] + :ivar reranker_score: The reranker score for the document reference. + :vartype reranker_score: float + :ivar doc_key: The document key for the reference. + :vartype doc_key: str + """ + + _validation = { + "type": {"required": True}, + "id": {"required": True}, + "activity_source": {"required": True}, + } + + _attribute_map = { + "type": {"key": "type", "type": "str"}, + "id": {"key": "id", "type": "str"}, + "activity_source": {"key": "activitySource", "type": "int"}, + "source_data": {"key": "sourceData", "type": "{object}"}, + "reranker_score": {"key": "rerankerScore", "type": "float"}, + "doc_key": {"key": "docKey", "type": "str"}, + } + + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + activity_source: int, + source_data: Optional[dict[str, Any]] = None, + reranker_score: Optional[float] = None, + doc_key: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: The ID of the reference. Required. + :paramtype id: str + :keyword activity_source: The source activity ID for the reference. Required. + :paramtype activity_source: int + :keyword source_data: Dictionary of :code:``. + :paramtype source_data: dict[str, any] + :keyword reranker_score: The reranker score for the document reference. + :paramtype reranker_score: float + :keyword doc_key: The document key for the reference. + :paramtype doc_key: str + """ + super().__init__( + id=id, activity_source=activity_source, source_data=source_data, reranker_score=reranker_score, **kwargs + ) + self.type: str = "searchIndex" + self.doc_key = doc_key + + +class KnowledgeBaseWebActivityArguments(_serialization.Model): + """Represents the arguments the web retrieval activity was run with. + + :ivar search: The search string used to query the web. + :vartype search: str + :ivar language: The language for the retrieval activity. + :vartype language: str + :ivar market: The market for the retrieval activity. + :vartype market: str + :ivar count: The number of web results returned. + :vartype count: int + :ivar freshness: The freshness for the retrieval activity. 
+ :vartype freshness: str + """ + + _attribute_map = { + "search": {"key": "search", "type": "str"}, + "language": {"key": "language", "type": "str"}, + "market": {"key": "market", "type": "str"}, + "count": {"key": "count", "type": "int"}, + "freshness": {"key": "freshness", "type": "str"}, + } + + def __init__( + self, + *, + search: Optional[str] = None, + language: Optional[str] = None, + market: Optional[str] = None, + count: Optional[int] = None, + freshness: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword search: The search string used to query the web. + :paramtype search: str + :keyword language: The language for the retrieval activity. + :paramtype language: str + :keyword market: The market for the retrieval activity. + :paramtype market: str + :keyword count: The number of web results returned. + :paramtype count: int + :keyword freshness: The freshness for the retrieval activity. + :paramtype freshness: str + """ + super().__init__(**kwargs) + self.search = search + self.language = language + self.market = market + self.count = count + self.freshness = freshness + + +class KnowledgeBaseWebActivityRecord(KnowledgeBaseRetrievalActivityRecord): + """Represents a web retrieval activity record. + + All required parameters must be populated in order to send to server. + + :ivar id: The ID of the activity record. Required. + :vartype id: int + :ivar type: The type of the activity record. Required. + :vartype type: str + :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. + :vartype elapsed_ms: int + :ivar error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. + :vartype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + :ivar knowledge_source_name: The knowledge source for the retrieval activity. + :vartype knowledge_source_name: str + :ivar query_time: The query time for this retrieval activity. + :vartype query_time: ~datetime.datetime + :ivar count: The count of documents retrieved that were sufficiently relevant to pass the + reranker threshold. + :vartype count: int + :ivar web_arguments: The web arguments for the retrieval activity. + :vartype web_arguments: + ~azure.search.documents.knowledgebases.models.KnowledgeBaseWebActivityArguments + """ + + _validation = { + "id": {"required": True}, + "type": {"required": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "int"}, + "type": {"key": "type", "type": "str"}, + "elapsed_ms": {"key": "elapsedMs", "type": "int"}, + "error": {"key": "error", "type": "KnowledgeBaseErrorDetail"}, + "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, + "query_time": {"key": "queryTime", "type": "iso-8601"}, + "count": {"key": "count", "type": "int"}, + "web_arguments": {"key": "webArguments", "type": "KnowledgeBaseWebActivityArguments"}, + } + + def __init__( + self, + *, + id: int, # pylint: disable=redefined-builtin + elapsed_ms: Optional[int] = None, + error: Optional["_models.KnowledgeBaseErrorDetail"] = None, + knowledge_source_name: Optional[str] = None, + query_time: Optional[datetime.datetime] = None, + count: Optional[int] = None, + web_arguments: Optional["_models.KnowledgeBaseWebActivityArguments"] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: The ID of the activity record. Required. + :paramtype id: int + :keyword elapsed_ms: The elapsed time in milliseconds for the retrieval activity. 
+ :paramtype elapsed_ms: int + :keyword error: The error detail explaining why the operation failed. This property is only + included when the activity does not succeed. + :paramtype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail + :keyword knowledge_source_name: The knowledge source for the retrieval activity. + :paramtype knowledge_source_name: str + :keyword query_time: The query time for this retrieval activity. + :paramtype query_time: ~datetime.datetime + :keyword count: The count of documents retrieved that were sufficiently relevant to pass the + reranker threshold. + :paramtype count: int + :keyword web_arguments: The web arguments for the retrieval activity. + :paramtype web_arguments: + ~azure.search.documents.knowledgebases.models.KnowledgeBaseWebActivityArguments + """ + super().__init__( + id=id, + elapsed_ms=elapsed_ms, + error=error, + knowledge_source_name=knowledge_source_name, + query_time=query_time, + count=count, + **kwargs + ) + self.type: str = "web" + self.web_arguments = web_arguments + + +class KnowledgeBaseWebReference(KnowledgeBaseReference): + """Represents a web document reference. + + All required parameters must be populated in order to send to server. + + :ivar type: The type of the reference. Required. + :vartype type: str + :ivar id: The ID of the reference. Required. + :vartype id: str + :ivar activity_source: The source activity ID for the reference. Required. + :vartype activity_source: int + :ivar source_data: Dictionary of :code:``. + :vartype source_data: dict[str, any] + :ivar reranker_score: The reranker score for the document reference. + :vartype reranker_score: float + :ivar url: The url the reference data originated from. + :vartype url: str + :ivar title: The title of the web document. + :vartype title: str + """ + + _validation = { + "type": {"required": True}, + "id": {"required": True}, + "activity_source": {"required": True}, + } + + _attribute_map = { + "type": {"key": "type", "type": "str"}, + "id": {"key": "id", "type": "str"}, + "activity_source": {"key": "activitySource", "type": "int"}, + "source_data": {"key": "sourceData", "type": "{object}"}, + "reranker_score": {"key": "rerankerScore", "type": "float"}, + "url": {"key": "url", "type": "str"}, + "title": {"key": "title", "type": "str"}, + } + + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + activity_source: int, + source_data: Optional[dict[str, Any]] = None, + reranker_score: Optional[float] = None, + url: Optional[str] = None, + title: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: The ID of the reference. Required. + :paramtype id: str + :keyword activity_source: The source activity ID for the reference. Required. + :paramtype activity_source: int + :keyword source_data: Dictionary of :code:``. + :paramtype source_data: dict[str, any] + :keyword reranker_score: The reranker score for the document reference. + :paramtype reranker_score: float + :keyword url: The url the reference data originated from. + :paramtype url: str + :keyword title: The title of the web document. + :paramtype title: str + """ + super().__init__( + id=id, activity_source=activity_source, source_data=source_data, reranker_score=reranker_score, **kwargs + ) + self.type: str = "web" + self.url = url + self.title = title + + +class KnowledgeRetrievalIntent(_serialization.Model): + """An intended query to execute without model query planning. + + You probably want to use the sub-classes and not this class directly. 
Known sub-classes are: + KnowledgeRetrievalSemanticIntent + + All required parameters must be populated in order to send to server. + + :ivar type: The type of the intent. Required. "semantic" + :vartype type: str or + ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalIntentType + """ + + _validation = { + "type": {"required": True}, + } + + _attribute_map = { + "type": {"key": "type", "type": "str"}, + } + + _subtype_map = {"type": {"semantic": "KnowledgeRetrievalSemanticIntent"}} + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.type: Optional[str] = None + + +class KnowledgeRetrievalReasoningEffort(_serialization.Model): + """KnowledgeRetrievalReasoningEffort. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + KnowledgeRetrievalLowReasoningEffort, KnowledgeRetrievalMediumReasoningEffort, + KnowledgeRetrievalMinimalReasoningEffort + + All required parameters must be populated in order to send to server. + + :ivar kind: The kind of reasoning effort. Required. Known values are: "minimal", "low", and + "medium". + :vartype kind: str or + ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalReasoningEffortKind + """ + + _validation = { + "kind": {"required": True}, + } + + _attribute_map = { + "kind": {"key": "kind", "type": "str"}, + } + + _subtype_map = { + "kind": { + "low": "KnowledgeRetrievalLowReasoningEffort", + "medium": "KnowledgeRetrievalMediumReasoningEffort", + "minimal": "KnowledgeRetrievalMinimalReasoningEffort", + } + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.kind: Optional[str] = None + + +class KnowledgeRetrievalLowReasoningEffort(KnowledgeRetrievalReasoningEffort): + """Run knowledge retrieval with low reasoning effort. + + All required parameters must be populated in order to send to server. + + :ivar kind: The kind of reasoning effort. Required. Known values are: "minimal", "low", and + "medium". + :vartype kind: str or + ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalReasoningEffortKind + """ + + _validation = { + "kind": {"required": True}, + } + + _attribute_map = { + "kind": {"key": "kind", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.kind: str = "low" + + +class KnowledgeRetrievalMediumReasoningEffort(KnowledgeRetrievalReasoningEffort): + """Run knowledge retrieval with medium reasoning effort. + + All required parameters must be populated in order to send to server. + + :ivar kind: The kind of reasoning effort. Required. Known values are: "minimal", "low", and + "medium". + :vartype kind: str or + ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalReasoningEffortKind + """ + + _validation = { + "kind": {"required": True}, + } + + _attribute_map = { + "kind": {"key": "kind", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.kind: str = "medium" + + +class KnowledgeRetrievalMinimalReasoningEffort(KnowledgeRetrievalReasoningEffort): + """Run knowledge retrieval with minimal reasoning effort. + + All required parameters must be populated in order to send to server. + + :ivar kind: The kind of reasoning effort. Required. Known values are: "minimal", "low", and + "medium". 
+ :vartype kind: str or + ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalReasoningEffortKind + """ + + _validation = { + "kind": {"required": True}, + } + + _attribute_map = { + "kind": {"key": "kind", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.kind: str = "minimal" + + +class KnowledgeRetrievalSemanticIntent(KnowledgeRetrievalIntent): + """KnowledgeRetrievalSemanticIntent. + + All required parameters must be populated in order to send to server. + + :ivar type: The type of the intent. Required. "semantic" + :vartype type: str or + ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalIntentType + :ivar search: The semantic query to execute. Required. + :vartype search: str + """ + + _validation = { + "type": {"required": True}, + "search": {"required": True}, + } + + _attribute_map = { + "type": {"key": "type", "type": "str"}, + "search": {"key": "search", "type": "str"}, + } + + def __init__(self, *, search: str, **kwargs: Any) -> None: + """ + :keyword search: The semantic query to execute. Required. + :paramtype search: str + """ + super().__init__(**kwargs) + self.type: str = "semantic" + self.search = search + + +class RemoteSharePointKnowledgeSourceParams(KnowledgeSourceParams): + """Specifies runtime parameters for a remote SharePoint knowledge source. + + All required parameters must be populated in order to send to server. + + :ivar knowledge_source_name: The name of the index the params apply to. Required. + :vartype knowledge_source_name: str + :ivar include_references: Indicates whether references should be included for data retrieved + from this source. + :vartype include_references: bool + :ivar include_reference_source_data: Indicates whether references should include the structured + data obtained during retrieval in their payload. + :vartype include_reference_source_data: bool + :ivar always_query_source: Indicates that this knowledge source should bypass source selection + and always be queried at retrieval time. + :vartype always_query_source: bool + :ivar reranker_threshold: The reranker threshold all retrieved documents must meet to be + included in the response. + :vartype reranker_threshold: float + :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex", + "azureBlob", "web", "remoteSharePoint", "indexedSharePoint", and "indexedOneLake". + :vartype kind: str or ~azure.search.documents.knowledgebases.models.KnowledgeSourceKind + :ivar filter_expression_add_on: A filter condition applied to the SharePoint data source. It + must be specified in the Keyword Query Language syntax. It will be combined as a conjunction + with the filter expression specified in the knowledge source definition. 
+ :vartype filter_expression_add_on: str + """ + + _validation = { + "knowledge_source_name": {"required": True}, + "kind": {"required": True}, + } + + _attribute_map = { + "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, + "include_references": {"key": "includeReferences", "type": "bool"}, + "include_reference_source_data": {"key": "includeReferenceSourceData", "type": "bool"}, + "always_query_source": {"key": "alwaysQuerySource", "type": "bool"}, + "reranker_threshold": {"key": "rerankerThreshold", "type": "float"}, + "kind": {"key": "kind", "type": "str"}, + "filter_expression_add_on": {"key": "filterExpressionAddOn", "type": "str"}, + } + + def __init__( + self, + *, + knowledge_source_name: str, + include_references: Optional[bool] = None, + include_reference_source_data: Optional[bool] = None, + always_query_source: Optional[bool] = None, + reranker_threshold: Optional[float] = None, + filter_expression_add_on: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword knowledge_source_name: The name of the index the params apply to. Required. + :paramtype knowledge_source_name: str + :keyword include_references: Indicates whether references should be included for data retrieved + from this source. + :paramtype include_references: bool + :keyword include_reference_source_data: Indicates whether references should include the + structured data obtained during retrieval in their payload. + :paramtype include_reference_source_data: bool + :keyword always_query_source: Indicates that this knowledge source should bypass source + selection and always be queried at retrieval time. + :paramtype always_query_source: bool + :keyword reranker_threshold: The reranker threshold all retrieved documents must meet to be + included in the response. + :paramtype reranker_threshold: float + :keyword filter_expression_add_on: A filter condition applied to the SharePoint data source. It + must be specified in the Keyword Query Language syntax. It will be combined as a conjunction + with the filter expression specified in the knowledge source definition. + :paramtype filter_expression_add_on: str + """ + super().__init__( + knowledge_source_name=knowledge_source_name, + include_references=include_references, + include_reference_source_data=include_reference_source_data, + always_query_source=always_query_source, + reranker_threshold=reranker_threshold, + **kwargs + ) + self.kind: str = "remoteSharePoint" + self.filter_expression_add_on = filter_expression_add_on + + +class RequestOptions(_serialization.Model): + """Parameter group. + + :ivar x_ms_client_request_id: The tracking ID sent with the request to help with debugging. + :vartype x_ms_client_request_id: str + """ + + _attribute_map = { + "x_ms_client_request_id": {"key": "x-ms-client-request-id", "type": "str"}, + } + + def __init__(self, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging. + :paramtype x_ms_client_request_id: str + """ + super().__init__(**kwargs) + self.x_ms_client_request_id = x_ms_client_request_id + + +class SearchIndexFieldReference(_serialization.Model): + """SearchIndexFieldReference. + + All required parameters must be populated in order to send to server. + + :ivar name: Required. 
+ :vartype name: str + """ + + _validation = { + "name": {"required": True}, + } + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + } + + def __init__(self, *, name: str, **kwargs: Any) -> None: + """ + :keyword name: Required. + :paramtype name: str + """ + super().__init__(**kwargs) + self.name = name + + +class SearchIndexKnowledgeSourceParams(KnowledgeSourceParams): + """Specifies runtime parameters for a search index knowledge source. + + All required parameters must be populated in order to send to server. + + :ivar knowledge_source_name: The name of the index the params apply to. Required. + :vartype knowledge_source_name: str + :ivar include_references: Indicates whether references should be included for data retrieved + from this source. + :vartype include_references: bool + :ivar include_reference_source_data: Indicates whether references should include the structured + data obtained during retrieval in their payload. + :vartype include_reference_source_data: bool + :ivar always_query_source: Indicates that this knowledge source should bypass source selection + and always be queried at retrieval time. + :vartype always_query_source: bool + :ivar reranker_threshold: The reranker threshold all retrieved documents must meet to be + included in the response. + :vartype reranker_threshold: float + :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex", + "azureBlob", "web", "remoteSharePoint", "indexedSharePoint", and "indexedOneLake". + :vartype kind: str or ~azure.search.documents.knowledgebases.models.KnowledgeSourceKind + :ivar filter_add_on: A filter condition applied to the index (e.g., 'State eq VA'). + :vartype filter_add_on: str + """ + + _validation = { + "knowledge_source_name": {"required": True}, + "kind": {"required": True}, + } + + _attribute_map = { + "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, + "include_references": {"key": "includeReferences", "type": "bool"}, + "include_reference_source_data": {"key": "includeReferenceSourceData", "type": "bool"}, + "always_query_source": {"key": "alwaysQuerySource", "type": "bool"}, + "reranker_threshold": {"key": "rerankerThreshold", "type": "float"}, + "kind": {"key": "kind", "type": "str"}, + "filter_add_on": {"key": "filterAddOn", "type": "str"}, + } + + def __init__( + self, + *, + knowledge_source_name: str, + include_references: Optional[bool] = None, + include_reference_source_data: Optional[bool] = None, + always_query_source: Optional[bool] = None, + reranker_threshold: Optional[float] = None, + filter_add_on: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword knowledge_source_name: The name of the index the params apply to. Required. + :paramtype knowledge_source_name: str + :keyword include_references: Indicates whether references should be included for data retrieved + from this source. + :paramtype include_references: bool + :keyword include_reference_source_data: Indicates whether references should include the + structured data obtained during retrieval in their payload. + :paramtype include_reference_source_data: bool + :keyword always_query_source: Indicates that this knowledge source should bypass source + selection and always be queried at retrieval time. + :paramtype always_query_source: bool + :keyword reranker_threshold: The reranker threshold all retrieved documents must meet to be + included in the response. 
+ :paramtype reranker_threshold: float + :keyword filter_add_on: A filter condition applied to the index (e.g., 'State eq VA'). + :paramtype filter_add_on: str + """ + super().__init__( + knowledge_source_name=knowledge_source_name, + include_references=include_references, + include_reference_source_data=include_reference_source_data, + always_query_source=always_query_source, + reranker_threshold=reranker_threshold, + **kwargs + ) + self.kind: str = "searchIndex" + self.filter_add_on = filter_add_on + + +class SharePointSensitivityLabelInfo(_serialization.Model): + """Information about the sensitivity label applied to a SharePoint document. + + :ivar display_name: The display name for the sensitivity label. + :vartype display_name: str + :ivar sensitivity_label_id: The ID of the sensitivity label. + :vartype sensitivity_label_id: str + :ivar tooltip: The tooltip that should be displayed for the label in a UI. + :vartype tooltip: str + :ivar priority: The priority in which the sensitivity label is applied. + :vartype priority: int + :ivar color: The color that the UI should display for the label, if configured. + :vartype color: str + :ivar is_encrypted: Indicates whether the sensitivity label enforces encryption. + :vartype is_encrypted: bool + """ + + _attribute_map = { + "display_name": {"key": "displayName", "type": "str"}, + "sensitivity_label_id": {"key": "sensitivityLabelId", "type": "str"}, + "tooltip": {"key": "tooltip", "type": "str"}, + "priority": {"key": "priority", "type": "int"}, + "color": {"key": "color", "type": "str"}, + "is_encrypted": {"key": "isEncrypted", "type": "bool"}, + } + + def __init__( + self, + *, + display_name: Optional[str] = None, + sensitivity_label_id: Optional[str] = None, + tooltip: Optional[str] = None, + priority: Optional[int] = None, + color: Optional[str] = None, + is_encrypted: Optional[bool] = None, + **kwargs: Any + ) -> None: + """ + :keyword display_name: The display name for the sensitivity label. + :paramtype display_name: str + :keyword sensitivity_label_id: The ID of the sensitivity label. + :paramtype sensitivity_label_id: str + :keyword tooltip: The tooltip that should be displayed for the label in a UI. + :paramtype tooltip: str + :keyword priority: The priority in which the sensitivity label is applied. + :paramtype priority: int + :keyword color: The color that the UI should display for the label, if configured. + :paramtype color: str + :keyword is_encrypted: Indicates whether the sensitivity label enforces encryption. + :paramtype is_encrypted: bool + """ + super().__init__(**kwargs) + self.display_name = display_name + self.sensitivity_label_id = sensitivity_label_id + self.tooltip = tooltip + self.priority = priority + self.color = color + self.is_encrypted = is_encrypted + + +class WebKnowledgeSourceParams(KnowledgeSourceParams): + """Specifies runtime parameters for a web knowledge source. + + All required parameters must be populated in order to send to server. + + :ivar knowledge_source_name: The name of the index the params apply to. Required. + :vartype knowledge_source_name: str + :ivar include_references: Indicates whether references should be included for data retrieved + from this source. + :vartype include_references: bool + :ivar include_reference_source_data: Indicates whether references should include the structured + data obtained during retrieval in their payload. 
+ :vartype include_reference_source_data: bool + :ivar always_query_source: Indicates that this knowledge source should bypass source selection + and always be queried at retrieval time. + :vartype always_query_source: bool + :ivar reranker_threshold: The reranker threshold all retrieved documents must meet to be + included in the response. + :vartype reranker_threshold: float + :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex", + "azureBlob", "web", "remoteSharePoint", "indexedSharePoint", and "indexedOneLake". + :vartype kind: str or ~azure.search.documents.knowledgebases.models.KnowledgeSourceKind + :ivar language: The language of the web results. + :vartype language: str + :ivar market: The market of the web results. + :vartype market: str + :ivar count: The number of web results to return. + :vartype count: int + :ivar freshness: The freshness of web results. + :vartype freshness: str + """ + + _validation = { + "knowledge_source_name": {"required": True}, + "kind": {"required": True}, + } + + _attribute_map = { + "knowledge_source_name": {"key": "knowledgeSourceName", "type": "str"}, + "include_references": {"key": "includeReferences", "type": "bool"}, + "include_reference_source_data": {"key": "includeReferenceSourceData", "type": "bool"}, + "always_query_source": {"key": "alwaysQuerySource", "type": "bool"}, + "reranker_threshold": {"key": "rerankerThreshold", "type": "float"}, + "kind": {"key": "kind", "type": "str"}, + "language": {"key": "language", "type": "str"}, + "market": {"key": "market", "type": "str"}, + "count": {"key": "count", "type": "int"}, + "freshness": {"key": "freshness", "type": "str"}, + } + + def __init__( + self, + *, + knowledge_source_name: str, + include_references: Optional[bool] = None, + include_reference_source_data: Optional[bool] = None, + always_query_source: Optional[bool] = None, + reranker_threshold: Optional[float] = None, + language: Optional[str] = None, + market: Optional[str] = None, + count: Optional[int] = None, + freshness: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword knowledge_source_name: The name of the index the params apply to. Required. + :paramtype knowledge_source_name: str + :keyword include_references: Indicates whether references should be included for data retrieved + from this source. + :paramtype include_references: bool + :keyword include_reference_source_data: Indicates whether references should include the + structured data obtained during retrieval in their payload. + :paramtype include_reference_source_data: bool + :keyword always_query_source: Indicates that this knowledge source should bypass source + selection and always be queried at retrieval time. + :paramtype always_query_source: bool + :keyword reranker_threshold: The reranker threshold all retrieved documents must meet to be + included in the response. + :paramtype reranker_threshold: float + :keyword language: The language of the web results. + :paramtype language: str + :keyword market: The market of the web results. + :paramtype market: str + :keyword count: The number of web results to return. + :paramtype count: int + :keyword freshness: The freshness of web results. 
+ :paramtype freshness: str + """ + super().__init__( + knowledge_source_name=knowledge_source_name, + include_references=include_references, + include_reference_source_data=include_reference_source_data, + always_query_source=always_query_source, + reranker_threshold=reranker_threshold, + **kwargs + ) + self.kind: str = "web" + self.language = language + self.market = market + self.count = count + self.freshness = freshness diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/models/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/models/_patch.py similarity index 100% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/models/_patch.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/models/_patch.py diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/operations/__init__.py similarity index 94% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/operations/__init__.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/operations/__init__.py index ee2a3f9ac8bb..60ca88c85f3c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/aio/operations/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/operations/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- # pylint: disable=wrong-import-position diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/operations/_knowledge_retrieval_operations.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/operations/_knowledge_retrieval_operations.py similarity index 76% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/operations/_knowledge_retrieval_operations.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/operations/_knowledge_retrieval_operations.py index cf6369b46890..636ba80ec25b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/operations/_knowledge_retrieval_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/operations/_knowledge_retrieval_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.39.0) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.10.8, generator: @autorest/python@6.42.1) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- from collections.abc import MutableMapping @@ -22,7 +22,7 @@ from azure.core.utils import case_insensitive_dict from .. import models as _models -from .._configuration import KnowledgeAgentRetrievalClientConfiguration +from .._configuration import KnowledgeBaseRetrievalClientConfiguration from .._utils.serialization import Deserializer, Serializer T = TypeVar("T") @@ -41,7 +41,7 @@ def build_retrieve_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-08-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -71,7 +71,7 @@ class KnowledgeRetrievalOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.search.documents.agent.KnowledgeAgentRetrievalClient`'s + :class:`~azure.search.documents.knowledgebases.KnowledgeBaseRetrievalClient`'s :attr:`knowledge_retrieval` attribute. """ @@ -80,7 +80,7 @@ class KnowledgeRetrievalOperations: def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config: KnowledgeAgentRetrievalClientConfiguration = ( + self._config: KnowledgeBaseRetrievalClientConfiguration = ( input_args.pop(0) if input_args else kwargs.pop("config") ) self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") @@ -89,28 +89,29 @@ def __init__(self, *args, **kwargs) -> None: @overload def retrieve( self, - retrieval_request: _models.KnowledgeAgentRetrievalRequest, + retrieval_request: _models.KnowledgeBaseRetrievalRequest, x_ms_query_source_authorization: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.KnowledgeAgentRetrievalResponse: - """KnowledgeAgent retrieves relevant data from backing stores. + ) -> _models.KnowledgeBaseRetrievalResponse: + """KnowledgeBase retrieves relevant data from backing stores. :param retrieval_request: The retrieval request to process. Required. - :type retrieval_request: ~azure.search.documents.agent.models.KnowledgeAgentRetrievalRequest + :type retrieval_request: + ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalRequest :param x_ms_query_source_authorization: Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. Default value is None. :type x_ms_query_source_authorization: str :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.agent.models.RequestOptions + :type request_options: ~azure.search.documents.knowledgebases.models.RequestOptions :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str - :return: KnowledgeAgentRetrievalResponse or the result of cls(response) - :rtype: ~azure.search.documents.agent.models.KnowledgeAgentRetrievalResponse + :return: KnowledgeBaseRetrievalResponse or the result of cls(response) + :rtype: ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalResponse :raises ~azure.core.exceptions.HttpResponseError: """ @@ -123,8 +124,8 @@ def retrieve( *, content_type: str = "application/json", **kwargs: Any - ) -> _models.KnowledgeAgentRetrievalResponse: - """KnowledgeAgent retrieves relevant data from backing stores. + ) -> _models.KnowledgeBaseRetrievalResponse: + """KnowledgeBase retrieves relevant data from backing stores. :param retrieval_request: The retrieval request to process. Required. :type retrieval_request: IO[bytes] @@ -133,37 +134,37 @@ def retrieve( None. :type x_ms_query_source_authorization: str :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.agent.models.RequestOptions + :type request_options: ~azure.search.documents.knowledgebases.models.RequestOptions :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: KnowledgeAgentRetrievalResponse or the result of cls(response) - :rtype: ~azure.search.documents.agent.models.KnowledgeAgentRetrievalResponse + :return: KnowledgeBaseRetrievalResponse or the result of cls(response) + :rtype: ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalResponse :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace def retrieve( self, - retrieval_request: Union[_models.KnowledgeAgentRetrievalRequest, IO[bytes]], + retrieval_request: Union[_models.KnowledgeBaseRetrievalRequest, IO[bytes]], x_ms_query_source_authorization: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.KnowledgeAgentRetrievalResponse: - """KnowledgeAgent retrieves relevant data from backing stores. + ) -> _models.KnowledgeBaseRetrievalResponse: + """KnowledgeBase retrieves relevant data from backing stores. :param retrieval_request: The retrieval request to process. Is either a - KnowledgeAgentRetrievalRequest type or a IO[bytes] type. Required. - :type retrieval_request: ~azure.search.documents.agent.models.KnowledgeAgentRetrievalRequest or - IO[bytes] + KnowledgeBaseRetrievalRequest type or a IO[bytes] type. Required. + :type retrieval_request: + ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalRequest or IO[bytes] :param x_ms_query_source_authorization: Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. Default value is None. :type x_ms_query_source_authorization: str :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.agent.models.RequestOptions - :return: KnowledgeAgentRetrievalResponse or the result of cls(response) - :rtype: ~azure.search.documents.agent.models.KnowledgeAgentRetrievalResponse + :type request_options: ~azure.search.documents.knowledgebases.models.RequestOptions + :return: KnowledgeBaseRetrievalResponse or the result of cls(response) + :rtype: ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalResponse :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -179,7 +180,7 @@ def retrieve( api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.KnowledgeAgentRetrievalResponse] = kwargs.pop("cls", None) + cls: ClsType[_models.KnowledgeBaseRetrievalResponse] = kwargs.pop("cls", None) _x_ms_client_request_id = None if request_options is not None: @@ -190,7 +191,7 @@ def retrieve( if isinstance(retrieval_request, (IOBase, bytes)): _content = retrieval_request else: - _json = self._serialize.body(retrieval_request, "KnowledgeAgentRetrievalRequest") + _json = self._serialize.body(retrieval_request, "KnowledgeBaseRetrievalRequest") _request = build_retrieve_request( x_ms_client_request_id=_x_ms_client_request_id, @@ -204,7 +205,9 @@ def retrieve( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "agentName": self._serialize.url("self._config.agent_name", self._config.agent_name, "str"), + "knowledgeBaseName": self._serialize.url( + "self._config.knowledge_base_name", self._config.knowledge_base_name, "str" + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -215,12 +218,15 @@ def retrieve( response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [200, 206]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize("KnowledgeAgentRetrievalResponse", pipeline_response.http_response) + deserialized = self._deserialize("KnowledgeBaseRetrievalResponse", pipeline_response.http_response) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/operations/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/operations/_patch.py similarity index 100% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/operations/_patch.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/operations/_patch.py diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/_generated/py.typed b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/py.typed similarity index 100% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_generated/py.typed rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_generated/py.typed diff --git 
a/sdk/search/azure-search-documents/azure/search/documents/agent/_agent_client.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_knowledgebase_client.py similarity index 70% rename from sdk/search/azure-search-documents/azure/search/documents/agent/_agent_client.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_knowledgebase_client.py index f53ab27ca7bc..bb6900e773ab 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/_agent_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_knowledgebase_client.py @@ -8,48 +8,52 @@ from azure.core.credentials import AzureKeyCredential, TokenCredential from azure.core.tracing.decorator import distributed_trace from .._api_versions import DEFAULT_VERSION -from ._generated import KnowledgeAgentRetrievalClient as _KnowledgeAgentRetrievalClient +from ._generated import KnowledgeBaseRetrievalClient as _KnowledgeBaseRetrievalClient from ._generated.models import ( - KnowledgeAgentRetrievalRequest, + KnowledgeBaseRetrievalRequest, RequestOptions, - KnowledgeAgentRetrievalResponse, + KnowledgeBaseRetrievalResponse, ) from .._headers_mixin import HeadersMixin from .._utils import get_authentication_policy from .._version import SDK_MONIKER -class KnowledgeAgentRetrievalClient(HeadersMixin): - """A client that can be used to query an agent. +class KnowledgeBaseRetrievalClient(HeadersMixin): + """A client that can be used to query a knowledge base. - :param endpoint: The URL endpoint of an Azure search service + :param endpoint: The URL endpoint of an Azure search service. :type endpoint: str - :param agent_name: The name of the agent. Required. - :type agent_name: str - :param credential: A credential to authorize search client requests + :param knowledge_base_name: The name of the knowledge base. Required. + :type knowledge_base_name: str + :param credential: A credential to authorize search client requests. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential :keyword str api_version: The Search API version to use for requests. - :keyword str audience: sets the Audience to use for authentication with Microsoft Entra ID. The + :keyword str audience: Sets the audience to use for authentication with Microsoft Entra ID. The audience is not considered when using a shared key. If audience is not provided, the public cloud audience will be assumed. 
""" _ODATA_ACCEPT: str = "application/json;odata.metadata=none" - _client: _KnowledgeAgentRetrievalClient + _client: _KnowledgeBaseRetrievalClient def __init__( - self, endpoint: str, agent_name: str, credential: Union[AzureKeyCredential, TokenCredential], **kwargs: Any + self, + endpoint: str, + knowledge_base_name: str, + credential: Union[AzureKeyCredential, TokenCredential], + **kwargs: Any, ) -> None: self._api_version = kwargs.pop("api_version", DEFAULT_VERSION) self._endpoint = endpoint - self._agent_name = agent_name + self._knowledge_base_name = knowledge_base_name self._credential = credential audience = kwargs.pop("audience", None) if isinstance(credential, AzureKeyCredential): self._aad = False - self._client = _KnowledgeAgentRetrievalClient( + self._client = _KnowledgeBaseRetrievalClient( endpoint=endpoint, - agent_name=agent_name, + knowledge_base_name=knowledge_base_name, sdk_moniker=SDK_MONIKER, api_version=self._api_version, **kwargs @@ -57,9 +61,9 @@ def __init__( else: self._aad = True authentication_policy = get_authentication_policy(credential, audience=audience) - self._client = _KnowledgeAgentRetrievalClient( + self._client = _KnowledgeBaseRetrievalClient( endpoint=endpoint, - agent_name=agent_name, + knowledge_base_name=knowledge_base_name, authentication_policy=authentication_policy, sdk_moniker=SDK_MONIKER, api_version=self._api_version, @@ -68,8 +72,8 @@ def __init__( self.knowledge_retrieval = self._client.knowledge_retrieval def __repr__(self) -> str: - return "".format( - repr(self._endpoint), repr(self._agent_name) + return "".format( + repr(self._endpoint), repr(self._knowledge_base_name) )[:1024] def close(self) -> None: @@ -83,11 +87,11 @@ def close(self) -> None: @distributed_trace def retrieve( self, - retrieval_request: Union[KnowledgeAgentRetrievalRequest, IO[bytes]], + retrieval_request: Union[KnowledgeBaseRetrievalRequest, IO[bytes]], x_ms_query_source_authorization: Optional[str] = None, request_options: Optional[RequestOptions] = None, **kwargs: Any - ) -> KnowledgeAgentRetrievalResponse: + ) -> KnowledgeBaseRetrievalResponse: kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) return self._client.knowledge_retrieval.retrieve( retrieval_request=retrieval_request, diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/aio/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/__init__.py similarity index 91% rename from sdk/search/azure-search-documents/azure/search/documents/agent/aio/__init__.py rename to sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/__init__.py index 18d9cc8b759e..9cf619a92074 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/aio/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/__init__.py @@ -24,6 +24,6 @@ # # -------------------------------------------------------------------------- -from ._agent_client_async import KnowledgeAgentRetrievalClient +from ._knowledgebase_client_async import KnowledgeBaseRetrievalClient -__all__ = ("KnowledgeAgentRetrievalClient",) +__all__ = ("KnowledgeBaseRetrievalClient",) diff --git a/sdk/search/azure-search-documents/azure/search/documents/agent/aio/_agent_client_async.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_knowledgebase_client_async.py similarity index 71% rename from sdk/search/azure-search-documents/azure/search/documents/agent/aio/_agent_client_async.py rename to 
sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_knowledgebase_client_async.py index de658044da46..e850222dae49 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/agent/aio/_agent_client_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_knowledgebase_client_async.py @@ -9,48 +9,52 @@ from azure.core.credentials_async import AsyncTokenCredential from azure.core.tracing.decorator_async import distributed_trace_async from ..._utils import get_authentication_policy -from .._generated.aio import KnowledgeAgentRetrievalClient as _KnowledgeAgentRetrievalClient +from .._generated.aio import KnowledgeBaseRetrievalClient as _KnowledgeBaseRetrievalClient from .._generated.models import ( - KnowledgeAgentRetrievalRequest, + KnowledgeBaseRetrievalRequest, RequestOptions, - KnowledgeAgentRetrievalResponse, + KnowledgeBaseRetrievalResponse, ) from ..._api_versions import DEFAULT_VERSION from ..._headers_mixin import HeadersMixin from ..._version import SDK_MONIKER -class KnowledgeAgentRetrievalClient(HeadersMixin): - """A client that can be used to query an agent. +class KnowledgeBaseRetrievalClient(HeadersMixin): + """A client that can be used to query a knowledge base. - :param endpoint: The URL endpoint of an Azure search service + :param endpoint: The URL endpoint of an Azure search service. :type endpoint: str - :param agent_name: The name of the agent. Required. - :type agent_name: str - :param credential: A credential to authorize search client requests + :param knowledge_base_name: The name of the knowledge base. Required. + :type knowledge_base_name: str + :param credential: A credential to authorize search client requests. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential :keyword str api_version: The Search API version to use for requests. - :keyword str audience: sets the Audience to use for authentication with Microsoft Entra ID. The + :keyword str audience: Sets the audience to use for authentication with Microsoft Entra ID. The audience is not considered when using a shared key. If audience is not provided, the public cloud audience will be assumed. 
""" _ODATA_ACCEPT: str = "application/json;odata.metadata=none" - _client: _KnowledgeAgentRetrievalClient + _client: _KnowledgeBaseRetrievalClient def __init__( - self, endpoint: str, agent_name: str, credential: Union[AzureKeyCredential, AsyncTokenCredential], **kwargs: Any + self, + endpoint: str, + knowledge_base_name: str, + credential: Union[AzureKeyCredential, AsyncTokenCredential], + **kwargs: Any, ) -> None: self._api_version = kwargs.pop("api_version", DEFAULT_VERSION) self._endpoint: str = endpoint - self._agent_name: str = agent_name + self._knowledge_base_name: str = knowledge_base_name self._credential = credential audience = kwargs.pop("audience", None) if isinstance(credential, AzureKeyCredential): self._aad = False - self._client = _KnowledgeAgentRetrievalClient( + self._client = _KnowledgeBaseRetrievalClient( endpoint=endpoint, - agent_name=agent_name, + knowledge_base_name=knowledge_base_name, sdk_moniker=SDK_MONIKER, api_version=self._api_version, **kwargs @@ -58,9 +62,9 @@ def __init__( else: self._aad = True authentication_policy = get_authentication_policy(credential, audience=audience, is_async=True) - self._client = _KnowledgeAgentRetrievalClient( + self._client = _KnowledgeBaseRetrievalClient( endpoint=endpoint, - agent_name=agent_name, + knowledge_base_name=knowledge_base_name, authentication_policy=authentication_policy, sdk_moniker=SDK_MONIKER, api_version=self._api_version, @@ -69,8 +73,8 @@ def __init__( self.knowledge_retrieval = self._client.knowledge_retrieval def __repr__(self) -> str: - return "".format( - repr(self._endpoint), repr(self._agent_name) + return "".format( + repr(self._endpoint), repr(self._knowledge_base_name) )[:1024] async def close(self) -> None: @@ -84,11 +88,11 @@ async def close(self) -> None: @distributed_trace_async async def retrieve( self, - retrieval_request: Union[KnowledgeAgentRetrievalRequest, IO[bytes]], + retrieval_request: Union[KnowledgeBaseRetrievalRequest, IO[bytes]], x_ms_query_source_authorization: Optional[str] = None, request_options: Optional[RequestOptions] = None, **kwargs: Any - ) -> KnowledgeAgentRetrievalResponse: + ) -> KnowledgeBaseRetrievalResponse: kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) return await self._client.knowledge_retrieval.retrieve( retrieval_request=retrieval_request, diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/models/__init__.py new file mode 100644 index 000000000000..5914bd52e5cf --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/models/__init__.py @@ -0,0 +1,146 @@ +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +# -------------------------------------------------------------------------- + +from .._generated.models import ( + AzureBlobKnowledgeSourceParams, + ErrorAdditionalInfo, + ErrorDetail, + ErrorResponse, + IndexedOneLakeKnowledgeSourceParams, + IndexedSharePointKnowledgeSourceParams, + KnowledgeBaseActivityRecord, + KnowledgeBaseAgenticReasoningActivityRecord, + KnowledgeBaseAzureBlobActivityArguments, + KnowledgeBaseAzureBlobActivityRecord, + KnowledgeBaseAzureBlobReference, + KnowledgeBaseErrorAdditionalInfo, + KnowledgeBaseErrorDetail, + KnowledgeBaseIndexedOneLakeActivityArguments, + KnowledgeBaseIndexedOneLakeActivityRecord, + KnowledgeBaseIndexedOneLakeReference, + KnowledgeBaseIndexedSharePointActivityArguments, + KnowledgeBaseIndexedSharePointActivityRecord, + KnowledgeBaseIndexedSharePointReference, + KnowledgeBaseMessage, + KnowledgeBaseMessageContent, + KnowledgeBaseMessageContentType, + KnowledgeBaseMessageImageContent, + KnowledgeBaseMessageImageContentImage, + KnowledgeBaseMessageTextContent, + KnowledgeBaseModelAnswerSynthesisActivityRecord, + KnowledgeBaseModelQueryPlanningActivityRecord, + KnowledgeBaseReference, + KnowledgeBaseRemoteSharePointActivityArguments, + KnowledgeBaseRemoteSharePointActivityRecord, + KnowledgeBaseRemoteSharePointReference, + KnowledgeBaseRetrievalActivityRecord, + KnowledgeBaseRetrievalRequest, + KnowledgeBaseRetrievalResponse, + KnowledgeBaseSearchIndexActivityArguments, + KnowledgeBaseSearchIndexActivityRecord, + KnowledgeBaseSearchIndexReference, + KnowledgeBaseWebActivityArguments, + KnowledgeBaseWebActivityRecord, + KnowledgeBaseWebReference, + KnowledgeRetrievalIntent, + KnowledgeRetrievalIntentType, + KnowledgeRetrievalLowReasoningEffort, + KnowledgeRetrievalMediumReasoningEffort, + KnowledgeRetrievalMinimalReasoningEffort, + KnowledgeRetrievalOutputMode, + KnowledgeRetrievalReasoningEffort, + KnowledgeRetrievalReasoningEffortKind, + KnowledgeRetrievalSemanticIntent, + KnowledgeSourceKind, + KnowledgeSourceParams, + RemoteSharePointKnowledgeSourceParams, + RequestOptions, + SearchIndexFieldReference, + SearchIndexKnowledgeSourceParams, + SharePointSensitivityLabelInfo, + WebKnowledgeSourceParams, +) + + +__all__ = ( + "AzureBlobKnowledgeSourceParams", + "ErrorAdditionalInfo", + "ErrorDetail", + "ErrorResponse", + "IndexedOneLakeKnowledgeSourceParams", + "IndexedSharePointKnowledgeSourceParams", + "KnowledgeBaseActivityRecord", + "KnowledgeBaseAgenticReasoningActivityRecord", + "KnowledgeBaseAzureBlobActivityArguments", + "KnowledgeBaseAzureBlobActivityRecord", + "KnowledgeBaseAzureBlobReference", + "KnowledgeBaseErrorAdditionalInfo", + "KnowledgeBaseErrorDetail", + "KnowledgeBaseIndexedOneLakeActivityArguments", + "KnowledgeBaseIndexedOneLakeActivityRecord", + "KnowledgeBaseIndexedOneLakeReference", + "KnowledgeBaseIndexedSharePointActivityArguments", + "KnowledgeBaseIndexedSharePointActivityRecord", + "KnowledgeBaseIndexedSharePointReference", + "KnowledgeBaseMessage", + "KnowledgeBaseMessageContent", + "KnowledgeBaseMessageContentType", + 
"KnowledgeBaseMessageImageContent", + "KnowledgeBaseMessageImageContentImage", + "KnowledgeBaseMessageTextContent", + "KnowledgeBaseModelAnswerSynthesisActivityRecord", + "KnowledgeBaseModelQueryPlanningActivityRecord", + "KnowledgeBaseReference", + "KnowledgeBaseRemoteSharePointActivityArguments", + "KnowledgeBaseRemoteSharePointActivityRecord", + "KnowledgeBaseRemoteSharePointReference", + "KnowledgeBaseRetrievalActivityRecord", + "KnowledgeBaseRetrievalRequest", + "KnowledgeBaseRetrievalResponse", + "KnowledgeBaseSearchIndexActivityArguments", + "KnowledgeBaseSearchIndexActivityRecord", + "KnowledgeBaseSearchIndexReference", + "KnowledgeBaseWebActivityArguments", + "KnowledgeBaseWebActivityRecord", + "KnowledgeBaseWebReference", + "KnowledgeRetrievalIntent", + "KnowledgeRetrievalIntentType", + "KnowledgeRetrievalLowReasoningEffort", + "KnowledgeRetrievalMediumReasoningEffort", + "KnowledgeRetrievalMinimalReasoningEffort", + "KnowledgeRetrievalOutputMode", + "KnowledgeRetrievalReasoningEffort", + "KnowledgeRetrievalReasoningEffortKind", + "KnowledgeRetrievalSemanticIntent", + "KnowledgeSourceKind", + "KnowledgeSourceParams", + "RemoteSharePointKnowledgeSourceParams", + "RequestOptions", + "SearchIndexFieldReference", + "SearchIndexKnowledgeSourceParams", + "SharePointSensitivityLabelInfo", + "WebKnowledgeSourceParams", +) diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_buffered_sender_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_buffered_sender_async.py index 3c166e74983c..7c292e09e25e 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_buffered_sender_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_buffered_sender_async.py @@ -18,7 +18,9 @@ class TestSearchBatchingClientAsync: @await_prepared_test async def test_search_indexing_buffered_sender_kwargs(self): - async with SearchIndexingBufferedSender("endpoint", "index name", CREDENTIAL, window=100) as client: + async with SearchIndexingBufferedSender( + "endpoint", "index name", CREDENTIAL, window=100 + ) as client: assert client._batch_action_count == 512 assert client._max_retries_per_action == 3 assert client._auto_flush_interval == 60 @@ -26,7 +28,9 @@ async def test_search_indexing_buffered_sender_kwargs(self): @await_prepared_test async def test_batch_queue(self): - async with SearchIndexingBufferedSender("endpoint", "index name", CREDENTIAL, auto_flush=False) as client: + async with SearchIndexingBufferedSender( + "endpoint", "index name", CREDENTIAL, auto_flush=False + ) as client: assert client._index_documents_batch await client.upload_documents(["upload1"]) await client.delete_documents(["delete1", "delete2"]) @@ -43,7 +47,9 @@ async def test_batch_queue(self): "azure.search.documents.aio._search_indexing_buffered_sender_async.SearchIndexingBufferedSender._process_if_needed" ) async def test_process_if_needed(self, mock_process_if_needed): - async with SearchIndexingBufferedSender("endpoint", "index name", CREDENTIAL) as client: + async with SearchIndexingBufferedSender( + "endpoint", "index name", CREDENTIAL + ) as client: await client.upload_documents(["upload1"]) await client.delete_documents(["delete1", "delete2"]) assert mock_process_if_needed.called @@ -53,7 +59,9 @@ async def test_process_if_needed(self, mock_process_if_needed): "azure.search.documents.aio._search_indexing_buffered_sender_async.SearchIndexingBufferedSender._cleanup" ) async def test_context_manager(self, mock_cleanup): - async with 
SearchIndexingBufferedSender("endpoint", "index name", CREDENTIAL, auto_flush=False) as client: + async with SearchIndexingBufferedSender( + "endpoint", "index name", CREDENTIAL, auto_flush=False + ) as client: await client.upload_documents(["upload1"]) await client.delete_documents(["delete1", "delete2"]) assert mock_cleanup.called @@ -72,7 +80,9 @@ async def test_flush(self): "_index_documents_actions", side_effect=HttpResponseError("Error"), ): - async with SearchIndexingBufferedSender("endpoint", "index name", CREDENTIAL, auto_flush=False) as client: + async with SearchIndexingBufferedSender( + "endpoint", "index name", CREDENTIAL, auto_flush=False + ) as client: client._index_key = "hotelId" await client.upload_documents([DOCUMENT]) await client.flush() diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_knowledge_base_configuration_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_knowledge_base_configuration_live_async.py new file mode 100644 index 000000000000..ab8981ba605c --- /dev/null +++ b/sdk/search/azure-search-documents/tests/async_tests/test_knowledge_base_configuration_live_async.py @@ -0,0 +1,163 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +from __future__ import annotations + +import pytest + +from azure.core import MatchConditions +from azure.core.exceptions import HttpResponseError +from devtools_testutils import AzureRecordedTestCase, get_credential +from devtools_testutils.aio import recorded_by_proxy_async + +from azure.search.documents.indexes.aio import SearchIndexClient +from azure.search.documents.indexes.models import ( + KnowledgeBase, + KnowledgeRetrievalMediumReasoningEffort, + KnowledgeRetrievalMinimalReasoningEffort, + KnowledgeSourceReference, + WebKnowledgeSource, + WebKnowledgeSourceDomain, + WebKnowledgeSourceDomains, + WebKnowledgeSourceParameters, +) + +from search_service_preparer import SearchEnvVarPreparer, search_decorator + + +class _AsyncTestContext: + def __init__( + self, + index_client: SearchIndexClient, + source_name: str, + created_source: WebKnowledgeSource, + base_name: str, + created_base: KnowledgeBase, + ) -> None: + self.index_client = index_client + self.source_name = source_name + self.created_source = created_source + self.base_name = base_name + self.created_base = created_base + + +class TestKnowledgeBaseConfigurationLiveAsync(AzureRecordedTestCase): + async def _create_context(self, endpoint: str) -> "_AsyncTestContext": + credential = get_credential(is_async=True) + index_client = SearchIndexClient(endpoint, credential, retry_backoff_factor=60) + + source_name = self.get_resource_name("cfgks") + create_source = WebKnowledgeSource( + name=source_name, + description="configuration source", + web_parameters=WebKnowledgeSourceParameters( + domains=WebKnowledgeSourceDomains( + allowed_domains=[ + WebKnowledgeSourceDomain( + address="https://learn.microsoft.com", + include_subpages=True, + ) + ] + ) + ), + ) + base_name = self.get_resource_name("cfgkb") + + # best-effort cleanup in case a previous run failed before teardown + try: + await index_client.delete_knowledge_base(base_name) + except HttpResponseError: + pass + try: + await index_client.delete_knowledge_source(source_name) + except HttpResponseError: + pass + + created_source = await index_client.create_knowledge_source(create_source) + + create_base = KnowledgeBase( + name=base_name, + description="configurable 
knowledge base", + knowledge_sources=[KnowledgeSourceReference(name=source_name)], + retrieval_reasoning_effort=KnowledgeRetrievalMinimalReasoningEffort(), + output_mode="extractiveData", + ) + + try: + created_base = await index_client.create_knowledge_base(create_base) + except HttpResponseError: + try: + await index_client.delete_knowledge_source(created_source) + except HttpResponseError: + pass + raise + + return _AsyncTestContext( + index_client, source_name, created_source, base_name, created_base + ) + + async def _cleanup(self, ctx: "_AsyncTestContext") -> None: + try: + try: + await ctx.index_client.delete_knowledge_base( + ctx.created_base, + match_condition=MatchConditions.IfNotModified, + ) + except HttpResponseError: + pass + try: + await ctx.index_client.delete_knowledge_source( + ctx.created_source, + match_condition=MatchConditions.IfNotModified, + ) + except HttpResponseError: + pass + finally: + await ctx.index_client.close() + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy_async + async def test_knowledge_base_configuration_round_trip(self, endpoint: str) -> None: + ctx = await self._create_context(endpoint) + try: + created = ctx.created_base + assert isinstance( + created.retrieval_reasoning_effort, + KnowledgeRetrievalMinimalReasoningEffort, + ) + assert created.output_mode == "extractiveData" + assert created.retrieval_instructions is None + assert created.answer_instructions is None + + update_model = KnowledgeBase( + name=ctx.base_name, + description="config updated", + knowledge_sources=[KnowledgeSourceReference(name=ctx.source_name)], + retrieval_reasoning_effort=KnowledgeRetrievalMediumReasoningEffort(), + output_mode="answerSynthesis", + retrieval_instructions="summarize with details", + answer_instructions="include citations and summaries", + ) + update_model.e_tag = created.e_tag + + with pytest.raises(HttpResponseError) as ex: + await ctx.index_client.create_or_update_knowledge_base( + update_model, + match_condition=MatchConditions.IfNotModified, + ) + + assert "Retrieval instructions cannot be specified" in str(ex.value) + + fetched = await ctx.index_client.get_knowledge_base(ctx.base_name) + assert isinstance( + fetched.retrieval_reasoning_effort, + KnowledgeRetrievalMinimalReasoningEffort, + ) + assert fetched.output_mode == "extractiveData" + assert fetched.retrieval_instructions is None + assert fetched.answer_instructions is None + finally: + await self._cleanup(ctx) diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_knowledge_base_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_knowledge_base_live_async.py new file mode 100644 index 000000000000..40b7efa414b9 --- /dev/null +++ b/sdk/search/azure-search-documents/tests/async_tests/test_knowledge_base_live_async.py @@ -0,0 +1,236 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +from __future__ import annotations + +import asyncio + +from azure.core import MatchConditions +from azure.core.exceptions import HttpResponseError +from devtools_testutils import AzureRecordedTestCase, get_credential +from devtools_testutils.aio import recorded_by_proxy_async + +from azure.search.documents.indexes.aio import SearchIndexClient +from azure.search.documents.indexes.models import ( + KnowledgeBase, + KnowledgeSourceReference, + SearchServiceStatistics, + ServiceIndexersRuntime, + WebKnowledgeSource, + WebKnowledgeSourceDomain, + WebKnowledgeSourceDomains, + WebKnowledgeSourceParameters, +) + +from search_service_preparer import SearchEnvVarPreparer, search_decorator + + +class _AsyncTestContext: + def __init__( + self, + index_client: SearchIndexClient, + source_name: str, + created_source: WebKnowledgeSource, + base_name: str, + created_base: KnowledgeBase, + ) -> None: + self.index_client = index_client + self.source_name = source_name + self.created_source = created_source + self.base_name = base_name + self.created_base = created_base + + +class TestKnowledgeBaseLiveAsync(AzureRecordedTestCase): + async def _create_context(self, endpoint: str) -> "_AsyncTestContext": + credential = get_credential(is_async=True) + index_client = SearchIndexClient(endpoint, credential, retry_backoff_factor=60) + + source_name = self.get_resource_name("ksrc") + base_name = self.get_resource_name("kb") + create_source = WebKnowledgeSource( + name=source_name, + description="knowledge base dependent source", + web_parameters=WebKnowledgeSourceParameters( + domains=WebKnowledgeSourceDomains( + allowed_domains=[ + WebKnowledgeSourceDomain( + address="https://learn.microsoft.com", + include_subpages=True, + ) + ] + ) + ), + ) + created_source = await index_client.create_knowledge_source(create_source) + + create_base = KnowledgeBase( + name=base_name, + description="initial knowledge base", + knowledge_sources=[KnowledgeSourceReference(name=source_name)], + ) + created_base = await index_client.create_knowledge_base(create_base) + return _AsyncTestContext( + index_client, source_name, created_source, base_name, created_base + ) + + async def _cleanup(self, ctx: "_AsyncTestContext") -> None: + try: + try: + await ctx.index_client.delete_knowledge_base( + ctx.created_base, + match_condition=MatchConditions.IfNotModified, + ) + except HttpResponseError: + pass + try: + await ctx.index_client.delete_knowledge_source( + ctx.created_source, + match_condition=MatchConditions.IfNotModified, + ) + except HttpResponseError: + pass + finally: + await ctx.index_client.close() + + async def _poll_status_snapshots( + self, + ctx: "_AsyncTestContext", + *, + wait_for: str = "active", + interval: float = 5.0, + attempts: int = 36, + ): + snapshots = [] + for _ in range(attempts): + status = await ctx.index_client.get_knowledge_source_status(ctx.source_name) + snapshots.append(status) + if status.synchronization_status == wait_for: + return snapshots + await asyncio.sleep(interval) + return snapshots + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy_async + async def test_knowledge_base_create(self, endpoint: str) -> None: + ctx = await self._create_context(endpoint) + try: + assert ctx.created_base.name == ctx.base_name + assert ctx.created_base.knowledge_sources + assert ctx.created_base.knowledge_sources[0].name == ctx.source_name + finally: + await self._cleanup(ctx) + + @SearchEnvVarPreparer() + 
@search_decorator(schema=None, index_batch=None) + @recorded_by_proxy_async + async def test_knowledge_base_update(self, endpoint: str) -> None: + ctx = await self._create_context(endpoint) + try: + update_model = KnowledgeBase( + name=ctx.base_name, + description="updated knowledge base description", + knowledge_sources=[KnowledgeSourceReference(name=ctx.source_name)], + ) + update_model.e_tag = ctx.created_base.e_tag + + revised = await ctx.index_client.create_or_update_knowledge_base( + update_model, + match_condition=MatchConditions.IfNotModified, + ) + ctx.created_base = revised + assert revised.description == "updated knowledge base description" + finally: + await self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy_async + async def test_knowledge_base_read(self, endpoint: str) -> None: + ctx = await self._create_context(endpoint) + try: + fetched = await ctx.index_client.get_knowledge_base(ctx.base_name) + listed = [item async for item in ctx.index_client.list_knowledge_bases()] + + assert fetched.name == ctx.base_name + assert ( + fetched.knowledge_sources + and fetched.knowledge_sources[0].name == ctx.source_name + ) + assert any(item.name == ctx.base_name for item in listed) + finally: + await self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy_async + async def test_knowledge_base_delete(self, endpoint: str) -> None: + ctx = await self._create_context(endpoint) + try: + await ctx.index_client.delete_knowledge_base( + ctx.created_base, + match_condition=MatchConditions.IfNotModified, + ) + remaining = [item async for item in ctx.index_client.list_knowledge_bases()] + assert all(item.name != ctx.base_name for item in remaining) + finally: + await self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy_async + async def test_knowledge_source_status_tracking(self, endpoint: str) -> None: + ctx = await self._create_context(endpoint) + try: + snapshots = await self._poll_status_snapshots(ctx) + assert snapshots, "Expected at least one status snapshot" + + first = snapshots[0] + last = snapshots[-1] + assert first.synchronization_status in {"creating", "active"} + assert last.synchronization_status == "active" + + if last.statistics is not None: + assert last.statistics.total_synchronization >= 0 + assert last.statistics.average_items_processed_per_synchronization >= 0 + if last.current_synchronization_state is not None: + assert last.current_synchronization_state.items_updates_processed >= 0 + if last.last_synchronization_state is not None: + assert last.last_synchronization_state.items_updates_processed >= 0 + finally: + await self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy_async + async def test_service_indexer_runtime_statistics(self, endpoint: str) -> None: + ctx = await self._create_context(endpoint) + try: + snapshots = await self._poll_status_snapshots(ctx) + assert snapshots, "Expected at least one status snapshot" + + service_stats = ( + await ctx.index_client._client.get_service_statistics() + ) # pylint:disable=protected-access + assert isinstance(service_stats, SearchServiceStatistics) + + runtime = service_stats.indexers_runtime + assert isinstance(runtime, ServiceIndexersRuntime) + assert runtime.used_seconds >= -1 + assert runtime.beginning_time <= runtime.ending_time + if runtime.remaining_seconds is not 
None: + assert runtime.remaining_seconds >= -1 + + counters = service_stats.counters + assert counters.indexer_counter is not None + assert counters.indexer_counter.usage >= 0 + assert counters.indexer_counter.quota >= counters.indexer_counter.usage + + limits = service_stats.limits + if limits.max_cumulative_indexer_runtime_seconds is not None: + assert limits.max_cumulative_indexer_runtime_seconds > 0 + finally: + await self._cleanup(ctx) diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_knowledge_source_remote_sharepoint_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_knowledge_source_remote_sharepoint_live_async.py new file mode 100644 index 000000000000..a5c678e07822 --- /dev/null +++ b/sdk/search/azure-search-documents/tests/async_tests/test_knowledge_source_remote_sharepoint_live_async.py @@ -0,0 +1,138 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +from __future__ import annotations + +from azure.core import MatchConditions +from azure.core.exceptions import HttpResponseError +from devtools_testutils import AzureRecordedTestCase, get_credential +from devtools_testutils.aio import recorded_by_proxy_async + +from azure.search.documents.indexes.aio import SearchIndexClient +from azure.search.documents.indexes.models import ( + RemoteSharePointKnowledgeSource, + RemoteSharePointKnowledgeSourceParameters, +) + +from search_service_preparer import SearchEnvVarPreparer, search_decorator + + +class _AsyncTestContext: + def __init__( + self, + index_client: SearchIndexClient, + source_name: str, + created_revision: RemoteSharePointKnowledgeSource, + ) -> None: + self.index_client = index_client + self.source_name = source_name + self.created_revision = created_revision + + +class TestRemoteSharePointKnowledgeSourceLiveAsync(AzureRecordedTestCase): + async def _create_context(self, endpoint: str) -> "_AsyncTestContext": + credential = get_credential(is_async=True) + index_client = SearchIndexClient(endpoint, credential, retry_backoff_factor=60) + + source_name = self.get_resource_name("spsource") + create_model = RemoteSharePointKnowledgeSource( + name=source_name, + description="initial sharepoint source", + remote_share_point_parameters=RemoteSharePointKnowledgeSourceParameters( + filter_expression="Title:Test", + resource_metadata=["Title", "Path"], + ), + ) + created = await index_client.create_knowledge_source(create_model) + return _AsyncTestContext(index_client, source_name, created) + + async def _cleanup(self, ctx: "_AsyncTestContext") -> None: + try: + try: + await ctx.index_client.delete_knowledge_source( + ctx.created_revision, + match_condition=MatchConditions.IfNotModified, + ) + except HttpResponseError: + pass + finally: + await ctx.index_client.close() + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy_async + async def test_remote_sharepoint_knowledge_source_create( + self, endpoint: str + ) -> None: + ctx = await self._create_context(endpoint) + try: + assert ctx.created_revision.name == ctx.source_name + assert ctx.created_revision.kind == "remoteSharePoint" + params = ctx.created_revision.remote_share_point_parameters + assert params is not None + assert params.filter_expression == "Title:Test" + assert params.resource_metadata is not None + assert {"Title", "Path"}.issubset(set(params.resource_metadata)) + finally: + await self._cleanup(ctx) + + 
@SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy_async + async def test_remote_sharepoint_knowledge_source_update( + self, endpoint: str + ) -> None: + ctx = await self._create_context(endpoint) + try: + update_model = RemoteSharePointKnowledgeSource( + name=ctx.source_name, + description="updated description", + remote_share_point_parameters=ctx.created_revision.remote_share_point_parameters, + ) + update_model.e_tag = ctx.created_revision.e_tag + + revised = await ctx.index_client.create_or_update_knowledge_source( + update_model, + match_condition=MatchConditions.IfNotModified, + ) + ctx.created_revision = revised + assert revised.description == "updated description" + finally: + await self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy_async + async def test_remote_sharepoint_knowledge_source_read(self, endpoint: str) -> None: + ctx = await self._create_context(endpoint) + try: + fetched = await ctx.index_client.get_knowledge_source(ctx.source_name) + status = await ctx.index_client.get_knowledge_source_status(ctx.source_name) + listed = [item async for item in ctx.index_client.list_knowledge_sources()] + + assert fetched.name == ctx.source_name + assert status.synchronization_status in {"creating", "active", "deleting"} + assert any(item.name == ctx.source_name for item in listed) + finally: + await self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy_async + async def test_remote_sharepoint_knowledge_source_delete( + self, endpoint: str + ) -> None: + ctx = await self._create_context(endpoint) + try: + await ctx.index_client.delete_knowledge_source( + ctx.created_revision, + match_condition=MatchConditions.IfNotModified, + ) + remaining = [ + item async for item in ctx.index_client.list_knowledge_sources() + ] + assert all(item.name != ctx.source_name for item in remaining) + finally: + await ctx.index_client.close() diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_knowledge_source_web_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_knowledge_source_web_live_async.py new file mode 100644 index 000000000000..840718b35a9c --- /dev/null +++ b/sdk/search/azure-search-documents/tests/async_tests/test_knowledge_source_web_live_async.py @@ -0,0 +1,139 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +from __future__ import annotations + +from azure.core import MatchConditions +from azure.core.exceptions import HttpResponseError +from devtools_testutils import AzureRecordedTestCase, get_credential +from devtools_testutils.aio import recorded_by_proxy_async + +from azure.search.documents.indexes.aio import SearchIndexClient +from azure.search.documents.indexes.models import ( + WebKnowledgeSource, + WebKnowledgeSourceDomain, + WebKnowledgeSourceDomains, + WebKnowledgeSourceParameters, +) + +from search_service_preparer import SearchEnvVarPreparer, search_decorator + + +class _AsyncTestContext: + def __init__( + self, + index_client: SearchIndexClient, + source_name: str, + created_revision: WebKnowledgeSource, + ): + self.index_client = index_client + self.source_name = source_name + self.created_revision = created_revision + + +class TestWebKnowledgeSourceLiveAsync(AzureRecordedTestCase): + async def _create_context(self, endpoint: str) -> "_AsyncTestContext": + credential = get_credential(is_async=True) + index_client = SearchIndexClient(endpoint, credential, retry_backoff_factor=60) + + source_name = self.get_resource_name("websource") + create_model = WebKnowledgeSource( + name=source_name, + description="initial web source", + web_parameters=WebKnowledgeSourceParameters( + domains=WebKnowledgeSourceDomains( + allowed_domains=[ + WebKnowledgeSourceDomain( + address="https://learn.microsoft.com", + include_subpages=True, + ) + ] + ) + ), + ) + created = await index_client.create_knowledge_source(create_model) + return _AsyncTestContext(index_client, source_name, created) + + async def _cleanup(self, ctx: "_AsyncTestContext") -> None: + try: + try: + await ctx.index_client.delete_knowledge_source( + ctx.created_revision, + match_condition=MatchConditions.IfNotModified, + ) + except HttpResponseError: + pass + finally: + await ctx.index_client.close() + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy_async + async def test_web_knowledge_source_create(self, endpoint: str) -> None: + ctx = await self._create_context(endpoint) + try: + assert ctx.created_revision.name == ctx.source_name + assert ctx.created_revision.kind == "web" + assert ctx.created_revision.web_parameters is not None + domains = ctx.created_revision.web_parameters.domains + assert domains is not None and domains.allowed_domains is not None + assert domains.allowed_domains[0].address == "https://learn.microsoft.com" + finally: + await self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy_async + async def test_web_knowledge_source_update(self, endpoint: str) -> None: + ctx = await self._create_context(endpoint) + try: + update_model = WebKnowledgeSource( + name=ctx.source_name, + description="updated description", + web_parameters=ctx.created_revision.web_parameters, + ) + update_model.e_tag = ctx.created_revision.e_tag + + revised = await ctx.index_client.create_or_update_knowledge_source( + update_model, + match_condition=MatchConditions.IfNotModified, + ) + ctx.created_revision = revised + assert revised.description == "updated description" + finally: + await self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy_async + async def test_web_knowledge_source_read(self, endpoint: str) -> None: + ctx = await self._create_context(endpoint) + try: + fetched = await 
ctx.index_client.get_knowledge_source(ctx.source_name) + status = await ctx.index_client.get_knowledge_source_status(ctx.source_name) + listed = [item async for item in ctx.index_client.list_knowledge_sources()] + + assert fetched.name == ctx.source_name + assert status.synchronization_status in {"creating", "active", "deleting"} + assert any(item.name == ctx.source_name for item in listed) + finally: + await self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy_async + async def test_web_knowledge_source_delete(self, endpoint: str) -> None: + ctx = await self._create_context(endpoint) + try: + await ctx.index_client.delete_knowledge_source( + ctx.created_revision, + match_condition=MatchConditions.IfNotModified, + ) + remaining = [ + item async for item in ctx.index_client.list_knowledge_sources() + ] + assert all(item.name != ctx.source_name for item in remaining) + finally: + await ctx.index_client.close() diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_search_client_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_search_client_async.py index c41691889a96..39fb7f32a4ed 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_search_client_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_search_client_async.py @@ -4,7 +4,11 @@ # ------------------------------------ from unittest import mock from azure.core.credentials import AzureKeyCredential -from azure.search.documents._generated.models import SearchDocumentsResult, SearchResult +from azure.search.documents._generated.models import ( + FacetResult, + SearchDocumentsResult, + SearchResult, +) from azure.search.documents.aio import SearchClient from azure.search.documents.aio._search_client_async import AsyncSearchPageIterator from test_search_index_client_async import await_prepared_test @@ -28,3 +32,60 @@ async def test_get_count_reset_continuation_token(self, mock_search_post): result._first_page_iterator_instance.continuation_token = "fake token" await result.get_count() assert not result._first_page_iterator_instance.continuation_token + + @await_prepared_test + @mock.patch( + "azure.search.documents._generated.aio.operations._documents_operations.DocumentsOperations.search_post" + ) + async def test_search_enable_elevated_read(self, mock_search_post): + client = SearchClient("endpoint", "index name", CREDENTIAL) + result = await client.search( + search_text="search text", + x_ms_enable_elevated_read=True, + x_ms_query_source_authorization="aad:fake-user", + ) + search_result = SearchDocumentsResult() + search_result.results = [SearchResult(additional_properties={"key": "val"})] + mock_search_post.return_value = search_result + await result.__anext__() + + assert mock_search_post.called + assert mock_search_post.call_args[1]["x_ms_enable_elevated_read"] is True + assert ( + mock_search_post.call_args[1]["x_ms_query_source_authorization"] + == "aad:fake-user" + ) + + @await_prepared_test + @mock.patch( + "azure.search.documents._generated.aio.operations._documents_operations.DocumentsOperations.search_post" + ) + async def test_get_facets_with_aggregations(self, mock_search_post): + client = SearchClient("endpoint", "index name", CREDENTIAL) + result = await client.search(search_text="*") + + search_result = SearchDocumentsResult() + search_result.results = [SearchResult(additional_properties={"id": "1"})] + + facet_bucket = FacetResult() + facet_bucket.count = 4 + facet_bucket.avg = 120.5 + 
facet_bucket.min = 75.0 + facet_bucket.max = 240.0 + facet_bucket.cardinality = 3 + + search_result.facets = {"baseRate": [facet_bucket]} + mock_search_post.return_value = search_result + + await result.__anext__() + facets = await result.get_facets() + + assert facets is not None + assert "baseRate" in facets + assert len(facets["baseRate"]) == 1 + bucket = facets["baseRate"][0] + assert bucket["count"] == 4 + assert bucket["avg"] == 120.5 + assert bucket["min"] == 75.0 + assert bucket["max"] == 240.0 + assert bucket["cardinality"] == 3 diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_search_client_buffered_sender_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_search_client_buffered_sender_live_async.py index 05ad3b2a6bdd..cddb9b1e30ef 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_search_client_buffered_sender_live_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_search_client_buffered_sender_live_async.py @@ -21,7 +21,9 @@ class TestSearchIndexingBufferedSenderAsync(AzureRecordedTestCase): @search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json") @recorded_by_proxy_async async def test_search_client_index_buffered_sender(self, endpoint, index_name): - client = SearchClient(endpoint, index_name, get_credential(is_async=True), retry_backoff_factor=60) + client = SearchClient( + endpoint, index_name, get_credential(is_async=True), retry_backoff_factor=60 + ) batch_client = SearchIndexingBufferedSender( endpoint, index_name, get_credential(is_async=True), retry_backoff_factor=60 ) @@ -29,13 +31,27 @@ async def test_search_client_index_buffered_sender(self, endpoint, index_name): async with client: async with batch_client: doc_count = 10 - doc_count = await self._test_upload_documents_new(client, batch_client, doc_count) - doc_count = await self._test_upload_documents_existing(client, batch_client, doc_count) - doc_count = await self._test_delete_documents_existing(client, batch_client, doc_count) - doc_count = await self._test_delete_documents_missing(client, batch_client, doc_count) - doc_count = await self._test_merge_documents_existing(client, batch_client, doc_count) - doc_count = await self._test_merge_documents_missing(client, batch_client, doc_count) - doc_count = await self._test_merge_or_upload_documents(client, batch_client, doc_count) + doc_count = await self._test_upload_documents_new( + client, batch_client, doc_count + ) + doc_count = await self._test_upload_documents_existing( + client, batch_client, doc_count + ) + doc_count = await self._test_delete_documents_existing( + client, batch_client, doc_count + ) + doc_count = await self._test_delete_documents_missing( + client, batch_client, doc_count + ) + doc_count = await self._test_merge_documents_existing( + client, batch_client, doc_count + ) + doc_count = await self._test_merge_documents_missing( + client, batch_client, doc_count + ) + doc_count = await self._test_merge_or_upload_documents( + client, batch_client, doc_count + ) finally: await batch_client.close() diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_search_client_index_document_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_search_client_index_document_live_async.py index c021c11c6fb8..4af2cc9698de 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_search_client_index_document_live_async.py +++ 
b/sdk/search/azure-search-documents/tests/async_tests/test_search_client_index_document_live_async.py @@ -21,7 +21,9 @@ class TestSearchClientDocumentsAsync(AzureRecordedTestCase): @search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json") @recorded_by_proxy_async async def test_search_client_index_document(self, endpoint, index_name): - client = SearchClient(endpoint, index_name, get_credential(is_async=True), retry_backoff_factor=60) + client = SearchClient( + endpoint, index_name, get_credential(is_async=True), retry_backoff_factor=60 + ) doc_count = 10 async with client: doc_count = await self._test_upload_documents_new(client, doc_count) diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_search_client_search_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_search_client_search_live_async.py index e7e31c666197..8ac745e79649 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_search_client_search_live_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_search_client_search_live_async.py @@ -18,7 +18,9 @@ class TestClientTestAsync(AzureRecordedTestCase): @search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json") @recorded_by_proxy_async async def test_search_client(self, endpoint, index_name): - client = SearchClient(endpoint, index_name, get_credential(is_async=True), retry_backoff_factor=60) + client = SearchClient( + endpoint, index_name, get_credential(is_async=True), retry_backoff_factor=60 + ) async with client: await self._test_get_search_simple(client) await self._test_get_search_simple_with_top(client) @@ -28,6 +30,7 @@ async def test_search_client(self, endpoint, index_name): await self._test_get_search_coverage(client) await self._test_get_search_facets_none(client) await self._test_get_search_facets_result(client) + await self._test_get_search_facet_metrics(client) await self._test_autocomplete(client) await self._test_suggest(client) @@ -63,7 +66,9 @@ async def _test_get_search_filter(self, client): order_by="hotelName desc", ): results.append(x) - assert [x["hotelName"] for x in results] == sorted([x["hotelName"] for x in results], reverse=True) + assert [x["hotelName"] for x in results] == sorted( + [x["hotelName"] for x in results], reverse=True + ) expected = { "category", "hotelName", @@ -88,7 +93,9 @@ async def _test_get_search_filter_array(self, client): order_by="hotelName desc", ): results.append(x) - assert [x["hotelName"] for x in results] == sorted([x["hotelName"] for x in results], reverse=True) + assert [x["hotelName"] for x in results] == sorted( + [x["hotelName"] for x in results], reverse=True + ) expected = { "category", "hotelName", @@ -126,7 +133,9 @@ async def _test_get_search_facets_none(self, client): async def _test_get_search_facets_result(self, client): select = ("hotelName", "category", "description") - results = await client.search(search_text="WiFi", facets=["category"], select=",".join(select)) + results = await client.search( + search_text="WiFi", facets=["category"], select=",".join(select) + ) assert await results.get_facets() == { "category": [ {"value": "Budget", "count": 4}, @@ -134,6 +143,42 @@ async def _test_get_search_facets_result(self, client): ] } + async def _test_get_search_facet_metrics(self, client): + facets = [ + "rooms/baseRate,metric:sum", + "rooms/baseRate,metric:avg", + "rooms/baseRate,metric:min", + "rooms/baseRate,metric:max,default:0", + "rooms/sleepsCount,metric:cardinality,precisionThreshold:10", + ] 
+ results = await client.search(search_text="*", facets=facets) + facet_payload = await results.get_facets() + assert facet_payload is not None + + base_rate_metrics = facet_payload.get("rooms/baseRate", []) + assert len(base_rate_metrics) == 4 + + observed_metrics = {} + for bucket in base_rate_metrics: + for metric in ("sum", "avg", "min", "max"): + value = bucket.get(metric) + if value is not None: + observed_metrics[metric] = value + + expected_metrics = { + "sum": 27.91, + "avg": 6.9775, + "min": 2.44, + "max": 9.69, + } + for metric, expected in expected_metrics.items(): + assert metric in observed_metrics + assert observed_metrics[metric] == pytest.approx(expected, abs=0.001) + + sleeps_metrics = facet_payload.get("rooms/sleepsCount", []) + assert len(sleeps_metrics) == 1 + assert sleeps_metrics[0].get("cardinality") == 1 + async def _test_autocomplete(self, client): results = await client.autocomplete(search_text="mot", suggester_name="sg") assert results == [{"text": "motel", "query_plus_text": "motel"}] diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_alias_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_alias_live_async.py index 6fdfe85ae2ba..0ebf267545f5 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_alias_live_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_alias_live_async.py @@ -30,7 +30,9 @@ class TestSearchClientAlias(AzureRecordedTestCase): @search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json") @recorded_by_proxy_async async def test_alias(self, endpoint): - client = SearchIndexClient(endpoint, get_credential(is_async=True), retry_backoff_factor=60) + client = SearchIndexClient( + endpoint, get_credential(is_async=True), retry_backoff_factor=60 + ) aliases = ["resort", "motel"] async with client: @@ -42,7 +44,9 @@ async def test_alias(self, endpoint): # point an old alias to a new index new_index_name = "hotel" - await self._test_update_alias_to_new_index(client, aliases[1], new_index_name, index_name) + await self._test_update_alias_to_new_index( + client, aliases[1], new_index_name, index_name + ) await self._test_get_alias(client, aliases) @@ -66,7 +70,9 @@ async def _test_create_or_update_alias(self, client, alias_name, index_name): assert result.name == alias_name assert set(result.indexes) == {index_name} - async def _test_update_alias_to_new_index(self, client, alias_name, new_index, old_index): + async def _test_update_alias_to_new_index( + self, client, alias_name, new_index, old_index + ): await self._create_index(client, new_index) alias = SearchAlias(name=alias_name, indexes=[new_index]) result = await client.create_or_update_alias(alias) diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_async.py index 473555cf29a1..e70080fc9f43 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_async.py @@ -23,8 +23,23 @@ def await_prepared_test(test_fn): @functools.wraps(test_fn) def run(test_class_instance, *args, **kwargs): trim_kwargs_from_test_function(test_fn, kwargs) - loop = asyncio.get_event_loop() - return loop.run_until_complete(test_fn(test_class_instance, **kwargs)) + + try: + loop = asyncio.get_running_loop() + 
except RuntimeError: + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + owns_loop = True + else: + owns_loop = False + + try: + return loop.run_until_complete(test_fn(test_class_instance, **kwargs)) + finally: + if owns_loop: + loop.run_until_complete(loop.shutdown_asyncgens()) + loop.close() + asyncio.set_event_loop(None) return run @@ -58,7 +73,9 @@ def test_get_search_client(self): def test_get_search_client_inherit_api_version(self): credential = AzureKeyCredential(key="old_api_key") - client = SearchIndexClient("endpoint", credential, api_version=ApiVersion.V2020_06_30) + client = SearchIndexClient( + "endpoint", credential, api_version=ApiVersion.V2020_06_30 + ) search_client = client.get_search_client("index") assert isinstance(search_client, SearchClient) assert search_client._api_version == ApiVersion.V2020_06_30 diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_data_source_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_data_source_live_async.py index e5d15a396ff5..a05064f20d4a 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_data_source_live_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_data_source_live_async.py @@ -31,26 +31,34 @@ def _create_data_source_connection(self, cs, name): @recorded_by_proxy_async async def test_data_source(self, endpoint, **kwargs): storage_cs = kwargs.get("search_storage_connection_string") - client = SearchIndexerClient(endpoint, get_credential(is_async=True), retry_backoff_factor=60) + client = SearchIndexerClient( + endpoint, get_credential(is_async=True), retry_backoff_factor=60 + ) async with client: await self._test_create_datasource(client, storage_cs) await self._test_delete_datasource(client, storage_cs) await self._test_get_datasource(client, storage_cs) await self._test_list_datasources(client, storage_cs) await self._test_create_or_update_datasource(client, storage_cs) - await self._test_create_or_update_datasource_if_unchanged(client, storage_cs) + await self._test_create_or_update_datasource_if_unchanged( + client, storage_cs + ) await self._test_delete_datasource_if_unchanged(client, storage_cs) async def _test_create_datasource(self, client, storage_cs): ds_name = "create" - data_source_connection = self._create_data_source_connection(storage_cs, ds_name) + data_source_connection = self._create_data_source_connection( + storage_cs, ds_name + ) result = await client.create_data_source_connection(data_source_connection) assert result.name == ds_name assert result.type == "azureblob" async def _test_delete_datasource(self, client, storage_cs): ds_name = "delete" - data_source_connection = self._create_data_source_connection(storage_cs, ds_name) + data_source_connection = self._create_data_source_connection( + storage_cs, ds_name + ) await client.create_data_source_connection(data_source_connection) expected_count = len(await client.get_data_source_connections()) - 1 await client.delete_data_source_connection(ds_name) @@ -58,23 +66,33 @@ async def _test_delete_datasource(self, client, storage_cs): async def _test_get_datasource(self, client, storage_cs): ds_name = "get" - data_source_connection = self._create_data_source_connection(storage_cs, ds_name) + data_source_connection = self._create_data_source_connection( + storage_cs, ds_name + ) await client.create_data_source_connection(data_source_connection) result = await 
client.get_data_source_connection(ds_name) assert result.name == ds_name async def _test_list_datasources(self, client, storage_cs): - data_source_connection1 = self._create_data_source_connection(storage_cs, "list") - data_source_connection2 = self._create_data_source_connection(storage_cs, "list2") + data_source_connection1 = self._create_data_source_connection( + storage_cs, "list" + ) + data_source_connection2 = self._create_data_source_connection( + storage_cs, "list2" + ) await client.create_data_source_connection(data_source_connection1) await client.create_data_source_connection(data_source_connection2) result = await client.get_data_source_connections() assert isinstance(result, list) - assert set(x.name for x in result).intersection(set(["list", "list2"])) == set(["list", "list2"]) + assert set(x.name for x in result).intersection(set(["list", "list2"])) == set( + ["list", "list2"] + ) async def _test_create_or_update_datasource(self, client, storage_cs): ds_name = "cou" - data_source_connection = self._create_data_source_connection(storage_cs, ds_name) + data_source_connection = self._create_data_source_connection( + storage_cs, ds_name + ) await client.create_data_source_connection(data_source_connection) expected_count = len(await client.get_data_source_connections()) data_source_connection.description = "updated" @@ -86,7 +104,9 @@ async def _test_create_or_update_datasource(self, client, storage_cs): async def _test_create_or_update_datasource_if_unchanged(self, client, storage_cs): ds_name = "couunch" - data_source_connection = self._create_data_source_connection(storage_cs, ds_name) + data_source_connection = self._create_data_source_connection( + storage_cs, ds_name + ) created = await client.create_data_source_connection(data_source_connection) etag = created.e_tag @@ -95,7 +115,9 @@ async def _test_create_or_update_datasource_if_unchanged(self, client, storage_c await client.create_or_update_data_source_connection(data_source_connection) # prepare data source connection - data_source_connection.e_tag = etag # reset to the original data source connection + data_source_connection.e_tag = ( + etag # reset to the original data source connection + ) data_source_connection.description = "changed" with pytest.raises(HttpResponseError): await client.create_or_update_data_source_connection( @@ -104,7 +126,9 @@ async def _test_create_or_update_datasource_if_unchanged(self, client, storage_c async def _test_delete_datasource_if_unchanged(self, client, storage_cs): ds_name = "delunch" - data_source_connection = self._create_data_source_connection(storage_cs, ds_name) + data_source_connection = self._create_data_source_connection( + storage_cs, ds_name + ) created = await client.create_data_source_connection(data_source_connection) etag = created.e_tag @@ -113,7 +137,9 @@ async def _test_delete_datasource_if_unchanged(self, client, storage_cs): await client.create_or_update_data_source_connection(data_source_connection) # prepare data source connection - data_source_connection.e_tag = etag # reset to the original data source connection + data_source_connection.e_tag = ( + etag # reset to the original data source connection + ) with pytest.raises(HttpResponseError): await client.delete_data_source_connection( data_source_connection, match_condition=MatchConditions.IfNotModified diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_live_async.py index 
8afea2e16425..ba77658de89b 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_live_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_live_async.py @@ -1,3 +1,5 @@ +from datetime import timedelta + # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for @@ -15,7 +17,11 @@ from azure.search.documents.indexes.models import ( AnalyzeTextOptions, CorsOptions, + FreshnessScoringFunction, + FreshnessScoringParameters, + SearchField, SearchIndex, + ScoringFunctionAggregation, ScoringProfile, SimpleField, SearchFieldDataType, @@ -27,7 +33,9 @@ class TestSearchIndexClientAsync(AzureRecordedTestCase): @search_decorator(schema=None, index_batch=None) @recorded_by_proxy_async async def test_search_index_client(self, endpoint, index_name): - client = SearchIndexClient(endpoint, get_credential(is_async=True), retry_backoff_factor=60) + client = SearchIndexClient( + endpoint, get_credential(is_async=True), retry_backoff_factor=60 + ) index_name = "hotels" async with client: await self._test_get_service_statistics(client) @@ -117,7 +125,9 @@ async def _test_delete_indexes_if_unchanged(self, client): index.e_tag = etag with pytest.raises(HttpResponseError): - await client.delete_index(index, match_condition=MatchConditions.IfNotModified) + await client.delete_index( + index, match_condition=MatchConditions.IfNotModified + ) async def _test_create_or_update_index(self, client): name = "hotels-cou" @@ -177,10 +187,14 @@ async def _test_create_or_update_indexes_if_unchanged(self, client): index.e_tag = etag with pytest.raises(HttpResponseError): - await client.create_or_update_index(index, match_condition=MatchConditions.IfNotModified) + await client.create_or_update_index( + index, match_condition=MatchConditions.IfNotModified + ) async def _test_analyze_text(self, client, index_name): - analyze_request = AnalyzeTextOptions(text="One's ", analyzer_name="standard.lucene") + analyze_request = AnalyzeTextOptions( + text="One's ", analyzer_name="standard.lucene" + ) result = await client.analyze_text(index_name, analyze_request) assert len(result.tokens) == 2 @@ -188,3 +202,128 @@ async def _test_delete_indexes(self, client): result = client.list_indexes() async for index in result: await client.delete_index(index.name) + + @SearchEnvVarPreparer() + @recorded_by_proxy_async + async def test_purview_enabled_index( + self, search_service_endpoint, search_service_name + ): + del search_service_name # unused + endpoint = search_service_endpoint + client = SearchIndexClient( + endpoint, get_credential(is_async=True), retry_backoff_factor=60 + ) + + index_name = self.get_resource_name("purview-index") + fields = [ + SearchField( + name="id", + type=SearchFieldDataType.String, + key=True, + filterable=True, + sortable=True, + ), + SearchField( + name="sensitivityLabel", + type=SearchFieldDataType.String, + filterable=True, + sensitivity_label=True, + ), + ] + index = SearchIndex(name=index_name, fields=fields, purview_enabled=True) + + async with client: + created = await client.create_index(index) + try: + assert created.purview_enabled is True + for field in created.fields: + if field.name == "sensitivityLabel": + assert field.sensitivity_label is True + break + else: + raise AssertionError( + "Expected sensitivityLabel field to be present" + ) + + fetched = await 
client.get_index(index_name) + assert fetched.purview_enabled is True + for field in fetched.fields: + if field.name == "sensitivityLabel": + assert field.sensitivity_label is True + break + else: + raise AssertionError( + "Expected sensitivityLabel field to be present" + ) + finally: + try: + await client.delete_index(index_name) + except HttpResponseError: + pass + + @SearchEnvVarPreparer() + @recorded_by_proxy_async + async def test_scoring_profile_product_aggregation( + self, search_service_endpoint, search_service_name + ): + del search_service_name # unused + endpoint = search_service_endpoint + client = SearchIndexClient( + endpoint, get_credential(is_async=True), retry_backoff_factor=60 + ) + + index_name = self.get_resource_name("agg-product") + fields = [ + SimpleField(name="hotelId", type=SearchFieldDataType.String, key=True), + SimpleField( + name="lastUpdated", + type=SearchFieldDataType.DateTimeOffset, + filterable=True, + ), + ] + scoring_profile = ScoringProfile( + name="product-score", + function_aggregation=ScoringFunctionAggregation.PRODUCT, + functions=[ + FreshnessScoringFunction( + field_name="lastUpdated", + boost=2.5, + parameters=FreshnessScoringParameters( + boosting_duration=timedelta(days=7) + ), + ) + ], + ) + index = SearchIndex( + name=index_name, fields=fields, scoring_profiles=[scoring_profile] + ) + + async with client: + created = await client.create_index(index) + try: + assert ( + created.scoring_profiles[0].function_aggregation + == ScoringFunctionAggregation.PRODUCT + ) + + fetched = await client.get_index(index_name) + assert ( + fetched.scoring_profiles[0].function_aggregation + == ScoringFunctionAggregation.PRODUCT + ) + + fetched.scoring_profiles[0].function_aggregation = ( + ScoringFunctionAggregation.SUM + ) + await client.create_or_update_index(index=fetched) + + updated = await client.get_index(index_name) + assert ( + updated.scoring_profiles[0].function_aggregation + == ScoringFunctionAggregation.SUM + ) + finally: + try: + await client.delete_index(index_name) + except HttpResponseError: + pass diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_skillset_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_skillset_live_async.py index 97308ba3ad33..b9e19a8a45dd 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_skillset_live_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_skillset_live_async.py @@ -29,7 +29,9 @@ class TestSearchClientSkillsets(AzureRecordedTestCase): @search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json") @recorded_by_proxy_async async def test_skillset_crud(self, endpoint): - client = SearchIndexerClient(endpoint, get_credential(is_async=True), retry_backoff_factor=60) + client = SearchIndexerClient( + endpoint, get_credential(is_async=True), retry_backoff_factor=60 + ) async with client: await self._test_create_skillset(client) await self._test_get_skillset(client) @@ -45,7 +47,11 @@ async def _test_create_skillset(self, client): s1 = EntityRecognitionSkill( name="skill1", inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizationsS1")], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizationsS1" + ) + ], description="Skill Version 1", model_version="1", include_typeless_entities=True, @@ -54,7 +60,11 @@ async def 
_test_create_skillset(self, client): s2 = EntityRecognitionSkill( name="skill2", inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizationsS2")], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizationsS2" + ) + ], skill_version=EntityRecognitionSkillVersion.LATEST, description="Skill Version 3", model_version="3", @@ -72,7 +82,9 @@ async def _test_create_skillset(self, client): s4 = SentimentSkill( name="skill4", inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="confidenceScores", target_name="scoreS4")], + outputs=[ + OutputFieldMappingEntry(name="confidenceScores", target_name="scoreS4") + ], skill_version=SentimentSkillVersion.V3, description="Sentiment V3", include_opinion_mining=True, @@ -81,11 +93,15 @@ async def _test_create_skillset(self, client): s5 = EntityLinkingSkill( name="skill5", inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="entities", target_name="entitiesS5")], + outputs=[ + OutputFieldMappingEntry(name="entities", target_name="entitiesS5") + ], minimum_precision=0.5, ) - skillset = SearchIndexerSkillset(name=name, skills=list([s1, s2, s3, s4, s5]), description="desc") + skillset = SearchIndexerSkillset( + name=name, skills=list([s1, s2, s3, s4, s5]), description="desc" + ) result = await client.create_skillset(skillset) assert isinstance(result, SearchIndexerSkillset) @@ -111,9 +127,15 @@ async def _test_get_skillset(self, client): name = "test-ss-get" s = EntityRecognitionSkill( inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizations" + ) + ], + ) + skillset = SearchIndexerSkillset( + name=name, skills=list([s]), description="desc" ) - skillset = SearchIndexerSkillset(name=name, skills=list([s]), description="desc") await client.create_skillset(skillset) result = await client.get_skillset(name) assert isinstance(result, SearchIndexerSkillset) @@ -128,29 +150,47 @@ async def _test_get_skillsets(self, client): name2 = "test-ss-list-2" s = EntityRecognitionSkill( inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizations" + ) + ], ) - skillset1 = SearchIndexerSkillset(name=name1, skills=list([s]), description="desc1") + skillset1 = SearchIndexerSkillset( + name=name1, skills=list([s]), description="desc1" + ) await client.create_skillset(skillset1) - skillset2 = SearchIndexerSkillset(name=name2, skills=list([s]), description="desc2") + skillset2 = SearchIndexerSkillset( + name=name2, skills=list([s]), description="desc2" + ) await client.create_skillset(skillset2) result = await client.get_skillsets() assert isinstance(result, list) assert all(isinstance(x, SearchIndexerSkillset) for x in result) - assert set(x.name for x in result).intersection([name1, name2]) == set([name1, name2]) + assert set(x.name for x in result).intersection([name1, name2]) == set( + [name1, name2] + ) async def _test_create_or_update_skillset(self, client): name = "test-ss-create-or-update" s = EntityRecognitionSkill( 
inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizations" + ) + ], ) - skillset1 = SearchIndexerSkillset(name=name, skills=list([s]), description="desc1") + skillset1 = SearchIndexerSkillset( + name=name, skills=list([s]), description="desc1" + ) await client.create_or_update_skillset(skillset1) expected_count = len(await client.get_skillsets()) - skillset2 = SearchIndexerSkillset(name=name, skills=list([s]), description="desc2") + skillset2 = SearchIndexerSkillset( + name=name, skills=list([s]), description="desc2" + ) await client.create_or_update_skillset(skillset2) assert len(await client.get_skillsets()) == expected_count @@ -163,13 +203,21 @@ async def _test_create_or_update_skillset_inplace(self, client): name = "test-ss-create-or-update-inplace" s = EntityRecognitionSkill( inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizations" + ) + ], ) - skillset1 = SearchIndexerSkillset(name=name, skills=list([s]), description="desc1") + skillset1 = SearchIndexerSkillset( + name=name, skills=list([s]), description="desc1" + ) ss = await client.create_or_update_skillset(skillset1) expected_count = len(await client.get_skillsets()) - skillset2 = SearchIndexerSkillset(name=name, skills=[s], description="desc2", skillset=ss) + skillset2 = SearchIndexerSkillset( + name=name, skills=[s], description="desc2", skillset=ss + ) await client.create_or_update_skillset(skillset2) assert len(await client.get_skillsets()) == expected_count @@ -182,34 +230,52 @@ async def _test_create_or_update_skillset_if_unchanged(self, client): name = "test-ss-create-or-update-unchanged" s = EntityRecognitionSkill( inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizations" + ) + ], ) - skillset1 = SearchIndexerSkillset(name=name, skills=list([s]), description="desc1") + skillset1 = SearchIndexerSkillset( + name=name, skills=list([s]), description="desc1" + ) ss = await client.create_or_update_skillset(skillset1) ss.e_tag = "changed_etag" with pytest.raises(HttpResponseError): - await client.create_or_update_skillset(ss, match_condition=MatchConditions.IfNotModified) + await client.create_or_update_skillset( + ss, match_condition=MatchConditions.IfNotModified + ) async def _test_delete_skillset_if_unchanged(self, client): name = "test-ss-deleted-unchanged" s = EntityRecognitionSkill( inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizations" + ) + ], ) - skillset = SearchIndexerSkillset(name=name, skills=list([s]), description="desc") + skillset = SearchIndexerSkillset( + name=name, skills=list([s]), description="desc" + ) result = await client.create_skillset(skillset) etag = result.e_tag - skillset1 = SearchIndexerSkillset(name=name, skills=list([s]), description="updated") + skillset1 = SearchIndexerSkillset( + name=name, skills=list([s]), 
description="updated" + ) updated = await client.create_or_update_skillset(skillset1) updated.e_tag = etag with pytest.raises(HttpResponseError): - await client.delete_skillset(updated, match_condition=MatchConditions.IfNotModified) + await client.delete_skillset( + updated, match_condition=MatchConditions.IfNotModified + ) async def _test_delete_skillset(self, client): result = await client.get_skillset_names() diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_synonym_map_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_synonym_map_live_async.py index 41a92e92f91c..044ff088f61d 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_synonym_map_live_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_search_index_client_synonym_map_live_async.py @@ -21,7 +21,9 @@ class TestSearchClientSynonymMaps(AzureRecordedTestCase): @search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json") @recorded_by_proxy_async async def test_synonym_map(self, endpoint): - client = SearchIndexClient(endpoint, get_credential(is_async=True), retry_backoff_factor=60) + client = SearchIndexClient( + endpoint, get_credential(is_async=True), retry_backoff_factor=60 + ) async with client: await self._test_create_synonym_map(client) await self._test_delete_synonym_map(client) @@ -79,7 +81,9 @@ async def _test_delete_synonym_map_if_unchanged(self, client): result.e_tag = etag with pytest.raises(HttpResponseError): - await client.delete_synonym_map(result, match_condition=MatchConditions.IfNotModified) + await client.delete_synonym_map( + result, match_condition=MatchConditions.IfNotModified + ) await client.delete_synonym_map(name) async def _test_get_synonym_map(self, client): diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_search_indexer_client_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_search_indexer_client_live_async.py index 31cc3aa6f6a5..fc72833f6a34 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_search_indexer_client_live_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_search_indexer_client_live_async.py @@ -28,22 +28,48 @@ class TestSearchIndexerClientTestAsync(AzureRecordedTestCase): async def test_search_indexers(self, endpoint, **kwargs): storage_cs = kwargs.get("search_storage_connection_string") container_name = kwargs.get("search_storage_container_name") - client = SearchIndexerClient(endpoint, get_credential(is_async=True), retry_backoff_factor=60) - index_client = SearchIndexClient(endpoint, get_credential(is_async=True), retry_backoff_factor=60) + client = SearchIndexerClient( + endpoint, get_credential(is_async=True), retry_backoff_factor=60 + ) + index_client = SearchIndexClient( + endpoint, get_credential(is_async=True), retry_backoff_factor=60 + ) async with client: async with index_client: - await self._test_create_indexer(client, index_client, storage_cs, container_name) - await self._test_delete_indexer(client, index_client, storage_cs, container_name) - await self._test_get_indexer(client, index_client, storage_cs, container_name) - await self._test_list_indexer(client, index_client, storage_cs, container_name) - await self._test_create_or_update_indexer(client, index_client, storage_cs, container_name) - await self._test_reset_indexer(client, index_client, storage_cs, container_name) - await self._test_run_indexer(client, index_client, 
storage_cs, container_name)
-        await self._test_get_indexer_status(client, index_client, storage_cs, container_name)
-        await self._test_create_or_update_indexer_if_unchanged(client, index_client, storage_cs, container_name)
-        await self._test_delete_indexer_if_unchanged(client, index_client, storage_cs, container_name)
-
-    async def _prepare_indexer(self, client, index_client, storage_cs, name, container_name):
+        await self._test_create_indexer(
+            client, index_client, storage_cs, container_name
+        )
+        await self._test_delete_indexer(
+            client, index_client, storage_cs, container_name
+        )
+        await self._test_get_indexer(
+            client, index_client, storage_cs, container_name
+        )
+        await self._test_list_indexer(
+            client, index_client, storage_cs, container_name
+        )
+        await self._test_create_or_update_indexer(
+            client, index_client, storage_cs, container_name
+        )
+        await self._test_reset_indexer(
+            client, index_client, storage_cs, container_name
+        )
+        await self._test_run_indexer(
+            client, index_client, storage_cs, container_name
+        )
+        await self._test_get_indexer_status(
+            client, index_client, storage_cs, container_name
+        )
+        await self._test_create_or_update_indexer_if_unchanged(
+            client, index_client, storage_cs, container_name
+        )
+        await self._test_delete_indexer_if_unchanged(
+            client, index_client, storage_cs, container_name
+        )
+
+    async def _prepare_indexer(
+        self, client, index_client, storage_cs, name, container_name
+    ):
         data_source_connection = SearchIndexerDataSourceConnection(
             name=f"{name}-ds",
             type="azureblob",
@@ -52,22 +78,34 @@ async def _prepare_indexer(self, client, index_client, storage_cs, name, contain
         )
         ds = await client.create_data_source_connection(data_source_connection)
-        fields = [{"name": "hotelId", "type": "Edm.String", "key": True, "searchable": False}]
+        fields = [
+            {"name": "hotelId", "type": "Edm.String", "key": True, "searchable": False}
+        ]
         index = SearchIndex(name=f"{name}-hotels", fields=fields)
         ind = await index_client.create_index(index)
-        return SearchIndexer(name=name, data_source_name=ds.name, target_index_name=ind.name)
+        return SearchIndexer(
+            name=name, data_source_name=ds.name, target_index_name=ind.name
+        )

-    async def _test_create_indexer(self, client, index_client, storage_cs, container_name):
+    async def _test_create_indexer(
+        self, client, index_client, storage_cs, container_name
+    ):
         name = "create"
-        indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
+        indexer = await self._prepare_indexer(
+            client, index_client, storage_cs, name, container_name
+        )
         result = await client.create_indexer(indexer)
         assert result.name == name
         assert result.target_index_name == f"{name}-hotels"
         assert result.data_source_name == f"{name}-ds"

-    async def _test_delete_indexer(self, client, index_client, storage_cs, container_name):
+    async def _test_delete_indexer(
+        self, client, index_client, storage_cs, container_name
+    ):
         name = "delete"
-        indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
+        indexer = await self._prepare_indexer(
+            client, index_client, storage_cs, name, container_name
+        )
         await client.create_indexer(indexer)
         expected = len(await client.get_indexers()) - 1
         await client.delete_indexer(name)
@@ -75,25 +113,39 @@ async def _test_delete_indexer(self, client, index_client, storage_cs, container

     async def _test_get_indexer(self, client, index_client, storage_cs, container_name):
         name = "get"
-        indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
+        indexer = await self._prepare_indexer(
+            client, index_client, storage_cs, name, container_name
+        )
         await client.create_indexer(indexer)
         result = await client.get_indexer(name)
         assert result.name == name

-    async def _test_list_indexer(self, client, index_client, storage_cs, container_name):
+    async def _test_list_indexer(
+        self, client, index_client, storage_cs, container_name
+    ):
         name1 = "list1"
         name2 = "list2"
-        indexer1 = await self._prepare_indexer(client, index_client, storage_cs, name1, container_name)
-        indexer2 = await self._prepare_indexer(client, index_client, storage_cs, name2, container_name)
+        indexer1 = await self._prepare_indexer(
+            client, index_client, storage_cs, name1, container_name
+        )
+        indexer2 = await self._prepare_indexer(
+            client, index_client, storage_cs, name2, container_name
+        )
         await client.create_indexer(indexer1)
         await client.create_indexer(indexer2)
         result = await client.get_indexers()
         assert isinstance(result, list)
-        assert set(x.name for x in result).intersection([name1, name2]) == set([name1, name2])
+        assert set(x.name for x in result).intersection([name1, name2]) == set(
+            [name1, name2]
+        )

-    async def _test_create_or_update_indexer(self, client, index_client, storage_cs, container_name):
+    async def _test_create_or_update_indexer(
+        self, client, index_client, storage_cs, container_name
+    ):
         name = "cou"
-        indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
+        indexer = await self._prepare_indexer(
+            client, index_client, storage_cs, name, container_name
+        )
         await client.create_indexer(indexer)
         expected = len(await client.get_indexers())
         indexer.description = "updated"
@@ -103,9 +155,13 @@ async def _test_create_or_update_indexer(self, client, index_client, storage_cs,
         assert result.name == name
         assert result.description == "updated"

-    async def _test_reset_indexer(self, client, index_client, storage_cs, container_name):
+    async def _test_reset_indexer(
+        self, client, index_client, storage_cs, container_name
+    ):
         name = "reset"
-        indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
+        indexer = await self._prepare_indexer(
+            client, index_client, storage_cs, name, container_name
+        )
         await client.create_indexer(indexer)
         await client.reset_indexer(name)
         assert (await client.get_indexer_status(name)).last_result.status.lower() in (
@@ -115,21 +171,31 @@ async def _test_reset_indexer(self, client, index_client, storage_cs, container_
     async def _test_run_indexer(self, client, index_client, storage_cs, container_name):
         name = "run"
-        indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
+        indexer = await self._prepare_indexer(
+            client, index_client, storage_cs, name, container_name
+        )
         await client.create_indexer(indexer)
         await client.run_indexer(name)
         assert (await client.get_indexer_status(name)).status == "running"

-    async def _test_get_indexer_status(self, client, index_client, storage_cs, container_name):
+    async def _test_get_indexer_status(
+        self, client, index_client, storage_cs, container_name
+    ):
         name = "get-status"
-        indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
+        indexer = await self._prepare_indexer(
+            client, index_client, storage_cs, name, container_name
+        )
         await client.create_indexer(indexer)
         status = await client.get_indexer_status(name)
         assert status.status is not None

-    async def _test_create_or_update_indexer_if_unchanged(self, client, index_client, storage_cs, container_name):
+    async def _test_create_or_update_indexer_if_unchanged(
+        self, client, index_client, storage_cs, container_name
+    ):
         name = "couunch"
-        indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
+        indexer = await self._prepare_indexer(
+            client, index_client, storage_cs, name, container_name
+        )
         created = await client.create_indexer(indexer)
         etag = created.e_tag
@@ -138,11 +204,17 @@ async def _test_create_or_update_indexer_if_unchanged(self, client, index_client
         indexer.e_tag = etag
         with pytest.raises(HttpResponseError):
-            await client.create_or_update_indexer(indexer, match_condition=MatchConditions.IfNotModified)
+            await client.create_or_update_indexer(
+                indexer, match_condition=MatchConditions.IfNotModified
+            )

-    async def _test_delete_indexer_if_unchanged(self, client, index_client, storage_cs, container_name):
+    async def _test_delete_indexer_if_unchanged(
+        self, client, index_client, storage_cs, container_name
+    ):
         name = "delunch"
-        indexer = await self._prepare_indexer(client, index_client, storage_cs, name, container_name)
+        indexer = await self._prepare_indexer(
+            client, index_client, storage_cs, name, container_name
+        )
         result = await client.create_indexer(indexer)
         etag = result.e_tag
@@ -151,4 +223,6 @@ async def _test_delete_indexer_if_unchanged(self, client, index_client, storage_
         indexer.e_tag = etag
         with pytest.raises(HttpResponseError):
-            await client.delete_indexer(indexer, match_condition=MatchConditions.IfNotModified)
+            await client.delete_indexer(
+                indexer, match_condition=MatchConditions.IfNotModified
+            )
diff --git a/sdk/search/azure-search-documents/tests/conftest.py b/sdk/search/azure-search-documents/tests/conftest.py
index 0ecac854421a..4bb3d4315025 100644
--- a/sdk/search/azure-search-documents/tests/conftest.py
+++ b/sdk/search/azure-search-documents/tests/conftest.py
@@ -30,7 +30,9 @@ def add_sanitizers(test_proxy):
     # Remove storage connection strings from recordings
     add_general_regex_sanitizer(value="AccountKey=FAKE;", regex=r"AccountKey=([^;]+);")
     # Remove storage account names from recordings
-    add_general_regex_sanitizer(value="AccountName=fakestoragecs;", regex=r"AccountName=([^;]+);")
+    add_general_regex_sanitizer(
+        value="AccountName=fakestoragecs;", regex=r"AccountName=([^;]+);"
+    )
    # Remove the following sanitizers since certain fields are needed in tests and are non-sensitive:
    # - AZSDK3493: $..name
    remove_batch_sanitizers(["AZSDK3493"])
diff --git a/sdk/search/azure-search-documents/tests/perfstress_tests/autocomplete.py b/sdk/search/azure-search-documents/tests/perfstress_tests/autocomplete.py
index ccd2a6a595f6..78066af8e220 100644
--- a/sdk/search/azure-search-documents/tests/perfstress_tests/autocomplete.py
+++ b/sdk/search/azure-search-documents/tests/perfstress_tests/autocomplete.py
@@ -18,8 +18,12 @@ def __init__(self, arguments):
         service_endpoint = os.getenv("AZURE_SEARCH_SERVICE_ENDPOINT")
         index_name = os.getenv("AZURE_SEARCH_INDEX_NAME")
         key = os.getenv("AZURE_SEARCH_API_KEY")
-        self.service_client = SyncClient(service_endpoint, index_name, AzureKeyCredential(api_key))
-        self.async_service_client = AsyncClient(service_endpoint, index_name, AzureKeyCredential(api_key))
+        self.service_client = SyncClient(
+            service_endpoint, index_name, AzureKeyCredential(api_key)
+        )
+        self.async_service_client = AsyncClient(
+            service_endpoint, index_name, AzureKeyCredential(api_key)
+        )

     async def close(self):
await self.async_service_client.close() @@ -38,15 +42,23 @@ def add_arguments(parser): def run_sync(self): if self.args.num_documents == -1: - results = len(self.service_client.autocomplete(search_text="mot", suggester_name="sg")) + results = len( + self.service_client.autocomplete(search_text="mot", suggester_name="sg") + ) else: results = len( - self.service_client.autocomplete(search_text="mot", suggester_name="sg", top=self.args.num_documents) + self.service_client.autocomplete( + search_text="mot", suggester_name="sg", top=self.args.num_documents + ) ) async def run_async(self): if self.args.num_documents == -1: - results = len(await self.async_service_client.autocomplete(search_text="mot", suggester_name="sg")) + results = len( + await self.async_service_client.autocomplete( + search_text="mot", suggester_name="sg" + ) + ) else: results = len( await self.async_service_client.autocomplete( diff --git a/sdk/search/azure-search-documents/tests/perfstress_tests/search_documents.py b/sdk/search/azure-search-documents/tests/perfstress_tests/search_documents.py index 19fc00efec37..2620c7301cba 100644 --- a/sdk/search/azure-search-documents/tests/perfstress_tests/search_documents.py +++ b/sdk/search/azure-search-documents/tests/perfstress_tests/search_documents.py @@ -19,8 +19,12 @@ def __init__(self, arguments): service_endpoint = os.getenv("AZURE_SEARCH_SERVICE_ENDPOINT") index_name = os.getenv("AZURE_SEARCH_INDEX_NAME") key = os.getenv("AZURE_SEARCH_API_KEY") - self.service_client = SyncClient(service_endpoint, index_name, AzureKeyCredential(api_key)) - self.async_service_client = AsyncClient(service_endpoint, index_name, AzureKeyCredential(api_key)) + self.service_client = SyncClient( + service_endpoint, index_name, AzureKeyCredential(api_key) + ) + self.async_service_client = AsyncClient( + service_endpoint, index_name, AzureKeyCredential(api_key) + ) @staticmethod def add_arguments(parser): @@ -44,13 +48,19 @@ def run_sync(self): if self.args.num_documents == -1: results = len(self.service_client.search(search_text="luxury")) else: - results = len(self.service_client.search(search_text="luxury", top=self.args.num_documents)) + results = len( + self.service_client.search( + search_text="luxury", top=self.args.num_documents + ) + ) async def run_async(self): if self.args.num_documents == -1: results = await self.async_service_client.search(search_text="luxury") else: - results = await self.async_service_client.search(search_text="luxury", top=self.args.num_documents) + results = await self.async_service_client.search( + search_text="luxury", top=self.args.num_documents + ) count = 0 async for result in results: count += count diff --git a/sdk/search/azure-search-documents/tests/perfstress_tests/suggest.py b/sdk/search/azure-search-documents/tests/perfstress_tests/suggest.py index d906ba271ef8..8c4653baf5cd 100644 --- a/sdk/search/azure-search-documents/tests/perfstress_tests/suggest.py +++ b/sdk/search/azure-search-documents/tests/perfstress_tests/suggest.py @@ -18,8 +18,12 @@ def __init__(self, arguments): service_endpoint = os.getenv("AZURE_SEARCH_SERVICE_ENDPOINT") index_name = os.getenv("AZURE_SEARCH_INDEX_NAME") key = os.getenv("AZURE_SEARCH_API_KEY") - self.service_client = SyncClient(service_endpoint, index_name, AzureKeyCredential(api_key)) - self.async_service_client = AsyncClient(service_endpoint, index_name, AzureKeyCredential(api_key)) + self.service_client = SyncClient( + service_endpoint, index_name, AzureKeyCredential(api_key) + ) + self.async_service_client = AsyncClient( 
+ service_endpoint, index_name, AzureKeyCredential(api_key) + ) async def close(self): await self.async_service_client.close() @@ -38,15 +42,23 @@ def add_arguments(parser): def run_sync(self): if self.args.num_documents == -1: - results = len(self.service_client.suggest(search_text="mot", suggester_name="sg")) + results = len( + self.service_client.suggest(search_text="mot", suggester_name="sg") + ) else: results = len( - self.service_client.suggest(search_text="mot", suggester_name="sg", top=self.args.num_documents) + self.service_client.suggest( + search_text="mot", suggester_name="sg", top=self.args.num_documents + ) ) async def run_async(self): if self.args.num_documents == -1: - results = len(await self.async_service_client.suggest(search_text="mot", suggester_name="sg")) + results = len( + await self.async_service_client.suggest( + search_text="mot", suggester_name="sg" + ) + ) else: results = len( await self.async_service_client.suggest( diff --git a/sdk/search/azure-search-documents/tests/search_service_preparer.py b/sdk/search/azure-search-documents/tests/search_service_preparer.py index d4de15e3b660..eb66cb0f28af 100644 --- a/sdk/search/azure-search-documents/tests/search_service_preparer.py +++ b/sdk/search/azure-search-documents/tests/search_service_preparer.py @@ -75,7 +75,10 @@ def _clean_up_indexers(endpoint, cred): for skillset in client.get_skillset_names(): client.delete_skillset(skillset) except HttpResponseError as ex: - if "skillset related operations are not enabled in this region" in ex.message.lower(): + if ( + "skillset related operations are not enabled in this region" + in ex.message.lower() + ): pass else: raise diff --git a/sdk/search/azure-search-documents/tests/test_buffered_sender.py b/sdk/search/azure-search-documents/tests/test_buffered_sender.py index 5af0b3d5efbf..48819e0ae458 100644 --- a/sdk/search/azure-search-documents/tests/test_buffered_sender.py +++ b/sdk/search/azure-search-documents/tests/test_buffered_sender.py @@ -16,14 +16,18 @@ class TestSearchBatchingClient: def test_search_indexing_buffered_sender_kwargs(self): - with SearchIndexingBufferedSender("endpoint", "index name", CREDENTIAL, window=100) as client: + with SearchIndexingBufferedSender( + "endpoint", "index name", CREDENTIAL, window=100 + ) as client: assert client._batch_action_count == 512 assert client._max_retries_per_action == 3 assert client._auto_flush_interval == 60 assert client._auto_flush def test_batch_queue(self): - with SearchIndexingBufferedSender("endpoint", "index name", CREDENTIAL, auto_flush=False) as client: + with SearchIndexingBufferedSender( + "endpoint", "index name", CREDENTIAL, auto_flush=False + ) as client: assert client._index_documents_batch client.upload_documents(["upload1"]) client.delete_documents(["delete1", "delete2"]) @@ -44,14 +48,20 @@ def test_batch_queue(self): "azure.search.documents._search_indexing_buffered_sender.SearchIndexingBufferedSender._process_if_needed" ) def test_process_if_needed(self, mock_process_if_needed): - with SearchIndexingBufferedSender("endpoint", "index name", CREDENTIAL) as client: + with SearchIndexingBufferedSender( + "endpoint", "index name", CREDENTIAL + ) as client: client.upload_documents(["upload1"]) client.delete_documents(["delete1", "delete2"]) assert mock_process_if_needed.called - @mock.patch("azure.search.documents._search_indexing_buffered_sender.SearchIndexingBufferedSender._cleanup") + @mock.patch( + "azure.search.documents._search_indexing_buffered_sender.SearchIndexingBufferedSender._cleanup" + ) 
def test_context_manager(self, mock_cleanup): - with SearchIndexingBufferedSender("endpoint", "index name", CREDENTIAL, auto_flush=False) as client: + with SearchIndexingBufferedSender( + "endpoint", "index name", CREDENTIAL, auto_flush=False + ) as client: client.upload_documents(["upload1"]) client.delete_documents(["delete1", "delete2"]) assert mock_cleanup.called @@ -69,7 +79,9 @@ def test_flush(self): "_index_documents_actions", side_effect=HttpResponseError("Error"), ): - with SearchIndexingBufferedSender("endpoint", "index name", CREDENTIAL, auto_flush=False) as client: + with SearchIndexingBufferedSender( + "endpoint", "index name", CREDENTIAL, auto_flush=False + ) as client: client._index_key = "hotelId" client.upload_documents([DOCUMENT]) client.flush() diff --git a/sdk/search/azure-search-documents/tests/test_index_documents_batch.py b/sdk/search/azure-search-documents/tests/test_index_documents_batch.py index c9d8c488d1ce..5de862828f1f 100644 --- a/sdk/search/azure-search-documents/tests/test_index_documents_batch.py +++ b/sdk/search/azure-search-documents/tests/test_index_documents_batch.py @@ -62,7 +62,9 @@ def test_add_method(self, method_name): method(("doc6", "doc7")) assert len(batch.actions) == 7 - assert all(action.action_type == METHOD_MAP[method_name] for action in batch.actions) + assert all( + action.action_type == METHOD_MAP[method_name] for action in batch.actions + ) assert all(type(action) == IndexAction for action in batch.actions) expected = ["doc{}".format(i) for i in range(1, 8)] diff --git a/sdk/search/azure-search-documents/tests/test_index_field_helpers.py b/sdk/search/azure-search-documents/tests/test_index_field_helpers.py index 8e33f161af67..622e4fdde4f0 100644 --- a/sdk/search/azure-search-documents/tests/test_index_field_helpers.py +++ b/sdk/search/azure-search-documents/tests/test_index_field_helpers.py @@ -44,7 +44,9 @@ def test_single(self): def test_collection(self): fld = ComplexField(name="foo", fields=[], collection=True) assert fld.name == "foo" - assert fld.type == SearchFieldDataType.Collection(SearchFieldDataType.ComplexType) + assert fld.type == SearchFieldDataType.Collection( + SearchFieldDataType.ComplexType + ) assert fld.sortable is None assert fld.facetable is None diff --git a/sdk/search/azure-search-documents/tests/test_knowledge_base_configuration_live.py b/sdk/search/azure-search-documents/tests/test_knowledge_base_configuration_live.py new file mode 100644 index 000000000000..5691bc1fd9eb --- /dev/null +++ b/sdk/search/azure-search-documents/tests/test_knowledge_base_configuration_live.py @@ -0,0 +1,163 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +from __future__ import annotations + +import pytest + +from azure.core import MatchConditions +from azure.core.exceptions import HttpResponseError +from devtools_testutils import AzureRecordedTestCase, recorded_by_proxy, get_credential + +from azure.search.documents.indexes import SearchIndexClient +from azure.search.documents.indexes.models import ( + KnowledgeBase, + KnowledgeRetrievalMediumReasoningEffort, + KnowledgeRetrievalMinimalReasoningEffort, + KnowledgeSourceReference, + WebKnowledgeSource, + WebKnowledgeSourceDomain, + WebKnowledgeSourceDomains, + WebKnowledgeSourceParameters, +) + +from search_service_preparer import SearchEnvVarPreparer, search_decorator + + +class _TestContext: + def __init__( + self, + index_client: SearchIndexClient, + source_name: str, + created_source: WebKnowledgeSource, + base_name: str, + created_base: KnowledgeBase, + ) -> None: + self.index_client = index_client + self.source_name = source_name + self.created_source = created_source + self.base_name = base_name + self.created_base = created_base + + +class TestKnowledgeBaseConfigurationLive(AzureRecordedTestCase): + def _create_context(self, endpoint: str) -> "_TestContext": + credential = get_credential() + index_client = SearchIndexClient(endpoint, credential, retry_backoff_factor=60) + + source_name = self.get_resource_name("cfgks") + create_source = WebKnowledgeSource( + name=source_name, + description="configuration source", + web_parameters=WebKnowledgeSourceParameters( + domains=WebKnowledgeSourceDomains( + allowed_domains=[ + WebKnowledgeSourceDomain( + address="https://learn.microsoft.com", + include_subpages=True, + ) + ] + ) + ), + ) + base_name = self.get_resource_name("cfgkb") + + # best-effort cleanup in case a previous run failed before teardown + try: + index_client.delete_knowledge_base(base_name) + except HttpResponseError: + pass + try: + index_client.delete_knowledge_source(source_name) + except HttpResponseError: + pass + + created_source = index_client.create_knowledge_source(create_source) + + create_base = KnowledgeBase( + name=base_name, + description="configurable knowledge base", + knowledge_sources=[KnowledgeSourceReference(name=source_name)], + retrieval_reasoning_effort=KnowledgeRetrievalMinimalReasoningEffort(), + output_mode="extractiveData", + ) + + try: + created_base = index_client.create_knowledge_base(create_base) + except HttpResponseError: + # creation failed; remove the knowledge source created above before raising + try: + index_client.delete_knowledge_source(created_source) + except HttpResponseError: + pass + raise + + return _TestContext( + index_client, source_name, created_source, base_name, created_base + ) + + def _cleanup(self, ctx: "_TestContext") -> None: + try: + try: + ctx.index_client.delete_knowledge_base( + ctx.created_base, + match_condition=MatchConditions.IfNotModified, + ) + except HttpResponseError: + pass + try: + ctx.index_client.delete_knowledge_source( + ctx.created_source, + match_condition=MatchConditions.IfNotModified, + ) + except HttpResponseError: + pass + finally: + ctx.index_client.close() + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy + def test_knowledge_base_configuration_round_trip(self, endpoint: str) -> None: + ctx = self._create_context(endpoint) + try: + created = ctx.created_base + assert isinstance( + created.retrieval_reasoning_effort, + KnowledgeRetrievalMinimalReasoningEffort, + ) + assert created.output_mode == 
"extractiveData" + assert created.retrieval_instructions is None + assert created.answer_instructions is None + + update_model = KnowledgeBase( + name=ctx.base_name, + description="config updated", + knowledge_sources=[KnowledgeSourceReference(name=ctx.source_name)], + retrieval_reasoning_effort=KnowledgeRetrievalMediumReasoningEffort(), + output_mode="answerSynthesis", + retrieval_instructions="summarize with details", + answer_instructions="include citations and summaries", + ) + update_model.e_tag = created.e_tag + + with pytest.raises(HttpResponseError) as ex: + ctx.index_client.create_or_update_knowledge_base( + update_model, + match_condition=MatchConditions.IfNotModified, + ) + + assert "Retrieval instructions cannot be specified" in str(ex.value) + + fetched = ctx.index_client.get_knowledge_base(ctx.base_name) + assert isinstance( + fetched.retrieval_reasoning_effort, + KnowledgeRetrievalMinimalReasoningEffort, + ) + assert fetched.output_mode == "extractiveData" + assert fetched.retrieval_instructions is None + assert fetched.answer_instructions is None + finally: + self._cleanup(ctx) diff --git a/sdk/search/azure-search-documents/tests/test_knowledge_base_live.py b/sdk/search/azure-search-documents/tests/test_knowledge_base_live.py new file mode 100644 index 000000000000..186ab6d2d0d2 --- /dev/null +++ b/sdk/search/azure-search-documents/tests/test_knowledge_base_live.py @@ -0,0 +1,235 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +from __future__ import annotations + +import time + +from azure.core import MatchConditions +from azure.core.exceptions import HttpResponseError +from devtools_testutils import AzureRecordedTestCase, recorded_by_proxy, get_credential + +from azure.search.documents.indexes import SearchIndexClient +from azure.search.documents.indexes.models import ( + KnowledgeBase, + KnowledgeSourceReference, + SearchServiceStatistics, + ServiceIndexersRuntime, + WebKnowledgeSource, + WebKnowledgeSourceDomain, + WebKnowledgeSourceDomains, + WebKnowledgeSourceParameters, +) + +from search_service_preparer import SearchEnvVarPreparer, search_decorator + + +class _TestContext: + def __init__( + self, + index_client: SearchIndexClient, + source_name: str, + created_source: WebKnowledgeSource, + base_name: str, + created_base: KnowledgeBase, + ) -> None: + self.index_client = index_client + self.source_name = source_name + self.created_source = created_source + self.base_name = base_name + self.created_base = created_base + + +class TestKnowledgeBaseLive(AzureRecordedTestCase): + def _create_context(self, endpoint: str) -> "_TestContext": + credential = get_credential() + index_client = SearchIndexClient(endpoint, credential, retry_backoff_factor=60) + + source_name = self.get_resource_name("ksrc") + base_name = self.get_resource_name("kb") + create_source = WebKnowledgeSource( + name=source_name, + description="knowledge base dependent source", + web_parameters=WebKnowledgeSourceParameters( + domains=WebKnowledgeSourceDomains( + allowed_domains=[ + WebKnowledgeSourceDomain( + address="https://learn.microsoft.com", + include_subpages=True, + ) + ] + ) + ), + ) + created_source = index_client.create_knowledge_source(create_source) + + create_base = KnowledgeBase( + name=base_name, + description="initial knowledge base", + knowledge_sources=[KnowledgeSourceReference(name=source_name)], + ) + created_base = index_client.create_knowledge_base(create_base) + return 
_TestContext( + index_client, source_name, created_source, base_name, created_base + ) + + def _cleanup(self, ctx: "_TestContext") -> None: + try: + try: + ctx.index_client.delete_knowledge_base( + ctx.created_base, + match_condition=MatchConditions.IfNotModified, + ) + except HttpResponseError: + pass + try: + ctx.index_client.delete_knowledge_source( + ctx.created_source, + match_condition=MatchConditions.IfNotModified, + ) + except HttpResponseError: + pass + finally: + ctx.index_client.close() + + def _poll_status_snapshots( + self, + ctx: "_TestContext", + *, + wait_for: str = "active", + interval: float = 5.0, + attempts: int = 36, + ): + snapshots = [] + for _ in range(attempts): + status = ctx.index_client.get_knowledge_source_status(ctx.source_name) + snapshots.append(status) + if status.synchronization_status == wait_for: + return snapshots + time.sleep(interval) + return snapshots + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy + def test_knowledge_base_create(self, endpoint: str) -> None: + ctx = self._create_context(endpoint) + try: + assert ctx.created_base.name == ctx.base_name + assert ctx.created_base.knowledge_sources + assert ctx.created_base.knowledge_sources[0].name == ctx.source_name + finally: + self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy + def test_knowledge_base_update(self, endpoint: str) -> None: + ctx = self._create_context(endpoint) + try: + update_model = KnowledgeBase( + name=ctx.base_name, + description="updated knowledge base description", + knowledge_sources=[KnowledgeSourceReference(name=ctx.source_name)], + ) + update_model.e_tag = ctx.created_base.e_tag + + revised = ctx.index_client.create_or_update_knowledge_base( + update_model, + match_condition=MatchConditions.IfNotModified, + ) + ctx.created_base = revised + assert revised.description == "updated knowledge base description" + finally: + self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy + def test_knowledge_base_read(self, endpoint: str) -> None: + ctx = self._create_context(endpoint) + try: + fetched = ctx.index_client.get_knowledge_base(ctx.base_name) + listed = list(ctx.index_client.list_knowledge_bases()) + + assert fetched.name == ctx.base_name + assert ( + fetched.knowledge_sources + and fetched.knowledge_sources[0].name == ctx.source_name + ) + assert any(item.name == ctx.base_name for item in listed) + finally: + self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy + def test_knowledge_base_delete(self, endpoint: str) -> None: + ctx = self._create_context(endpoint) + try: + ctx.index_client.delete_knowledge_base( + ctx.created_base, + match_condition=MatchConditions.IfNotModified, + ) + remaining = list(ctx.index_client.list_knowledge_bases()) + assert all(item.name != ctx.base_name for item in remaining) + finally: + self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy + def test_knowledge_source_status_tracking(self, endpoint: str) -> None: + ctx = self._create_context(endpoint) + try: + snapshots = self._poll_status_snapshots(ctx) + assert snapshots, "Expected at least one status snapshot" + + first = snapshots[0] + last = snapshots[-1] + assert first.synchronization_status in {"creating", "active"} + assert last.synchronization_status == "active" + + if 
last.statistics is not None: + assert last.statistics.total_synchronization >= 0 + assert last.statistics.average_items_processed_per_synchronization >= 0 + if last.current_synchronization_state is not None: + assert last.current_synchronization_state.items_updates_processed >= 0 + if last.last_synchronization_state is not None: + assert last.last_synchronization_state.items_updates_processed >= 0 + finally: + self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy + def test_service_indexer_runtime_statistics(self, endpoint: str) -> None: + ctx = self._create_context(endpoint) + try: + snapshots = self._poll_status_snapshots(ctx) + assert snapshots, "Expected at least one status snapshot" + + service_stats = ( + ctx.index_client._client.get_service_statistics() + ) # pylint:disable=protected-access + assert isinstance(service_stats, SearchServiceStatistics) + + runtime = service_stats.indexers_runtime + assert isinstance(runtime, ServiceIndexersRuntime) + assert runtime.used_seconds >= -1 + assert runtime.beginning_time <= runtime.ending_time + if runtime.remaining_seconds is not None: + assert runtime.remaining_seconds >= -1 + + counters = service_stats.counters + assert counters.indexer_counter is not None + assert counters.indexer_counter.usage >= 0 + assert counters.indexer_counter.quota >= counters.indexer_counter.usage + + limits = service_stats.limits + if limits.max_cumulative_indexer_runtime_seconds is not None: + assert limits.max_cumulative_indexer_runtime_seconds > 0 + finally: + self._cleanup(ctx) diff --git a/sdk/search/azure-search-documents/tests/test_knowledge_source_remote_sharepoint_live.py b/sdk/search/azure-search-documents/tests/test_knowledge_source_remote_sharepoint_live.py new file mode 100644 index 000000000000..a23794f4c52e --- /dev/null +++ b/sdk/search/azure-search-documents/tests/test_knowledge_source_remote_sharepoint_live.py @@ -0,0 +1,129 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +from __future__ import annotations + +from azure.core import MatchConditions +from azure.core.exceptions import HttpResponseError +from devtools_testutils import AzureRecordedTestCase, recorded_by_proxy, get_credential + +from azure.search.documents.indexes import SearchIndexClient +from azure.search.documents.indexes.models import ( + RemoteSharePointKnowledgeSource, + RemoteSharePointKnowledgeSourceParameters, +) + +from search_service_preparer import SearchEnvVarPreparer, search_decorator + + +class _TestContext: + def __init__( + self, + index_client: SearchIndexClient, + source_name: str, + created_revision: RemoteSharePointKnowledgeSource, + ) -> None: + self.index_client = index_client + self.source_name = source_name + self.created_revision = created_revision + + +class TestRemoteSharePointKnowledgeSourceLive(AzureRecordedTestCase): + def _create_context(self, endpoint: str) -> "_TestContext": + credential = get_credential() + index_client = SearchIndexClient(endpoint, credential, retry_backoff_factor=60) + + source_name = self.get_resource_name("spsource") + create_model = RemoteSharePointKnowledgeSource( + name=source_name, + description="initial sharepoint source", + remote_share_point_parameters=RemoteSharePointKnowledgeSourceParameters( + filter_expression="Title:Test", + resource_metadata=["Title", "Path"], + ), + ) + created = index_client.create_knowledge_source(create_model) + return _TestContext(index_client, source_name, created) + + def _cleanup(self, ctx: "_TestContext") -> None: + try: + try: + ctx.index_client.delete_knowledge_source( + ctx.created_revision, + match_condition=MatchConditions.IfNotModified, + ) + except HttpResponseError: + pass + finally: + ctx.index_client.close() + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy + def test_remote_sharepoint_knowledge_source_create(self, endpoint: str) -> None: + ctx = self._create_context(endpoint) + try: + assert ctx.created_revision.name == ctx.source_name + assert ctx.created_revision.kind == "remoteSharePoint" + params = ctx.created_revision.remote_share_point_parameters + assert params is not None + assert params.filter_expression == "Title:Test" + assert params.resource_metadata is not None + assert {"Title", "Path"}.issubset(set(params.resource_metadata)) + finally: + self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy + def test_remote_sharepoint_knowledge_source_update(self, endpoint: str) -> None: + ctx = self._create_context(endpoint) + try: + update_model = RemoteSharePointKnowledgeSource( + name=ctx.source_name, + description="updated description", + remote_share_point_parameters=ctx.created_revision.remote_share_point_parameters, + ) + update_model.e_tag = ctx.created_revision.e_tag + + revised = ctx.index_client.create_or_update_knowledge_source( + update_model, + match_condition=MatchConditions.IfNotModified, + ) + ctx.created_revision = revised + assert revised.description == "updated description" + finally: + self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy + def test_remote_sharepoint_knowledge_source_read(self, endpoint: str) -> None: + ctx = self._create_context(endpoint) + try: + fetched = ctx.index_client.get_knowledge_source(ctx.source_name) + status = ctx.index_client.get_knowledge_source_status(ctx.source_name) + listed = 
list(ctx.index_client.list_knowledge_sources()) + + assert fetched.name == ctx.source_name + assert status.synchronization_status in {"creating", "active", "deleting"} + assert any(item.name == ctx.source_name for item in listed) + finally: + self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy + def test_remote_sharepoint_knowledge_source_delete(self, endpoint: str) -> None: + ctx = self._create_context(endpoint) + try: + ctx.index_client.delete_knowledge_source( + ctx.created_revision, + match_condition=MatchConditions.IfNotModified, + ) + remaining = list(ctx.index_client.list_knowledge_sources()) + assert all(item.name != ctx.source_name for item in remaining) + finally: + ctx.index_client.close() diff --git a/sdk/search/azure-search-documents/tests/test_knowledge_source_web_live.py b/sdk/search/azure-search-documents/tests/test_knowledge_source_web_live.py new file mode 100644 index 000000000000..e10cbfc5fbb3 --- /dev/null +++ b/sdk/search/azure-search-documents/tests/test_knowledge_source_web_live.py @@ -0,0 +1,136 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +from __future__ import annotations + +from azure.core import MatchConditions +from azure.core.exceptions import HttpResponseError +from devtools_testutils import AzureRecordedTestCase, recorded_by_proxy, get_credential + +from azure.search.documents.indexes import SearchIndexClient +from azure.search.documents.indexes.models import ( + WebKnowledgeSource, + WebKnowledgeSourceDomain, + WebKnowledgeSourceDomains, + WebKnowledgeSourceParameters, +) + +from search_service_preparer import SearchEnvVarPreparer, search_decorator + + +class _TestContext: + def __init__( + self, + index_client: SearchIndexClient, + source_name: str, + created_revision: WebKnowledgeSource, + ): + self.index_client = index_client + self.source_name = source_name + self.created_revision = created_revision + + +class TestWebKnowledgeSourceLive(AzureRecordedTestCase): + def _create_context(self, endpoint: str) -> "_TestContext": + credential = get_credential() + index_client = SearchIndexClient(endpoint, credential, retry_backoff_factor=60) + + source_name = self.get_resource_name("websource") + create_model = WebKnowledgeSource( + name=source_name, + description="initial web source", + web_parameters=WebKnowledgeSourceParameters( + domains=WebKnowledgeSourceDomains( + allowed_domains=[ + WebKnowledgeSourceDomain( + address="https://learn.microsoft.com", + include_subpages=True, + ) + ] + ) + ), + ) + created = index_client.create_knowledge_source(create_model) + return _TestContext(index_client, source_name, created) + + def _cleanup(self, ctx: "_TestContext") -> None: + try: + try: + ctx.index_client.delete_knowledge_source( + ctx.created_revision, + match_condition=MatchConditions.IfNotModified, + ) + except HttpResponseError: + pass + finally: + ctx.index_client.close() + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy + def test_web_knowledge_source_create(self, endpoint: str) -> None: + ctx = self._create_context(endpoint) + try: + assert ctx.created_revision.name == ctx.source_name + assert ctx.created_revision.kind == "web" + assert ctx.created_revision.web_parameters is not None + domains = ctx.created_revision.web_parameters.domains + assert domains is not None and domains.allowed_domains is not None + assert 
domains.allowed_domains[0].address == "https://learn.microsoft.com" + finally: + self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy + def test_web_knowledge_source_update(self, endpoint: str) -> None: + ctx = self._create_context(endpoint) + try: + update_model = WebKnowledgeSource( + name=ctx.source_name, + description="updated description", + web_parameters=ctx.created_revision.web_parameters, + ) + update_model.e_tag = ctx.created_revision.e_tag + + revised = ctx.index_client.create_or_update_knowledge_source( + update_model, + match_condition=MatchConditions.IfNotModified, + ) + ctx.created_revision = revised + assert revised.description == "updated description" + finally: + self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy + def test_web_knowledge_source_read(self, endpoint: str) -> None: + ctx = self._create_context(endpoint) + try: + fetched = ctx.index_client.get_knowledge_source(ctx.source_name) + status = ctx.index_client.get_knowledge_source_status(ctx.source_name) + listed = list(ctx.index_client.list_knowledge_sources()) + + assert fetched.name == ctx.source_name + assert status.synchronization_status in {"creating", "active", "deleting"} + assert any(item.name == ctx.source_name for item in listed) + finally: + self._cleanup(ctx) + + @SearchEnvVarPreparer() + @search_decorator(schema=None, index_batch=None) + @recorded_by_proxy + def test_web_knowledge_source_delete(self, endpoint: str) -> None: + ctx = self._create_context(endpoint) + try: + ctx.index_client.delete_knowledge_source( + ctx.created_revision, + match_condition=MatchConditions.IfNotModified, + ) + remaining = list(ctx.index_client.list_knowledge_sources()) + assert all(item.name != ctx.source_name for item in remaining) + finally: + ctx.index_client.close() diff --git a/sdk/search/azure-search-documents/tests/test_models.py b/sdk/search/azure-search-documents/tests/test_models.py index 299ee69091a6..cc46c17e33ca 100644 --- a/sdk/search/azure-search-documents/tests/test_models.py +++ b/sdk/search/azure-search-documents/tests/test_models.py @@ -3,7 +3,20 @@ # Licensed under the MIT License. 
# ------------------------------------ +from datetime import timedelta + from azure.search.documents.indexes.models import ( + ContentUnderstandingSkill, + ContentUnderstandingSkillChunkingProperties, + ContentUnderstandingSkillExtractionOptions, + FreshnessScoringFunction, + FreshnessScoringParameters, + InputFieldMappingEntry, + OutputFieldMappingEntry, + ScoringFunctionAggregation, + ScoringProfile, + SearchField, + SearchIndex, SearchIndexerDataContainer, SearchIndexerDataSourceConnection, SearchResourceEncryptionKey, @@ -23,7 +36,6 @@ def test_encryption_key_serialization(): key_name="key", key_version="key_version", vault_uri="vault_uri", - application_id="application_id", ) data_source_connection = SearchIndexerDataSourceConnection( name="datasource-name", @@ -42,4 +54,141 @@ def test_encryption_key_serialization(): encryption_key=encryption_key, ) packed_search_indexer = search_indexer._to_generated() - assert isinstance(packed_search_indexer.encryption_key, SearchResourceEncryptionKeyGen) + assert isinstance( + packed_search_indexer.encryption_key, SearchResourceEncryptionKeyGen + ) + + +def test_search_index_purview_enabled_round_trip(): + fields = [SearchField(name="id", type="Edm.String", key=True)] + index = SearchIndex( + name="idx", + fields=fields, + purview_enabled=True, + permission_filter_option="enabled", + ) + + generated = index._to_generated() + assert generated.purview_enabled is True + + round_tripped = SearchIndex._from_generated(generated) + assert round_tripped is not None + assert round_tripped.purview_enabled is True + + +def test_content_understanding_skill_round_trip(): + skill = ContentUnderstandingSkill( + name="content-skill", + description="extract structured signals", + context="/document", + inputs=[ + InputFieldMappingEntry(name="text", source="/document/content"), + ], + outputs=[ + OutputFieldMappingEntry(name="structured", target_name="structuredContent"), + ], + extraction_options=[ + ContentUnderstandingSkillExtractionOptions.IMAGES, + ContentUnderstandingSkillExtractionOptions.LOCATION_METADATA, + ], + chunking_properties=ContentUnderstandingSkillChunkingProperties( + maximum_length=600, + overlap_length=50, + ), + ) + skillset = SearchIndexerSkillset(name="cu-skillset", skills=[skill]) + + generated_skillset = skillset._to_generated() + assert isinstance(generated_skillset.skills[0], ContentUnderstandingSkill) + generated_skill = generated_skillset.skills[0] + assert generated_skill.chunking_properties.maximum_length == 600 + assert generated_skill.chunking_properties.overlap_length == 50 + assert generated_skill.extraction_options == ["images", "locationMetadata"] + + generated_skill.description = "updated description" + generated_skill.chunking_properties.maximum_length = 700 + + round_tripped = SearchIndexerSkillset._from_generated(generated_skillset) + assert round_tripped is not None + round_trip_skill = round_tripped.skills[0] + assert isinstance(round_trip_skill, ContentUnderstandingSkill) + assert round_trip_skill.description == "updated description" + assert round_trip_skill.chunking_properties.maximum_length == 700 + assert round_trip_skill.extraction_options == ["images", "locationMetadata"] + + +def test_content_understanding_skill_payload_shape(): + skill = ContentUnderstandingSkill( + name="content-skill", + inputs=[InputFieldMappingEntry(name="text", source="/document/content")], + outputs=[OutputFieldMappingEntry(name="structured")], + chunking_properties=ContentUnderstandingSkillChunkingProperties(), + ) + payload = 
SearchIndexerSkillset(name="cu-skillset", skills=[skill]).serialize() + + skill_payload = payload["skills"][0] + assert ( + skill_payload["@odata.type"] + == "#Microsoft.Skills.Util.ContentUnderstandingSkill" + ) + assert skill_payload["chunkingProperties"]["unit"] == "characters" + + +def test_search_index_scoring_profile_product_round_trip(): + fields = [ + SearchField(name="id", type="Edm.String", key=True), + SearchField(name="lastUpdated", type="Edm.DateTimeOffset", filterable=True), + ] + scoring_profile = ScoringProfile( + name="product-score", + function_aggregation=ScoringFunctionAggregation.PRODUCT, + functions=[ + FreshnessScoringFunction( + field_name="lastUpdated", + boost=2.5, + parameters=FreshnessScoringParameters( + boosting_duration=timedelta(days=7) + ), + ) + ], + ) + index = SearchIndex( + name="scoring-index", fields=fields, scoring_profiles=[scoring_profile] + ) + + generated = index._to_generated() + assert generated.scoring_profiles[0].function_aggregation == "product" + + generated.scoring_profiles[0].function_aggregation = "sum" + + round_tripped = SearchIndex._from_generated(generated) + assert round_tripped is not None + assert round_tripped.scoring_profiles[0].function_aggregation == "sum" + + +def test_search_index_scoring_profile_product_payload(): + fields = [ + SearchField(name="id", type="Edm.String", key=True), + SearchField(name="lastUpdated", type="Edm.DateTimeOffset", filterable=True), + ] + scoring_profile = ScoringProfile( + name="product-score", + function_aggregation=ScoringFunctionAggregation.PRODUCT, + functions=[ + FreshnessScoringFunction( + field_name="lastUpdated", + boost=2.0, + parameters=FreshnessScoringParameters( + boosting_duration=timedelta(days=3) + ), + ) + ], + ) + payload = SearchIndex( + name="scoring-index", + fields=fields, + scoring_profiles=[scoring_profile], + ).serialize() + + scoring_payload = payload["scoringProfiles"][0] + assert scoring_payload["functionAggregation"] == "product" diff --git a/sdk/search/azure-search-documents/tests/test_queries.py b/sdk/search/azure-search-documents/tests/test_queries.py index dacac2c74ad1..baad05231809 100644 --- a/sdk/search/azure-search-documents/tests/test_queries.py +++ b/sdk/search/azure-search-documents/tests/test_queries.py @@ -42,7 +42,9 @@ def test_filter(self): query.filter("expr0") assert query.request.filter == "expr0" - query = AutocompleteQuery(search_text="text", suggester_name="sg", filter="expr1") + query = AutocompleteQuery( + search_text="text", suggester_name="sg", filter="expr1" + ) assert query.request.filter == "expr1" query.filter("expr2") assert query.request.filter == "expr2" diff --git a/sdk/search/azure-search-documents/tests/test_regex_flags.py b/sdk/search/azure-search-documents/tests/test_regex_flags.py index cee471b5d1df..01726af1ffa2 100644 --- a/sdk/search/azure-search-documents/tests/test_regex_flags.py +++ b/sdk/search/azure-search-documents/tests/test_regex_flags.py @@ -22,7 +22,9 @@ def test_unpack_search_index(): pattern_tokenizer = _PatternTokenizer(name="test_tokenizer", flags="CANON_EQ") tokenizers = [] tokenizers.append(pattern_tokenizer) - index = SearchIndex(name="test", fields=None, analyzers=analyzers, tokenizers=tokenizers) + index = SearchIndex( + name="test", fields=None, analyzers=analyzers, tokenizers=tokenizers + ) result = SearchIndex._from_generated(index) assert isinstance(result.analyzers[0], PatternAnalyzer) assert isinstance(result.analyzers[0].flags, list) @@ -33,13 +35,19 @@ def test_unpack_search_index(): def 
test_multi_unpack_search_index(): - pattern_analyzer = _PatternAnalyzer(name="test_analyzer", flags="CANON_EQ|MULTILINE") + pattern_analyzer = _PatternAnalyzer( + name="test_analyzer", flags="CANON_EQ|MULTILINE" + ) analyzers = [] analyzers.append(pattern_analyzer) - pattern_tokenizer = _PatternTokenizer(name="test_tokenizer", flags="CANON_EQ|MULTILINE") + pattern_tokenizer = _PatternTokenizer( + name="test_tokenizer", flags="CANON_EQ|MULTILINE" + ) tokenizers = [] tokenizers.append(pattern_tokenizer) - index = SearchIndex(name="test", fields=None, analyzers=analyzers, tokenizers=tokenizers) + index = SearchIndex( + name="test", fields=None, analyzers=analyzers, tokenizers=tokenizers + ) result = SearchIndex._from_generated(index) assert isinstance(result.analyzers[0], PatternAnalyzer) assert isinstance(result.analyzers[0].flags, list) @@ -55,10 +63,14 @@ def test_unpack_search_index_enum(): pattern_analyzer = _PatternAnalyzer(name="test_analyzer", flags=RegexFlags.canon_eq) analyzers = [] analyzers.append(pattern_analyzer) - pattern_tokenizer = _PatternTokenizer(name="test_tokenizer", flags=RegexFlags.canon_eq) + pattern_tokenizer = _PatternTokenizer( + name="test_tokenizer", flags=RegexFlags.canon_eq + ) tokenizers = [] tokenizers.append(pattern_tokenizer) - index = SearchIndex(name="test", fields=None, analyzers=analyzers, tokenizers=tokenizers) + index = SearchIndex( + name="test", fields=None, analyzers=analyzers, tokenizers=tokenizers + ) result = SearchIndex._from_generated(index) assert isinstance(result.analyzers[0], PatternAnalyzer) assert isinstance(result.analyzers[0].flags, list) @@ -75,7 +87,9 @@ def test_pack_search_index(): pattern_tokenizer = PatternTokenizer(name="test_tokenizer", flags=["CANON_EQ"]) tokenizers = [] tokenizers.append(pattern_tokenizer) - index = SearchIndex(name="test", fields=None, analyzers=analyzers, tokenizers=tokenizers) + index = SearchIndex( + name="test", fields=None, analyzers=analyzers, tokenizers=tokenizers + ) result = index._to_generated() assert isinstance(result.analyzers[0], _PatternAnalyzer) assert isinstance(result.analyzers[0].flags, str) @@ -86,13 +100,19 @@ def test_pack_search_index(): def test_multi_pack_search_index(): - pattern_analyzer = PatternAnalyzer(name="test_analyzer", flags=["CANON_EQ", "MULTILINE"]) + pattern_analyzer = PatternAnalyzer( + name="test_analyzer", flags=["CANON_EQ", "MULTILINE"] + ) analyzers = [] analyzers.append(pattern_analyzer) - pattern_tokenizer = PatternTokenizer(name="test_analyzer", flags=["CANON_EQ", "MULTILINE"]) + pattern_tokenizer = PatternTokenizer( + name="test_analyzer", flags=["CANON_EQ", "MULTILINE"] + ) tokenizers = [] tokenizers.append(pattern_tokenizer) - index = SearchIndex(name="test", fields=None, analyzers=analyzers, tokenizers=tokenizers) + index = SearchIndex( + name="test", fields=None, analyzers=analyzers, tokenizers=tokenizers + ) result = index._to_generated() assert isinstance(result.analyzers[0], _PatternAnalyzer) assert isinstance(result.analyzers[0].flags, str) diff --git a/sdk/search/azure-search-documents/tests/test_search_client.py b/sdk/search/azure-search-documents/tests/test_search_client.py index f8833f55fdec..937c154aec9a 100644 --- a/sdk/search/azure-search-documents/tests/test_search_client.py +++ b/sdk/search/azure-search-documents/tests/test_search_client.py @@ -9,6 +9,7 @@ from azure.core.credentials import AzureKeyCredential from azure.search.documents._generated.models import ( + FacetResult, SearchDocumentsResult, SearchResult, ) @@ -31,7 +32,9 @@ 
"merge_or_upload_documents", ] -CRUD_METHOD_MAP = dict(zip(CRUD_METHOD_NAMES, ["upload", "delete", "merge", "mergeOrUpload"])) +CRUD_METHOD_MAP = dict( + zip(CRUD_METHOD_NAMES, ["upload", "delete", "merge", "mergeOrUpload"]) +) class Test_odata: @@ -67,7 +70,10 @@ def test_query_answer_count(self): assert get_answer_query("query", 5) == "query|count-5" def test_query_answer_threshold(self): - assert get_answer_query("query", query_answer_threshold=0.5) == "query|threshold-0.5" + assert ( + get_answer_query("query", query_answer_threshold=0.5) + == "query|threshold-0.5" + ) def test_query_answer_count_threshold(self): assert get_answer_query("query", 5, 0.5) == "query|count-5,threshold-0.5" @@ -108,9 +114,13 @@ def test_headers_merge(self): def test_repr(self): client = SearchClient("endpoint", "index name", CREDENTIAL) - assert repr(client) == "".format(repr("endpoint"), repr("index name")) + assert repr(client) == "".format( + repr("endpoint"), repr("index name") + ) - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.count") + @mock.patch( + "azure.search.documents._generated.operations._documents_operations.DocumentsOperations.count" + ) def test_get_document_count(self, mock_count): client = SearchClient("endpoint", "index name", CREDENTIAL) client.get_document_count() @@ -119,7 +129,9 @@ def test_get_document_count(self, mock_count): assert len(mock_count.call_args[1]) == 1 assert mock_count.call_args[1]["headers"] == client._headers - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.get") + @mock.patch( + "azure.search.documents._generated.operations._documents_operations.DocumentsOperations.get" + ) def test_get_document(self, mock_get): client = SearchClient("endpoint", "index name", CREDENTIAL) client.get_document("some_key") @@ -140,7 +152,9 @@ def test_get_document(self, mock_get): assert mock_get.call_args[1]["key"] == "some_key" assert mock_get.call_args[1]["selected_fields"] == "foo" - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.search_post") + @mock.patch( + "azure.search.documents._generated.operations._documents_operations.DocumentsOperations.search_post" + ) def test_search_query_argument(self, mock_search_post): client = SearchClient("endpoint", "index name", CREDENTIAL) result = client.search(search_text="search text") @@ -153,24 +167,57 @@ def test_search_query_argument(self, mock_search_post): next(result) assert mock_search_post.called assert mock_search_post.call_args[0] == () - assert mock_search_post.call_args[1]["search_request"].search_text == "search text" + assert ( + mock_search_post.call_args[1]["search_request"].search_text == "search text" + ) - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.suggest_post") + @mock.patch( + "azure.search.documents._generated.operations._documents_operations.DocumentsOperations.search_post" + ) + def test_search_enable_elevated_read(self, mock_search_post): + client = SearchClient("endpoint", "index name", CREDENTIAL) + result = client.search( + search_text="search text", + x_ms_enable_elevated_read=True, + x_ms_query_source_authorization="aad:fake-user", + ) + search_result = SearchDocumentsResult() + search_result.results = [SearchResult(additional_properties={"key": "val"})] + mock_search_post.return_value = search_result + next(result) + + assert mock_search_post.called + assert 
mock_search_post.call_args[1]["x_ms_enable_elevated_read"] is True + assert ( + mock_search_post.call_args[1]["x_ms_query_source_authorization"] + == "aad:fake-user" + ) + + @mock.patch( + "azure.search.documents._generated.operations._documents_operations.DocumentsOperations.suggest_post" + ) def test_suggest_query_argument(self, mock_suggest_post): client = SearchClient("endpoint", "index name", CREDENTIAL) result = client.suggest(search_text="search text", suggester_name="sg") assert mock_suggest_post.called assert mock_suggest_post.call_args[0] == () assert mock_suggest_post.call_args[1]["headers"] == client._headers - assert mock_suggest_post.call_args[1]["suggest_request"].search_text == "search text" + assert ( + mock_suggest_post.call_args[1]["suggest_request"].search_text + == "search text" + ) def test_suggest_bad_argument(self): client = SearchClient("endpoint", "index name", CREDENTIAL) with pytest.raises(TypeError) as e: client.suggest("bad_query") - assert str(e) == "Expected a SuggestQuery for 'query', but got {}".format(repr("bad_query")) + assert str(e) == "Expected a SuggestQuery for 'query', but got {}".format( + repr("bad_query") + ) - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.search_post") + @mock.patch( + "azure.search.documents._generated.operations._documents_operations.DocumentsOperations.search_post" + ) def test_get_count_reset_continuation_token(self, mock_search_post): client = SearchClient("endpoint", "index name", CREDENTIAL) result = client.search(search_text="search text") @@ -193,20 +240,64 @@ def test_autocomplete_query_argument(self, mock_autocomplete_post): assert mock_autocomplete_post.called assert mock_autocomplete_post.call_args[0] == () assert mock_autocomplete_post.call_args[1]["headers"] == client._headers - assert mock_autocomplete_post.call_args[1]["autocomplete_request"].search_text == "search text" + assert ( + mock_autocomplete_post.call_args[1]["autocomplete_request"].search_text + == "search text" + ) - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.count") + @mock.patch( + "azure.search.documents._generated.operations._documents_operations.DocumentsOperations.count" + ) def test_get_document_count_v2020_06_30(self, mock_count): - client = SearchClient("endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30) + client = SearchClient( + "endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30 + ) client.get_document_count() assert mock_count.called assert mock_count.call_args[0] == () assert len(mock_count.call_args[1]) == 1 assert mock_count.call_args[1]["headers"] == client._headers - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.get") + @mock.patch( + "azure.search.documents._generated.operations._documents_operations.DocumentsOperations.search_post" + ) + def test_get_facets_with_aggregations(self, mock_search_post): + client = SearchClient("endpoint", "index name", CREDENTIAL) + result = client.search(search_text="*") + + search_result = SearchDocumentsResult() + search_result.results = [SearchResult(additional_properties={"id": "1"})] + + facet_bucket = FacetResult() + facet_bucket.count = 4 + facet_bucket.avg = 120.5 + facet_bucket.min = 75.0 + facet_bucket.max = 240.0 + facet_bucket.cardinality = 3 + + search_result.facets = {"baseRate": [facet_bucket]} + mock_search_post.return_value = search_result + + next(result) + facets = 
result.get_facets() + + assert facets is not None + assert "baseRate" in facets + assert len(facets["baseRate"]) == 1 + bucket = facets["baseRate"][0] + assert bucket["count"] == 4 + assert bucket["avg"] == 120.5 + assert bucket["min"] == 75.0 + assert bucket["max"] == 240.0 + assert bucket["cardinality"] == 3 + + @mock.patch( + "azure.search.documents._generated.operations._documents_operations.DocumentsOperations.get" + ) def test_get_document_v2020_06_30(self, mock_get): - client = SearchClient("endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30) + client = SearchClient( + "endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30 + ) client.get_document("some_key") assert mock_get.called assert mock_get.call_args[0] == () @@ -225,9 +316,13 @@ def test_get_document_v2020_06_30(self, mock_get): assert mock_get.call_args[1]["key"] == "some_key" assert mock_get.call_args[1]["selected_fields"] == "foo" - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.search_post") + @mock.patch( + "azure.search.documents._generated.operations._documents_operations.DocumentsOperations.search_post" + ) def test_search_query_argument_v2020_06_30(self, mock_search_post): - client = SearchClient("endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30) + client = SearchClient( + "endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30 + ) result = client.search(search_text="search text") assert isinstance(result, ItemPaged) assert result._page_iterator_class is SearchPageIterator @@ -238,38 +333,60 @@ def test_search_query_argument_v2020_06_30(self, mock_search_post): next(result) assert mock_search_post.called assert mock_search_post.call_args[0] == () - assert mock_search_post.call_args[1]["search_request"].search_text == "search text" + assert ( + mock_search_post.call_args[1]["search_request"].search_text == "search text" + ) - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.suggest_post") + @mock.patch( + "azure.search.documents._generated.operations._documents_operations.DocumentsOperations.suggest_post" + ) def test_suggest_query_argument_v2020_06_30(self, mock_suggest_post): - client = SearchClient("endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30) + client = SearchClient( + "endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30 + ) result = client.suggest(search_text="search text", suggester_name="sg") assert mock_suggest_post.called assert mock_suggest_post.call_args[0] == () assert mock_suggest_post.call_args[1]["headers"] == client._headers - assert mock_suggest_post.call_args[1]["suggest_request"].search_text == "search text" + assert ( + mock_suggest_post.call_args[1]["suggest_request"].search_text + == "search text" + ) @mock.patch( "azure.search.documents._generated.operations._documents_operations.DocumentsOperations.autocomplete_post" ) def test_autocomplete_query_argument_v2020_06_30(self, mock_autocomplete_post): - client = SearchClient("endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30) + client = SearchClient( + "endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30 + ) result = client.autocomplete(search_text="search text", suggester_name="sg") assert mock_autocomplete_post.called assert mock_autocomplete_post.call_args[0] == () assert mock_autocomplete_post.call_args[1]["headers"] == client._headers - assert 
mock_autocomplete_post.call_args[1]["autocomplete_request"].search_text == "search text" + assert ( + mock_autocomplete_post.call_args[1]["autocomplete_request"].search_text + == "search text" + ) def test_autocomplete_bad_argument(self): client = SearchClient("endpoint", "index name", CREDENTIAL) with pytest.raises(TypeError) as e: client.autocomplete("bad_query") - assert str(e) == "Expected a AutocompleteQuery for 'query', but got {}".format(repr("bad_query")) - - @pytest.mark.parametrize("arg", [[], ["doc1"], ["doc1", "doc2"]], ids=lambda x: str(len(x)) + " docs") + assert str( + e + ) == "Expected a AutocompleteQuery for 'query', but got {}".format( + repr("bad_query") + ) + + @pytest.mark.parametrize( + "arg", [[], ["doc1"], ["doc1", "doc2"]], ids=lambda x: str(len(x)) + " docs" + ) @pytest.mark.parametrize("method_name", CRUD_METHOD_NAMES) def test_add_method(self, arg, method_name): - with mock.patch.object(SearchClient, "index_documents", return_value=None) as mock_index_documents: + with mock.patch.object( + SearchClient, "index_documents", return_value=None + ) as mock_index_documents: client = SearchClient("endpoint", "index name", CREDENTIAL) method = getattr(client, method_name) @@ -279,12 +396,17 @@ def test_add_method(self, arg, method_name): assert len(mock_index_documents.call_args[0]) == 1 batch = mock_index_documents.call_args[0][0] assert isinstance(batch, IndexDocumentsBatch) - assert all(action.action_type == CRUD_METHOD_MAP[method_name] for action in batch.actions) + assert all( + action.action_type == CRUD_METHOD_MAP[method_name] + for action in batch.actions + ) assert [action.additional_properties for action in batch.actions] == arg assert mock_index_documents.call_args[1]["headers"] == client._headers assert mock_index_documents.call_args[1]["extra"] == "foo" - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.index") + @mock.patch( + "azure.search.documents._generated.operations._documents_operations.DocumentsOperations.index" + ) def test_index_documents(self, mock_index): client = SearchClient("endpoint", "index name", CREDENTIAL) diff --git a/sdk/search/azure-search-documents/tests/test_search_client_basic_live.py b/sdk/search/azure-search-documents/tests/test_search_client_basic_live.py index 5c1500615dee..644b21adc88e 100644 --- a/sdk/search/azure-search-documents/tests/test_search_client_basic_live.py +++ b/sdk/search/azure-search-documents/tests/test_search_client_basic_live.py @@ -17,14 +17,18 @@ class TestSearchClient(AzureRecordedTestCase): @search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json") @recorded_by_proxy def test_get_document_count(self, endpoint, index_name): - client = SearchClient(endpoint, index_name, get_credential(), retry_backoff_factor=60) + client = SearchClient( + endpoint, index_name, get_credential(), retry_backoff_factor=60 + ) assert client.get_document_count() == 10 @SearchEnvVarPreparer() @search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json") @recorded_by_proxy def test_get_document(self, endpoint, index_name, index_batch): - client = SearchClient(endpoint, index_name, get_credential(), retry_backoff_factor=60) + client = SearchClient( + endpoint, index_name, get_credential(), retry_backoff_factor=60 + ) for hotel_id in range(1, 11): result = client.get_document(key=str(hotel_id)) expected = index_batch["value"][hotel_id - 1] @@ -36,6 +40,8 @@ def test_get_document(self, endpoint, index_name, index_batch): 
@search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json") @recorded_by_proxy def test_get_document_missing(self, endpoint, index_name): - client = SearchClient(endpoint, index_name, get_credential(), retry_backoff_factor=60) + client = SearchClient( + endpoint, index_name, get_credential(), retry_backoff_factor=60 + ) with pytest.raises(HttpResponseError): client.get_document(key="1000") diff --git a/sdk/search/azure-search-documents/tests/test_search_client_buffered_sender_live.py b/sdk/search/azure-search-documents/tests/test_search_client_buffered_sender_live.py index 30d6f221bfd1..1886a1654bb8 100644 --- a/sdk/search/azure-search-documents/tests/test_search_client_buffered_sender_live.py +++ b/sdk/search/azure-search-documents/tests/test_search_client_buffered_sender_live.py @@ -20,17 +20,33 @@ class TestSearchIndexingBufferedSender(AzureRecordedTestCase): @search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json") @recorded_by_proxy def test_search_client_index_buffered_sender(self, endpoint, index_name): - client = SearchClient(endpoint, index_name, get_credential(), retry_backoff_factor=60) - batch_client = SearchIndexingBufferedSender(endpoint, index_name, get_credential(), retry_backoff_factor=60) + client = SearchClient( + endpoint, index_name, get_credential(), retry_backoff_factor=60 + ) + batch_client = SearchIndexingBufferedSender( + endpoint, index_name, get_credential(), retry_backoff_factor=60 + ) try: doc_count = 10 doc_count = self._test_upload_documents_new(client, batch_client, doc_count) - doc_count = self._test_upload_documents_existing(client, batch_client, doc_count) - doc_count = self._test_delete_documents_existing(client, batch_client, doc_count) - doc_count = self._test_delete_documents_missing(client, batch_client, doc_count) - doc_count = self._test_merge_documents_existing(client, batch_client, doc_count) - doc_count = self._test_merge_documents_missing(client, batch_client, doc_count) - doc_count = self._test_merge_or_upload_documents(client, batch_client, doc_count) + doc_count = self._test_upload_documents_existing( + client, batch_client, doc_count + ) + doc_count = self._test_delete_documents_existing( + client, batch_client, doc_count + ) + doc_count = self._test_delete_documents_missing( + client, batch_client, doc_count + ) + doc_count = self._test_merge_documents_existing( + client, batch_client, doc_count + ) + doc_count = self._test_merge_documents_missing( + client, batch_client, doc_count + ) + doc_count = self._test_merge_or_upload_documents( + client, batch_client, doc_count + ) finally: batch_client.close() diff --git a/sdk/search/azure-search-documents/tests/test_search_client_index_document_live.py b/sdk/search/azure-search-documents/tests/test_search_client_index_document_live.py index 52219a0819a0..d01ea18eac77 100644 --- a/sdk/search/azure-search-documents/tests/test_search_client_index_document_live.py +++ b/sdk/search/azure-search-documents/tests/test_search_client_index_document_live.py @@ -19,7 +19,9 @@ class TestSearchClientIndexDocument(AzureRecordedTestCase): @search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json") @recorded_by_proxy def test_search_client_index_document(self, endpoint, index_name): - client = SearchClient(endpoint, index_name, get_credential(), retry_backoff_factor=60) + client = SearchClient( + endpoint, index_name, get_credential(), retry_backoff_factor=60 + ) doc_count = 10 doc_count = self._test_upload_documents_new(client, doc_count) doc_count = 
self._test_upload_documents_existing(client, doc_count) diff --git a/sdk/search/azure-search-documents/tests/test_search_client_search_live.py b/sdk/search/azure-search-documents/tests/test_search_client_search_live.py index af86da2542c6..921fab81d82e 100644 --- a/sdk/search/azure-search-documents/tests/test_search_client_search_live.py +++ b/sdk/search/azure-search-documents/tests/test_search_client_search_live.py @@ -4,6 +4,8 @@ # license information. # -------------------------------------------------------------------------- +import math + from azure.search.documents import SearchClient from devtools_testutils import AzureRecordedTestCase, recorded_by_proxy, get_credential @@ -15,7 +17,9 @@ class TestSearchClient(AzureRecordedTestCase): @search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json") @recorded_by_proxy def test_search_client(self, endpoint, index_name): - client = SearchClient(endpoint, index_name, get_credential(), retry_backoff_factor=60) + client = SearchClient( + endpoint, index_name, get_credential(), retry_backoff_factor=60 + ) self._test_get_search_simple(client) self._test_get_search_simple_with_top(client) self._test_get_search_filter(client) @@ -24,6 +28,7 @@ def test_search_client(self, endpoint, index_name): self._test_get_search_coverage(client) self._test_get_search_facets_none(client) self._test_get_search_facets_result(client) + self._test_get_search_facet_metrics(client) self._test_autocomplete(client) self._test_suggest(client) @@ -51,7 +56,9 @@ def _test_get_search_filter(self, client): order_by="hotelName desc", ) ) - assert [x["hotelName"] for x in results] == sorted([x["hotelName"] for x in results], reverse=True) + assert [x["hotelName"] for x in results] == sorted( + [x["hotelName"] for x in results], reverse=True + ) expected = { "category", "hotelName", @@ -76,7 +83,9 @@ def _test_get_search_filter_array(self, client): order_by="hotelName desc", ) ) - assert [x["hotelName"] for x in results] == sorted([x["hotelName"] for x in results], reverse=True) + assert [x["hotelName"] for x in results] == sorted( + [x["hotelName"] for x in results], reverse=True + ) expected = { "category", "hotelName", @@ -114,7 +123,9 @@ def _test_get_search_facets_none(self, client): def _test_get_search_facets_result(self, client): select = ("hotelName", "category", "description") - results = client.search(search_text="WiFi", facets=["category"], select=",".join(select)) + results = client.search( + search_text="WiFi", facets=["category"], select=",".join(select) + ) assert results.get_facets() == { "category": [ {"value": "Budget", "count": 4}, @@ -122,6 +133,45 @@ def _test_get_search_facets_result(self, client): ] } + def _test_get_search_facet_metrics(self, client): + facets = [ + "rooms/baseRate,metric:sum", + "rooms/baseRate,metric:avg", + "rooms/baseRate,metric:min", + "rooms/baseRate,metric:max,default:0", + "rooms/sleepsCount,metric:cardinality,precisionThreshold:10", + ] + results = client.search(search_text="*", facets=facets) + + facet_payload = results.get_facets() + assert facet_payload is not None + + base_rate_metrics = facet_payload.get("rooms/baseRate", []) + assert len(base_rate_metrics) == 4 + + observed_metrics = {} + for bucket in base_rate_metrics: + for metric in ("sum", "avg", "min", "max"): + value = bucket.get(metric) + if value is not None: + observed_metrics[metric] = value + + expected_metrics = { + "sum": 27.91, + "avg": 6.9775, + "min": 2.44, + "max": 9.69, + } + for metric, expected in expected_metrics.items(): + assert 
metric in observed_metrics + assert math.isclose( + observed_metrics[metric], expected, rel_tol=0.0, abs_tol=0.001 + ) + + sleeps_metrics = facet_payload.get("rooms/sleepsCount", []) + assert len(sleeps_metrics) == 1 + assert sleeps_metrics[0].get("cardinality") == 1 + def _test_autocomplete(self, client): results = client.autocomplete(search_text="mot", suggester_name="sg") assert results == [{"text": "motel", "query_plus_text": "motel"}] diff --git a/sdk/search/azure-search-documents/tests/test_search_index_client.py b/sdk/search/azure-search-documents/tests/test_search_index_client.py index 73973e36d3f1..cdb23a9c32ff 100644 --- a/sdk/search/azure-search-documents/tests/test_search_index_client.py +++ b/sdk/search/azure-search-documents/tests/test_search_index_client.py @@ -46,7 +46,9 @@ def test_get_search_client(self): def test_get_search_client_inherit_api_version(self): credential = AzureKeyCredential(key="old_api_key") - client = SearchIndexClient("endpoint", credential, api_version=ApiVersion.V2020_06_30) + client = SearchIndexClient( + "endpoint", credential, api_version=ApiVersion.V2020_06_30 + ) search_client = client.get_search_client("index") assert isinstance(search_client, SearchClient) assert search_client._api_version == ApiVersion.V2020_06_30 @@ -65,7 +67,9 @@ def test_get_service_statistics(self, mock_get_stats): "azure.search.documents.indexes._generated.operations._search_service_client_operations._SearchServiceClientOperationsMixin.get_service_statistics" ) def test_get_service_statistics_v2020_06_30(self, mock_get_stats): - client = SearchIndexClient("endpoint", CREDENTIAL, api_version=ApiVersion.V2020_06_30) + client = SearchIndexClient( + "endpoint", CREDENTIAL, api_version=ApiVersion.V2020_06_30 + ) client.get_service_statistics() assert mock_get_stats.called assert mock_get_stats.call_args[0] == () @@ -127,4 +131,6 @@ def test_datasource_with_empty_connection_string(self): name="test", type="azureblob", connection_string="", container=container ) packed_data_source_connection = data_source_connection._to_generated() - assert packed_data_source_connection.credentials.connection_string == "" + assert ( + packed_data_source_connection.credentials.connection_string == "" + ) diff --git a/sdk/search/azure-search-documents/tests/test_search_index_client_alias_live.py b/sdk/search/azure-search-documents/tests/test_search_index_client_alias_live.py index 0e91ad5774fa..ad5e1f227d9c 100644 --- a/sdk/search/azure-search-documents/tests/test_search_index_client_alias_live.py +++ b/sdk/search/azure-search-documents/tests/test_search_index_client_alias_live.py @@ -39,7 +39,9 @@ def test_alias(self, endpoint): # point an old alias to a new index new_index_name = "hotel" - self._test_update_alias_to_new_index(client, aliases[1], new_index_name, index_name) + self._test_update_alias_to_new_index( + client, aliases[1], new_index_name, index_name + ) self._test_get_alias(client, aliases) diff --git a/sdk/search/azure-search-documents/tests/test_search_index_client_data_source_live.py b/sdk/search/azure-search-documents/tests/test_search_index_client_data_source_live.py index f60bf1cb39eb..260dbbfea37a 100644 --- a/sdk/search/azure-search-documents/tests/test_search_index_client_data_source_live.py +++ b/sdk/search/azure-search-documents/tests/test_search_index_client_data_source_live.py @@ -31,7 +31,9 @@ def _create_data_source_connection(self, cs, name): @recorded_by_proxy def test_data_source(self, endpoint, **kwargs): storage_cs = kwargs.get("search_storage_connection_string") - 
client = SearchIndexerClient(endpoint, get_credential(), retry_backoff_factor=60) + client = SearchIndexerClient( + endpoint, get_credential(), retry_backoff_factor=60 + ) self._test_create_datasource(client, storage_cs) self._test_delete_datasource(client, storage_cs) self._test_get_datasource(client, storage_cs) @@ -43,14 +45,18 @@ def test_data_source(self, endpoint, **kwargs): def _test_create_datasource(self, client, storage_cs): ds_name = "create" - data_source_connection = self._create_data_source_connection(storage_cs, ds_name) + data_source_connection = self._create_data_source_connection( + storage_cs, ds_name + ) result = client.create_data_source_connection(data_source_connection) assert result.name == ds_name assert result.type == "azureblob" def _test_delete_datasource(self, client, storage_cs): ds_name = "delete" - data_source_connection = self._create_data_source_connection(storage_cs, ds_name) + data_source_connection = self._create_data_source_connection( + storage_cs, ds_name + ) client.create_data_source_connection(data_source_connection) expected_count = len(client.get_data_source_connections()) - 1 client.delete_data_source_connection(ds_name) @@ -58,23 +64,33 @@ def _test_delete_datasource(self, client, storage_cs): def _test_get_datasource(self, client, storage_cs): ds_name = "get" - data_source_connection = self._create_data_source_connection(storage_cs, ds_name) + data_source_connection = self._create_data_source_connection( + storage_cs, ds_name + ) client.create_data_source_connection(data_source_connection) result = client.get_data_source_connection(ds_name) assert result.name == ds_name def _test_list_datasources(self, client, storage_cs): - data_source_connection1 = self._create_data_source_connection(storage_cs, "list") - data_source_connection2 = self._create_data_source_connection(storage_cs, "list2") + data_source_connection1 = self._create_data_source_connection( + storage_cs, "list" + ) + data_source_connection2 = self._create_data_source_connection( + storage_cs, "list2" + ) client.create_data_source_connection(data_source_connection1) client.create_data_source_connection(data_source_connection2) result = client.get_data_source_connections() assert isinstance(result, list) - assert set(x.name for x in result).intersection(set(["list", "list2"])) == set(["list", "list2"]) + assert set(x.name for x in result).intersection(set(["list", "list2"])) == set( + ["list", "list2"] + ) def _test_create_or_update_datasource(self, client, storage_cs): ds_name = "cou" - data_source_connection = self._create_data_source_connection(storage_cs, ds_name) + data_source_connection = self._create_data_source_connection( + storage_cs, ds_name + ) client.create_data_source_connection(data_source_connection) expected_count = len(client.get_data_source_connections()) data_source_connection.description = "updated" @@ -86,7 +102,9 @@ def _test_create_or_update_datasource(self, client, storage_cs): def _test_create_or_update_datasource_if_unchanged(self, client, storage_cs): ds_name = "couunch" - data_source_connection = self._create_data_source_connection(storage_cs, ds_name) + data_source_connection = self._create_data_source_connection( + storage_cs, ds_name + ) created = client.create_data_source_connection(data_source_connection) etag = created.e_tag @@ -95,7 +113,9 @@ def _test_create_or_update_datasource_if_unchanged(self, client, storage_cs): client.create_or_update_data_source_connection(data_source_connection) # prepare data source connection - 
data_source_connection.e_tag = etag # reset to the original data source connection + data_source_connection.e_tag = ( + etag # reset to the original data source connection + ) data_source_connection.description = "changed" with pytest.raises(HttpResponseError): client.create_or_update_data_source_connection( @@ -104,7 +124,9 @@ def _test_create_or_update_datasource_if_unchanged(self, client, storage_cs): def _test_delete_datasource_if_unchanged(self, client, storage_cs): ds_name = "delunch" - data_source_connection = self._create_data_source_connection(storage_cs, ds_name) + data_source_connection = self._create_data_source_connection( + storage_cs, ds_name + ) created = client.create_data_source_connection(data_source_connection) etag = created.e_tag @@ -113,13 +135,19 @@ def _test_delete_datasource_if_unchanged(self, client, storage_cs): client.create_or_update_data_source_connection(data_source_connection) # prepare data source connection - data_source_connection.e_tag = etag # reset to the original data source connection + data_source_connection.e_tag = ( + etag # reset to the original data source connection + ) with pytest.raises(HttpResponseError): - client.delete_data_source_connection(data_source_connection, match_condition=MatchConditions.IfNotModified) + client.delete_data_source_connection( + data_source_connection, match_condition=MatchConditions.IfNotModified + ) def _test_delete_datasource_string_if_unchanged(self, client, storage_cs): ds_name = "delstrunch" - data_source_connection = self._create_data_source_connection(storage_cs, ds_name) + data_source_connection = self._create_data_source_connection( + storage_cs, ds_name + ) created = client.create_data_source_connection(data_source_connection) etag = created.e_tag @@ -128,7 +156,9 @@ def _test_delete_datasource_string_if_unchanged(self, client, storage_cs): client.create_or_update_data_source_connection(data_source_connection) # prepare data source connection - data_source_connection.e_tag = etag # reset to the original data source connection + data_source_connection.e_tag = ( + etag # reset to the original data source connection + ) with pytest.raises(ValueError): client.delete_data_source_connection( data_source_connection.name, diff --git a/sdk/search/azure-search-documents/tests/test_search_index_client_live.py b/sdk/search/azure-search-documents/tests/test_search_index_client_live.py index 149e4d46944c..d0ec01c47903 100644 --- a/sdk/search/azure-search-documents/tests/test_search_index_client_live.py +++ b/sdk/search/azure-search-documents/tests/test_search_index_client_live.py @@ -4,13 +4,19 @@ # license information. 
# -------------------------------------------------------------------------- +from datetime import timedelta + import pytest from azure.core import MatchConditions from azure.core.exceptions import HttpResponseError from azure.search.documents.indexes.models import ( AnalyzeTextOptions, CorsOptions, + FreshnessScoringFunction, + FreshnessScoringParameters, + SearchField, SearchIndex, + ScoringFunctionAggregation, ScoringProfile, SimpleField, SearchFieldDataType, @@ -42,7 +48,7 @@ def test_search_index_client(self, endpoint, index_name): def _test_get_service_statistics(self, client): result = client.get_service_statistics() assert isinstance(result, dict) - assert set(result.keys()) == {"counters", "limits"} + assert set(result.keys()) == {"counters", "indexers_runtime", "limits"} def _test_list_indexes_empty(self, client): result = client.list_indexes() @@ -146,10 +152,14 @@ def _test_create_or_update_indexes_if_unchanged(self, client): index.e_tag = etag with pytest.raises(HttpResponseError): - client.create_or_update_index(index, match_condition=MatchConditions.IfNotModified) + client.create_or_update_index( + index, match_condition=MatchConditions.IfNotModified + ) def _test_analyze_text(self, client, index_name): - analyze_request = AnalyzeTextOptions(text="One's ", analyzer_name="standard.lucene") + analyze_request = AnalyzeTextOptions( + text="One's ", analyzer_name="standard.lucene" + ) result = client.analyze_text(index_name, analyze_request) assert len(result.tokens) == 2 @@ -183,3 +193,116 @@ def _test_delete_indexes_if_unchanged(self, client): def _test_delete_indexes(self, client): for index in client.list_indexes(): client.delete_index(index) + + @SearchEnvVarPreparer() + @recorded_by_proxy + def test_purview_enabled_index(self, search_service_endpoint, search_service_name): + del search_service_name # unused + endpoint = search_service_endpoint + client = SearchIndexClient(endpoint, get_credential(), retry_backoff_factor=60) + + index_name = self.get_resource_name("purview-index") + fields = [ + SearchField( + name="id", + type=SearchFieldDataType.String, + key=True, + filterable=True, + sortable=True, + ), + SearchField( + name="sensitivityLabel", + type=SearchFieldDataType.String, + filterable=True, + sensitivity_label=True, + ), + ] + index = SearchIndex(name=index_name, fields=fields, purview_enabled=True) + + created = client.create_index(index) + try: + assert created.purview_enabled is True + for field in created.fields: + if field.name == "sensitivityLabel": + assert field.sensitivity_label is True + break + else: + raise AssertionError("Expected sensitivityLabel field to be present") + + fetched = client.get_index(index_name) + assert fetched.purview_enabled is True + for field in fetched.fields: + if field.name == "sensitivityLabel": + assert field.sensitivity_label is True + break + else: + raise AssertionError("Expected sensitivityLabel field to be present") + finally: + try: + client.delete_index(index_name) + except HttpResponseError: + pass + + @SearchEnvVarPreparer() + @recorded_by_proxy + def test_scoring_profile_product_aggregation( + self, search_service_endpoint, search_service_name + ): + del search_service_name # unused + endpoint = search_service_endpoint + client = SearchIndexClient(endpoint, get_credential(), retry_backoff_factor=60) + + index_name = self.get_resource_name("agg-product") + fields = [ + SimpleField(name="hotelId", type=SearchFieldDataType.String, key=True), + SimpleField( + name="lastUpdated", + 
type=SearchFieldDataType.DateTimeOffset, + filterable=True, + ), + ] + scoring_profile = ScoringProfile( + name="product-score", + function_aggregation=ScoringFunctionAggregation.PRODUCT, + functions=[ + FreshnessScoringFunction( + field_name="lastUpdated", + boost=2.5, + parameters=FreshnessScoringParameters( + boosting_duration=timedelta(days=7) + ), + ) + ], + ) + index = SearchIndex( + name=index_name, fields=fields, scoring_profiles=[scoring_profile] + ) + + created = client.create_index(index) + try: + assert ( + created.scoring_profiles[0].function_aggregation + == ScoringFunctionAggregation.PRODUCT + ) + + fetched = client.get_index(index_name) + assert ( + fetched.scoring_profiles[0].function_aggregation + == ScoringFunctionAggregation.PRODUCT + ) + + fetched.scoring_profiles[0].function_aggregation = ( + ScoringFunctionAggregation.SUM + ) + client.create_or_update_index(index=fetched) + + updated = client.get_index(index_name) + assert ( + updated.scoring_profiles[0].function_aggregation + == ScoringFunctionAggregation.SUM + ) + finally: + try: + client.delete_index(index_name) + except HttpResponseError: + pass diff --git a/sdk/search/azure-search-documents/tests/test_search_index_client_skillset_live.py b/sdk/search/azure-search-documents/tests/test_search_index_client_skillset_live.py index 284ce51594e5..f82b72749050 100644 --- a/sdk/search/azure-search-documents/tests/test_search_index_client_skillset_live.py +++ b/sdk/search/azure-search-documents/tests/test_search_index_client_skillset_live.py @@ -29,7 +29,9 @@ class TestSearchSkillset(AzureRecordedTestCase): @search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json") @recorded_by_proxy def test_skillset_crud(self, endpoint): - client = SearchIndexerClient(endpoint, get_credential(), retry_backoff_factor=60) + client = SearchIndexerClient( + endpoint, get_credential(), retry_backoff_factor=60 + ) self._test_create_skillset_validation() self._test_create_skillset(client) self._test_get_skillset(client) @@ -43,32 +45,50 @@ def test_skillset_crud(self, endpoint): def _test_create_skillset_validation(self): name = "test-ss-validation" with pytest.raises(ValueError) as err: - client = SearchIndexerClient("fake_endpoint", AzureKeyCredential("fake_key")) + client = SearchIndexerClient( + "fake_endpoint", AzureKeyCredential("fake_key") + ) s1 = EntityRecognitionSkill( - inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizationsS1")], + inputs=[ + InputFieldMappingEntry(name="text", source="/document/content") + ], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizationsS1" + ) + ], description="Skill Version 1", model_version="1", include_typeless_entities=True, ) s2 = EntityRecognitionSkill( - inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizationsS2")], + inputs=[ + InputFieldMappingEntry(name="text", source="/document/content") + ], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizationsS2" + ) + ], skill_version=EntityRecognitionSkillVersion.LATEST, description="Skill Version 3", model_version="3", include_typeless_entities=True, ) s3 = SentimentSkill( - inputs=[InputFieldMappingEntry(name="text", source="/document/content")], + inputs=[ + InputFieldMappingEntry(name="text", source="/document/content") + ], 
outputs=[OutputFieldMappingEntry(name="score", target_name="scoreS3")], skill_version=SentimentSkillVersion.V1, description="Sentiment V1", include_opinion_mining=True, ) - skillset = SearchIndexerSkillset(name=name, skills=list([s1, s2, s3]), description="desc") + skillset = SearchIndexerSkillset( + name=name, skills=list([s1, s2, s3]), description="desc" + ) client.create_skillset(skillset) assert "include_typeless_entities" in str(err.value) assert "model_version" in str(err.value) @@ -79,7 +99,11 @@ def _test_create_skillset(self, client): s1 = EntityRecognitionSkill( name="skill1", inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizationsS1")], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizationsS1" + ) + ], description="Skill Version 1", include_typeless_entities=True, ) @@ -87,7 +111,11 @@ def _test_create_skillset(self, client): s2 = EntityRecognitionSkill( name="skill2", inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizationsS2")], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizationsS2" + ) + ], skill_version=EntityRecognitionSkillVersion.LATEST, description="Skill Version 3", model_version="3", @@ -103,7 +131,9 @@ def _test_create_skillset(self, client): s4 = SentimentSkill( name="skill4", inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="confidenceScores", target_name="scoreS4")], + outputs=[ + OutputFieldMappingEntry(name="confidenceScores", target_name="scoreS4") + ], skill_version=SentimentSkillVersion.V3, description="Sentiment V3", include_opinion_mining=True, @@ -112,11 +142,15 @@ def _test_create_skillset(self, client): s5 = EntityLinkingSkill( name="skill5", inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="entities", target_name="entitiesS5")], + outputs=[ + OutputFieldMappingEntry(name="entities", target_name="entitiesS5") + ], minimum_precision=0.5, ) - skillset = SearchIndexerSkillset(name=name, skills=list([s1, s2, s3, s4, s5]), description="desc") + skillset = SearchIndexerSkillset( + name=name, skills=list([s1, s2, s3, s4, s5]), description="desc" + ) dict_skills = [skill.as_dict() for skill in skillset.skills] skillset.skills = dict_skills @@ -146,9 +180,15 @@ def _test_get_skillset(self, client): name = "test-ss-get" s = EntityRecognitionSkill( inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizations" + ) + ], + ) + skillset = SearchIndexerSkillset( + name=name, skills=list([s]), description="desc" ) - skillset = SearchIndexerSkillset(name=name, skills=list([s]), description="desc") client.create_skillset(skillset) result = client.get_skillset(name) assert isinstance(result, SearchIndexerSkillset) @@ -163,29 +203,47 @@ def _test_get_skillsets(self, client): name2 = "test-ss-list-2" s = EntityRecognitionSkill( inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizations" + ) + ], ) - 
skillset1 = SearchIndexerSkillset(name=name1, skills=list([s]), description="desc1") + skillset1 = SearchIndexerSkillset( + name=name1, skills=list([s]), description="desc1" + ) client.create_skillset(skillset1) - skillset2 = SearchIndexerSkillset(name=name2, skills=list([s]), description="desc2") + skillset2 = SearchIndexerSkillset( + name=name2, skills=list([s]), description="desc2" + ) client.create_skillset(skillset2) result = client.get_skillsets() assert isinstance(result, list) assert all(isinstance(x, SearchIndexerSkillset) for x in result) - assert set(x.name for x in result).intersection([name1, name2]) == set([name1, name2]) + assert set(x.name for x in result).intersection([name1, name2]) == set( + [name1, name2] + ) def _test_create_or_update_skillset(self, client): name = "test-ss-create-or-update" s = EntityRecognitionSkill( inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizations" + ) + ], ) - skillset1 = SearchIndexerSkillset(name=name, skills=list([s]), description="desc1") + skillset1 = SearchIndexerSkillset( + name=name, skills=list([s]), description="desc1" + ) client.create_or_update_skillset(skillset1) expected_count = len(client.get_skillsets()) - skillset2 = SearchIndexerSkillset(name=name, skills=list([s]), description="desc2") + skillset2 = SearchIndexerSkillset( + name=name, skills=list([s]), description="desc2" + ) client.create_or_update_skillset(skillset2) assert len(client.get_skillsets()) == expected_count @@ -198,13 +256,21 @@ def _test_create_or_update_skillset_inplace(self, client): name = "test-ss-create-or-update-inplace" s = EntityRecognitionSkill( inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizations" + ) + ], ) - skillset1 = SearchIndexerSkillset(name=name, skills=list([s]), description="desc1") + skillset1 = SearchIndexerSkillset( + name=name, skills=list([s]), description="desc1" + ) ss = client.create_or_update_skillset(skillset1) expected_count = len(client.get_skillsets()) - skillset2 = SearchIndexerSkillset(name=name, skills=[s], description="desc2", skillset=ss) + skillset2 = SearchIndexerSkillset( + name=name, skills=[s], description="desc2", skillset=ss + ) client.create_or_update_skillset(skillset2) assert len(client.get_skillsets()) == expected_count @@ -217,35 +283,53 @@ def _test_create_or_update_skillset_if_unchanged(self, client): name = "test-ss-create-or-update-unchanged" s = EntityRecognitionSkill( inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizations" + ) + ], ) - skillset1 = SearchIndexerSkillset(name=name, skills=list([s]), description="desc1") + skillset1 = SearchIndexerSkillset( + name=name, skills=list([s]), description="desc1" + ) ss = client.create_or_update_skillset(skillset1) ss.e_tag = "changed_etag" with pytest.raises(HttpResponseError): - client.create_or_update_skillset(ss, match_condition=MatchConditions.IfNotModified) + client.create_or_update_skillset( + ss, match_condition=MatchConditions.IfNotModified + ) def 
_test_delete_skillset_if_unchanged(self, client): name = "test-ss-deleted-unchanged" s = EntityRecognitionSkill( inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")], + outputs=[ + OutputFieldMappingEntry( + name="organizations", target_name="organizations" + ) + ], ) - skillset = SearchIndexerSkillset(name=name, skills=list([s]), description="desc") + skillset = SearchIndexerSkillset( + name=name, skills=list([s]), description="desc" + ) result = client.create_skillset(skillset) etag = result.e_tag - skillset = SearchIndexerSkillset(name=name, skills=list([s]), description="updated") + skillset = SearchIndexerSkillset( + name=name, skills=list([s]), description="updated" + ) updated = client.create_or_update_skillset(skillset) updated.e_tag = etag with pytest.raises(HttpResponseError): - client.delete_skillset(updated, match_condition=MatchConditions.IfNotModified) + client.delete_skillset( + updated, match_condition=MatchConditions.IfNotModified + ) def _test_delete_skillset(self, client): for skillset in client.get_skillset_names(): diff --git a/sdk/search/azure-search-documents/tests/test_search_index_client_synonym_map_live.py b/sdk/search/azure-search-documents/tests/test_search_index_client_synonym_map_live.py index 42a4154b0f45..b53e85a6b41c 100644 --- a/sdk/search/azure-search-documents/tests/test_search_index_client_synonym_map_live.py +++ b/sdk/search/azure-search-documents/tests/test_search_index_client_synonym_map_live.py @@ -77,7 +77,9 @@ def _test_delete_synonym_map_if_unchanged(self, client): result.e_tag = etag with pytest.raises(HttpResponseError): - client.delete_synonym_map(result, match_condition=MatchConditions.IfNotModified) + client.delete_synonym_map( + result, match_condition=MatchConditions.IfNotModified + ) client.delete_synonym_map(name) def _test_get_synonym_map(self, client): diff --git a/sdk/search/azure-search-documents/tests/test_search_indexer_client_live.py b/sdk/search/azure-search-documents/tests/test_search_indexer_client_live.py index 5dc6daacdab2..cd6dfb314b95 100644 --- a/sdk/search/azure-search-documents/tests/test_search_indexer_client_live.py +++ b/sdk/search/azure-search-documents/tests/test_search_indexer_client_live.py @@ -28,18 +28,28 @@ class TestSearchIndexerClientTest(AzureRecordedTestCase): def test_search_indexers(self, endpoint, **kwargs): storage_cs = kwargs.get("search_storage_connection_string") container_name = kwargs.get("search_storage_container_name") - client = SearchIndexerClient(endpoint, get_credential(), retry_backoff_factor=60) - index_client = SearchIndexClient(endpoint, get_credential(), retry_backoff_factor=60) + client = SearchIndexerClient( + endpoint, get_credential(), retry_backoff_factor=60 + ) + index_client = SearchIndexClient( + endpoint, get_credential(), retry_backoff_factor=60 + ) self._test_create_indexer(client, index_client, storage_cs, container_name) self._test_delete_indexer(client, index_client, storage_cs, container_name) self._test_get_indexer(client, index_client, storage_cs, container_name) self._test_list_indexer(client, index_client, storage_cs, container_name) - self._test_create_or_update_indexer(client, index_client, storage_cs, container_name) + self._test_create_or_update_indexer( + client, index_client, storage_cs, container_name + ) self._test_reset_indexer(client, index_client, storage_cs, container_name) self._test_run_indexer(client, index_client, storage_cs, container_name) 
self._test_get_indexer_status(client, index_client, storage_cs, container_name) - self._test_create_or_update_indexer_if_unchanged(client, index_client, storage_cs, container_name) - self._test_delete_indexer_if_unchanged(client, index_client, storage_cs, container_name) + self._test_create_or_update_indexer_if_unchanged( + client, index_client, storage_cs, container_name + ) + self._test_delete_indexer_if_unchanged( + client, index_client, storage_cs, container_name + ) def _prepare_indexer(self, client, index_client, storage_cs, name, container_name): data_source_connection = SearchIndexerDataSourceConnection( @@ -50,14 +60,20 @@ def _prepare_indexer(self, client, index_client, storage_cs, name, container_nam ) ds = client.create_data_source_connection(data_source_connection) - fields = [{"name": "hotelId", "type": "Edm.String", "key": True, "searchable": False}] + fields = [ + {"name": "hotelId", "type": "Edm.String", "key": True, "searchable": False} + ] index = SearchIndex(name=f"{name}-hotels", fields=fields) ind = index_client.create_index(index) - return SearchIndexer(name=name, data_source_name=ds.name, target_index_name=ind.name) + return SearchIndexer( + name=name, data_source_name=ds.name, target_index_name=ind.name + ) def _test_create_indexer(self, client, index_client, storage_cs, container_name): name = "create" - indexer = self._prepare_indexer(client, index_client, storage_cs, name, container_name) + indexer = self._prepare_indexer( + client, index_client, storage_cs, name, container_name + ) result = client.create_indexer(indexer) assert result.name == name assert result.target_index_name == f"{name}-hotels" @@ -65,7 +81,9 @@ def _test_create_indexer(self, client, index_client, storage_cs, container_name) def _test_delete_indexer(self, client, index_client, storage_cs, container_name): name = "delete" - indexer = self._prepare_indexer(client, index_client, storage_cs, name, container_name) + indexer = self._prepare_indexer( + client, index_client, storage_cs, name, container_name + ) client.create_indexer(indexer) expected = len(client.get_indexers()) - 1 client.delete_indexer(name) @@ -73,7 +91,9 @@ def _test_delete_indexer(self, client, index_client, storage_cs, container_name) def _test_get_indexer(self, client, index_client, storage_cs, container_name): name = "get" - indexer = self._prepare_indexer(client, index_client, storage_cs, name, container_name) + indexer = self._prepare_indexer( + client, index_client, storage_cs, name, container_name + ) client.create_indexer(indexer) result = client.get_indexer(name) assert result.name == name @@ -81,17 +101,27 @@ def _test_get_indexer(self, client, index_client, storage_cs, container_name): def _test_list_indexer(self, client, index_client, storage_cs, container_name): name1 = "list1" name2 = "list2" - indexer1 = self._prepare_indexer(client, index_client, storage_cs, name1, container_name) - indexer2 = self._prepare_indexer(client, index_client, storage_cs, name2, container_name) + indexer1 = self._prepare_indexer( + client, index_client, storage_cs, name1, container_name + ) + indexer2 = self._prepare_indexer( + client, index_client, storage_cs, name2, container_name + ) client.create_indexer(indexer1) client.create_indexer(indexer2) result = client.get_indexers() assert isinstance(result, list) - assert set(x.name for x in result).intersection([name1, name2]) == set([name1, name2]) + assert set(x.name for x in result).intersection([name1, name2]) == set( + [name1, name2] + ) - def _test_create_or_update_indexer(self, 
client, index_client, storage_cs, container_name): + def _test_create_or_update_indexer( + self, client, index_client, storage_cs, container_name + ): name = "cou" - indexer = self._prepare_indexer(client, index_client, storage_cs, name, container_name) + indexer = self._prepare_indexer( + client, index_client, storage_cs, name, container_name + ) client.create_indexer(indexer) expected = len(client.get_indexers()) if self.is_live: @@ -105,7 +135,9 @@ def _test_create_or_update_indexer(self, client, index_client, storage_cs, conta def _test_reset_indexer(self, client, index_client, storage_cs, container_name): name = "reset" - indexer = self._prepare_indexer(client, index_client, storage_cs, name, container_name) + indexer = self._prepare_indexer( + client, index_client, storage_cs, name, container_name + ) client.create_indexer(indexer) if self.is_live: time.sleep(10) @@ -117,21 +149,31 @@ def _test_reset_indexer(self, client, index_client, storage_cs, container_name): def _test_run_indexer(self, client, index_client, storage_cs, container_name): name = "run" - indexer = self._prepare_indexer(client, index_client, storage_cs, name, container_name) + indexer = self._prepare_indexer( + client, index_client, storage_cs, name, container_name + ) client.create_indexer(indexer) client.run_indexer(name) assert (client.get_indexer_status(name)).status == "running" - def _test_get_indexer_status(self, client, index_client, storage_cs, container_name): + def _test_get_indexer_status( + self, client, index_client, storage_cs, container_name + ): name = "get-status" - indexer = self._prepare_indexer(client, index_client, storage_cs, name, container_name) + indexer = self._prepare_indexer( + client, index_client, storage_cs, name, container_name + ) client.create_indexer(indexer) status = client.get_indexer_status(name) assert status.status is not None - def _test_create_or_update_indexer_if_unchanged(self, client, index_client, storage_cs, container_name): + def _test_create_or_update_indexer_if_unchanged( + self, client, index_client, storage_cs, container_name + ): name = "couunch" - indexer = self._prepare_indexer(client, index_client, storage_cs, name, container_name) + indexer = self._prepare_indexer( + client, index_client, storage_cs, name, container_name + ) created = client.create_indexer(indexer) etag = created.e_tag if self.is_live: @@ -141,11 +183,17 @@ def _test_create_or_update_indexer_if_unchanged(self, client, index_client, stor indexer.e_tag = etag with pytest.raises(HttpResponseError): - client.create_or_update_indexer(indexer, match_condition=MatchConditions.IfNotModified) + client.create_or_update_indexer( + indexer, match_condition=MatchConditions.IfNotModified + ) - def _test_delete_indexer_if_unchanged(self, client, index_client, storage_cs, container_name): + def _test_delete_indexer_if_unchanged( + self, client, index_client, storage_cs, container_name + ): name = "delunch" - indexer = self._prepare_indexer(client, index_client, storage_cs, name, container_name) + indexer = self._prepare_indexer( + client, index_client, storage_cs, name, container_name + ) result = client.create_indexer(indexer) etag = result.e_tag if self.is_live: @@ -155,4 +203,6 @@ def _test_delete_indexer_if_unchanged(self, client, index_client, storage_cs, co indexer.e_tag = etag with pytest.raises(HttpResponseError): - client.delete_indexer(indexer, match_condition=MatchConditions.IfNotModified) + client.delete_indexer( + indexer, match_condition=MatchConditions.IfNotModified + ) diff --git 
a/sdk/search/azure-search-documents/tests/test_serialization.py b/sdk/search/azure-search-documents/tests/test_serialization.py index 09a5f04caf32..0d532c9c7137 100644 --- a/sdk/search/azure-search-documents/tests/test_serialization.py +++ b/sdk/search/azure-search-documents/tests/test_serialization.py @@ -26,7 +26,9 @@ def test_serialize_search_index(): fields = [ SimpleField(name="hotelId", type=SearchFieldDataType.String, key=True), SimpleField(name="baseRate", type=SearchFieldDataType.Double), - SearchableField(name="description", type=SearchFieldDataType.String, collection=True), + SearchableField( + name="description", type=SearchFieldDataType.String, collection=True + ), SearchableField(name="hotelName", type=SearchFieldDataType.String), ComplexField( name="address", @@ -43,7 +45,10 @@ def test_serialize_search_index(): scoring_profiles = [] scoring_profiles.append(scoring_profile) index = SearchIndex( - name=new_index_name, fields=fields, scoring_profiles=scoring_profiles, cors_options=cors_options + name=new_index_name, + fields=fields, + scoring_profiles=scoring_profiles, + cors_options=cors_options, ) search_index_serialized = index.serialize() search_index = SearchIndex.deserialize(search_index_serialized) @@ -54,7 +59,9 @@ def test_serialize_search_indexer_skillset(): COGNITIVE_KEY = ... COGNITIVE_DESCRIPTION = ... - cognitive_services_account = CognitiveServicesAccountKey(key=COGNITIVE_KEY, description=COGNITIVE_DESCRIPTION) + cognitive_services_account = CognitiveServicesAccountKey( + key=COGNITIVE_KEY, description=COGNITIVE_DESCRIPTION + ) inputs = [InputFieldMappingEntry(name="text", source="/document/content")] @@ -71,7 +78,9 @@ def test_serialize_search_indexer_skillset(): skills = [split_skill] skillset = SearchIndexerSkillset( - name="Skillset", skills=skills, cognitive_services_account=cognitive_services_account + name="Skillset", + skills=skills, + cognitive_services_account=cognitive_services_account, ) serialized_skillset = skillset.serialize() @@ -84,7 +93,9 @@ def test_serialize_search_index_dict(): fields = [ SimpleField(name="hotelId", type=SearchFieldDataType.String, key=True), SimpleField(name="baseRate", type=SearchFieldDataType.Double), - SearchableField(name="description", type=SearchFieldDataType.String, collection=True), + SearchableField( + name="description", type=SearchFieldDataType.String, collection=True + ), SearchableField(name="hotelName", type=SearchFieldDataType.String), ComplexField( name="address", @@ -101,7 +112,10 @@ def test_serialize_search_index_dict(): scoring_profiles = [] scoring_profiles.append(scoring_profile) index = SearchIndex( - name=new_index_name, fields=fields, scoring_profiles=scoring_profiles, cors_options=cors_options + name=new_index_name, + fields=fields, + scoring_profiles=scoring_profiles, + cors_options=cors_options, ) search_index_serialized_dict = index.as_dict() search_index = SearchIndex.from_dict(search_index_serialized_dict)
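A minimal usage sketch of the facet aggregation-metric syntax the new facet tests exercise ("field,metric:<name>[,option:value]"), assuming a placeholder endpoint, index name, and API key, and the "rooms/baseRate" / "rooms/sleepsCount" fields from the hotel test schema:

    # Sketch only: endpoint, index name, and key below are placeholders, not values
    # from this change. The facet expressions mirror the strings used in
    # _test_get_search_facet_metrics above.
    from azure.core.credentials import AzureKeyCredential
    from azure.search.documents import SearchClient

    client = SearchClient(
        "https://<service>.search.windows.net",
        "hotels-sample-index",
        AzureKeyCredential("<api-key>"),
    )

    results = client.search(
        search_text="*",
        facets=[
            "rooms/baseRate,metric:avg",
            "rooms/baseRate,metric:min",
            "rooms/baseRate,metric:max",
            "rooms/sleepsCount,metric:cardinality,precisionThreshold:10",
        ],
    )

    facets = results.get_facets() or {}
    for bucket in facets.get("rooms/baseRate", []):
        # Each bucket surfaces whichever metric keys the service computed for it.
        for metric in ("avg", "min", "max"):
            if bucket.get(metric) is not None:
                print(metric, bucket[metric])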
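A sketch of the Purview-enabled index shape that test_purview_enabled_index round-trips; purview_enabled and sensitivity_label are the options introduced by this change, and the index and field names here are placeholders:

    # Sketch only: mirrors the fields created in test_purview_enabled_index above.
    from azure.search.documents.indexes.models import (
        SearchField,
        SearchFieldDataType,
        SearchIndex,
    )

    index = SearchIndex(
        name="example-purview-index",
        fields=[
            SearchField(
                name="id",
                type=SearchFieldDataType.String,
                key=True,
                filterable=True,
                sortable=True,
            ),
            SearchField(
                name="sensitivityLabel",
                type=SearchFieldDataType.String,
                filterable=True,
                sensitivity_label=True,  # new field option exercised by the test
            ),
        ],
        purview_enabled=True,  # new index option exercised by the test
    )
    # created = index_client.create_index(index)
    # assert created.purview_enabled is True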
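A sketch of the scoring profile that test_scoring_profile_product_aggregation creates; ScoringFunctionAggregation.PRODUCT is the aggregation value added by this change, and the index name is a placeholder:

    # Sketch only: same profile shape as the live test above.
    from datetime import timedelta

    from azure.search.documents.indexes.models import (
        FreshnessScoringFunction,
        FreshnessScoringParameters,
        ScoringFunctionAggregation,
        ScoringProfile,
        SearchFieldDataType,
        SearchIndex,
        SimpleField,
    )

    index = SearchIndex(
        name="example-product-aggregation",
        fields=[
            SimpleField(name="hotelId", type=SearchFieldDataType.String, key=True),
            SimpleField(
                name="lastUpdated",
                type=SearchFieldDataType.DateTimeOffset,
                filterable=True,
            ),
        ],
        scoring_profiles=[
            ScoringProfile(
                name="product-score",
                function_aggregation=ScoringFunctionAggregation.PRODUCT,
                functions=[
                    FreshnessScoringFunction(
                        field_name="lastUpdated",
                        boost=2.5,
                        parameters=FreshnessScoringParameters(
                            boosting_duration=timedelta(days=7)
                        ),
                    )
                ],
            )
        ],
    )
    # index_client.create_index(index) persists the profile; the live test above
    # shows the full round trip through create_or_update_index.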
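A sketch of the etag / If-Not-Modified pattern the data source, skillset, and indexer tests rely on: keep the e_tag from the created resource and pass MatchConditions.IfNotModified so a concurrent change raises HttpResponseError instead of being overwritten. Endpoint, key, and resource names are placeholders:

    # Sketch only: optimistic concurrency as exercised by the *_if_unchanged tests.
    from azure.core import MatchConditions
    from azure.core.credentials import AzureKeyCredential
    from azure.core.exceptions import HttpResponseError
    from azure.search.documents.indexes import SearchIndexerClient
    from azure.search.documents.indexes.models import (
        SearchIndexerDataContainer,
        SearchIndexerDataSourceConnection,
    )

    client = SearchIndexerClient(
        "https://<service>.search.windows.net", AzureKeyCredential("<api-key>")
    )

    connection = SearchIndexerDataSourceConnection(
        name="example-ds",
        type="azureblob",
        connection_string="<storage-connection-string>",
        container=SearchIndexerDataContainer(name="<container>"),
    )
    created = client.create_data_source_connection(connection)

    created.description = "updated"
    try:
        # Uses created.e_tag; fails if the service-side resource changed meanwhile.
        client.create_or_update_data_source_connection(
            created, match_condition=MatchConditions.IfNotModified
        )
    except HttpResponseError:
        # The resource changed since it was read; re-fetch and retry if desired.
        pass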