@@ -145,7 +145,8 @@ def execute(self,
                 files=None,
                 timeout=60.0,
                 experimental=False,
-                error_log_key="message"):
+                error_log_key="message",
+                raise_return_resource_not_found=False):
         """ Sends a request to the server for the execution of the
        given query.
 
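A minimal sketch of the new opt-in behavior, assuming a standard `Client` setup; the GraphQL query and IDs below are illustrative, not from this diff:

```python
import labelbox

client = labelbox.Client(api_key="YOUR_API_KEY")  # hypothetical key

try:
    # With the new flag, a RESOURCE_NOT_FOUND error raises immediately
    # instead of returning None for the caller to interpret.
    client.execute(
        "query GetProjectPyApi($id: ID!) { project(where: {id: $id}) { id } }",
        {"id": "nonexistent-id"},
        raise_return_resource_not_found=True)
except labelbox.exceptions.ResourceNotFoundError as e:
    print(f"resource missing: {e.message}")
```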
@@ -297,9 +298,13 @@ def get_error_status_code(error: dict) -> int:
         resource_not_found_error = check_errors(["RESOURCE_NOT_FOUND"],
                                                 "extensions", "code")
         if resource_not_found_error is not None:
-            # Return None and let the caller methods raise an exception
-            # as they already know which resource type and ID was requested
-            return None
+            if raise_return_resource_not_found:
+                raise labelbox.exceptions.ResourceNotFoundError(
+                    message=resource_not_found_error["message"])
+            else:
+                # Return None and let the caller methods raise an exception
+                # as they already know which resource type and ID was requested
+                return None
 
         resource_conflict_error = check_errors(["RESOURCE_CONFLICT"],
                                                "extensions", "code")
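The branch above keys off the standard GraphQL error shape: `check_errors` scans the response's `errors` array for entries whose nested `extensions.code` matches. A standalone sketch of a payload that would take the new raise path (the exact message text is illustrative):

```python
# Illustrative GraphQL error payload matching the
# ["RESOURCE_NOT_FOUND"], "extensions", "code" lookup above.
response = {
    "errors": [{
        "message": "Resource 'Project' not found for id 'abc123'",
        "extensions": {
            "code": "RESOURCE_NOT_FOUND"
        }
    }]
}

error = response["errors"][0]
if error["extensions"]["code"] == "RESOURCE_NOT_FOUND":
    # With raise_return_resource_not_found=True, execute() now raises
    # labelbox.exceptions.ResourceNotFoundError(message=error["message"]).
    print(error["message"])
```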
@@ -875,12 +880,12 @@ def create_offline_model_evaluation_project(self, **kwargs) -> Project:
 
         return self._create_project(**kwargs)
 
-
-    def create_prompt_response_generation_project(self,
-                                                  dataset_id: Optional[str] = None,
-                                                  dataset_name: Optional[str] = None,
-                                                  data_row_count: int = 100,
-                                                  **kwargs) -> Project:
+    def create_prompt_response_generation_project(
+            self,
+            dataset_id: Optional[str] = None,
+            dataset_name: Optional[str] = None,
+            data_row_count: int = 100,
+            **kwargs) -> Project:
         """
         Use this method exclusively to create a prompt and response generation project.
 
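A usage sketch for the reflowed signature, assuming the documented contract that exactly one of `dataset_id` or `dataset_name` is supplied; the project and dataset names are made up:

```python
from labelbox import Client, MediaType  # MediaType export path assumed

client = Client(api_key="YOUR_API_KEY")  # hypothetical key

# Creates the project plus a new dataset seeded with 100 empty rows.
project = client.create_prompt_response_generation_project(
    name="prompt-gen-demo",  # forwarded to _create_project via **kwargs
    dataset_name="prompt-gen-rows",
    data_row_count=100,
    media_type=MediaType.LLMPromptCreation)
```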
@@ -915,8 +920,7 @@ def create_prompt_response_generation_project(self,
 
         if dataset_id and dataset_name:
             raise ValueError(
-                "Only provide a dataset_name or dataset_id, not both."
-            )
+                "Only provide a dataset_name or dataset_id, not both.")
 
         if data_row_count <= 0:
             raise ValueError("data_row_count must be a positive integer.")
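Continuing with the client from the previous sketch, both guards fail fast before any request is sent:

```python
# Contradictory identifiers are rejected up front:
try:
    client.create_prompt_response_generation_project(
        name="demo", dataset_id="ds-123", dataset_name="ds-by-name")
except ValueError as err:
    print(err)  # Only provide a dataset_name or dataset_id, not both.

# As is a non-positive row count:
try:
    client.create_prompt_response_generation_project(
        name="demo", dataset_name="rows", data_row_count=0)
except ValueError as err:
    print(err)  # data_row_count must be a positive integer.
```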
@@ -928,7 +932,9 @@ def create_prompt_response_generation_project(self,
             append_to_existing_dataset = False
             dataset_name_or_id = dataset_name
 
-        if "media_type" in kwargs and kwargs.get("media_type") not in [MediaType.LLMPromptCreation, MediaType.LLMPromptResponseCreation]:
+        if "media_type" in kwargs and kwargs.get("media_type") not in [
+                MediaType.LLMPromptCreation, MediaType.LLMPromptResponseCreation
+        ]:
             raise ValueError(
                 "media_type must be either LLMPromptCreation or LLMPromptResponseCreation"
             )
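Only the two LLM prompt media types pass the reflowed membership check; any other `MediaType` value is rejected. Continuing the same client:

```python
from labelbox import MediaType

try:
    client.create_prompt_response_generation_project(
        name="demo", dataset_name="rows", media_type=MediaType.Image)
except ValueError as err:
    print(err)  # media_type must be either LLMPromptCreation or ...
```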
@@ -949,8 +955,7 @@ def create_response_creation_project(self, **kwargs) -> Project:
         Returns:
             Project: The created project
         """
-        kwargs[
-            "media_type"] = MediaType.Text  # Only Text is supported
+        kwargs["media_type"] = MediaType.Text  # Only Text is supported
         kwargs[
             "editor_task_type"] = EditorTaskType.ResponseCreation.value  # Special editor task type for response creation projects
 
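Since the method pins `media_type` and `editor_task_type` itself, a caller only supplies project-level kwargs. A minimal sketch, continuing the same client:

```python
# media_type is forced to Text and editor_task_type to ResponseCreation
# inside the method, so neither needs to be (or should be) passed in.
project = client.create_response_creation_project(name="response-demo")
print(project.media_type)  # expected: MediaType.Text
```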
@@ -1005,7 +1010,8 @@ def _create_project(self, **kwargs) -> Project:
 
         if quality_modes and quality_mode:
             raise ValueError(
-                "Cannot use both quality_modes and quality_mode at the same time. Use one or the other.")
+                "Cannot use both quality_modes and quality_mode at the same time. Use one or the other."
+            )
 
         if not quality_modes and not quality_mode:
             logger.info("Defaulting quality modes to Benchmark and Consensus.")
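`_create_project` backs the public project-creation helpers, so this guard surfaces to callers of, e.g., `create_project`. A sketch assuming the `quality_mode`/`quality_modes` kwargs are forwarded unchanged:

```python
from labelbox import QualityMode  # export path assumed

try:
    client.create_project(name="qa-demo",
                          media_type=MediaType.Image,
                          quality_modes={QualityMode.Benchmark},
                          quality_mode=QualityMode.Consensus)
except ValueError as err:
    print(err)  # Cannot use both quality_modes and quality_mode ...
```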
@@ -1021,12 +1027,11 @@ def _create_project(self, **kwargs) -> Project:
         if quality_mode:
             quality_modes_set = {quality_mode}
 
-        if (
-            quality_modes_set is None
-            or len(quality_modes_set) == 0
-            or quality_modes_set == {QualityMode.Benchmark, QualityMode.Consensus}
-        ):
-            data["auto_audit_number_of_labels"] = CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS
+        if (quality_modes_set is None or len(quality_modes_set) == 0 or
+                quality_modes_set
+                == {QualityMode.Benchmark, QualityMode.Consensus}):
+            data[
+                "auto_audit_number_of_labels"] = CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS
             data["auto_audit_percentage"] = CONSENSUS_AUTO_AUDIT_PERCENTAGE
             data["is_benchmark_enabled"] = True
             data["is_consensus_enabled"] = True
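The reflowed condition is behavior-preserving: unset, empty, and "both modes" selections all fall through to the consensus auto-audit defaults. A standalone check with a stand-in enum:

```python
from enum import Enum


class QualityMode(Enum):  # stand-in for labelbox's enum
    Benchmark = "BENCHMARK"
    Consensus = "CONSENSUS"


def uses_consensus_defaults(quality_modes_set):
    # Mirrors the reflowed condition above.
    return (quality_modes_set is None or len(quality_modes_set) == 0 or
            quality_modes_set == {QualityMode.Benchmark, QualityMode.Consensus})


assert uses_consensus_defaults(None)
assert uses_consensus_defaults(set())
assert uses_consensus_defaults({QualityMode.Benchmark, QualityMode.Consensus})
assert not uses_consensus_defaults({QualityMode.Benchmark})
```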
@@ -1297,10 +1302,12 @@ def create_ontology_from_feature_schemas(
                     f"Tool `{tool}` not in list of supported tools.")
             elif 'type' in feature_schema.normalized:
                 classification = feature_schema.normalized['type']
-                if classification in Classification.Type._value2member_map_.keys():
+                if classification in Classification.Type._value2member_map_.keys(
+                ):
                     Classification.Type(classification)
                     classifications.append(feature_schema.normalized)
-                elif classification in PromptResponseClassification.Type._value2member_map_.keys():
+                elif classification in PromptResponseClassification.Type._value2member_map_.keys(
+                ):
                     PromptResponseClassification.Type(classification)
                     classifications.append(feature_schema.normalized)
                 else:
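The `._value2member_map_.keys()` calls that the formatter split are the standard enum reverse-lookup idiom; a standalone illustration with a stand-in enum and a payload shaped like `feature_schema.normalized`:

```python
from enum import Enum


class Type(Enum):  # stand-in for Classification.Type
    TEXT = "text"
    RADIO = "radio"


normalized = {"type": "radio"}  # shape assumed for feature_schema.normalized

# Membership test against the enum's value->member map, then
# construction by value, mirroring the dispatch above.
if normalized["type"] in Type._value2member_map_.keys():
    print(Type(normalized["type"]))  # Type.RADIO
```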
@@ -1518,7 +1525,8 @@ def create_ontology(self,
             raise get_media_type_validation_error(media_type)
 
         if ontology_kind and OntologyKind.is_supported(ontology_kind):
-            media_type = OntologyKind.evaluate_ontology_kind_with_media_type(ontology_kind, media_type)
+            media_type = OntologyKind.evaluate_ontology_kind_with_media_type(
+                ontology_kind, media_type)
             editor_task_type_value = EditorTaskTypeMapper.to_editor_task_type(
                 ontology_kind, media_type).value
         elif ontology_kind:
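A sketch of the path this hunk touches: when a supported `ontology_kind` is passed, `create_ontology` re-derives the media type before computing the editor task type. The import paths and the minimal payload here are assumptions:

```python
from labelbox import Client, MediaType
from labelbox.schema.ontology_kind import OntologyKind  # path assumed

client = Client(api_key="YOUR_API_KEY")  # hypothetical key
normalized = {"tools": [], "classifications": []}  # minimal ontology payload

ontology = client.create_ontology("model-eval-ontology",
                                  normalized,
                                  media_type=MediaType.Conversational,
                                  ontology_kind=OntologyKind.ModelEvaluation)
```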