@@ -211,7 +211,7 @@ def annotations_by_data_type_v2(
     }


-@pytest.fixture
+@pytest.fixture(scope='session')
 def ontology():
     bbox_tool_with_nested_text = {
         'required':
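A session-scoped fixture is constructed once per test run and the same object is handed to every test that requests it, which suits a static ontology definition that is expensive to rebuild. A minimal sketch of the difference, with illustrative fixture names not taken from this suite:

import pytest

@pytest.fixture  # default 'function' scope: rebuilt for every test
def per_test_value():
    return object()

@pytest.fixture(scope='session')  # built once, shared for the whole run
def per_session_value():
    return object()

def test_uses_shared_value(per_session_value):
    # Every test in the session receives this identical instance, so the
    # fixture body (e.g. a large ontology dict) executes only once.
    assert per_session_value is not None

The trade-off is that all tests now share one mutable dict, so they must treat it as read-only.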
@@ -479,48 +479,45 @@ def func(project):


 @pytest.fixture
-def initial_dataset(client, rand_gen):
-    dataset = client.create_dataset(name=rand_gen(str))
-    yield dataset
-    dataset.delete()
-
-
-@pytest.fixture
-def hardcoded_datarow_id():
-    data_row_id = 'ck8q9q9qj00003g5z3q1q9q9q'
+def configured_project_datarow_id(configured_project):

     def get_data_row_id(indx=0):
-        return data_row_id
+        return configured_project.data_row_ids[indx]

     yield get_data_row_id


 @pytest.fixture
-def configured_project_datarow_id(configured_project):
+def configured_project_one_datarow_id(configured_project_with_one_data_row):

     def get_data_row_id(indx=0):
-        return configured_project.data_row_ids[indx]
+        return configured_project_with_one_data_row.data_row_ids[0]

     yield get_data_row_id


 @pytest.fixture
-def configured_project(configured_project_without_data_rows, initial_dataset,
-                       ontology, rand_gen, image_url):
+def configured_project(client, initial_dataset, ontology, rand_gen, image_url):
     start_time = time.time()
     dataset = initial_dataset
-    project = configured_project_without_data_rows
+    project = client.create_project(name=rand_gen(str),
+                                    queue_mode=QueueMode.Batch)
+    editor = list(
+        client.get_labeling_frontends(
+            where=LabelingFrontend.name == "editor"))[0]
+    project.setup(editor, ontology)
+    num_rows = 0

     data_row_ids = []
-    # print("Before creating data rows ", time.time() - start_time)
-    num_rows = 0
+
     for _ in range(len(ontology['tools']) + len(ontology['classifications'])):
         data_row_ids.append(dataset.create_data_row(row_data=image_url).uid)
         num_rows += 1
-    # print("After creating data rows ", time.time() - start_time)
-
-    pytest.data_row_report['times'] += time.time() - start_time
-    pytest.data_row_report['num_rows'] += num_rows
+    project._wait_until_data_rows_are_processed(data_row_ids=data_row_ids,
+                                                sleep_interval=3)
+    if pytest.data_row_report:
+        pytest.data_row_report['times'] += time.time() - start_time
+        pytest.data_row_report['num_rows'] += num_rows
     project.create_batch(
         rand_gen(str),
         data_row_ids,  # sample of data row objects
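The *_datarow_id fixtures yield a factory function rather than a bare id, so a test can select the n-th data row of whichever project variant it set up instead of binding to one hardcoded value. A hypothetical usage sketch, assuming the fixtures above (the test body itself is illustrative):

def test_annotation_import(configured_project, configured_project_datarow_id):
    # Ask the factory for the first data row attached to the project.
    data_row_id = configured_project_datarow_id(0)
    assert data_row_id in configured_project.data_row_ids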
@@ -580,15 +577,36 @@ def dataset_conversation_entity(client, rand_gen, conversation_entity_data_row,


 @pytest.fixture
-def configured_project_without_data_rows(client, ontology, rand_gen):
+def configured_project_with_one_data_row(client, ontology, rand_gen,
+                                         initial_dataset, image_url):
+    start_time = time.time()
+
     project = client.create_project(name=rand_gen(str),
                                     description=rand_gen(str),
                                     queue_mode=QueueMode.Batch)
     editor = list(
         client.get_labeling_frontends(
             where=LabelingFrontend.name == "editor"))[0]
     project.setup(editor, ontology)
+
+    data_row = initial_dataset.create_data_row(row_data=image_url)
+    data_row_ids = [data_row.uid]
+    project._wait_until_data_rows_are_processed(data_row_ids=data_row_ids,
+                                                sleep_interval=3)
+
+    if pytest.data_row_report:
+        pytest.data_row_report['times'] += time.time() - start_time
+        pytest.data_row_report['num_rows'] += 1
+    batch = project.create_batch(
+        rand_gen(str),
+        data_row_ids,  # sample of data row objects
+        5  # priority between 1(Highest) - 5(lowest)
+    )
+    project.data_row_ids = data_row_ids
+
     yield project
+
+    batch.delete()
     project.delete()

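In a yield fixture, everything before the yield is setup and everything after it is teardown, so this fixture removes the batch before deleting the project that owns it; cleaning up the innermost resource first avoids deleting a parent that still has children attached. A self-contained sketch of the pattern, with plain dicts standing in for the Labelbox project and batch:

import pytest

@pytest.fixture
def project_like():
    # Stand-ins only; the real fixture creates Labelbox objects.
    project = {'batches': []}
    batch = {'rows': ['row-1']}
    project['batches'].append(batch)
    yield project                     # test body runs here
    project['batches'].remove(batch)  # teardown: child resource first,
    project.clear()                   # then the parent

def test_project_has_batch(project_like):
    assert project_like['batches']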
@@ -597,16 +615,20 @@ def configured_project_without_data_rows(client, ontology, rand_gen):
 # In an example of a 'rectangle' we have extended to support multiple instances of the same tool type
 # TODO: we will support this approach in the future for all tools
 @pytest.fixture
-def prediction_id_mapping(configured_project_without_data_rows, ontology,
-                          request):
+def prediction_id_mapping(ontology, request):
     # Maps tool types to feature schema ids
     if 'configured_project' in request.fixturenames:
         data_row_id_factory = request.getfixturevalue(
             'configured_project_datarow_id')
-        project = configured_project
-    else:
+        project = request.getfixturevalue('configured_project')
+    elif 'hardcoded_datarow_id' in request.fixturenames:
         data_row_id_factory = request.getfixturevalue('hardcoded_datarow_id')
-        project = configured_project_without_data_rows
+        project = request.getfixturevalue('configured_project_with_ontology')
+    else:
+        data_row_id_factory = request.getfixturevalue(
+            'configured_project_one_datarow_id')
+        project = request.getfixturevalue(
+            'configured_project_with_one_data_row')

     ontology = project.ontology().normalized

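request.fixturenames lists every fixture name active for the current test, and request.getfixturevalue instantiates one lazily by name, so prediction_id_mapping builds only the project variant a test actually declared instead of always paying for all of them. A self-contained sketch of the same dispatch pattern, with illustrative fixture names:

import pytest

@pytest.fixture
def fast_backend():
    return 'fast'

@pytest.fixture
def slow_backend():
    return 'slow'

@pytest.fixture
def backend(request):
    # Instantiate only the variant the requesting test asked for by name.
    if 'slow_backend' in request.fixturenames:
        return request.getfixturevalue('slow_backend')
    return request.getfixturevalue('fast_backend')

def test_defaults_to_fast(backend):
    assert backend == 'fast'

def test_picks_slow(slow_backend, backend):
    assert backend == 'slow'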
@@ -646,46 +668,6 @@ def prediction_id_mapping(configured_project_without_data_rows, ontology,
     return result


-@pytest.fixture
-def prediction_id_mapping_datarow_id():
-    # Maps tool types to feature schema ids
-    data_row_id = 'ck8q9q9qj00003g5z3q1q9q9q'
-    result = {}
-
-    for _, tool in enumerate(ontology['tools'] + ontology['classifications']):
-        if 'tool' in tool:
-            tool_type = tool['tool']
-        else:
-            tool_type = tool[
-                'type'] if 'scope' not in tool else f"{tool['type']}_{tool['scope']}"  # so 'checklist' of 'checklist_index'
-
-        # TODO: remove this once we have a better way to associate multiple tools instances with a single tool type
-        if tool_type == 'rectangle':
-            value = {
-                "uuid": str(uuid.uuid4()),
-                "schemaId": tool['featureSchemaId'],
-                "name": tool['name'],
-                "dataRow": {
-                    "id": data_row_id,
-                },
-                'tool': tool
-            }
-            if tool_type not in result:
-                result[tool_type] = []
-            result[tool_type].append(value)
-        else:
-            result[tool_type] = {
-                "uuid": str(uuid.uuid4()),
-                "schemaId": tool['featureSchemaId'],
-                "name": tool['name'],
-                "dataRow": {
-                    "id": data_row_id,
-                },
-                'tool': tool
-            }
-    return result
-
-
 @pytest.fixture
 def polygon_inference(prediction_id_mapping):
     polygon = prediction_id_mapping['polygon'].copy()
@@ -1079,7 +1061,6 @@ def model_run_with_training_metadata(rand_gen, model):
 @pytest.fixture
 def model_run_with_data_rows(client, configured_project, model_run_predictions,
                              model_run, wait_for_label_processing):
-    start_time = time.time()
     configured_project.enable_model_assisted_labeling()

     upload_task = LabelImport.create_from_objects(
@@ -1093,7 +1074,6 @@ def model_run_with_data_rows(client, configured_project, model_run_predictions,
     labels = wait_for_label_processing(configured_project)
     label_ids = [label.uid for label in labels]
     model_run.upsert_labels(label_ids)
-    print(f"model_run_with_data_rows: {time.time() - start_time}")
     yield model_run
     model_run.delete()
     # TODO: Delete resources when that is possible ..