@@ -117,7 +117,51 @@ def test_model_run_export_labels(model_run_with_model_run_data_rows):
     assert len(labels) == 3
 
 
-@pytest.mark.skip(reason="feature under development")
+@pytest.mark.skipif(condition=os.environ['LABELBOX_TEST_ENVIRON'] == "onprem",
+                    reason="does not work for onprem")
+def test_model_run_status(model_run_with_model_run_data_rows):
+
+    def get_model_run_status():
+        return model_run_with_model_run_data_rows.client.execute(
+            """query trainingPipelinePyApi($modelRunId: ID!) {
+            trainingPipeline(where: {id : $modelRunId}) {status, errorMessage, metadata}}
+        """, {'modelRunId': model_run_with_model_run_data_rows.uid},
+            experimental=True)['trainingPipeline']
+
+    model_run_status = get_model_run_status()
+    assert model_run_status['status'] is None
+    assert model_run_status['metadata'] is None
+    assert model_run_status['errorMessage'] is None
+
+    status = "COMPLETE"
+    metadata = {'key1': 'value1'}
+    errorMessage = "an error"
+    model_run_with_model_run_data_rows.update_status(status, metadata,
+                                                     errorMessage)
+
+    model_run_status = get_model_run_status()
+    assert model_run_status['status'] == status
+    assert model_run_status['metadata'] == metadata
+    assert model_run_status['errorMessage'] == errorMessage
+
+    extra_metadata = {'key2': 'value2'}
+    model_run_with_model_run_data_rows.update_status(status, extra_metadata)
+    model_run_status = get_model_run_status()
+    assert model_run_status['status'] == status
+    assert model_run_status['metadata'] == {**metadata, **extra_metadata}
+    assert model_run_status['errorMessage'] == errorMessage
+
+    status = ModelRun.Status.FAILED
+    model_run_with_model_run_data_rows.update_status(status, metadata,
+                                                     errorMessage)
+    model_run_status = get_model_run_status()
+    assert model_run_status['status'] == status.value
+
+    with pytest.raises(ValueError):
+        model_run_with_model_run_data_rows.update_status(
+            "INVALID", metadata, errorMessage)
+
+
 def test_model_run_export_v2(model_run_with_model_run_data_rows,
                              configured_project):
     task_name = "test_task"
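
Note on the hunk above: the unconditional skip is replaced by an environment-conditional skipif. As a minimal sketch of the same gating pattern, assuming only stock pytest and os (the test name here is hypothetical): os.environ['...'] raises KeyError at collection time when the variable is unset, so .get() is the defensive variant.

    import os

    import pytest

    # skipif conditions are evaluated at collection time, so a missing
    # variable must not raise; .get() returns None instead of KeyError.
    @pytest.mark.skipif(os.environ.get('LABELBOX_TEST_ENVIRON') == "onprem",
                        reason="does not work for onprem")
    def test_example_gated_by_environ():
        assert True
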
@@ -164,51 +208,6 @@ def download_result(result_url):
     assert prediction_id in label_ids_set
 
 
-@pytest.mark.skipif(condition=os.environ['LABELBOX_TEST_ENVIRON'] == "onprem",
-                    reason="does not work for onprem")
-def test_model_run_status(model_run_with_model_run_data_rows):
-
-    def get_model_run_status():
-        return model_run_with_model_run_data_rows.client.execute(
-            """query trainingPipelinePyApi($modelRunId: ID!) {
-            trainingPipeline(where: {id : $modelRunId}) {status, errorMessage, metadata}}
-        """, {'modelRunId': model_run_with_model_run_data_rows.uid},
-            experimental=True)['trainingPipeline']
-
-    model_run_status = get_model_run_status()
-    assert model_run_status['status'] is None
-    assert model_run_status['metadata'] is None
-    assert model_run_status['errorMessage'] is None
-
-    status = "COMPLETE"
-    metadata = {'key1': 'value1'}
-    errorMessage = "an error"
-    model_run_with_model_run_data_rows.update_status(status, metadata,
-                                                     errorMessage)
-
-    model_run_status = get_model_run_status()
-    assert model_run_status['status'] == status
-    assert model_run_status['metadata'] == metadata
-    assert model_run_status['errorMessage'] == errorMessage
-
-    extra_metadata = {'key2': 'value2'}
-    model_run_with_model_run_data_rows.update_status(status, extra_metadata)
-    model_run_status = get_model_run_status()
-    assert model_run_status['status'] == status
-    assert model_run_status['metadata'] == {**metadata, **extra_metadata}
-    assert model_run_status['errorMessage'] == errorMessage
-
-    status = ModelRun.Status.FAILED
-    model_run_with_model_run_data_rows.update_status(status, metadata,
-                                                     errorMessage)
-    model_run_status = get_model_run_status()
-    assert model_run_status['status'] == status.value
-
-    with pytest.raises(ValueError):
-        model_run_with_model_run_data_rows.update_status(
-            "INVALID", metadata, errorMessage)
-
-
 def test_model_run_split_assignment(model_run, dataset, image_url):
     n_data_rows = 10
     data_rows = dataset.create_data_rows([{
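
For reference, the calling convention the moved test exercises, as a hedged sketch: model_run stands in for any ModelRun instance, the import path is an assumption (this module's imports sit outside the diff), and the metadata-merge behavior is inferred from the {**metadata, **extra_metadata} assertion above.

    from labelbox import ModelRun  # import path assumed, not shown in this diff

    def sketch_update_status(model_run):
        # Positional shape taken from the test: (status, metadata, errorMessage).
        model_run.update_status("COMPLETE", {'key1': 'value1'}, "an error")
        # Metadata from successive calls is merged, not replaced.
        model_run.update_status("COMPLETE", {'key2': 'value2'})
        # The status argument also accepts the enum form.
        model_run.update_status(ModelRun.Status.FAILED, {'key1': 'value1'})
        # An unrecognized status string raises ValueError client-side:
        # model_run.update_status("INVALID", {}, "boom")
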