Skip to content

Commit 0487dd9

Browse files
authored
Merge pull request #922 from Labelbox/ms/exports-v2-update
fix exports v2, enable tests
2 parents db2db12 + 3f8e7a5 commit 0487dd9

File tree

5 files changed

+52
-54
lines changed

5 files changed

+52
-54
lines changed

labelbox/schema/export_params.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ class DataRowParams(TypedDict):
1616

1717
class ProjectExportParams(DataRowParams):
1818
project_details: Optional[bool]
19-
labels: Optional[bool]
19+
label_details: Optional[bool]
2020
performance_details: Optional[bool]
2121

2222

labelbox/schema/model_run.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -487,10 +487,10 @@ def export_v2(self,
487487
# Arguments locked based on execution context
488488
"includeProjectDetails":
489489
False,
490-
"includeLabels":
491-
False,
492490
"includePerformanceDetails":
493491
False,
492+
"includeLabelDetails":
493+
False
494494
},
495495
}
496496
}

labelbox/schema/project.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -406,8 +406,8 @@ def export_v2(self,
406406
"metadata_fields": False,
407407
"data_row_details": False,
408408
"project_details": False,
409-
"labels": False,
410-
"performance_details": False
409+
"performance_details": False,
410+
"label_details": False
411411
})
412412

413413
mutation_name = "exportDataRowsInProject"
@@ -431,10 +431,10 @@ def export_v2(self,
431431
_params.get('data_row_details', False),
432432
"includeProjectDetails":
433433
_params.get('project_details', False),
434-
"includeLabels":
435-
_params.get('labels', False),
436434
"includePerformanceDetails":
437435
_params.get('performance_details', False),
436+
"includeLabelDetails":
437+
_params.get('label_details', False)
438438
},
439439
}
440440
}

tests/integration/annotation_import/test_model_run.py

Lines changed: 45 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,51 @@ def test_model_run_export_labels(model_run_with_model_run_data_rows):
117117
assert len(labels) == 3
118118

119119

120-
@pytest.mark.skip(reason="feature under development")
120+
@pytest.mark.skipif(condition=os.environ['LABELBOX_TEST_ENVIRON'] == "onprem",
121+
reason="does not work for onprem")
122+
def test_model_run_status(model_run_with_model_run_data_rows):
123+
124+
def get_model_run_status():
125+
return model_run_with_model_run_data_rows.client.execute(
126+
"""query trainingPipelinePyApi($modelRunId: ID!) {
127+
trainingPipeline(where: {id : $modelRunId}) {status, errorMessage, metadata}}
128+
""", {'modelRunId': model_run_with_model_run_data_rows.uid},
129+
experimental=True)['trainingPipeline']
130+
131+
model_run_status = get_model_run_status()
132+
assert model_run_status['status'] is None
133+
assert model_run_status['metadata'] is None
134+
assert model_run_status['errorMessage'] is None
135+
136+
status = "COMPLETE"
137+
metadata = {'key1': 'value1'}
138+
errorMessage = "an error"
139+
model_run_with_model_run_data_rows.update_status(status, metadata,
140+
errorMessage)
141+
142+
model_run_status = get_model_run_status()
143+
assert model_run_status['status'] == status
144+
assert model_run_status['metadata'] == metadata
145+
assert model_run_status['errorMessage'] == errorMessage
146+
147+
extra_metadata = {'key2': 'value2'}
148+
model_run_with_model_run_data_rows.update_status(status, extra_metadata)
149+
model_run_status = get_model_run_status()
150+
assert model_run_status['status'] == status
151+
assert model_run_status['metadata'] == {**metadata, **extra_metadata}
152+
assert model_run_status['errorMessage'] == errorMessage
153+
154+
status = ModelRun.Status.FAILED
155+
model_run_with_model_run_data_rows.update_status(status, metadata,
156+
errorMessage)
157+
model_run_status = get_model_run_status()
158+
assert model_run_status['status'] == status.value
159+
160+
with pytest.raises(ValueError):
161+
model_run_with_model_run_data_rows.update_status(
162+
"INVALID", metadata, errorMessage)
163+
164+
121165
def test_model_run_export_v2(model_run_with_model_run_data_rows,
122166
configured_project):
123167
task_name = "test_task"
@@ -164,51 +208,6 @@ def download_result(result_url):
164208
assert prediction_id in label_ids_set
165209

166210

167-
@pytest.mark.skipif(condition=os.environ['LABELBOX_TEST_ENVIRON'] == "onprem",
168-
reason="does not work for onprem")
169-
def test_model_run_status(model_run_with_model_run_data_rows):
170-
171-
def get_model_run_status():
172-
return model_run_with_model_run_data_rows.client.execute(
173-
"""query trainingPipelinePyApi($modelRunId: ID!) {
174-
trainingPipeline(where: {id : $modelRunId}) {status, errorMessage, metadata}}
175-
""", {'modelRunId': model_run_with_model_run_data_rows.uid},
176-
experimental=True)['trainingPipeline']
177-
178-
model_run_status = get_model_run_status()
179-
assert model_run_status['status'] is None
180-
assert model_run_status['metadata'] is None
181-
assert model_run_status['errorMessage'] is None
182-
183-
status = "COMPLETE"
184-
metadata = {'key1': 'value1'}
185-
errorMessage = "an error"
186-
model_run_with_model_run_data_rows.update_status(status, metadata,
187-
errorMessage)
188-
189-
model_run_status = get_model_run_status()
190-
assert model_run_status['status'] == status
191-
assert model_run_status['metadata'] == metadata
192-
assert model_run_status['errorMessage'] == errorMessage
193-
194-
extra_metadata = {'key2': 'value2'}
195-
model_run_with_model_run_data_rows.update_status(status, extra_metadata)
196-
model_run_status = get_model_run_status()
197-
assert model_run_status['status'] == status
198-
assert model_run_status['metadata'] == {**metadata, **extra_metadata}
199-
assert model_run_status['errorMessage'] == errorMessage
200-
201-
status = ModelRun.Status.FAILED
202-
model_run_with_model_run_data_rows.update_status(status, metadata,
203-
errorMessage)
204-
model_run_status = get_model_run_status()
205-
assert model_run_status['status'] == status.value
206-
207-
with pytest.raises(ValueError):
208-
model_run_with_model_run_data_rows.update_status(
209-
"INVALID", metadata, errorMessage)
210-
211-
212211
def test_model_run_split_assignment(model_run, dataset, image_url):
213212
n_data_rows = 10
214213
data_rows = dataset.create_data_rows([{

tests/integration/test_project.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,6 @@ def test_project(client, rand_gen):
4242
assert project not in projects
4343

4444

45-
@pytest.mark.skip(reason="feature under development")
4645
def test_project_export_v2(configured_project_with_label):
4746
project, _, _, label = configured_project_with_label
4847
label_id = label.uid

0 commit comments

Comments (0)