
Commit a85420c

[AL-5942] Exports v2 integration tests (#1187)
1 parent ef21e2c commit a85420c

17 files changed: +898, -113 lines

tests/integration/annotation_import/test_mea_prediction_import.py

Lines changed: 1 addition & 0 deletions

@@ -80,6 +80,7 @@ def test_create_from_objects_all_project_labels(
 def test_model_run_project_labels(model_run_with_all_project_labels,
                                   model_run_predictions):
     model_run = model_run_with_all_project_labels
+    # TODO: Move to export_v2
     model_run_exported_labels = model_run.export_labels(download=True)
     labels_indexed_by_schema_id = {}
     for label in model_run_exported_labels:
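The TODO added here points at replacing the export_labels call with the Exports v2 flow. A rough sketch of what that replacement might look like, based only on the export_v2 task lifecycle visible in the helper removed from test_model_run.py below (create task, wait, check status, read result); the task name, params, and result handling are assumptions, not part of this commit:

def export_model_run_labels_v2(model_run):
    # Hypothetical sketch: export_v2 equivalent of the export_labels call
    # flagged by the TODO above. Assumes ModelRun.export_v2 behaves as in
    # the removed _model_run_export_v2_results helper shown below.
    task = model_run.export_v2("mea-prediction-labels",
                               params={"predictions": True})
    task.wait_till_done()
    assert task.status == "COMPLETE"
    assert task.errors is None
    return task.result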

tests/integration/annotation_import/test_model_run.py

Lines changed: 0 additions & 49 deletions

@@ -7,23 +7,6 @@
 from labelbox import DataSplit, ModelRun


-def _model_run_export_v2_results(model_run, task_name, params, num_retries=5):
-    """Export model run results and retry if no results are returned."""
-    while (num_retries > 0):
-        task = model_run.export_v2(task_name, params=params)
-        assert task.name == task_name
-        task.wait_till_done()
-        assert task.status == "COMPLETE"
-        assert task.errors is None
-        task_results = task.result
-        if len(task_results) == 0:
-            num_retries -= 1
-            time.sleep(5)
-        else:
-            return task_results
-    return []
-
-
 def test_model_run(client, configured_project_with_label, data_row, rand_gen):
     project, _, _, label = configured_project_with_label
     label_id = label.uid
@@ -182,38 +165,6 @@ def get_model_run_status():
                              errorMessage)


-def test_model_run_export_v2(model_run_with_data_rows, configured_project):
-    task_name = "test_task"
-    media_attributes = True
-    params = {"media_attributes": media_attributes, "predictions": True}
-    task_results = _model_run_export_v2_results(model_run_with_data_rows,
-                                                task_name, params)
-    label_ids = [label.uid for label in configured_project.labels()]
-    label_ids_set = set(label_ids)
-
-    assert len(task_results) == len(label_ids)
-
-    for task_result in task_results:
-        # Check export param handling
-        if media_attributes:
-            assert 'media_attributes' in task_result and task_result[
-                'media_attributes'] is not None
-        else:
-            assert 'media_attributes' not in task_result or task_result[
-                'media_attributes'] is None
-        model_run = task_result['experiments'][
-            model_run_with_data_rows.model_id]['runs'][
-                model_run_with_data_rows.uid]
-        task_label_ids_set = set(
-            map(lambda label: label['id'], model_run['labels']))
-        task_prediction_ids_set = set(
-            map(lambda prediction: prediction['id'], model_run['predictions']))
-        for label_id in task_label_ids_set:
-            assert label_id in label_ids_set
-        for prediction_id in task_prediction_ids_set:
-            assert prediction_id in label_ids_set
-
-
 def test_model_run_split_assignment_by_data_row_ids(model_run, dataset,
                                                     image_url):
     n_data_rows = 10
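The deleted helper shows the retry pattern used for Exports v2 tasks: poll, wait for completion, and retry when the result comes back empty. A sketch of how that helper could be generalized so export tests for different objects share it; the function name, its new location, and the idea that Project or Dataset exports reuse it are assumptions, since the new test files are not shown in this excerpt:

import time

def _export_v2_results(exportable, task_name, params, num_retries=5):
    # Generalized form of the helper removed above: `exportable` can be any
    # SDK object exposing export_v2 with a ModelRun-like task interface.
    while num_retries > 0:
        task = exportable.export_v2(task_name, params=params)
        task.wait_till_done()
        assert task.status == "COMPLETE"
        assert task.errors is None
        if task.result:
            return task.result
        # Empty result: back off briefly and retry, as the removed helper did.
        num_retries -= 1
        time.sleep(5)
    return []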

tests/integration/conftest.py

Lines changed: 15 additions & 1 deletion

@@ -11,7 +11,7 @@
 import requests

 from labelbox import Client, MediaType
-from labelbox import LabelingFrontend
+from labelbox import LabelingFrontend, Dataset
 from labelbox import OntologyBuilder, Tool, Option, Classification, MediaType
 from labelbox.orm import query
 from labelbox.pagination import PaginatedCollection
@@ -22,6 +22,7 @@
 from labelbox.schema.user import User

 IMG_URL = "https://picsum.photos/200/300.jpg"
+SMALL_DATASET_URL = "https://storage.googleapis.com/lb-artifacts-testing-public/sdk_integration_test/potato.jpeg"
 DATA_ROW_PROCESSING_WAIT_TIMEOUT_SECONDS = 30
 DATA_ROW_PROCESSING_WAIT_SLEEP_INTERNAL_SECONDS = 5

@@ -247,6 +248,19 @@ def unique_dataset(client, rand_gen):
     dataset.delete()


+@pytest.fixture
+def small_dataset(dataset: Dataset):
+    task = dataset.create_data_rows([
+        {
+            "row_data": SMALL_DATASET_URL,
+            "external_id": "my-image"
+        },
+    ] * 2)
+    task.wait_till_done()
+
+    yield dataset
+
+
 @pytest.fixture
 def data_row(dataset, image_url, rand_gen):
     task = dataset.create_data_rows([
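The new small_dataset fixture uploads the same small image twice and yields the dataset once row creation finishes. A minimal sketch of how one of the new Exports v2 tests might consume it; the test name, the Dataset.export_v2 signature, and the params used are assumptions, since the actual new test files are not part of this excerpt:

def test_dataset_export_v2(small_dataset):
    # Hypothetical consumer of the fixture; assumes Dataset.export_v2 follows
    # the same task lifecycle as ModelRun.export_v2 shown above.
    task = small_dataset.export_v2(task_name="test-small-dataset-export",
                                   params={"data_row_details": True})
    task.wait_till_done()
    assert task.status == "COMPLETE"
    assert task.errors is None
    # The fixture creates two data rows, so two results are expected.
    assert len(task.result) == 2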
