Commit 28f099e

Author: Matt Sokoloff (committed)

exports v2 task support

1 parent 0487dd9 · commit 28f099e

File tree

labelbox/schema/task.py
tests/integration/test_project.py

2 files changed (+23 −24 lines)


labelbox/schema/task.py

Lines changed: 21 additions & 14 deletions
@@ -39,6 +39,7 @@ class Task(DbObject):
     status = Field.String("status")
     completion_percentage = Field.Float("completion_percentage")
     result_url = Field.String("result_url", "result")
+    errors_url = Field.String("errors_url", "errors")
     type = Field.String("type")
     _user: Optional["User"] = None

@@ -65,7 +66,9 @@ def wait_till_done(self, timeout_seconds=300) -> None:
         check_frequency = 2  # frequency of checking, in seconds
         while True:
             if self.status != "IN_PROGRESS":
-                if self.errors is not None:
+                # self.errors fetches the error content.
+                # This first condition prevents us from downloading the content for v2 exports
+                if self.errors_url is not None or self.errors is not None:
                     logger.warning(
                         "There are errors present. Please look at `task.errors` for more details"
                     )
@@ -83,14 +86,15 @@ def wait_till_done(self, timeout_seconds=300) -> None:
     def errors(self) -> Optional[Dict[str, Any]]:
         """ Fetch the error associated with an import task.
         """
-        # TODO: We should handle error messages for export v2 tasks in the future.
-        if self.name != 'JSON Import':
-            return None
-        if self.status == "FAILED":
-            result = self._fetch_remote_json()
-            return result["error"]
-        elif self.status == "COMPLETE":
-            return self.failed_data_rows
+        if self.name == 'JSON Import':
+            if self.status == "FAILED":
+                result = self._fetch_remote_json()
+                return result["error"]
+            elif self.status == "COMPLETE":
+                return self.failed_data_rows
+        elif self.type == "export-data-rows":
+            if self.errors_url:
+                return self._fetch_remote_json(url=self.errors_url)
         return None

     @property
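With this hunk, `task.errors` dispatches on the kind of task: 'JSON Import' tasks keep the existing behaviour, while export v2 tasks (`type == "export-data-rows"`) download the payload behind the new `errors_url` field. Below is a minimal caller-side sketch of how that could be used, assuming `task` is a `Task` already returned by a v2 export job; client setup and job creation are not part of this diff.

# Sketch only: assumes `task` is a labelbox Task produced by an export v2 job,
# i.e. task.type == "export-data-rows". How the job is started is not shown here.
task.wait_till_done(timeout_seconds=600)

if task.errors is not None:
    # For export v2 tasks this now downloads the JSON behind task.errors_url.
    print("Export reported errors:", task.errors)
else:
    # task.result downloads and parses the payload behind task.result_url.
    for row in task.result:
        # Each row carries a "projects" mapping keyed by project uid,
        # as exercised by the updated integration test below.
        print(list(row["projects"].keys()))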
@@ -122,12 +126,15 @@ def failed_data_rows(self) -> Optional[Dict[str, Any]]:
         return None

     @lru_cache()
-    def _fetch_remote_json(self) -> Dict[str, Any]:
+    def _fetch_remote_json(self, url: Optional[str] = None) -> Dict[str, Any]:
         """ Function for fetching and caching the result data.
         """
+        if url is None:
+            # for backwards compatability
+            url = self.result_url

-        def download_result():
-            response = requests.get(self.result_url)
+        def _download_file(url):
+            response = requests.get(url)
             response.raise_for_status()
             try:
                 return response.json()
@@ -145,11 +152,11 @@ def download_result():
                 )

         if self.status != "IN_PROGRESS":
-            return download_result()
+            return _download_file(url)
         else:
             self.wait_till_done(timeout_seconds=600)
             if self.status == "IN_PROGRESS":
                 raise ValueError(
                     "Job status still in `IN_PROGRESS`. The result is not available. Call task.wait_till_done() with a larger timeout or contact support."
                 )
-            return download_result()
+            return _download_file(url)
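A side effect worth noting: `_fetch_remote_json` keeps its `@lru_cache()` decorator, so the cache key now includes the optional `url` argument and the result payload and the error payload are cached independently. The sketch below calls the private helper purely to illustrate that behaviour; it is not part of the public API and assumes `task.errors_url` is set.

# Illustration only: _fetch_remote_json is a private helper. These calls show the
# caching implied by @lru_cache() once `url` is part of the signature.
payload = task._fetch_remote_json()                            # defaults to task.result_url
payload_again = task._fetch_remote_json()                      # same key, served from the cache
errors_payload = task._fetch_remote_json(url=task.errors_url)  # different key, separate request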

tests/integration/test_project.py

Lines changed: 2 additions & 10 deletions
@@ -60,17 +60,9 @@ def test_project_export_v2(configured_project_with_label):
     assert task.name == task_name
     task.wait_till_done()
     assert task.status == "COMPLETE"
+    assert task.errors is None

-    def download_result(result_url):
-        response = requests.get(result_url)
-        response.raise_for_status()
-        data = [json.loads(line) for line in response.text.splitlines()]
-        return data
-
-    task_results = download_result(task.result_url)
-
-    for task_result in task_results:
-        assert len(task_result['errors']) == 0
+    for task_result in task.result:
         task_project = task_result['projects'][project.uid]
         task_project_label_ids_set = set(
             map(lambda prediction: prediction['id'], task_project['labels']))
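For comparison, the helper deleted from the test is equivalent to the standalone snippet below; the removed code fetched `task.result_url` and parsed it as newline-delimited JSON, which iterating `task.result` now handles internally via `_fetch_remote_json`.

import json
import requests

# Equivalent of the helper removed from the test: manually fetch and parse the
# newline-delimited JSON behind task.result_url. With this commit, iterating
# `task.result` makes the manual download unnecessary.
def download_result(result_url: str) -> list:
    response = requests.get(result_url)
    response.raise_for_status()
    return [json.loads(line) for line in response.text.splitlines()]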
