21 changes: 12 additions & 9 deletions docs/conf.py
@@ -13,28 +13,31 @@

# -- Project information -----------------------------------------------------

project = 'Python SDK reference'
copyright = '2025, Labelbox'
author = 'Labelbox'
release = '7.2.0'
project = "Python SDK reference"
copyright = "2025, Labelbox"
author = "Labelbox"
release = "7.2.0"

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'multiproject', 'sphinx.ext.autodoc', 'sphinx.ext.viewcode',
'sphinx.ext.napoleon', 'sphinx_rtd_theme'
"multiproject",
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinx_rtd_theme",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

multiproject_projects = {"labelbox": {"path": "labelbox"}}

@@ -43,7 +46,7 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme = "sphinx_rtd_theme"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
2 changes: 1 addition & 1 deletion examples/exports/composite_mask_export.ipynb
@@ -238,7 +238,7 @@
},
{
"metadata": {},
"source": "tools_frames_color = {}\nstream = export_task_video.get_buffered_stream()\n\n# Iterate over each output in the stream\nfor output in stream:\n output_json = output.json\n\n # Iterate over the labels in the specific project\n for dr in output_json[\"projects\"][VIDEO_PROJECT_ID][\"labels\"]:\n frames_data = dr[\"annotations\"][\"frames\"]\n\n # Iterate over each frame in the frames data\n for frame_key, frame_value in frames_data.items():\n\n # Iterate over each annotation in the frame\n for annotation_key, annotation_value in frame_value.items():\n if \"objects\" in annotation_key and annotation_value.values():\n\n # Iterate over each object in the annotation\n for object_key, object_value in annotation_value.items():\n if (object_value[\"annotation_kind\"] ==\n \"VideoSegmentationMask\"):\n # Update tools_frames_color with object information\n tools_frames_color.setdefault(\n object_value[\"name\"], []).append({\n frame_key:\n object_value[\"composite_mask\"]\n [\"color_rgb\"]\n })\n\nprint(tools_frames_color)",
"source": "tools_frames_color = {}\nstream = export_task_video.get_buffered_stream()\n\n# Iterate over each output in the stream\nfor output in stream:\n output_json = output.json\n\n # Iterate over the labels in the specific project\n for dr in output_json[\"projects\"][VIDEO_PROJECT_ID][\"labels\"]:\n frames_data = dr[\"annotations\"][\"frames\"]\n\n # Iterate over each frame in the frames data\n for frame_key, frame_value in frames_data.items():\n # Iterate over each annotation in the frame\n for annotation_key, annotation_value in frame_value.items():\n if \"objects\" in annotation_key and annotation_value.values():\n # Iterate over each object in the annotation\n for object_key, object_value in annotation_value.items():\n if (object_value[\"annotation_kind\"] ==\n \"VideoSegmentationMask\"):\n # Update tools_frames_color with object information\n tools_frames_color.setdefault(\n object_value[\"name\"], []).append({\n frame_key:\n object_value[\"composite_mask\"]\n [\"color_rgb\"]\n })\n\nprint(tools_frames_color)",
"cell_type": "code",
"outputs": [],
"execution_count": null
2 changes: 1 addition & 1 deletion examples/exports/exporting_to_csv.ipynb
@@ -291,7 +291,7 @@
},
{
"metadata": {},
"source": "GLOBAL_CSV_LIST = []\n\n\ndef main(output: lb.BufferedJsonConverterOutput):\n\n # Navigate to our label list\n labels = output.json[\"projects\"][project.uid][\"labels\"]\n for label in labels:\n # Define our CSV \"row\"\n csv_row = dict()\n\n # Start with data row base columns\n csv_row = get_base_data_row_columns(output.json, csv_row,\n data_row_base_columns)\n\n # Add our label details\n csv_row = get_base_label_columns(label, csv_row, label_base_columns)\n\n # Add classification features\n for classification in class_annotation_columns:\n csv_row[classification[\"column_name\"]] = get_feature_answers(\n classification, label[\"annotations\"][\"classifications\"])\n\n # Add tools features\n for tool in tool_annotation_columns:\n csv_row[tool[\"column_name\"]] = get_feature_answers(\n tool, label[\"annotations\"][\"objects\"])\n\n # Append to global csv list\n GLOBAL_CSV_LIST.append(csv_row)",
"source": "GLOBAL_CSV_LIST = []\n\n\ndef main(output: lb.BufferedJsonConverterOutput):\n # Navigate to our label list\n labels = output.json[\"projects\"][project.uid][\"labels\"]\n for label in labels:\n # Define our CSV \"row\"\n csv_row = dict()\n\n # Start with data row base columns\n csv_row = get_base_data_row_columns(output.json, csv_row,\n data_row_base_columns)\n\n # Add our label details\n csv_row = get_base_label_columns(label, csv_row, label_base_columns)\n\n # Add classification features\n for classification in class_annotation_columns:\n csv_row[classification[\"column_name\"]] = get_feature_answers(\n classification, label[\"annotations\"][\"classifications\"])\n\n # Add tools features\n for tool in tool_annotation_columns:\n csv_row[tool[\"column_name\"]] = get_feature_answers(\n tool, label[\"annotations\"][\"objects\"])\n\n # Append to global csv list\n GLOBAL_CSV_LIST.append(csv_row)",
"cell_type": "code",
"outputs": [],
"execution_count": null
2 changes: 1 addition & 1 deletion examples/integrations/yolo/import_yolov8_annotations.ipynb
@@ -177,7 +177,7 @@
},
{
"metadata": {},
"source": "export_task = project.export()\nexport_task.wait_till_done()\n\n# prediction list we will be populating\nurl_list = []\nglobal_keys = []\n\n\n# callback that is ran on each data row\ndef export_callback(output: lb.BufferedJsonConverterOutput):\n\n data_row = output.json\n\n url_list.append(data_row[\"data_row\"][\"row_data\"])\n\n global_keys.append(data_row[\"data_row\"][\"global_key\"])\n\n\n# check if export has errors\nif export_task.has_errors():\n export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n\nif export_task.has_result():\n export_task.get_buffered_stream().start(stream_handler=export_callback)",
"source": "export_task = project.export()\nexport_task.wait_till_done()\n\n# prediction list we will be populating\nurl_list = []\nglobal_keys = []\n\n\n# callback that is ran on each data row\ndef export_callback(output: lb.BufferedJsonConverterOutput):\n data_row = output.json\n\n url_list.append(data_row[\"data_row\"][\"row_data\"])\n\n global_keys.append(data_row[\"data_row\"][\"global_key\"])\n\n\n# check if export has errors\nif export_task.has_errors():\n export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n\nif export_task.has_result():\n export_task.get_buffered_stream().start(stream_handler=export_callback)",
"cell_type": "code",
"outputs": [],
"execution_count": null
2 changes: 1 addition & 1 deletion examples/scripts/generate_readme.py
@@ -150,7 +150,7 @@ def make_table(base: str) -> str:
)
)
df = pandas.DataFrame(pandas_dict)
generated_markdown += f"{df.to_html(col_space={'Notebook':400}, index=False, escape=False, justify='left')}\n\n"
generated_markdown += f"{df.to_html(col_space={'Notebook': 400}, index=False, escape=False, justify='left')}\n\n"
return f"{generated_markdown.rstrip()}\n"


@@ -96,7 +96,7 @@ def _get_metric_name(
if _is_classification(ground_truths, predictions):
return "classification"

return f"{int(iou*100)}pct_iou"
return f"{int(iou * 100)}pct_iou"


def _is_classification(
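A quick sketch of what the metric-name f-string in the hunk above evaluates to; the iou value here is an illustrative assumption, not a value taken from this diff:

    iou = 0.5  # hypothetical IoU threshold for illustration
    metric_name = f"{int(iou * 100)}pct_iou"
    print(metric_name)  # prints "50pct_iou"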
3 changes: 1 addition & 2 deletions libs/labelbox/src/labelbox/orm/db_object.py
@@ -76,8 +76,7 @@ def _set_field_values(self, field_values):
value = value.replace(tzinfo=timezone.utc)
except ValueError:
logger.warning(
"Failed to convert value '%s' to datetime for "
"field %s",
"Failed to convert value '%s' to datetime for field %s",
value,
field,
)
1 change: 1 addition & 0 deletions libs/labelbox/src/labelbox/schema/role.py
@@ -23,6 +23,7 @@ def get_roles(client: "Client") -> Dict[str, "Role"]:


def format_role(name: str):
# Convert to uppercase and replace spaces with underscores
return name.upper().replace(" ", "_")


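A small usage sketch of the helper commented above, assuming the module imports as labelbox.schema.role; the role names are hypothetical examples, not values from this diff:

    from labelbox.schema.role import format_role

    # Uppercases the name and replaces spaces with underscores
    print(format_role("Team Manager"))  # TEAM_MANAGER
    print(format_role("reviewer"))      # REVIEWER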
@@ -358,7 +358,7 @@ def _handle_feature_consensus_average(
if isinstance(annotations[0], str):
# Simple ID list - convert to full format (placeholder names)
annotation_objects = [
{"name": f"Feature {i+1}", "schemaNodeId": ann_id}
{"name": f"Feature {i + 1}", "schemaNodeId": ann_id}
for i, ann_id in enumerate(annotations)
]
else:
10 changes: 5 additions & 5 deletions libs/labelbox/src/labelbox/schema/workflow/filter_utils.py
@@ -84,27 +84,27 @@ def build_metadata_items(
"""
if item_type == "user":
return [
{key_field: item_id, "email": f"user{i+1}@example.com"}
{key_field: item_id, "email": f"user{i + 1}@example.com"}
for i, item_id in enumerate(ids)
]
elif item_type == "dataset":
return [
{key_field: item_id, "name": f"Dataset {i+1}"}
{key_field: item_id, "name": f"Dataset {i + 1}"}
for i, item_id in enumerate(ids)
]
elif item_type == "annotation":
return [
{"name": f"Annotation {i+1}", "schemaNodeId": item_id}
{"name": f"Annotation {i + 1}", "schemaNodeId": item_id}
for i, item_id in enumerate(ids)
]
elif item_type == "issue":
return [
{key_field: item_id, "name": f"Issue Category {i+1}"}
{key_field: item_id, "name": f"Issue Category {i + 1}"}
for i, item_id in enumerate(ids)
]
else:
return [
{key_field: item_id, "name": f"{item_type.title()} {i+1}"}
{key_field: item_id, "name": f"{item_type.title()} {i + 1}"}
for i, item_id in enumerate(ids)
]

@@ -402,5 +402,5 @@ def print_filters(workflow: "ProjectWorkflow") -> None:
if isinstance(node, LogicNode):
logger.info(f"Filters for node {node.id} ({node.name}):")
for i, f in enumerate(node.get_parsed_filters()):
logger.info(f" Filter {i+1}:")
logger.info(f" Filter {i + 1}:")
logger.info(f" {json.dumps(f, indent=2)}")
12 changes: 6 additions & 6 deletions libs/labelbox/tests/conftest.py
@@ -688,12 +688,12 @@ def create_label():
predictions,
)
upload_task.wait_until_done(sleep_time_seconds=5)
assert (
upload_task.state == AnnotationImportState.FINISHED
), "Label Import did not finish"
assert (
len(upload_task.errors) == 0
), f"Label Import {upload_task.name} failed with errors {upload_task.errors}"
assert upload_task.state == AnnotationImportState.FINISHED, (
"Label Import did not finish"
)
assert len(upload_task.errors) == 0, (
f"Label Import {upload_task.name} failed with errors {upload_task.errors}"
)

project.create_label = create_label
project.create_label()
24 changes: 12 additions & 12 deletions libs/labelbox/tests/data/annotation_import/conftest.py
@@ -1929,12 +1929,12 @@ def model_run_with_data_rows(
model_run_predictions,
)
upload_task.wait_until_done()
assert (
upload_task.state == AnnotationImportState.FINISHED
), "Label Import did not finish"
assert (
len(upload_task.errors) == 0
), f"Label Import {upload_task.name} failed with errors {upload_task.errors}"
assert upload_task.state == AnnotationImportState.FINISHED, (
"Label Import did not finish"
)
assert len(upload_task.errors) == 0, (
f"Label Import {upload_task.name} failed with errors {upload_task.errors}"
)
labels = wait_for_label_processing(configured_project)
label_ids = [label.uid for label in labels]
model_run.upsert_labels(label_ids)
@@ -1963,12 +1963,12 @@ def model_run_with_all_project_labels(
model_run_predictions,
)
upload_task.wait_until_done()
assert (
upload_task.state == AnnotationImportState.FINISHED
), "Label Import did not finish"
assert (
len(upload_task.errors) == 0
), f"Label Import {upload_task.name} failed with errors {upload_task.errors}"
assert upload_task.state == AnnotationImportState.FINISHED, (
"Label Import did not finish"
)
assert len(upload_task.errors) == 0, (
f"Label Import {upload_task.name} failed with errors {upload_task.errors}"
)
labels = wait_for_label_processing(configured_project)
label_ids = [label.uid for label in labels]
model_run.upsert_labels(label_ids)
12 changes: 6 additions & 6 deletions libs/labelbox/tests/data/export/conftest.py
@@ -462,12 +462,12 @@ def model_run_with_data_rows(
model_run_predictions,
)
upload_task.wait_until_done()
assert (
upload_task.state == AnnotationImportState.FINISHED
), "Label Import did not finish"
assert (
len(upload_task.errors) == 0
), f"Label Import {upload_task.name} failed with errors {upload_task.errors}"
assert upload_task.state == AnnotationImportState.FINISHED, (
"Label Import did not finish"
)
assert len(upload_task.errors) == 0, (
f"Label Import {upload_task.name} failed with errors {upload_task.errors}"
)
labels = wait_for_label_processing(configured_project_with_ontology)
label_ids = [label.uid for label in labels]
model_run.upsert_labels(label_ids)
@@ -1,5 +1,7 @@
import time

import pytest

from labelbox import DataRow, ExportTask, StreamType, Task, TaskStatus


@@ -136,6 +138,9 @@ def test_cancel_export_task(
cancelled_task = client.get_task_by_id(export_task.uid)
assert cancelled_task.status in ["CANCELING", "CANCELED"]

@pytest.mark.skip(
reason="Test times out in environments with high task volume - querying all org tasks is too slow"
)
def test_task_filter(self, client, data_row, wait_for_data_row_processing):
organization = client.get_organization()
user = client.get_user()
@@ -38,9 +38,9 @@ def test_overlapping_objects(tool_examples):
).values():
for idx in range(4):
expected[idx] += expected_values[idx]
assert score[0].value == tuple(
expected
), f"{example.predictions},{example.ground_truths}"
assert score[0].value == tuple(expected), (
f"{example.predictions},{example.ground_truths}"
)


@parametrize(
@@ -59,9 +59,9 @@ def test_overlapping_classifications(tool_examples):
for expected_values in example.expected.values():
for idx in range(4):
expected[idx] += expected_values[idx]
assert score[0].value == tuple(
expected
), f"{example.predictions},{example.ground_truths}"
assert score[0].value == tuple(expected), (
f"{example.predictions},{example.ground_truths}"
)


def test_partial_overlap(pair_iou_thresholds):
@@ -70,6 +70,6 @@ def test_partial_overlap(pair_iou_thresholds):
score = confusion_matrix_metric(
example.predictions, example.ground_truths, iou=iou
)
assert score[0].value == tuple(
example.expected[iou]
), f"{example.predictions},{example.ground_truths}"
assert score[0].value == tuple(example.expected[iou]), (
f"{example.predictions},{example.ground_truths}"
)
@@ -33,9 +33,9 @@ def test_overlapping_objects(tool_examples):
if len(getattr(example, expected_attr_name)) == 0:
assert len(metrics) == 0
else:
assert metrics == getattr(
example, expected_attr_name
), f"{example.predictions},{example.ground_truths}"
assert metrics == getattr(example, expected_attr_name), (
f"{example.predictions},{example.ground_truths}"
)


@parametrize(
@@ -52,6 +52,6 @@ def test_overlapping_classifications(tool_examples):
if len(example.expected) == 0:
assert len(metrics) == 0
else:
assert (
metrics == example.expected
), f"{example.predictions},{example.ground_truths}"
assert metrics == example.expected, (
f"{example.predictions},{example.ground_truths}"
)
@@ -403,7 +403,9 @@ def test_source_ontology_name_relationship():
type=Relationship.Type.UNIDIRECTIONAL,
),
)
assert False, "Expected ValueError for providing both source and source_ontology_name"
assert False, (
"Expected ValueError for providing both source and source_ontology_name"
)
except Exception as e:
assert (
"Value error, Only one of 'source' or 'source_ontology_name' may be provided"
@@ -419,7 +421,9 @@ def test_source_ontology_name_relationship():
type=Relationship.Type.UNIDIRECTIONAL,
),
)
assert False, "Expected ValueError for providing neither source nor source_ontology_name"
assert False, (
"Expected ValueError for providing neither source nor source_ontology_name"
)
except Exception as e:
assert (
"Value error, Either source or source_ontology_name must be provided"
12 changes: 6 additions & 6 deletions libs/labelbox/tests/data/test_data_row_metadata.py
@@ -165,9 +165,9 @@ def test_large_bulk_upsert_datarow_metadata(big_dataset, mdo):
for metadata in mdo.bulk_export(data_row_ids)
}
for data_row_id in data_row_ids:
assert len(
[f for f in metadata_lookup.get(data_row_id).fields]
), metadata_lookup.get(data_row_id).fields
assert len([f for f in metadata_lookup.get(data_row_id).fields]), (
metadata_lookup.get(data_row_id).fields
)


def test_upsert_datarow_metadata_by_name(data_row, mdo):
@@ -179,9 +179,9 @@ def test_upsert_datarow_metadata_by_name(data_row, mdo):
metadata.data_row_id: metadata
for metadata in mdo.bulk_export([data_row.uid])
}
assert len(
[f for f in metadata_lookup.get(data_row.uid).fields]
), metadata_lookup.get(data_row.uid).fields
assert len([f for f in metadata_lookup.get(data_row.uid).fields]), (
metadata_lookup.get(data_row.uid).fields
)


def test_upsert_datarow_metadata_option_by_name(data_row, mdo):