Skip to content

Commit 1527dc5

Browse files
authored
[SN-120] Include examples of using composite mask in image projects (#1431)
1- Removed example on how to fetch a single instance mask using Python in the export_data notebook 2- Include examples on how to import image annotations using composite masks
1 parent 061dab2 commit 1527dc5

File tree

2 files changed

+131
-122
lines changed

2 files changed

+131
-122
lines changed

examples/annotation_import/image.ipynb

Lines changed: 131 additions & 77 deletions
Original file line numberDiff line numberDiff line change
@@ -85,10 +85,12 @@
8585
"metadata": {},
8686
"source": [
8787
"import uuid\n",
88+
"from PIL import Image\n",
8889
"import requests\n",
8990
"import base64\n",
9091
"import labelbox as lb\n",
91-
"import labelbox.types as lb_types"
92+
"import labelbox.types as lb_types\n",
93+
"from io import BytesIO\n"
9294
],
9395
"cell_type": "code",
9496
"outputs": [],
@@ -500,47 +502,27 @@
500502
{
501503
"metadata": {},
502504
"source": [
503-
"### Segmentation Mask"
505+
"### Composite mask upload using different mask tools from the project's ontology\n",
506+
"This example shows how to assign different annotations (mask instances) from a composite mask using different mask tools"
504507
],
505508
"cell_type": "markdown"
506509
},
507510
{
508511
"metadata": {},
509512
"source": [
510-
"### Raster Segmentation (Byte string array)\n",
511-
"url = \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/raster_seg.png\"\n",
512-
"response = requests.get(url)\n",
513+
"# First we need to extract all the unique colors from the composite mask\n",
514+
"def extract_rgb_colors_from_url(image_url):\n",
515+
" response = requests.get(image_url)\n",
516+
" img = Image.open(BytesIO(response.content))\n",
513517
"\n",
514-
"mask_data = lb.types.MaskData(im_bytes=response.content) # You can also use \"url\" instead of img_bytes to pass the PNG mask url.\n",
515-
"mask_annotation = lb_types.ObjectAnnotation(\n",
516-
" name=\"mask\",\n",
517-
" value=lb_types.Mask(\n",
518-
" mask=mask_data,\n",
519-
" color=(255, 255, 255))\n",
520-
")\n",
521-
"\n",
522-
"# NDJSON using instanceURI, or bytes array.\n",
523-
"mask_annotation_ndjson = {\n",
524-
" \"name\": \"mask\",\n",
525-
" \"classifications\": [],\n",
526-
" \"mask\": {\n",
527-
" \t\"instanceURI\": url,\n",
528-
" \t\"colorRGB\": (255, 255, 255)\n",
529-
" }\n",
530-
"}\n",
531-
"\n",
532-
"#Using bytes array.\n",
533-
"response = requests.get(url)\n",
534-
"im_bytes = base64.b64encode(response.content).decode('utf-8')\n",
518+
" colors = set()\n",
519+
" for x in range(img.width):\n",
520+
" for y in range(img.height):\n",
521+
" pixel = img.getpixel((x, y))\n",
522+
" if pixel[:3] != (0,0,0):\n",
523+
" colors.add(pixel[:3]) # Get only the RGB values\n",
535524
"\n",
536-
"mask_annotation_ndjson = {\n",
537-
" \"name\": \"mask\",\n",
538-
" \"classifications\": [],\n",
539-
" \"mask\": {\n",
540-
" \t\"imBytes\": im_bytes,\n",
541-
" \"colorRGB\": (255, 255, 255)\n",
542-
" }\n",
543-
" }"
525+
" return colors"
544526
],
545527
"cell_type": "code",
546528
"outputs": [],
@@ -549,40 +531,98 @@
549531
{
550532
"metadata": {},
551533
"source": [
552-
"### Segmentation mask with nested classification "
553-
],
554-
"cell_type": "markdown"
555-
},
556-
{
557-
"metadata": {},
558-
"source": [
559-
"url_2 = \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/raster_seg_with_subclass.png\"\n",
560-
"response = requests.get(url_2)\n",
561-
"mask_data = lb_types.MaskData(im_bytes=response.content)\n",
562534
"\n",
563-
"# Python annotation\n",
564-
"mask_with_text_subclass_annotation = lb_types.ObjectAnnotation(\n",
565-
" name = \"mask_with_text_subclass\", # must match your ontology feature\"s name\n",
566-
" value=lb_types.Mask(\n",
567-
" mask=mask_data,\n",
568-
" color=(255, 255, 255)),\n",
569-
" classifications=[\n",
570-
" lb_types.ClassificationAnnotation(\n",
571-
" name=\"sub_free_text\",\n",
572-
" value=lb_types.Text(answer=\"free text answer\")\n",
573-
" )]\n",
574-
")\n",
535+
"cp_mask_url = \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/composite_mask.png\"\n",
536+
"colors = extract_rgb_colors_from_url(cp_mask_url)\n",
537+
"response = requests.get(cp_mask_url)\n",
575538
"\n",
576-
"# NDJSON using instanceURI, bytes array is not fully supported.\n",
577-
"mask_with_text_subclass_ndjson = {\n",
578-
" \"name\": \"mask_with_text_subclass\",\n",
579-
" \"mask\": {\"instanceURI\": url_2,\n",
580-
" \"colorRGB\": (255, 255, 255)},\n",
581-
" \"classifications\":[{\n",
582-
" \"name\": \"sub_free_text\",\n",
583-
" \"answer\": \"free text answer\"\n",
584-
" }]\n",
585-
"}"
539+
"mask_data = lb.types.MaskData(im_bytes=response.content) # You can also use \"url\" instead of img_bytes to pass the PNG mask url.\n",
540+
"rgb_colors_for_mask_with_text_subclass_tool = [(73, 39, 85), (111, 87, 176), (23, 169, 254)]\n",
541+
"\n",
542+
"cp_mask = []\n",
543+
"for color in colors:\n",
544+
" # We are assigning the color related to the mask_with_text_subclass tool by identifying the unique RGB colors\n",
545+
" if color in rgb_colors_for_mask_with_text_subclass_tool:\n",
546+
" cp_mask.append(\n",
547+
" lb_types.ObjectAnnotation(\n",
548+
" name = \"mask_with_text_subclass\", # must match your ontology feature\"s name\n",
549+
" value=lb_types.Mask(\n",
550+
" mask=mask_data,\n",
551+
" color=color),\n",
552+
" classifications=[\n",
553+
" lb_types.ClassificationAnnotation(\n",
554+
" name=\"sub_free_text\",\n",
555+
" value=lb_types.Text(answer=\"free text answer sample\")\n",
556+
" )]\n",
557+
" )\n",
558+
" )\n",
559+
" else:\n",
560+
" # Create ObjectAnnotation for other masks\n",
561+
" cp_mask.append(\n",
562+
" lb_types.ObjectAnnotation(\n",
563+
" name=\"mask\",\n",
564+
" value=lb_types.Mask(\n",
565+
" mask=mask_data,\n",
566+
" color=color\n",
567+
" )\n",
568+
" )\n",
569+
" )\n",
570+
"\n",
571+
"\n",
572+
"# NDJSON using instanceURI, or bytes array - use one of the two options\n",
573+
"cp_mask_ndjson = []\n",
574+
"\n",
575+
"for color in colors:\n",
576+
" if color in rgb_colors_for_mask_with_text_subclass_tool:\n",
577+
" cp_mask_ndjson.append({\n",
578+
" \"name\": \"mask_with_text_subclass\",\n",
579+
" \"mask\": {\"instanceURI\": cp_mask_url,\n",
580+
" \"colorRGB\": color },\n",
581+
" \"classifications\":[{\n",
582+
" \"name\": \"sub_free_text\",\n",
583+
" \"answer\": \"free text answer\"\n",
584+
" }]\n",
585+
" }\n",
586+
" )\n",
587+
" else:\n",
588+
" cp_mask_ndjson.append({\n",
589+
" \"name\": \"mask\",\n",
590+
" \"classifications\": [],\n",
591+
" \"mask\": {\n",
592+
" \"instanceURI\": cp_mask_url,\n",
593+
" \"colorRGB\": color\n",
594+
" }\n",
595+
" }\n",
596+
" )\n",
597+
"\n",
598+
"\n",
599+
"#Using bytes array.\n",
600+
"response = requests.get(cp_mask_url)\n",
601+
"im_bytes = base64.b64encode(response.content).decode('utf-8')\n",
602+
"for color in colors:\n",
603+
" if color in rgb_colors_for_mask_with_text_subclass_tool:\n",
604+
" cp_mask_ndjson.append({\n",
605+
" \"name\": \"mask_with_text_subclass\",\n",
606+
" \"mask\": {\"instanceURI\": im_bytes,\n",
607+
" \"colorRGB\": color },\n",
608+
" \"classifications\":[{\n",
609+
" \"name\": \"sub_free_text\",\n",
610+
" \"answer\": \"free text answer\"\n",
611+
" }]\n",
612+
" }\n",
613+
" )\n",
614+
" else:\n",
615+
" cp_mask_ndjson.append({\n",
616+
" \"name\": \"mask\",\n",
617+
" \"classifications\": [],\n",
618+
" \"mask\": {\n",
619+
" \"instanceURI\": im_bytes,\n",
620+
" \"colorRGB\": color\n",
621+
" }\n",
622+
" }\n",
623+
" )\n",
624+
"\n",
625+
"\n"
586626
],
587627
"cell_type": "code",
588628
"outputs": [],
@@ -709,7 +749,7 @@
709749
"metadata": {},
710750
"source": [
711751
"# send a sample image as batch to the project\n",
712-
"global_key = \"2560px-Kitano_Street_Kobe01s5s41102.jpeg\"\n",
752+
"global_key = \"2560px-Kitano_Street_Kobe01s5s4110.jpeg\"\n",
713753
"\n",
714754
"test_img_url = {\n",
715755
" \"row_data\":\n",
@@ -722,8 +762,25 @@
722762
"task = dataset.create_data_rows([test_img_url])\n",
723763
"task.wait_till_done()\n",
724764
"\n",
765+
"print(f\"Failed data rows: {task.failed_data_rows}\")\n",
725766
"print(f\"Errors: {task.errors}\")\n",
726-
"print(f\"Failed data rows: {task.failed_data_rows}\")"
767+
"\n",
768+
"if task.errors:\n",
769+
" for error in task.errors:\n",
770+
" if 'Duplicate global key' in error['message'] and dataset.row_count == 0:\n",
771+
" # If the global key already exists in the workspace the dataset will be created empty, so we can delete it.\n",
772+
" print(f\"Deleting empty dataset: {dataset}\")\n",
773+
" dataset.delete()\n",
774+
"\n"
775+
],
776+
"cell_type": "code",
777+
"outputs": [],
778+
"execution_count": null
779+
},
780+
{
781+
"metadata": {},
782+
"source": [
783+
"print(dataset)"
727784
],
728785
"cell_type": "code",
729786
"outputs": [],
@@ -897,14 +954,13 @@
897954
" bbox_annotation,\n",
898955
" bbox_with_radio_subclass_annotation,\n",
899956
" polygon_annotation,\n",
900-
" mask_annotation,\n",
901-
" mask_with_text_subclass_annotation,\n",
902957
" point_annotation,\n",
903958
" polyline_annotation,\n",
904959
" bbox_source,\n",
905960
" bbox_target,\n",
906-
" relationship,\n",
907-
"]\n",
961+
" relationship\n",
962+
"] + cp_mask\n",
963+
"\n",
908964
"label.append(\n",
909965
" lb_types.Label(data=lb_types.ImageData(global_key=global_key),\n",
910966
" annotations=annotations))"
@@ -934,14 +990,13 @@
934990
" bbox_annotation_ndjson,\n",
935991
" bbox_with_radio_subclass_ndjson,\n",
936992
" polygon_annotation_ndjson,\n",
937-
" mask_annotation_ndjson,\n",
938-
" mask_with_text_subclass_ndjson,\n",
939993
" point_annotation_ndjson,\n",
940994
" polyline_annotation_ndjson,\n",
941995
" bbox_source_ndjson,\n",
942996
" bbox_target_ndjson,\n",
943997
" relationship_ndjson, ## Only supported for MAL imports\n",
944-
"]\n",
998+
"] + cp_mask_ndjson\n",
999+
"\n",
9451000
"for annotation in annotations:\n",
9461001
" annotation.update({\n",
9471002
" \"dataRow\": {\n",
@@ -1018,8 +1073,7 @@
10181073
{
10191074
"metadata": {},
10201075
"source": [
1021-
"# project.delete()\n",
1022-
"# dataset.delete()"
1076+
"# project.delete()"
10231077
],
10241078
"cell_type": "code",
10251079
"outputs": [],

examples/exports/export_data.ipynb

Lines changed: 0 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -849,51 +849,6 @@
849849
"cell_type": "code",
850850
"outputs": [],
851851
"execution_count": null
852-
},
853-
{
854-
"metadata": {},
855-
"source": [
856-
"## How to access a `mask` URL \n",
857-
"\n",
858-
"Annotations of the kind `ImageSegmentationMask` and `VideoSegmentationMask` can only be present in labels made on image or video data rows, respectively. In order to access the mask data, you must pass your Labelbox API key stored in `client.headers` in an API request.\n",
859-
"\n",
860-
"When you grab a URL from the mask annotation in the export, the `project_id` and `feature_id` will already be in place. Here, we provide the framework for structuring a URL with any project ID and feature ID."
861-
],
862-
"cell_type": "markdown"
863-
},
864-
{
865-
"metadata": {},
866-
"source": [
867-
"# Provide a project ID and feature ID. Alternatively, replace the entire mask_url with a URL grabbed from your export.\n",
868-
"project_id = \"\"\n",
869-
"feature_id = \"\"\n",
870-
"\n",
871-
"mask_url = f\"https://api.labelbox.com/api/v1/projects/{project_id}/annotations/{feature_id}/index/1/mask\""
872-
],
873-
"cell_type": "code",
874-
"outputs": [],
875-
"execution_count": null
876-
},
877-
{
878-
"metadata": {},
879-
"source": [
880-
"# Make the API request\n",
881-
"req = urllib.request.Request(mask_url, headers=client.headers)"
882-
],
883-
"cell_type": "code",
884-
"outputs": [],
885-
"execution_count": null
886-
},
887-
{
888-
"metadata": {},
889-
"source": [
890-
"# Print the image of the mask\n",
891-
"image = Image.open(urllib.request.urlopen(req))\n",
892-
"image\n"
893-
],
894-
"cell_type": "code",
895-
"outputs": [],
896-
"execution_count": null
897852
}
898853
]
899854
}

0 commit comments

Comments
 (0)