|
85 | 85 | "metadata": {}, |
86 | 86 | "source": [ |
87 | 87 | "import uuid\n", |
| 88 | + "from PIL import Image\n", |
88 | 89 | "import requests\n", |
89 | 90 | "import base64\n", |
90 | 91 | "import labelbox as lb\n", |
91 | | - "import labelbox.types as lb_types" |
| 92 | + "import labelbox.types as lb_types\n", |
| 93 | + "from io import BytesIO\n" |
92 | 94 | ], |
93 | 95 | "cell_type": "code", |
94 | 96 | "outputs": [], |
|
500 | 502 | { |
501 | 503 | "metadata": {}, |
502 | 504 | "source": [ |
503 | | - "### Segmentation Mask" |
| 505 | + "### Composite mask upload using different mask tools from the project's ontology\n", |
| 506 | + "This example shows how to assigned different annotations (mask instances) from a composite mask using different mask tools" |
504 | 507 | ], |
505 | 508 | "cell_type": "markdown" |
506 | 509 | }, |
507 | 510 | { |
508 | 511 | "metadata": {}, |
509 | 512 | "source": [ |
510 | | - "### Raster Segmentation (Byte string array)\n", |
511 | | - "url = \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/raster_seg.png\"\n", |
512 | | - "response = requests.get(url)\n", |
| 513 | + "# First we need to extract all the unique colors from the composite mask\n", |
| 514 | + "def extract_rgb_colors_from_url(image_url):\n", |
| 515 | + " response = requests.get(image_url)\n", |
| 516 | + "    img = Image.open(BytesIO(response.content)).convert(\"RGB\")  # normalize mode so getpixel() always returns RGB tuples\n",
513 | 517 | "\n", |
514 | | - "mask_data = lb.types.MaskData(im_bytes=response.content) # You can also use \"url\" instead of img_bytes to pass the PNG mask url.\n", |
515 | | - "mask_annotation = lb_types.ObjectAnnotation(\n", |
516 | | - " name=\"mask\",\n", |
517 | | - " value=lb_types.Mask(\n", |
518 | | - " mask=mask_data,\n", |
519 | | - " color=(255, 255, 255))\n", |
520 | | - ")\n", |
521 | | - "\n", |
522 | | - "# NDJSON using instanceURI, or bytes array.\n", |
523 | | - "mask_annotation_ndjson = {\n", |
524 | | - " \"name\": \"mask\",\n", |
525 | | - " \"classifications\": [],\n", |
526 | | - " \"mask\": {\n", |
527 | | - " \t\"instanceURI\": url,\n", |
528 | | - " \t\"colorRGB\": (255, 255, 255)\n", |
529 | | - " }\n", |
530 | | - "}\n", |
531 | | - "\n", |
532 | | - "#Using bytes array.\n", |
533 | | - "response = requests.get(url)\n", |
534 | | - "im_bytes = base64.b64encode(response.content).decode('utf-8')\n", |
| 518 | + " colors = set()\n", |
| 519 | + " for x in range(img.width):\n", |
| 520 | + " for y in range(img.height):\n", |
| 521 | + " pixel = img.getpixel((x, y))\n", |
| 522 | + "            if pixel[:3] != (0, 0, 0):  # skip the black background\n",
| 523 | + "                colors.add(pixel[:3])  # Get only the RGB values\n",
535 | 524 | "\n", |
536 | | - "mask_annotation_ndjson = {\n", |
537 | | - " \"name\": \"mask\",\n", |
538 | | - " \"classifications\": [],\n", |
539 | | - " \"mask\": {\n", |
540 | | - " \t\"imBytes\": im_bytes,\n", |
541 | | - " \"colorRGB\": (255, 255, 255)\n", |
542 | | - " }\n", |
543 | | - " }" |
| 525 | + " return colors" |
544 | 526 | ], |
545 | 527 | "cell_type": "code", |
546 | 528 | "outputs": [], |
|
549 | 531 | { |
550 | 532 | "metadata": {}, |
551 | 533 | "source": [ |
552 | | - "### Segmentation mask with nested classification " |
553 | | - ], |
554 | | - "cell_type": "markdown" |
555 | | - }, |
556 | | - { |
557 | | - "metadata": {}, |
558 | | - "source": [ |
559 | | - "url_2 = \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/raster_seg_with_subclass.png\"\n", |
560 | | - "response = requests.get(url_2)\n", |
561 | | - "mask_data = lb_types.MaskData(im_bytes=response.content)\n", |
562 | 534 | "\n", |
563 | | - "# Python annotation\n", |
564 | | - "mask_with_text_subclass_annotation = lb_types.ObjectAnnotation(\n", |
565 | | - " name = \"mask_with_text_subclass\", # must match your ontology feature\"s name\n", |
566 | | - " value=lb_types.Mask(\n", |
567 | | - " mask=mask_data,\n", |
568 | | - " color=(255, 255, 255)),\n", |
569 | | - " classifications=[\n", |
570 | | - " lb_types.ClassificationAnnotation(\n", |
571 | | - " name=\"sub_free_text\",\n", |
572 | | - " value=lb_types.Text(answer=\"free text answer\")\n", |
573 | | - " )]\n", |
574 | | - ")\n", |
| 535 | + "cp_mask_url = \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/composite_mask.png\"\n", |
| 536 | + "colors = extract_rgb_colors_from_url(cp_mask_url)\n", |
| 537 | + "response = requests.get(cp_mask_url)\n", |
575 | 538 | "\n", |
576 | | - "# NDJSON using instanceURI, bytes array is not fully supported.\n", |
577 | | - "mask_with_text_subclass_ndjson = {\n", |
578 | | - " \"name\": \"mask_with_text_subclass\",\n", |
579 | | - " \"mask\": {\"instanceURI\": url_2,\n", |
580 | | - " \"colorRGB\": (255, 255, 255)},\n", |
581 | | - " \"classifications\":[{\n", |
582 | | - " \"name\": \"sub_free_text\",\n", |
583 | | - " \"answer\": \"free text answer\"\n", |
584 | | - " }]\n", |
585 | | - "}" |
| 539 | + "mask_data = lb.types.MaskData(im_bytes=response.content) # You can also use \"url\" instead of img_bytes to pass the PNG mask url.\n", |
| 540 | + "rgb_colors_for_mask_with_text_subclass_tool = [(73, 39, 85), (111, 87, 176), (23, 169, 254)]\n", |
| 541 | + "\n", |
| 542 | + "cp_mask = []\n", |
| 543 | + "for color in colors:\n", |
| 544 | + "    # Colors listed above belong to the mask_with_text_subclass tool; all other colors become plain mask annotations\n",
| 545 | + " if color in rgb_colors_for_mask_with_text_subclass_tool:\n", |
| 546 | + " cp_mask.append(\n", |
| 547 | + " lb_types.ObjectAnnotation(\n", |
| 548 | + "            name=\"mask_with_text_subclass\",  # must match your ontology feature's name\n",
| 549 | + " value=lb_types.Mask(\n", |
| 550 | + " mask=mask_data,\n", |
| 551 | + " color=color),\n", |
| 552 | + " classifications=[\n", |
| 553 | + " lb_types.ClassificationAnnotation(\n", |
| 554 | + " name=\"sub_free_text\",\n", |
| 555 | + " value=lb_types.Text(answer=\"free text answer sample\")\n", |
| 556 | + " )]\n", |
| 557 | + " )\n", |
| 558 | + " )\n", |
| 559 | + " else:\n", |
| 560 | + " # Create ObjectAnnotation for other masks\n", |
| 561 | + " cp_mask.append(\n", |
| 562 | + " lb_types.ObjectAnnotation(\n", |
| 563 | + " name=\"mask\",\n", |
| 564 | + " value=lb_types.Mask(\n", |
| 565 | + " mask=mask_data,\n", |
| 566 | + " color=color\n", |
| 567 | + " )\n", |
| 568 | + " )\n", |
| 569 | + " )\n", |
| 570 | + "\n", |
| 571 | + "\n", |
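| | + "# Quick check: the loop above creates exactly one annotation per unique color\n",
| | + "assert len(cp_mask) == len(colors)\n",
| | + "\n",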
| 572 | + "# NDJSON using instanceURI, or bytes array - use one of the two options\n", |
| 573 | + "cp_mask_ndjson = []\n", |
| 574 | + "\n", |
| 575 | + "for color in colors:\n", |
| 576 | + " if color in rgb_colors_for_mask_with_text_subclass_tool:\n", |
| 577 | + " cp_mask_ndjson.append({\n", |
| 578 | + " \"name\": \"mask_with_text_subclass\",\n", |
| 579 | + " \"mask\": {\"instanceURI\": cp_mask_url,\n", |
| 580 | + " \"colorRGB\": color },\n", |
| 581 | + " \"classifications\":[{\n", |
| 582 | + " \"name\": \"sub_free_text\",\n", |
| 583 | + " \"answer\": \"free text answer\"\n", |
| 584 | + " }]\n", |
| 585 | + " }\n", |
| 586 | + " )\n", |
| 587 | + " else:\n", |
| 588 | + " cp_mask_ndjson.append({\n", |
| 589 | + " \"name\": \"mask\",\n", |
| 590 | + " \"classifications\": [],\n", |
| 591 | + " \"mask\": {\n", |
| 592 | + " \"instanceURI\": cp_mask_url,\n", |
| 593 | + " \"colorRGB\": color\n", |
| 594 | + " }\n", |
| 595 | + " }\n", |
| 596 | + " )\n", |
| 597 | + "\n", |
| 598 | + "\n", |
| 599 | + "#Using bytes array.\n", |
| 600 | + "response = requests.get(cp_mask_url)\n", |
| 601 | + "im_bytes = base64.b64encode(response.content).decode('utf-8')\n", |
| 602 | + "for color in colors:\n", |
| 603 | + " if color in rgb_colors_for_mask_with_text_subclass_tool:\n", |
| 604 | + "        cp_mask_ndjson_bytes.append({\n",
| 605 | + " \"name\": \"mask_with_text_subclass\",\n", |
| 606 | + "        \"mask\": {\"imBytes\": im_bytes,\n",
| 607 | + " \"colorRGB\": color },\n", |
| 608 | + " \"classifications\":[{\n", |
| 609 | + " \"name\": \"sub_free_text\",\n", |
| 610 | + " \"answer\": \"free text answer\"\n", |
| 611 | + " }]\n", |
| 612 | + " }\n", |
| 613 | + " )\n", |
| 614 | + " else:\n", |
| 615 | + "        cp_mask_ndjson_bytes.append({\n",
| 616 | + " \"name\": \"mask\",\n", |
| 617 | + " \"classifications\": [],\n", |
| 618 | + " \"mask\": {\n", |
| 619 | + "            \"imBytes\": im_bytes,\n",
| 620 | + " \"colorRGB\": color\n", |
| 621 | + " }\n", |
| 622 | + " }\n", |
| 623 | + " )\n", |
| 624 | + "\n", |
| 625 | + "\n" |
586 | 626 | ], |
587 | 627 | "cell_type": "code", |
588 | 628 | "outputs": [], |
|
709 | 749 | "metadata": {}, |
710 | 750 | "source": [ |
711 | 751 | "# send a sample image as batch to the project\n", |
712 | | - "global_key = \"2560px-Kitano_Street_Kobe01s5s41102.jpeg\"\n", |
| 752 | + "global_key = \"2560px-Kitano_Street_Kobe01s5s4110.jpeg\"\n", |
713 | 753 | "\n", |
714 | 754 | "test_img_url = {\n", |
715 | 755 | " \"row_data\":\n", |
|
722 | 762 | "task = dataset.create_data_rows([test_img_url])\n", |
723 | 763 | "task.wait_till_done()\n", |
724 | 764 | "\n", |
| 765 | + "print(f\"Failed data rows: {task.failed_data_rows}\")\n", |
725 | 766 | "print(f\"Errors: {task.errors}\")\n", |
726 | | - "print(f\"Failed data rows: {task.failed_data_rows}\")" |
| 767 | + "\n", |
| 768 | + "if task.errors:\n", |
| 769 | + " for error in task.errors:\n", |
| 770 | + " if 'Duplicate global key' in error['message'] and dataset.row_count == 0:\n", |
| 771 | + "            # If the global key already exists in the workspace, the dataset is created empty, so we can delete it.\n",
| 772 | + " print(f\"Deleting empty dataset: {dataset}\")\n", |
| 773 | + " dataset.delete()\n", |
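| | + "            # To reuse the existing data row instead, you could look it up by global key\n",
| | + "            # (commented sketch; assumes `client` is the lb.Client created earlier):\n",
| | + "            # res = client.get_data_row_ids_for_global_keys([global_key])\n",
| | + "            # print(res[\"results\"])\n",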
| 774 | + "\n" |
| 775 | + ], |
| 776 | + "cell_type": "code", |
| 777 | + "outputs": [], |
| 778 | + "execution_count": null |
| 779 | + }, |
| 780 | + { |
| 781 | + "metadata": {}, |
| 782 | + "source": [ |
| 783 | + "print(dataset)" |
727 | 784 | ], |
728 | 785 | "cell_type": "code", |
729 | 786 | "outputs": [], |
|
897 | 954 | " bbox_annotation,\n", |
898 | 955 | " bbox_with_radio_subclass_annotation,\n", |
899 | 956 | " polygon_annotation,\n", |
900 | | - " mask_annotation,\n", |
901 | | - " mask_with_text_subclass_annotation,\n", |
902 | 957 | " point_annotation,\n", |
903 | 958 | " polyline_annotation,\n", |
904 | 959 | " bbox_source,\n", |
905 | 960 | " bbox_target,\n", |
906 | | - " relationship,\n", |
907 | | - "]\n", |
| 961 | + " relationship\n", |
| 962 | + "] + cp_mask\n", |
| 963 | + "\n", |
908 | 964 | "label.append(\n", |
909 | 965 | " lb_types.Label(data=lb_types.ImageData(global_key=global_key),\n", |
910 | 966 | " annotations=annotations))" |
|
934 | 990 | " bbox_annotation_ndjson,\n", |
935 | 991 | " bbox_with_radio_subclass_ndjson,\n", |
936 | 992 | " polygon_annotation_ndjson,\n", |
937 | | - " mask_annotation_ndjson,\n", |
938 | | - " mask_with_text_subclass_ndjson,\n", |
939 | 993 | " point_annotation_ndjson,\n", |
940 | 994 | " polyline_annotation_ndjson,\n", |
941 | 995 | " bbox_source_ndjson,\n", |
942 | 996 | " bbox_target_ndjson,\n", |
943 | 997 | " relationship_ndjson, ## Only supported for MAL imports\n", |
944 | | - "]\n", |
| 998 | + "] + cp_mask_ndjson  # or use cp_mask_ndjson_bytes for the bytes-array option\n",
| 999 | + "\n", |
945 | 1000 | "for annotation in annotations:\n", |
946 | 1001 | " annotation.update({\n", |
947 | 1002 | " \"dataRow\": {\n", |
|
1018 | 1073 | { |
1019 | 1074 | "metadata": {}, |
1020 | 1075 | "source": [ |
1021 | | - "# project.delete()\n", |
1022 | | - "# dataset.delete()" |
| 1076 | + "# project.delete()" |
1023 | 1077 | ], |
1024 | 1078 | "cell_type": "code", |
1025 | 1079 | "outputs": [], |
|