# type: ignore
- from labelbox.data.annotation_types.metrics.scalar import CustomScalarMetric
+ from labelbox.data.annotation_types.metrics.scalar import ScalarMetric
from typing import Dict, List, Optional, Tuple, Union
from shapely.geometry import Polygon
from itertools import product
    ClassificationAnnotation, Mask, Geometry, Point,
    Line, Checklist, Text, Radio)

- from .utils import get_lookup_pair
+ from .utils import get_feature_pairs


"""
data_row_iou()

Is it even possible to return a None? If both are None then they won't have keys.
- """
-
- def feature_miou(
-         ground_truth: List[Union[ObjectAnnotation, ClassificationAnnotation]],
-         prediction: List[Union[ObjectAnnotation, ClassificationAnnotation]]) -> List[CustomScalarMetric]:
-     # Classifications are supported because we just take a naive approach to them.
-     return [
-         CustomScalarMetric(metric_name="iou", metric_value=value, feature_name=name)
-         for name, value in get_iou_across_features(ground_truth, prediction)
-         if value is not None
-     ]
-
+ Everything will have types. That is the MO of the

+ Nike - somehow getting an issue with empty masks, not sure why yet.
+ """

# TODO: What should we call this?
# We should be returning these objects..
- def data_row_miou_v2(ground_truth: Label, prediction: Label, include_subclasses=True) -> List[CustomScalarMetric]:
-     return CustomScalarMetric(
-         metric_name="iou",
-         metric_value=data_row_miou(ground_truth=ground_truth, prediction=prediction, include_subclasses=include_subclasses)
-     )
+ def data_row_miou_v2(ground_truth: Label, prediction: Label,
+                      include_subclasses=True) -> List[ScalarMetric]:
+     # data_row_miou expects Label objects and reads .annotations itself,
+     # so the Labels are passed through rather than their annotation lists.
+     data_row_iou = data_row_miou(ground_truth, prediction, include_subclasses)
+     return [ScalarMetric(metric_name="iou", value=data_row_iou)]
+
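+ # A minimal usage sketch of data_row_miou_v2 (the Labels and the returned value
+ # here are hypothetical; only the ScalarMetric shape follows the code above):
+ #
+ #     gt_label = Label(data=..., annotations=[...])    # human annotations
+ #     pred_label = Label(data=..., annotations=[...])  # model predictions
+ #     metrics = data_row_miou_v2(gt_label, pred_label)
+ #     # -> [ScalarMetric(metric_name="iou", value=0.73)]
+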
+ def features_miou(
+         ground_truths: List[Union[ObjectAnnotation, ClassificationAnnotation]],
+         predictions: List[Union[ObjectAnnotation, ClassificationAnnotation]],
+         include_subclasses=True) -> List[ScalarMetric]:
+     """
+     Groups annotations by feature_schema_id or name (whichever is available)
+     and calculates an iou score for each feature.
+
+     Args:
+         ground_truths: List of human annotations or annotations known to be correct
+         predictions: List of model predictions
+     Returns:
+         A list of ScalarMetrics, one per feature represented in the annotations
+         passed to this function. Empty if neither argument contains annotations.
+     """
+     # Classifications are supported because we just take a naive approach to them.
+     # get_feature_pairs groups the annotations by feature, yielding a
+     # (predictions, ground_truths) pair per feature.
+     annotation_pairs = get_feature_pairs(predictions, ground_truths)
+     return [
+         ScalarMetric(metric_name="iou",
+                      value=feature_miou(annotation_pair[0], annotation_pair[1],
+                                         include_subclasses))
+         for annotation_pair in annotation_pairs.values()
+     ]
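+
+ # Sketch of the per-feature behavior, assuming get_feature_pairs groups the
+ # annotations under their feature name ("cat" and "dog" here are made up):
+ #
+ #     gt = [cat_polygon_gt, dog_polygon_gt]
+ #     pred = [cat_polygon_pred, dog_polygon_pred]
+ #     features_miou(gt, pred)
+ #     # -> [ScalarMetric(metric_name="iou", value=<cat iou>),
+ #     #     ScalarMetric(metric_name="iou", value=<dog iou>)]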


def data_row_miou(ground_truth: Label, prediction: Label, include_subclasses=True) -> Optional[float]:
@@ -57,81 +69,16 @@ def data_row_miou(ground_truth: Label, prediction: Label, include_subclasses=T
        float indicating the iou score for this data row.
        Returns None if there are no annotations in ground_truth or prediction Labels
    """
-     feature_ious = get_iou_across_features(ground_truth.annotations,
+     feature_ious = features_miou(ground_truth.annotations,
                                             prediction.annotations, include_subclasses)
-     return average_ious(feature_ious)
-
-
- def subclass_ious(ground_truth: Label, prediction: Label) -> Dict[str, Optional[float]]:
-     """
-     This function effectively flattens all Label classes and computes the iou.
-     Text is ignored for this function.
-     So for Radio or Checklist, if you have an animal detection model and the model predicts:
-         Polygon - cat
-         Radio - orange
-         Checklist - fluffy
-     this all gets grouped into one category, cat:orange:fluffy, which has to match.
-
-     The most appropriate use case for this is if you have one radio subclass that you prefer to treat as top level.
-     Otherwise this function is a bit naive, and if you want something to specifically suit
-     your use case then create a new function based off this one.
-     """
-
-     prediction_annotations, ground_truth_annotations, keys = get_lookup_pair(
-         prediction.annotations, ground_truth.annotations)
-
-     def _create_classification_feature_lookup(annotations: Union[List[ObjectAnnotation], List[ClassificationAnnotation]]):
-         # Note that these annotations should all be of the same type.
-
-         if not len(annotations) or isinstance(annotations[0], ClassificationAnnotation):
-             return annotations
-
-     ious = []
-     for key in keys:
-         # We shouldn't have any Nones, since the keys are generated by the presence of the object.
-         [classification.value.answer for classification in annotation.classifications if isinstance(classification.value, Radio)]
-         prediction_annotations = prediction_annotations[key]
-         gt_annotations = gt_annotations[key]
-
+     # Every metric's name is "iou", so key the dict by position rather than by
+     # metric_name to avoid collapsing it to a single entry before averaging.
+     return average_ious({str(idx): metric.value for idx, metric in enumerate(feature_ious)})


def average_ious(feature_ious: Dict[str, Optional[float]]) -> Optional[float]:
    ious = [iou for iou in feature_ious.values() if iou is not None]
    return None if not len(ious) else np.mean(ious)
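+ # For example (values invented): average_ious({"cat": 0.5, "dog": None, "bird": 1.0})
+ # drops the None and returns 0.75; if every value is None the result is None.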


- def get_iou_across_features(
-         ground_truths: List[Union[ObjectAnnotation, ClassificationAnnotation]],
-         predictions: List[Union[ObjectAnnotation, ClassificationAnnotation]],
-         include_subclasses=True
- ) -> Optional[float]:
-     """
-     Groups annotations by feature_schema_id or name (whichever is available),
-     calculates an iou score, and returns the mean across all features.
-
-     Args:
-         ground_truths: List of human annotations or annotations known to be correct
-         predictions: List of model predictions
-     Returns:
-         float indicating the iou score for all features represented in the
-         annotations passed to this function.
-         Returns None if there are no annotations in ground_truth or prediction annotations.
-     """
-     prediction_annotations, ground_truth_annotations, keys = get_lookup_pair(predictions, ground_truths)
-     ious = {
-         key: feature_miou(ground_truth_annotations[key],
-                           prediction_annotations[key], include_subclasses)
-         for key in keys
-     }
-     return ious

def feature_miou(
        ground_truths: List[Union[ObjectAnnotation, ClassificationAnnotation]],