@@ -1,4 +1,5 @@
 # type: ignore
+from labelbox.data.annotation_types.metrics.scalar import CustomScalarMetric
 from typing import Dict, List, Optional, Tuple, Union
 from shapely.geometry import Polygon
 from itertools import product
@@ -10,7 +11,73 @@
     Line, Checklist, Text, Radio)
 
 
-def data_row_miou(ground_truth: Label, prediction: Label) -> Optional[float]:
+
+def subclass_ious(ground_truth: Label, prediction: Label) -> Dict[str, Optional[float]]:
+    """
+    Effectively flattens all Label classes and computes the iou per flattened
+    category. Text subclasses are ignored by this function.
+
+    For Radio or Checklist: if an animal detection model predicts
+        Polygon   - cat
+        Radio     - orange
+        Checklist - fluffy
+    this all gets grouped into one category, `cat:orange:fluffy`, and a ground
+    truth annotation has to match the entire combination.
+
+    The most appropriate use case for this is when you have one radio subclass
+    that you prefer to treat as top level. Otherwise this function is a bit
+    naive; if you want something that specifically suits your use case, create
+    a new function based on this one.
+    """
+    def _create_classification_feature_lookup(
+            annotations: Union[List[ObjectAnnotation], List[ClassificationAnnotation]]):
+        # Note that these annotations should all be of the same type.
+        if not len(annotations) or isinstance(annotations[0], ClassificationAnnotation):
+            # Classifications have no subclasses to flatten; group them as-is.
+            return _create_feature_lookup(annotations)
+
+        grouped_annotations = defaultdict(list)
+        for annotation in annotations:
+            # Build the flattened key described in the docstring (e.g. `cat:orange`).
+            # NOTE: only Radio subclasses are flattened into the key so far;
+            # Checklist handling is still a TODO.
+            classifications = [
+                classification.value
+                for classification in annotation.classifications
+                if isinstance(classification.value, Radio)
+            ]
+            answer_names = [
+                classification.answer.name or classification.answer.feature_schema_id
+                for classification in classifications
+            ]
+            key = ":".join([annotation.name or annotation.feature_schema_id, *answer_names])
+            grouped_annotations[key].append(annotation)
+        return grouped_annotations
+
+    prediction_annotations = _create_classification_feature_lookup(prediction.annotations)
+    ground_truth_annotations = _create_classification_feature_lookup(ground_truth.annotations)
+    feature_schemas = set(prediction_annotations.keys()).union(
+        set(ground_truth_annotations.keys()))
+    # There shouldn't be any Nones below, since the keys are generated from
+    # annotations that are present on one of the two labels.
+    return {
+        feature_schema: feature_miou(ground_truth_annotations[feature_schema],
+                                     prediction_annotations[feature_schema],
+                                     include_subclasses=False)
+        for feature_schema in feature_schemas
+    }
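+
+# For example (hypothetical names): `subclass_ious(gt_label, pred_label)`
+# might return {"cat:orange:fluffy": 0.83, "dog:brown": 0.0}; pass the result
+# to `average_ious` to collapse it into a single score.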
+
+
+def feature_miou_metrics(ground_truth: Label,
+                         prediction: Label) -> List[CustomScalarMetric]:
+    # One iou metric per feature name. (Named to avoid clashing with the
+    # annotation-level `feature_miou` below.)
+    return [
+        CustomScalarMetric(metric_name="iou", metric_value=value, feature_name=name)
+        for name, value in get_iou_across_features(ground_truth.annotations,
+                                                   prediction.annotations).items()
+        if value is not None
+    ]
+
+
+# TODO: What should we call this?
+# We should be returning these objects..
+def data_row_miou_v2(ground_truth: Label, prediction: Label,
+                     include_subclasses=True) -> List[CustomScalarMetric]:
+    return [
+        CustomScalarMetric(metric_name="iou",
+                           metric_value=data_row_miou(
+                               ground_truth=ground_truth,
+                               prediction=prediction,
+                               include_subclasses=include_subclasses))
+    ]
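+
+# Example usage (illustrative; `gt_label` and `pred_label` are Labels for the
+# same data row):
+#   metrics = data_row_miou_v2(gt_label, pred_label, include_subclasses=False)
+#   score = data_row_miou(gt_label, pred_label)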
+
+
+def data_row_miou(ground_truth: Label, prediction: Label,
+                  include_subclasses=True) -> Optional[float]:
     """
     Calculate iou for two labels corresponding to the same data row.
 
@@ -21,13 +88,20 @@ def data_row_miou(ground_truth: Label, prediction: Label) -> Optional[float]:
         float indicating the iou score for this data row.
         Returns None if there are no annotations in ground_truth or prediction Labels
     """
-    return get_iou_across_features(ground_truth.annotations,
-                                   prediction.annotations)
+    feature_ious = get_iou_across_features(ground_truth.annotations,
+                                           prediction.annotations,
+                                           include_subclasses)
+    return average_ious(feature_ious)
+
+
+def average_ious(feature_ious: Dict[str, Optional[float]]) -> Optional[float]:
+    # Ignore features whose iou is undefined (None).
+    ious = [iou for iou in feature_ious.values() if iou is not None]
+    return None if not len(ious) else np.mean(ious)
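+
+# e.g. average_ious({"cat": 0.5, "dog": 0.25}) -> 0.375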
 
 
 def get_iou_across_features(
     ground_truths: List[Union[ObjectAnnotation, ClassificationAnnotation]],
-    predictions: List[Union[ObjectAnnotation, ClassificationAnnotation]]
-) -> Optional[float]:
+    predictions: List[Union[ObjectAnnotation, ClassificationAnnotation]],
+    include_subclasses=True
+) -> Dict[str, Optional[float]]:
     """
-    Groups annotations by feature_schema_id or name (which is available), calculates iou score and returns the mean across all features.
+    Groups annotations by name or feature_schema_id (whichever is available)
+    and calculates an iou score per feature, keyed by feature.
@@ -43,18 +117,21 @@ def get_iou_across_features(
     ground_truth_annotations = _create_feature_lookup(ground_truths)
     feature_schemas = set(prediction_annotations.keys()).union(
         set(ground_truth_annotations.keys()))
-    ious = [
-        feature_miou(ground_truth_annotations[feature_schema],
-                     prediction_annotations[feature_schema])
-        for feature_schema in feature_schemas
-    ]
-    ious = [iou for iou in ious if iou is not None]
-    return None if not len(ious) else np.mean(ious)
+    # feature_miou only returns None when a feature has neither ground truths
+    # nor predictions, which cannot happen here because every key comes from
+    # annotations that are present.
+    return {
+        feature_schema: feature_miou(ground_truth_annotations[feature_schema],
+                                     prediction_annotations[feature_schema],
+                                     include_subclasses)
+        for feature_schema in feature_schemas
+    }
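+
+# Example return value (hypothetical feature names):
+#   {"cat": 0.5, "dog": 0.25}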
 
 
 def feature_miou(
     ground_truths: List[Union[ObjectAnnotation, ClassificationAnnotation]],
     predictions: List[Union[ObjectAnnotation, ClassificationAnnotation]],
+    include_subclasses: bool
 ) -> Optional[float]:
     """
     Computes iou score for all features with the same feature schema id.
@@ -66,15 +143,19 @@ def feature_miou(
         float representing the iou score for the feature type if score can be computed otherwise None.
     """
     if len(ground_truths) and not len(predictions):
-        # No existing predictions but existing labels means no matches.
+        # No existing predictions but existing ground truths means no matches.
+        return 0.
+    elif not len(ground_truths) and len(predictions):
+        # No ground truths but existing predictions also means no matches.
         return 0.
     elif not len(ground_truths) and not len(predictions):
-        # Ignore examples that do not have any labels or predictions
-        return
+        # Ignore examples that do not have any annotations or predictions.
+        # This could arguably be counted as correct, but it could also skew the stats.
+        return  # Undefined: neither wrong nor right.
     elif isinstance(predictions[0].value, Mask):
-        return mask_miou(ground_truths, predictions)
+        return mask_miou(ground_truths, predictions, include_subclasses)
     elif isinstance(predictions[0].value, Geometry):
-        return vector_miou(ground_truths, predictions)
+        return vector_miou(ground_truths, predictions,
+                           include_subclasses=include_subclasses)
     elif isinstance(predictions[0], ClassificationAnnotation):
         return classification_miou(ground_truths, predictions)
     else:
@@ -84,7 +165,7 @@
 
 def vector_miou(ground_truths: List[ObjectAnnotation],
                 predictions: List[ObjectAnnotation],
-                buffer=70.) -> float:
+                buffer=70., include_subclasses=True) -> float:
     """
     Computes iou score for all features with the same feature schema id.
-    Calculation includes subclassifications.
+    Calculation includes subclassifications when `include_subclasses` is True.
@@ -105,10 +186,13 @@ def vector_miou(ground_truths: List[ObjectAnnotation],
         if id(prediction) not in solution_features and id(
                 ground_truth) not in solution_features:
             solution_features.update({id(prediction), id(ground_truth)})
-            classification_iou = get_iou_across_features(
-                prediction.classifications, ground_truth.classifications)
-            classification_iou = classification_iou if classification_iou is not None else agreement
-            solution_agreements.append((agreement + classification_iou) / 2.)
+            if include_subclasses:
+                classification_iou = average_ious(get_iou_across_features(
+                    prediction.classifications, ground_truth.classifications))
+                classification_iou = classification_iou if classification_iou is not None else agreement
+                solution_agreements.append((agreement + classification_iou) / 2.)
+            else:
+                solution_agreements.append(agreement)
 
     # Add zeros for unmatched Features
     solution_agreements.extend([0.0] *
@@ -117,7 +201,7 @@ def vector_miou(ground_truths: List[ObjectAnnotation],
 
 
 def mask_miou(ground_truths: List[ObjectAnnotation],
-              predictions: List[ObjectAnnotation]) -> float:
+              predictions: List[ObjectAnnotation],
+              include_subclasses=True) -> float:
     """
     Computes iou score for all features with the same feature schema id.
-    Calculation includes subclassifications.
+    Calculation includes subclassifications when `include_subclasses` is True.
@@ -138,6 +222,10 @@ def mask_miou(ground_truths: List[ObjectAnnotation],
             "Prediction and mask must have the same shape."
             f" Found {prediction_np.shape}/{ground_truth_np.shape}.")
 
+    agreement = _mask_iou(ground_truth_np, prediction_np)
+    if not include_subclasses:
+        return agreement
+
     prediction_classifications = []
     for prediction in predictions:
         prediction_classifications.extend(prediction.classifications)
@@ -147,7 +235,7 @@ def mask_miou(ground_truths: List[ObjectAnnotation],
 
-    classification_iou = get_iou_across_features(ground_truth_classifications,
-                                                 prediction_classifications)
-    agreement = _mask_iou(ground_truth_np, prediction_np)
+    # get_iou_across_features now returns per-feature ious, so collapse them.
+    classification_iou = average_ious(
+        get_iou_across_features(ground_truth_classifications,
+                                prediction_classifications))
+
     classification_iou = classification_iou if classification_iou is not None else agreement
     return (agreement + classification_iou) / 2.
 
@@ -225,10 +313,40 @@ def _create_feature_lookup(
         and the value is a list of annotations that have that feature_schema_id (or name)
 
     """
+    # TODO: Add a check here along these lines:
+    """
+    We don't want to select `name` for one label and `feature_schema_id` for
+    the other, so in another function we should check which key both sides can
+    use (and add a test for it).
+
+    Do we want to require that the user provides the feature name?
+    We don't really want schema ids showing up in the metric names..
+
+    ####
+    all_schema_ids_defined_pred, all_names_defined_pred = check_references(pred_annotations)
+    if not all_schema_ids_defined_pred and not all_names_defined_pred:
+        raise ValueError("All data must have feature_schema_ids or names set")
+
+    all_schema_ids_defined_gt, all_names_defined_gt = check_references(gt_annotations)
+
+    # Prefer name because the user will know what it means.
+    # Fall back to schema id in case the name doesn't exist.
+    if all_names_defined_pred and all_names_defined_gt:
+        return 'name'
+    elif all_schema_ids_defined_pred and all_schema_ids_defined_gt:
+        return 'feature_schema_id'
+    else:
+        raise ValueError(
+            "Ground truth and prediction annotations must all have names or "
+            "feature_schema_ids set. Otherwise there is no key to match on. "
+            "Please update.")
+    """
     grouped_annotations = defaultdict(list)
     for annotation in annotations:
-        grouped_annotations[annotation.feature_schema_id or
-                            annotation.name].append(annotation)
+        grouped_annotations[annotation.name or
+                            annotation.feature_schema_id].append(annotation)
+
     return grouped_annotations
 
 
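+
+# A minimal sketch of the `check_references` helper described in the TODO
+# inside `_create_feature_lookup`. The name and return shape are assumptions
+# carried over from that comment; adjust as needed.
+def check_references(annotations):
+    all_schema_ids_defined = all(
+        annotation.feature_schema_id is not None for annotation in annotations)
+    all_names_defined = all(
+        annotation.name is not None for annotation in annotations)
+    return all_schema_ids_defined, all_names_defined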