    ClassificationAnnotation, Mask, Geometry, Point,
    Line, Checklist, Text, Radio)

+from .utils import get_lookup_pair

-def subclass_ious(ground_truth: Label, prediction: Label) -> Dict[str, Optional[float]]:
-    """
-    This function effectively flattens all Label classes and computes the iou.
-    Text is ignored for this function.
-    So for Radio or Checklist, if you have an animal detection model and the model predicts:
-        Polygon - cat
-        Radio - orange
-        Checklist - fluffy
-
-    this all gets grouped into one category, cat:orange:fluffy,
-    and the whole compound key has to match.
-
-    The most appropriate use case for this is if you have one radio subclass that you prefer to treat as top level.
-    Otherwise this function is a bit naive; if you want something that specifically suits
-    your use case, create a new function based off this one.
-    """
-    identifying = get_identifying_key(prediction.annotations, ground_truth.annotations)
-    prediction_annotations = _create_feature_lookup(prediction.annotations)
-    ground_truth_annotations = _create_feature_lookup(ground_truth.annotations)
-    feature_schemas = set(prediction_annotations.keys()).union(
-        set(ground_truth_annotations.keys()))
-
-
-    def _create_classification_feature_lookup(annotations: Union[List[ObjectAnnotation], List[ClassificationAnnotation]]):
-        # Note that these annotations should all be of the same type..
16+ """
17+ Instead of these functions accepting labels they should accept annotations..
18+ Then we can add a helper for applying functions across pred and label combinations..
4119
42- if not len (annotations ) or isinstance (annotations [0 ], ClassificationAnnotation ):
43- return annotations
44-
45- grouped_annotations = defaultdict (list )
46- for annotation in annotations :
47- row = []
48- classifications = [classification .value for classification in annotation .classifications if isinstance (classification .value , Radio )]
49- classifications = [classification .answer .name or classification .answer .feature_schema_id for classification in classifications ]
50- # TODO: create the lookup
51- grouped_annotations [annotation .name or annotation .feature_schema_id ].append (annotation )
52- return grouped_annotations
+We will get stats for each, and the stats will support flattening.


-    #ious = []
-    #for key in feature_schemas:
-        # We shouldn't have any Nones, since the keys are generated by the presence of the object.
-        #prediction_annotations = prediction_annotations[key]
-        ##ground_truth_annotations =
-
+data_row_iou()

+Is it even possible to return a None? If both are None then they won't have keys..
+"""

-def feature_miou(ground_truth: Label, prediction: Label) -> List[CustomScalarMetric]:
+def feature_miou(
+        ground_truth: List[Union[ObjectAnnotation, ClassificationAnnotation]],
+        prediction: List[Union[ObjectAnnotation, ClassificationAnnotation]]) -> List[CustomScalarMetric]:
+    # Classifications are supported because we just take a naive approach to them..
     return [
         CustomScalarMetric(metric_name="iou", metric_value=value, feature_name=name)
-        for name, value in get_iou_across_features(ground_truth.annotations, prediction.annotations)
+        for name, value in get_iou_across_features(ground_truth, prediction).items()
         if value is not None
     ]


+
# TODO: What should we call this?
# We should be returning these objects..
def data_row_miou_v2(ground_truth: Label, prediction: Label, include_subclasses=True) -> List[CustomScalarMetric]:
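For context on the reshaped feature_miou above: it flattens the per-feature iou dict into a list of metric objects. A minimal usage sketch (the annotation fixtures and printed values are made up for illustration):

    # Hypothetical call; ground_truth and prediction are Label objects as elsewhere in this file.
    metrics = feature_miou(ground_truth.annotations, prediction.annotations)
    for metric in metrics:
        # Each entry looks like:
        # CustomScalarMetric(metric_name="iou", metric_value=0.73, feature_name="cat")
        print(metric.feature_name, metric.metric_value)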
@@ -93,6 +62,47 @@ def data_row_miou(ground_truth: Label, prediction: Label, include_subclasses = T
    return average_ious(feature_ious)


+def subclass_ious(ground_truth: Label, prediction: Label) -> Dict[str, Optional[float]]:
+    """
+    This function effectively flattens all Label classes and computes the iou.
+    Text is ignored for this function.
+    So for Radio or Checklist, if you have an animal detection model and the model predicts:
+        Polygon - cat
+        Radio - orange
+        Checklist - fluffy
+
+    this all gets grouped into one category, cat:orange:fluffy,
+    and the whole compound key has to match.
+
+    The most appropriate use case for this is if you have one radio subclass that you prefer to treat as top level.
+    Otherwise this function is a bit naive; if you want something that specifically suits
+    your use case, create a new function based off this one.
+    """
+
+
+    prediction_annotations, ground_truth_annotations, keys = get_lookup_pair(prediction.annotations, ground_truth.annotations)
+
+
+    def _create_classification_feature_lookup(annotations: Union[List[ObjectAnnotation], List[ClassificationAnnotation]]):
+        # Note that these annotations should all be of the same type..
+
+        if not len(annotations) or isinstance(annotations[0], ClassificationAnnotation):
+            return annotations
+
+    ious = []
+    for key in keys:
+        # We shouldn't have any Nones, since the keys are generated by the presence of the object.
+        [classification.value.answer for classification in annotation.classifications if isinstance(classification.value, Radio)]
+        pred_annotations = prediction_annotations[key]
+        gt_annotations = ground_truth_annotations[key]
+
+
def average_ious(feature_ious: Dict[str, Optional[float]]) -> Optional[float]:
    ious = [iou for iou in feature_ious.values() if iou is not None]
    return None if not len(ious) else np.mean(ious)
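The "cat:orange:fluffy" grouping described in the subclass_ious docstring could be built with a helper along these lines. This is a hypothetical sketch, not part of the commit; it assumes Radio answers expose .answer.name / .answer.feature_schema_id (as the removed lookup code above does) and that Checklist answers are a list with the same fields:

    def _subclass_key(annotation: ObjectAnnotation) -> str:
        # Start with the top-level feature name, e.g. "cat".
        parts = [annotation.name or annotation.feature_schema_id]
        for classification in annotation.classifications:
            value = classification.value
            if isinstance(value, Radio):
                # Radio has a single answer, e.g. "orange".
                parts.append(value.answer.name or value.answer.feature_schema_id)
            elif isinstance(value, Checklist):
                # Checklist has multiple answers, e.g. "fluffy"; sort for a stable key.
                parts.extend(sorted(a.name or a.feature_schema_id for a in value.answer))
            # Text subclasses are ignored, per the docstring.
        return ":".join(parts)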
@@ -113,19 +123,14 @@ def get_iou_across_features(
        float indicating the iou score for all features represented in the annotations passed to this function.
        Returns None if there are no annotations in ground_truth or prediction annotations
    """
-    prediction_annotations = _create_feature_lookup(predictions)
-    ground_truth_annotations = _create_feature_lookup(ground_truths)
-    feature_schemas = set(prediction_annotations.keys()).union(
-        set(ground_truth_annotations.keys()))
+    prediction_annotations, ground_truth_annotations, keys = get_lookup_pair(predictions, ground_truths)
    ious = {
-        feature_schema: feature_miou(ground_truth_annotations[feature_schema],
-                                     prediction_annotations[feature_schema], include_subclasses)
-        for feature_schema in feature_schemas
+        key: feature_miou(ground_truth_annotations[key],
+                          prediction_annotations[key], include_subclasses)
+        for key in keys
    }
    return ious
-    #ious = [iou for iou in ious if iou is not None]  # TODO: What causes this to be None?

-    return  #None if not len(ious) else np.mean(ious)


def feature_miou(
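get_lookup_pair comes from .utils and isn't shown in this diff. Judging by the three lines it replaces here and the helpers removed further down, it presumably bundles the two per-feature lookups and the shared key set, roughly:

    def get_lookup_pair(pred_annotations, gt_annotations):
        # Sketch only; the real helper may also call get_identifying_key to pick
        # 'name' vs 'feature_schema_id' before grouping.
        prediction_lookup = _create_feature_lookup(pred_annotations)
        ground_truth_lookup = _create_feature_lookup(gt_annotations)
        keys = set(prediction_lookup.keys()).union(set(ground_truth_lookup.keys()))
        return prediction_lookup, ground_truth_lookup, keys

Because _create_feature_lookup returns a defaultdict(list), indexing a key that exists on only one side yields an empty list, which the empty-list branches in feature_miou below are there to handle.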
@@ -148,7 +153,7 @@ def feature_miou(
    elif not len(ground_truths) and len(predictions):
        # No ground truth annotations but there are predictions means no matches
        return 0.
-    elif not len(ground_truths) and not len(predictions):
+    elif not len(ground_truths) and not len(predictions):  # TODO: This shouldn't run at all for subclasses. Otherwise it should return 1.
        # Ignore examples that do not have any annotations or predictions
        # This could maybe be counted as correct but could also skew the stats..
        return  # Undefined (neither wrong nor right.)
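Combined with average_ious above, the branches work out like this (illustrative only, and assuming the earlier branches of feature_miou handle the symmetric one-sided case the same way):

    # feature_miou(ground_truths=[], predictions=[pred]) -> 0.   (false positives only)
    # feature_miou(ground_truths=[], predictions=[])     -> None (undefined; skipped downstream)
    # average_ious drops the None before taking the mean:
    assert average_ious({"cat": 0.5, "dog": None}) == 0.5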
@@ -300,29 +305,6 @@ def checklist_iou(ground_truth: Checklist, prediction: Checklist) -> float:
            len(schema_ids_label | schema_ids_pred))


-def _create_feature_lookup(
-        annotations: List[Union[ObjectAnnotation, ClassificationAnnotation]]
-) -> Dict[str, List[Union[ObjectAnnotation, ClassificationAnnotation]]]:
-    """
-    Groups annotations by name (if available, otherwise feature schema id).
-
-    Args:
-        annotations: List of annotations to group
-    Returns:
-        a dict where each key is the name (or feature_schema_id)
-        and the value is a list of annotations that have that name (or feature_schema_id)
-    """
-    # TODO: Add a check here.
-    grouped_annotations = defaultdict(list)
-    for annotation in annotations:
-        grouped_annotations[annotation.name or
-                            annotation.feature_schema_id].append(annotation)
-    return grouped_annotations
-
-
def _get_vector_pairs(
        ground_truths: List[ObjectAnnotation],
        predictions: List[ObjectAnnotation], buffer: float
@@ -356,44 +338,4 @@ def _mask_iou(mask1: np.ndarray, mask2: np.ndarray) -> float:
    return np.sum(mask1 & mask2) / np.sum(mask1 | mask2)


-def all_have_key(annotations: List[FeatureSchema]) -> Tuple[bool, bool]:
-    """
-    We want to make sure that all feature schemas have names set or feature_schema_ids set.
-    """
-    all_names = True
-    all_schemas = True
-    for annotation in annotations:
-        if annotation.name is None:
-            all_names = False
-        if annotation.feature_schema_id is None:
-            all_schemas = False
-    return all_schemas, all_names
-
-def get_identifying_key(pred_annotations, gt_annotations):
-    """
-    We don't want to select name for one and then feature_schema_id for the other.
-    I think we should check that in another function.
-
-    Do we want to require that the user provides the feature name?
-    We don't really want schema ids showing up in the metric names..
-    """
-    # TODO: Also add a test.
-    all_schema_ids_defined_pred, all_names_defined_pred = all_have_key(pred_annotations)
-    if (not all_schema_ids_defined_pred and not all_names_defined_pred):
-        raise ValueError("All data must have feature_schema_ids or names set")
-
-    all_schema_ids_defined_gt, all_names_defined_gt = all_have_key(gt_annotations)
-
-    # Prefer name because the user will be able to know what it means.
-    # Fall back to schema id in case names don't exist..
-    if (all_names_defined_pred and all_names_defined_gt):
-        return 'name'
-    elif all_schema_ids_defined_pred and all_schema_ids_defined_gt:
-        return 'feature_schema_id'
-    else:
-        raise ValueError("Ground truth and prediction annotations must all have names or feature_schema_ids set. Otherwise there is no key to match on. Please update.")

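For reference, the contract of the removed get_identifying_key, sketched as usage (fixture values are hypothetical):

    # Every annotation on both sides carries a name -> group by name.
    get_identifying_key(pred_annotations, gt_annotations)   # -> 'name'

    # Names missing somewhere, but every annotation has a feature_schema_id
    # -> fall back to grouping by schema id.
    get_identifying_key(pred_annotations, gt_annotations)   # -> 'feature_schema_id'

    # Predictions keyed only by name while ground truths are keyed only by
    # schema id -> no common key, so a ValueError is raised.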