1111 Line , Checklist , Text , Radio )
1212
1313from .utils import get_feature_pairs
14-
15-
"""
TODO: Instead of these functions accepting labels, they should accept annotations.
Then we can add a helper for applying functions across prediction and label combinations.

NOTE: There is a known, unresolved issue with empty masks (see _mask_iou) — needs investigation.
"""
3028
29+
# TODO: What should we call this?
# We should be returning these objects..
def data_row_miou_v2(ground_truth: Label,
                     prediction: Label,
                     include_subclasses=True) -> List[ScalarMetric]:
    """Computes the aggregate iou for a ground truth / prediction Label pair.

    Args:
        ground_truth: Label containing the ground truth annotations
        prediction: Label containing the predicted annotations
        include_subclasses: whether classification subclasses factor into the score
    Returns:
        A one-element list holding the aggregate iou wrapped in a ScalarMetric.
    """
    # Pass the Label objects themselves: data_row_miou accesses `.annotations`
    # internally, so forwarding `ground_truth.annotations` (as before) would
    # raise AttributeError on the plain annotation list.
    feature_ious = data_row_miou(ground_truth, prediction, include_subclasses)
    return [ScalarMetric(metric_name="iou", value=feature_ious)]
3739
def features_miou(
        ground_truths: List[Union[ObjectAnnotation, ClassificationAnnotation]],
        predictions: List[Union[ObjectAnnotation, ClassificationAnnotation]],
        include_subclasses=True) -> List[ScalarMetric]:
    """
    Groups annotations by feature_schema_id or name (whichever is available),
    computes an iou score per feature group and returns one metric per group.

    Args:
        ground_truths: List of ground truth annotations
        predictions: List of predicted annotations
        include_subclasses: whether classification subclasses factor into the score
    Returns:
        List of ScalarMetric, one per feature pairing produced by
        get_feature_pairs.
    """
    # Classifications are supported because we just take a naive approach to them..
    annotation_pairs = get_feature_pairs(predictions, ground_truths)
    metrics = []
    for annotation_pair in annotation_pairs:
        score = feature_miou(annotation_pair[0], annotation_pair[1],
                             include_subclasses)
        metrics.append(ScalarMetric(metric_name="iou", value=score))
    return metrics
5963
6064
def data_row_miou(ground_truth: Label,
                  prediction: Label,
                  include_subclasses=True) -> Optional[float]:
    """
    Calculate iou for two labels corresponding to the same data row.

    Args:
        ground_truth: Label containing the ground truth annotations
        prediction: Label containing the predicted annotations
        include_subclasses: whether classification subclasses factor into the score
    Returns:
        float indicating the mean iou score for this data row.
        Returns None if there are no annotations in ground_truth or prediction Labels
    """
    feature_ious = features_miou(ground_truth.annotations,
                                 prediction.annotations, include_subclasses)
    # Every ScalarMetric produced by features_miou shares metric_name "iou", so
    # a dict keyed by metric_name would collapse all features into a single
    # entry and average only the last one. Key by position instead so that
    # every feature contributes to the mean.
    return average_ious(
        {str(idx): feature.value for idx, feature in enumerate(feature_ious)})
7582
7683
def average_ious(feature_ious: Dict[str, Optional[float]]) -> Optional[float]:
    """Return the mean of the non-None iou values, or None if every value is None
    (or the dict is empty)."""
    valid_scores = [score for score in feature_ious.values() if score is not None]
    if not valid_scores:
        return None
    return np.mean(valid_scores)
8087
8188
82-
83- def feature_miou (
84- ground_truths : List [Union [ObjectAnnotation , ClassificationAnnotation ]],
85- predictions : List [Union [ObjectAnnotation , ClassificationAnnotation ]],
86- include_subclasses : bool
87- ) -> Optional [float ]:
89+ def feature_miou (ground_truths : List [Union [ObjectAnnotation ,
90+ ClassificationAnnotation ]],
91+ predictions : List [Union [ObjectAnnotation ,
92+ ClassificationAnnotation ]],
93+ include_subclasses : bool ) -> Optional [float ]:
8894 """
8995 Computes iou score for all features with the same feature schema id.
9096
@@ -100,10 +106,12 @@ def feature_miou(
100106 elif not len (ground_truths ) and len (predictions ):
101107 # No ground truth annotations but there are predictions means no matches
102108 return 0.
103- elif not len (ground_truths ) and not len (predictions ): #TODO: This shouldn't run at all for subclasses. Otherwise it should return 1.
109+ elif not len (ground_truths ) and not len (
110+ predictions
111+ ): #TODO: This shouldn't run at all for subclasses. Otherwise it should return 1.
104112 # Ignore examples that do not have any annotations or predictions
105113 # This could maybe be counted as correct but could also skew the stats..
106- return # Undefined (neither wrong nor right. )
114+ return # Undefined (neither wrong nor right. )
107115 elif isinstance (predictions [0 ].value , Mask ):
108116 return mask_miou (ground_truths , predictions , include_subclasses )
109117 elif isinstance (predictions [0 ].value , Geometry ):
@@ -117,7 +125,8 @@ def feature_miou(
117125
118126def vector_miou (ground_truths : List [ObjectAnnotation ],
119127 predictions : List [ObjectAnnotation ],
120- buffer = 70. , include_subclasses = True ) -> float :
128+ buffer = 70. ,
129+ include_subclasses = True ) -> float :
121130 """
122131 Computes iou score for all features with the same feature schema id.
123132 Calculation includes subclassifications.
@@ -139,10 +148,12 @@ def vector_miou(ground_truths: List[ObjectAnnotation],
139148 ground_truth ) not in solution_features :
140149 solution_features .update ({id (prediction ), id (ground_truth )})
141150 if include_subclasses :
142- classification_iou = average_ious (get_iou_across_features (
143- prediction .classifications , ground_truth .classifications ))
151+ classification_iou = average_ious (
152+ get_iou_across_features (prediction .classifications ,
153+ ground_truth .classifications ))
144154 classification_iou = classification_iou if classification_iou is not None else agreement
145- solution_agreements .append ((agreement + classification_iou ) / 2. )
155+ solution_agreements .append (
156+ (agreement + classification_iou ) / 2. )
146157 else :
147158 solution_agreements .append (agreement )
148159
@@ -153,7 +164,8 @@ def vector_miou(ground_truths: List[ObjectAnnotation],
153164
154165
155166def mask_miou (ground_truths : List [ObjectAnnotation ],
156- predictions : List [ObjectAnnotation ], include_subclasses = True ) -> float :
167+ predictions : List [ObjectAnnotation ],
168+ include_subclasses = True ) -> float :
157169 """
158170 Computes iou score for all features with the same feature schema id.
159171 Calculation includes subclassifications.
@@ -283,6 +295,3 @@ def _polygon_iou(poly1: Polygon, poly2: Polygon) -> float:
283295def _mask_iou (mask1 : np .ndarray , mask2 : np .ndarray ) -> float :
284296 """Computes iou between two binary segmentation masks."""
285297 return np .sum (mask1 & mask2 ) / np .sum (mask1 | mask2 )
286-
287-
288-