 from labelbox.data.annotation_types.metrics import ConfusionMatrixMetric, ScalarMetric
 from labelbox.data.annotation_types.collection import LabelList
 from labelbox.data.annotation_types import ScalarMetric, Label, ImageData
+from labelbox.data.annotation_types.metrics.scalar import RESERVED_METRIC_NAMES


 def test_legacy_scalar_metric():
@@ -56,7 +57,7 @@ def test_legacy_scalar_metric():
 ])
 def test_custom_scalar_metric(feature_name, subclass_name, aggregation, value):
     kwargs = {'aggregation': aggregation} if aggregation is not None else {}
-    metric = ScalarMetric(metric_name="iou",
+    metric = ScalarMetric(metric_name="custom_iou",
                           value=value,
                           feature_name=feature_name,
                           subclass_name=subclass_name,
@@ -80,7 +81,7 @@ def test_custom_scalar_metric(feature_name, subclass_name, aggregation, value):
         'value':
             value,
         'metric_name':
-            'iou',
+            'custom_iou',
         **({
             'feature_name': feature_name
         } if feature_name else {}),
@@ -192,3 +193,10 @@ def test_invalid_number_of_confidence_scores():
             metric_name="too many scores",
             value={i / 20.: [0, 1, 2, 3] for i in range(20)})
     assert "Number of confidence scores must be greater" in str(exc_info.value)
+
+
+@pytest.mark.parametrize("metric_name", RESERVED_METRIC_NAMES)
+def test_reserved_names(metric_name: str):
+    with pytest.raises(ValidationError) as exc_info:
+        ScalarMetric(metric_name=metric_name, value=0.5)
+    assert 'is a reserved metric name' in exc_info.value.errors()[0]['msg']
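For context, here is a minimal, self-contained sketch of the kind of guard the new test exercises, assuming a pydantic-style validator behind ScalarMetric. The class name ScalarMetricSketch and the placeholder entry in RESERVED_METRIC_NAMES below are illustrative only and are not taken from the labelbox source.

# Minimal sketch (not the labelbox implementation): a pydantic validator that
# rejects reserved metric names and surfaces the ValidationError the test expects.
from pydantic import BaseModel, ValidationError, validator

# Placeholder content; the real RESERVED_METRIC_NAMES lives in
# labelbox.data.annotation_types.metrics.scalar.
RESERVED_METRIC_NAMES = ("true_positive_count",)


class ScalarMetricSketch(BaseModel):
    metric_name: str
    value: float

    @validator("metric_name")
    def check_not_reserved(cls, name):
        # Reject names the library keeps for its own built-in metrics.
        if name in RESERVED_METRIC_NAMES:
            raise ValueError(f"'{name}' is a reserved metric name")
        return name


try:
    ScalarMetricSketch(metric_name="true_positive_count", value=0.5)
except ValidationError as exc:
    # errors()[0]['msg'] contains "is a reserved metric name",
    # which is the substring asserted in test_reserved_names above.
    print(exc.errors()[0]["msg"])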