
Commit 2ba0762

Merge remote-tracking branch 'origin/develop' into farkob/schema_id_name
# Conflicts:
#   labelbox/data/serialization/ndjson/label.py
#   labelbox/data/serialization/ndjson/objects.py
2 parents: b2048e8 + df40f5e

File tree: 6 files changed (+157, -27 lines)


labelbox/data/serialization/ndjson/label.py

Lines changed: 6 additions & 5 deletions
@@ -52,7 +52,6 @@ def _generate_annotations(
                 annots.extend(
                     NDSegments.to_common(annotation, annotation.name,
                                          annotation.schema_id))
-
             elif isinstance(annotation, NDObjectType.__args__):
                 annots.append(NDObject.to_common(annotation))
             elif isinstance(annotation, NDClassificationType.__args__):
@@ -63,12 +62,15 @@ def _generate_annotations(
             else:
                 raise TypeError(
                     f"Unsupported annotation. {type(annotation)}")
-            data = self._infer_media_type(annotations)(uid=data_row_id)
+            data = self._infer_media_type(annots)(uid=data_row_id)
             yield Label(annotations=annots, data=data)

     def _infer_media_type(
-            self, annotations: List[Union[NDObjectType, NDClassificationType]]
-    ) -> Union[TextEntity, TextData, ImageData]:
+            self, annotations: List[Union[TextEntity, VideoClassificationAnnotation,
+                                          VideoObjectAnnotation, ObjectAnnotation,
+                                          ClassificationAnnotation, ScalarMetric,
+                                          ConfusionMatrixMetric]]
+    ) -> Union[TextData, VideoData, ImageData]:
         types = {type(annotation) for annotation in annotations}
         if TextEntity in types:
             return TextData
@@ -102,7 +104,6 @@ def _create_video_annotations(
         for annotation_group in video_annotations.values():
             consecutive_frames = cls._get_consecutive_frames(
                 sorted([annotation.frame for annotation in annotation_group]))
-
             if isinstance(annotation_group[0], VideoClassificationAnnotation):
                 annotation = annotation_group[0]
                 frames_data = []
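Note that _infer_media_type now receives the already-converted common annotations (annots) rather than the raw NDJSON objects, so it can dispatch on annotation value types directly. A minimal sketch of that dispatch, assuming these types are exported from labelbox.data.annotation_types and that video annotation types map to VideoData with ImageData as the fallback (only the TextEntity branch is visible in the hunk above):

# Sketch only: the TextEntity branch matches the hunk above; the video and
# image branches are assumptions about how the rest of the method behaves.
from labelbox.data.annotation_types import (ImageData, TextData, TextEntity,
                                            VideoClassificationAnnotation,
                                            VideoData, VideoObjectAnnotation)

def infer_media_type_sketch(annotations):
    types = {type(annotation) for annotation in annotations}
    if TextEntity in types:
        return TextData
    if types & {VideoClassificationAnnotation, VideoObjectAnnotation}:
        return VideoData  # assumed: video annotations imply VideoData
    return ImageData      # assumed: everything else defaults to ImageData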

labelbox/data/serialization/ndjson/objects.py

Lines changed: 54 additions & 6 deletions
@@ -6,6 +6,7 @@

 from pydantic import BaseModel
 from PIL import Image
+from labelbox.data.annotation_types import feature

 from labelbox.data.annotation_types.data.video import VideoData

@@ -61,6 +62,21 @@ def from_common(cls, point: Point,
                    classifications=classifications)


+class NDFramePoint(VideoSupported):
+    point: _Point
+
+    def to_common(self, feature_schema_id: Cuid) -> VideoObjectAnnotation:
+        return VideoObjectAnnotation(frame=self.frame,
+                                     keyframe=True,
+                                     feature_schema_id=feature_schema_id,
+                                     value=Point(x=self.point.x,
+                                                 y=self.point.y))
+
+    @classmethod
+    def from_common(cls, frame: int, point: Point):
+        return cls(frame=frame, point=_Point(x=point.x, y=point.y))
+
+
 class NDLine(NDBaseObject):
     line: List[_Point]

@@ -83,6 +99,25 @@ def from_common(cls, line: Line,
                    classifications=classifications)


+class NDFrameLine(VideoSupported):
+    line: List[_Point]
+
+    def to_common(self, feature_schema_id: Cuid) -> VideoObjectAnnotation:
+        return VideoObjectAnnotation(
+            frame=self.frame,
+            keyframe=True,
+            feature_schema_id=feature_schema_id,
+            value=Line(points=[Point(x=pt.x, y=pt.y) for pt in self.line]))
+
+    @classmethod
+    def from_common(cls, frame: int, line: Line):
+        return cls(frame=frame,
+                   line=[{
+                       'x': pt.x,
+                       'y': pt.y
+                   } for pt in line.points])
+
+
 class NDPolygon(NDBaseObject):
     polygon: List[_Point]

@@ -153,18 +188,30 @@ def from_common(cls, frame: int, rectangle: Rectangle):


 class NDSegment(BaseModel):
-    keyframes: List[NDFrameRectangle]
+    keyframes: List[Union[NDFrameRectangle, NDFramePoint, NDFrameLine]]

     @staticmethod
     def lookup_segment_object_type(segment: List) -> "NDFrameObjectType":
         """Used for determining which object type the annotation contains
        returns the object type"""
-        result = {Rectangle: NDFrameRectangle}.get(type(segment[0].value))
+        result = {
+            Rectangle: NDFrameRectangle,
+            Point: NDFramePoint,
+            Line: NDFrameLine,
+        }.get(type(segment[0].value))
         return result

-    def to_common(self, name: str, feature_schema_id: Cuid):
+    @staticmethod
+    def segment_with_uuid(keyframe: Union[NDFrameRectangle, NDFramePoint,
+                                          NDFrameLine], uuid: str):
+        keyframe.extra = {'uuid': uuid}
+        return keyframe
+
+    def to_common(self, name: str, feature_schema_id: Cuid, uuid: str):
         return [
-            keyframe.to_common(name=name, feature_schema_id=feature_schema_id)
+            self.segment_with_uuid(
+                keyframe.to_common(name=name,
+                                   feature_schema_id=feature_schema_id), uuid)
             for keyframe in self.keyframes
         ]

@@ -188,7 +235,8 @@ def to_common(self, name: str, feature_schema_id: Cuid):
            result.extend(
                NDSegment.to_common(segment,
                                    name=name,
-                                   feature_schema_id=feature_schema_id))
+                                   feature_schema_id=feature_schema_id,
+                                   uuid=self.uuid))
        return result

    @classmethod
@@ -345,4 +393,4 @@ def lookup_object(
 NDObjectType = Union[NDLine, NDPolygon, NDPoint, NDRectangle, NDMask,
                      NDTextEntity]

-NDFrameObjectType = NDFrameRectangle
+NDFrameObjectType = NDFrameRectangle, NDFramePoint, NDFrameLine
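For reference, a small round-trip sketch of the new frame-level classes. This is not part of the diff; it assumes NDFramePoint and NDFrameLine are importable from labelbox.data.serialization.ndjson.objects (the module shown above), that Point and Line are exported from labelbox.data.annotation_types, and the feature schema id is just a placeholder cuid.

from labelbox.data.annotation_types import Line, Point
from labelbox.data.serialization.ndjson.objects import NDFrameLine, NDFramePoint

# One keyframe of a video line: common annotation value -> NDJSON frame object.
nd_line = NDFrameLine.from_common(
    frame=1, line=Line(points=[Point(x=10.0, y=10.0), Point(x=100.0, y=100.0)]))

# And back: NDJSON frame object -> keyframed VideoObjectAnnotation.
annotation = nd_line.to_common(feature_schema_id="ckrb1sfl8099g0y91cxbd5ftb")
assert annotation.frame == 1 and annotation.keyframe

# Points follow the same pattern; NDSegment.lookup_segment_object_type picks
# the frame class from the value type of a segment's first keyframe.
nd_point = NDFramePoint.from_common(frame=5, point=Point(x=50.0, y=50.0))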

labelbox/schema/model_run.py

Lines changed: 1 addition & 7 deletions
@@ -203,13 +203,7 @@ def assign_data_rows_to_split(self,
                                   timeout_seconds=120):

         split_value = split.value if isinstance(split, DataSplit) else split
-
-        if split_value == DataSplit.UNASSIGNED.value:
-            raise ValueError(
-                f"Cannot assign split value of `{DataSplit.UNASSIGNED.value}`.")
-
-        valid_splits = filter(lambda name: name != DataSplit.UNASSIGNED.value,
-                              DataSplit._member_names_)
+        valid_splits = DataSplit._member_names_

         if split_value not in valid_splits:
             raise ValueError(
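With the guard removed, every member of DataSplit is a valid target, including UNASSIGNED. A minimal usage sketch, assuming model_run is an existing ModelRun, data_row_ids is a list of data row ids already attached to it, and DataSplit is importable from labelbox.schema.model_run:

from labelbox.schema.model_run import DataSplit

# Enum members and their string names are both accepted.
model_run.assign_data_rows_to_split(data_row_ids, DataSplit.UNASSIGNED)
model_run.assign_data_rows_to_split(data_row_ids, "TRAINING")

# Values outside DataSplit._member_names_ still raise ValueError.
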
Lines changed: 94 additions & 1 deletion
@@ -1 +1,94 @@
-[{"answer": {"schemaId": "ckrb1sfl8099g0y91cxbd5ftb"}, "schemaId": "ckrb1sfjx099a0y914hl319ie", "dataRow": {"id": "ckrb1sf1i1g7i0ybcdc6oc8ct"}, "uuid": "f6879f59-d2b5-49c2-aceb-d9e8dc478673", "frames": [{"start": 30, "end": 35}, {"start": 50, "end": 51}]}, {"answer": [{"schemaId": "ckrb1sfl8099e0y919v260awv"}], "schemaId": "ckrb1sfkn099c0y910wbo0p1a", "dataRow": {"id": "ckrb1sf1i1g7i0ybcdc6oc8ct"}, "uuid": "d009925d-91a3-4f67-abd9-753453f5a584", "frames": [{"start": 0, "end": 5}]}, {"answer": "a value", "schemaId": "ckrb1sfkn099c0y910wbo0p1a", "dataRow": {"id": "ckrb1sf1i1g7i0ybcdc6oc8ct"}, "uuid": "d009925d-91a3-4f67-abd9-753453f5a584"}]
+[
+  {
+    "answer": {"schemaId": "ckrb1sfl8099g0y91cxbd5ftb"},
+    "schemaId": "ckrb1sfjx099a0y914hl319ie",
+    "dataRow": {"id": "ckrb1sf1i1g7i0ybcdc6oc8ct"},
+    "uuid": "f6879f59-d2b5-49c2-aceb-d9e8dc478673",
+    "frames": [{"start": 30, "end": 35}, {"start": 50, "end": 51}]
+  },
+  {
+    "answer": [{"schemaId": "ckrb1sfl8099e0y919v260awv"}],
+    "schemaId": "ckrb1sfkn099c0y910wbo0p1a",
+    "dataRow": {"id": "ckrb1sf1i1g7i0ybcdc6oc8ct"},
+    "uuid": "d009925d-91a3-4f67-abd9-753453f5a584",
+    "frames": [{"start": 0, "end": 5}]
+  },
+  {
+    "answer": "a value",
+    "schemaId": "ckrb1sfkn099c0y910wbo0p1a",
+    "dataRow": {"id": "ckrb1sf1i1g7i0ybcdc6oc8ct"},
+    "uuid": "d009925d-91a3-4f67-abd9-753453f5a584"
+  },
+  {
+    "classifications": [],
+    "schemaId": "cl5islwg200gfci6g0oitaypu",
+    "dataRow": {"id": "ckrb1sf1i1g7i0ybcdc6oc8ct"},
+    "uuid": "6f7c835a-0139-4896-b73f-66a6baa89e94",
+    "segments": [
+      {
+        "keyframes": [
+          {
+            "frame": 1,
+            "line": [{"x": 10.0, "y": 10.0}, {"x": 100.0, "y": 100.0}, {"x": 50.0, "y": 30.0}]
+          }
+        ]
+      },
+      {
+        "keyframes": [
+          {
+            "frame": 5,
+            "line": [{"x": 100.0, "y": 10.0}, {"x": 50.0, "y": 100.0}, {"x": 50.0, "y": 30.0}]
+          }
+        ]
+      }
+    ]
+  },
+  {
+    "classifications": [],
+    "schemaId": "cl5it7ktp00i5ci6gf80b1ysd",
+    "dataRow": {"id": "ckrb1sf1i1g7i0ybcdc6oc8ct"},
+    "uuid": "f963be22-227b-4efe-9be4-2738ed822216",
+    "segments": [
+      {
+        "keyframes": [
+          {
+            "frame": 1,
+            "point": {"x": 10.0, "y": 10.0}
+          }
+        ]
+      },
+      {
+        "keyframes": [
+          {
+            "frame": 5,
+            "point": {"x": 50.0, "y": 50.0}
+          }
+        ]
+      }
+    ]
+  },
+  {
+    "classifications": [],
+    "schemaId": "cl5iw0roz00lwci6g5jni62vs",
+    "dataRow": {"id": "ckrb1sf1i1g7i0ybcdc6oc8ct"},
+    "uuid": "13b2ee0e-2355-4336-8b83-d74d09e3b1e7",
+    "segments": [
+      {
+        "keyframes": [
+          {
+            "frame": 1,
+            "bbox": {"top": 10.0, "left": 5.0, "height": 100.0, "width": 150.0}
+          }
+        ]
+      },
+      {
+        "keyframes": [
+          {
+            "frame": 5,
+            "bbox": {"top": 300.0, "left": 200.0, "height": 400.0, "width": 150.0}
+          }
+        ]
+      }
+    ]
+  }
+]

tests/data/serialization/ndjson/test_video.py

Lines changed: 1 addition & 1 deletion
@@ -9,4 +9,4 @@ def test_video():

     res = NDJsonConverter.deserialize(data).as_list()
     res = list(NDJsonConverter.serialize(res))
-    assert res == [data[2], data[0], data[1]]
+    assert res == [data[2], data[0], data[1], data[3], data[4], data[5]]
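A sketch of what this round trip exercises, assuming the fixture shown above is loaded from a hypothetical local path into data as a list of dicts, and that NDJsonConverter and VideoObjectAnnotation are importable as below:

import json

from labelbox.data.annotation_types import VideoObjectAnnotation
from labelbox.data.serialization import NDJsonConverter

with open("video_import.json") as f:  # hypothetical path to the fixture above
    data = json.load(f)

labels = NDJsonConverter.deserialize(data).as_list()

# Every keyframe under "segments" in the fixture comes back as a keyframed
# VideoObjectAnnotation (line, point, or bbox).
video_objects = [
    annotation for label in labels for annotation in label.annotations
    if isinstance(annotation, VideoObjectAnnotation)
]
assert video_objects and all(obj.keyframe for obj in video_objects)

# Serializing again yields NDJSON dicts comparable to the original entries.
round_tripped = list(NDJsonConverter.serialize(labels))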

tests/integration/annotation_import/test_model_run.py

Lines changed: 1 addition & 7 deletions
@@ -146,13 +146,7 @@ def test_model_run_split_assignment(model_run, dataset, image_url):
     with pytest.raises(ValueError):
         model_run.assign_data_rows_to_split(data_row_ids, "INVALID SPLIT")

-    with pytest.raises(ValueError):
-        model_run.assign_data_rows_to_split(data_row_ids, DataSplit.UNASSIGNED)
-
-    for split in ["TRAINING", "TEST", "VALIDATION", *DataSplit]:
-        if split == DataSplit.UNASSIGNED:
-            continue
-
+    for split in ["TRAINING", "TEST", "VALIDATION", "UNASSIGNED", *DataSplit]:
         model_run.assign_data_rows_to_split(data_row_ids, split)
         counts = Counter()
         for data_row in model_run.model_run_data_rows():
