|
259 | 259 | }, |
260 | 260 | { |
261 | 261 | "metadata": {}, |
262 | | - "source": "# Define tokens with precise timing (from demo script)\ntokens_data = [\n (\"Hello\", 586, 770), # Hello: frames 586-770\n (\"AI\", 771, 955), # AI: frames 771-955\n (\"how\", 956, 1140), # how: frames 956-1140\n (\"are\", 1141, 1325), # are: frames 1141-1325\n (\"you\", 1326, 1510), # you: frames 1326-1510\n (\"doing\", 1511, 1695), # doing: frames 1511-1695\n (\"today\", 1696, 1880), # today: frames 1696-1880\n]\n\n# Create temporal annotations for each token using NEW frames interface\ntemporal_annotations = []\nfor token, start_frame, end_frame in tokens_data:\n token_annotation = lb_types.AudioClassificationAnnotation(\n frames=[lb_types.FrameLocation(start=start_frame, end=end_frame)],\n name=\"User Speaker\",\n value=lb_types.Text(answer=token),\n )\n temporal_annotations.append(token_annotation)\n\nprint(f\"Created {len(temporal_annotations)} temporal token annotations\")", |
| 262 | + "source": "# Define tokens with precise timing\ntokens_data = [\n (\"Hello\", 586, 770), # Hello: frames 586-770\n (\"AI\", 771, 955), # AI: frames 771-955\n (\"how\", 956, 1140), # how: frames 956-1140\n (\"are\", 1141, 1325), # are: frames 1141-1325\n (\"you\", 1326, 1510), # you: frames 1326-1510\n (\"doing\", 1511, 1695), # doing: frames 1511-1695\n (\"today\", 1696, 1880), # today: frames 1696-1880\n]\n\n# Create temporal annotations for each token\ntemporal_annotations = []\nfor token, start_frame, end_frame in tokens_data:\n token_annotation = lb_types.AudioClassificationAnnotation(\n start_frame=start_frame,\n end_frame=end_frame,\n name=\"User Speaker\",\n value=lb_types.Text(answer=token),\n )\n temporal_annotations.append(token_annotation)\n\nprint(f\"Created {len(temporal_annotations)} temporal token annotations\")", |
263 | 263 | "cell_type": "code", |
264 | 264 | "outputs": [], |
265 | 265 | "execution_count": null |
266 | 266 | }, |
267 | 267 | { |
268 | 268 | "metadata": {}, |
269 | | - "source": "# Create label with regular and temporal annotations\nlabel_with_temporal = []\nlabel_with_temporal.append(\n lb_types.Label(\n data={\"global_key\": global_key},\n annotations=[text_annotation, checklist_annotation, radio_annotation] +\n temporal_annotations,\n ))\n\nprint(\n f\"Created label with {len(label_with_temporal[0].annotations)} total annotations\"\n)\nprint(f\" - Regular annotations: 3\")\nprint(f\" - Temporal annotations: {len(temporal_annotations)}\")\n\n# Example: Nested temporal annotation with explicit frame matching\n# Structure: Speaker -> Transcription -> Emotion -> Intensity\n# Each level can have different frame ranges (subsets of parent)\nnested_temporal_annotation = lb_types.AudioClassificationAnnotation(\n frames=[lb_types.FrameLocation(start=100, end=500)],\n name=\"Speaker Analysis\",\n value=lb_types.Radio(\n answer=lb_types.ClassificationAnswer(\n name=\"User\",\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"Transcription\",\n value=lb_types.Text(answer=\"Hello there\"),\n frames=[lb_types.FrameLocation(start=100, end=500)],\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"Emotion\",\n value=lb_types.Radio(\n answer=lb_types.ClassificationAnswer(\n name=\"happy\",\n frames=[lb_types.FrameLocation(start=150, end=450)],\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"Intensity\",\n value=lb_types.Radio(\n answer=lb_types.ClassificationAnswer(\n name=\"high\",\n frames=[lb_types.FrameLocation(start=200, end=400)]\n )\n )\n )\n ]\n )\n ),\n frames=[lb_types.FrameLocation(start=150, end=450)]\n )\n ]\n )\n ]\n )\n )\n)\n\nprint(\"\\nNested temporal annotation created:\")\nprint(\" - Speaker: 100-500ms\")\nprint(\" → Transcription: 100-500ms\")\nprint(\" → Emotion: 150-450ms (subset)\")\nprint(\" → Intensity: 200-400ms (subset)\")\n", |
| 269 | + "source": "# Create label with regular and temporal annotations\nlabel_with_temporal = []\nlabel_with_temporal.append(\n lb_types.Label(\n data={\"global_key\": global_key},\n annotations=[text_annotation, checklist_annotation, radio_annotation] +\n temporal_annotations,\n ))\n\nprint(\n f\"Created label with {len(label_with_temporal[0].annotations)} total annotations\"\n)\nprint(f\" - Regular annotations: 3\")\nprint(f\" - Temporal annotations: {len(temporal_annotations)}\")\n\n# Example: Nested temporal annotation with hierarchical classifications\n# Structure: Speaker -> Transcription -> Emotion -> Intensity\n# Parent uses start_frame/end_frame, nested items use frames for discontinuous ranges\nnested_temporal_annotation = lb_types.AudioClassificationAnnotation(\n start_frame=100,\n end_frame=500,\n name=\"Speaker Analysis\",\n value=lb_types.Radio(\n answer=lb_types.ClassificationAnswer(\n name=\"User\",\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"Transcription\",\n value=lb_types.Text(answer=\"Hello there\"),\n frames=[lb_types.FrameLocation(start=100, end=500)],\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"Emotion\",\n value=lb_types.Radio(\n answer=lb_types.ClassificationAnswer(\n name=\"happy\",\n frames=[lb_types.FrameLocation(start=150, end=450)],\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"Intensity\",\n value=lb_types.Radio(\n answer=lb_types.ClassificationAnswer(\n name=\"high\",\n frames=[lb_types.FrameLocation(start=200, end=400)]\n )\n )\n )\n ]\n )\n ),\n frames=[lb_types.FrameLocation(start=150, end=450)]\n )\n ]\n )\n ]\n )\n )\n)\n\nprint(\"\\nNested temporal annotation created:\")\nprint(\" - Speaker: 100-500ms (parent range)\")\nprint(\" → Transcription: 100-500ms\")\nprint(\" → Emotion: 150-450ms (nested subset)\")\nprint(\" → Intensity: 200-400ms (nested subset)\")\n", |
270 | 270 | "cell_type": "code", |
271 | 271 | "outputs": [], |
272 | 272 | "execution_count": null |
|
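Note on the nested example above: the comment in the new cell says child classifications keep `frames=[lb_types.FrameLocation(...)]` so one answer can cover discontinuous ranges. A minimal sketch of that case, reusing only constructors already shown in this diff (the `import labelbox.types as lb_types` alias is assumed from the notebook's setup cells, and the frame values are illustrative, not taken from the notebook):

import labelbox.types as lb_types

# Sketch (assumption): an "Emotion" answer that applies to two discontinuous ranges
discontinuous_emotion = lb_types.ClassificationAnnotation(
    name="Emotion",
    value=lb_types.Radio(answer=lb_types.ClassificationAnswer(name="happy")),
    frames=[
        lb_types.FrameLocation(start=150, end=300),  # first segment
        lb_types.FrameLocation(start=380, end=450),  # second, non-contiguous segment
    ],
)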
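For completeness, the `label_with_temporal` list built in the second cell would normally be sent through a model-assisted labeling import. A sketch under the assumption that `client` and `project` are defined in the notebook's earlier setup cells (standard in these tutorial notebooks); the MAL import call is the SDK's usual pattern, not something introduced by this change:

import uuid
import labelbox as lb

# Upload the labels (including the temporal token annotations) as MAL predictions
upload_job = lb.MALPredictionImport.create_from_objects(
    client=client,                    # assumed from an earlier setup cell
    project_id=project.uid,           # assumed from an earlier setup cell
    name="audio_temporal_mal_" + str(uuid.uuid4()),
    predictions=label_with_temporal,  # list built in the cell above
)
upload_job.wait_until_done()
print("Errors:", upload_job.errors)
print("Status of uploads:", upload_job.statuses)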