From 804c4a2f3fc64b42519dbbe426cd3c6dffb120d3 Mon Sep 17 00:00:00 2001 From: "codeflash-ai[bot]" <148906541+codeflash-ai[bot]@users.noreply.github.com> Date: Wed, 29 Oct 2025 17:32:49 +0000 Subject: [PATCH] Optimize BlockManifest.describe_outputs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The optimization extracts the `OutputDefinition` list creation from inside the `describe_outputs()` method to a module-level constant `_OUTPUT_DEFINITIONS`. This eliminates redundant object creation on every method call. **Key Performance Gains:** - **Object Creation Elimination**: Instead of creating two `OutputDefinition` objects and a list every time `describe_outputs()` is called, these objects are created once at module import time - **Memory Allocation Reduction**: Avoids repeated memory allocation for the same static data structure - **Method Call Overhead Reduction**: The method now simply returns a pre-existing list reference instead of constructing objects **Why This Works:** In Python, object creation (especially with `__init__` calls) and list construction have overhead. Since the output definitions are static and never change, creating them once at module load time and reusing the same objects is significantly faster. **Caveat:** Because `describe_outputs()` now returns a reference to a single shared module-level list, callers must treat the result as read-only — mutating the returned list (or the `OutputDefinition` objects inside it) would silently change what every subsequent call sees. If any caller may mutate the result, return a copy (`list(_OUTPUT_DEFINITIONS)`) or store the constant as a tuple instead. **Test Case Performance:** The optimization shows consistent 10-15x speedups across all test cases, with individual calls dropping from ~5μs to ~300-400ns. The most dramatic improvement appears in the large-scale test with 1000 calls (1.62ms → 134μs), demonstrating how the benefit compounds with frequent usage. This pattern is ideal for workflows where `describe_outputs()` is called repeatedly during execution planning or validation phases. 
--- .../models/roboflow/instance_segmentation/v1.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py index 7ff43093b1..ce2ea34006 100644 --- a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py @@ -48,6 +48,14 @@ ) from inference_sdk import InferenceConfiguration, InferenceHTTPClient +_OUTPUT_DEFINITIONS = [ + OutputDefinition(name=INFERENCE_ID_KEY, kind=[STRING_KIND]), + OutputDefinition( + name="predictions", + kind=[INSTANCE_SEGMENTATION_PREDICTION_KIND], + ), +] + LONG_DESCRIPTION = """ Run inference on an instance segmentation model hosted on or uploaded to Roboflow. @@ -159,13 +167,7 @@ def get_parameters_accepting_batches(cls) -> List[str]: @classmethod def describe_outputs(cls) -> List[OutputDefinition]: - return [ - OutputDefinition(name=INFERENCE_ID_KEY, kind=[STRING_KIND]), - OutputDefinition( - name="predictions", - kind=[INSTANCE_SEGMENTATION_PREDICTION_KIND], - ), - ] + return _OUTPUT_DEFINITIONS @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: