     ModelParams,
 )
 from ads.aqua.evaluation.errors import EVALUATION_JOB_EXIT_CODE_MESSAGE
+from ads.aqua.ui import AquaContainerConfig
 from ads.common.auth import default_signer
 from ads.common.object_storage_details import ObjectStorageDetails
 from ads.common.utils import get_console_link, get_files, get_log_links
@@ ... @@
 from ads.jobs.builders.runtimes.base import Runtime
 from ads.jobs.builders.runtimes.container_runtime import ContainerRuntime
 from ads.model.datascience_model import DataScienceModel
+from ads.model.deployment import ModelDeploymentContainerRuntime
 from ads.model.deployment.model_deployment import ModelDeployment
+from ads.model.generic_model import ModelDeploymentRuntimeType
 from ads.model.model_metadata import (
     MetadataTaxonomyKeys,
     ModelCustomMetadata,
@@ -166,15 +169,27 @@ def create(
                 f"Invalid evaluation source {create_aqua_evaluation_details.evaluation_source_id}. "
                 "Specify either a model or model deployment id."
             )
-
         evaluation_source = None
+        eval_inference_configuration = None
         if (
             DataScienceResource.MODEL_DEPLOYMENT
             in create_aqua_evaluation_details.evaluation_source_id
         ):
             evaluation_source = ModelDeployment.from_id(
                 create_aqua_evaluation_details.evaluation_source_id
             )
+            if evaluation_source.runtime.type == ModelDeploymentRuntimeType.CONTAINER:
+                runtime = ModelDeploymentContainerRuntime.from_dict(
+                    evaluation_source.runtime.to_dict()
+                )
+                inference_config = AquaContainerConfig.from_container_index_json(
+                    enable_spec=True
+                ).inference
+                for container in inference_config.values():
+                    if container.name == runtime.image.split(":")[0]:
+                        eval_inference_configuration = (
+                            container.spec.evaluation_configuration
+                        )
         elif (
             DataScienceResource.MODEL
             in create_aqua_evaluation_details.evaluation_source_id
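The added block above resolves the evaluation inference configuration by stripping the image tag and comparing the result against the registered container family names; note the comparison assumes the only colon in the image string is the tag separator. A minimal sketch of that matching, with hypothetical stand-ins for the entries returned by `AquaContainerConfig.from_container_index_json(enable_spec=True).inference`:

```python
from types import SimpleNamespace

# Hypothetical stand-ins for the inference container index entries;
# names and spec fields here are illustrative, not the real schema.
inference_config = {
    "vllm": SimpleNamespace(
        name="odsc-vllm-serving",
        spec=SimpleNamespace(evaluation_configuration={"inference_max_threads": 10}),
    ),
}

# Assumed image format "<family>:<tag>"; split(":")[0] drops the tag.
image = "odsc-vllm-serving:0.4.1.3"
family = image.split(":")[0]  # -> "odsc-vllm-serving"

eval_inference_configuration = None
for container in inference_config.values():
    if container.name == family:
        eval_inference_configuration = container.spec.evaluation_configuration

print(eval_inference_configuration)  # {'inference_max_threads': 10}
```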
@@ -390,6 +405,9 @@ def create(
                     report_path=create_aqua_evaluation_details.report_path,
                     model_parameters=create_aqua_evaluation_details.model_parameters,
                     metrics=create_aqua_evaluation_details.metrics,
+                    inference_configuration=eval_inference_configuration.to_filtered_dict()
+                    if eval_inference_configuration
+                    else {},
                 )
             ).create(**kwargs)  ## TODO: decide what parameters will be needed
             logger.debug(
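The conditional expression at the call site guarantees `_build_evaluation_runtime` always receives a dict. `to_filtered_dict()` is assumed here to drop unset fields so they don't surface as nulls in the serialized job arguments; a rough sketch of that assumed behavior, with a hypothetical config shape:

```python
from dataclasses import asdict, dataclass
from typing import Optional

@dataclass
class EvaluationConfig:
    # Hypothetical fields; the real class lives in the AQUA container config.
    inference_max_threads: Optional[int] = None
    inference_rps: Optional[int] = None

    def to_filtered_dict(self) -> dict:
        # Assumed behavior: keep only fields that were explicitly set.
        return {k: v for k, v in asdict(self).items() if v is not None}

print(EvaluationConfig(inference_rps=25).to_filtered_dict())  # {'inference_rps': 25}
```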
@@ -511,6 +529,7 @@ def _build_evaluation_runtime(
         report_path: str,
         model_parameters: dict,
         metrics: List = None,
+        inference_configuration: dict = None,
     ) -> Runtime:
         """Builds evaluation runtime for Job."""
         # TODO the image name needs to be extracted from the mapping index.json file.
@@ -520,16 +539,19 @@ def _build_evaluation_runtime(
             .with_environment_variable(
                 **{
                     "AIP_SMC_EVALUATION_ARGUMENTS": json.dumps(
-                        asdict(
-                            self._build_launch_cmd(
-                                evaluation_id=evaluation_id,
-                                evaluation_source_id=evaluation_source_id,
-                                dataset_path=dataset_path,
-                                report_path=report_path,
-                                model_parameters=model_parameters,
-                                metrics=metrics,
-                            )
-                        )
+                        {
+                            **asdict(
+                                self._build_launch_cmd(
+                                    evaluation_id=evaluation_id,
+                                    evaluation_source_id=evaluation_source_id,
+                                    dataset_path=dataset_path,
+                                    report_path=report_path,
+                                    model_parameters=model_parameters,
+                                    metrics=metrics,
+                                ),
+                            ),
+                            **inference_configuration,
+                        },
                     ),
                     "CONDA_BUCKET_NS": CONDA_BUCKET_NS,
                 },
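The net effect of this last hunk is that `AIP_SMC_EVALUATION_ARGUMENTS` now carries the launch command merged with the per-container inference configuration at the top level of the JSON payload. Note that `inference_configuration` must arrive as a dict, which the `else {}` at the call site guarantees, since unpacking `None` with `**` raises a `TypeError`. A rough illustration with hypothetical values:

```python
import json

# Hypothetical launch command, as produced by asdict(self._build_launch_cmd(...)).
launch_cmd = {
    "evaluation_id": "ocid1.datasciencemodel.oc1..<unique_id>",
    "dataset_path": "oci://bucket@namespace/dataset.jsonl",
    "metrics": ["bertscore"],
}
inference_configuration = {"inference_rps": 25}  # assumed filtered config

# The inference keys are merged in alongside the launch-cmd keys.
print(json.dumps({**launch_cmd, **inference_configuration}))
```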