@@ -7,7 +7,7 @@
 import re
 import tempfile
 from concurrent.futures import ThreadPoolExecutor, as_completed
-from dataclasses import asdict
+from dataclasses import asdict, fields
 from datetime import datetime, timedelta
 from pathlib import Path
 from threading import Lock
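The switch from importing `asdict` alone to `asdict, fields` matters because the error path later in `create()` introspects the class itself: `dataclasses.asdict()` only accepts an instance (calling it on a class raises `TypeError`), while `dataclasses.fields()` accepts the class, which is what listing the allowable parameter names requires. A minimal standalone sketch; the dataclass here is a hypothetical stand-in, not the real `CreateAquaEvaluationDetails`:

```python
from dataclasses import asdict, dataclass, fields


@dataclass
class CreateDetails:  # hypothetical stand-in for CreateAquaEvaluationDetails
    evaluation_source_id: str = ""
    report_path: str = ""


# fields() works on the class, so the allowable-parameter list can be built directly:
print([f.name for f in fields(CreateDetails)])  # ['evaluation_source_id', 'report_path']

# asdict() needs an instance; calling it on the class raises TypeError, so the old
# error message could never render its parameter list.
print(asdict(CreateDetails(report_path="oci://bucket@namespace/report")))
```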
@@ -76,6 +76,7 @@
     ModelParams,
 )
 from ads.aqua.evaluation.errors import EVALUATION_JOB_EXIT_CODE_MESSAGE
+from ads.aqua.ui import AquaContainerConfig
 from ads.common.auth import default_signer
 from ads.common.object_storage_details import ObjectStorageDetails
 from ads.common.utils import get_console_link, get_files, get_log_links
@@ -90,7 +91,9 @@
 from ads.jobs.builders.runtimes.base import Runtime
 from ads.jobs.builders.runtimes.container_runtime import ContainerRuntime
 from ads.model.datascience_model import DataScienceModel
+from ads.model.deployment import ModelDeploymentContainerRuntime
 from ads.model.deployment.model_deployment import ModelDeployment
+from ads.model.generic_model import ModelDeploymentRuntimeType
 from ads.model.model_metadata import (
     MetadataTaxonomyKeys,
     ModelCustomMetadata,
@@ -157,24 +160,47 @@ def create(
             create_aqua_evaluation_details = CreateAquaEvaluationDetails(**kwargs)
         except Exception as ex:
             raise AquaValueError(
-                "Invalid create evaluation parameters. Allowable parameters are: "
-                f"{', '.join(list(asdict(CreateAquaEvaluationDetails).keys()))}."
+                "Invalid create evaluation parameters. "
+                "Allowable parameters are: "
+                f"{', '.join([field.name for field in fields(CreateAquaEvaluationDetails)])}."
             ) from ex

         if not is_valid_ocid(create_aqua_evaluation_details.evaluation_source_id):
             raise AquaValueError(
                 f"Invalid evaluation source {create_aqua_evaluation_details.evaluation_source_id}. "
                 "Specify either a model or model deployment id."
             )
-
         evaluation_source = None
+        eval_inference_configuration = None
         if (
             DataScienceResource.MODEL_DEPLOYMENT
             in create_aqua_evaluation_details.evaluation_source_id
         ):
             evaluation_source = ModelDeployment.from_id(
                 create_aqua_evaluation_details.evaluation_source_id
             )
+            try:
+                if (
+                    evaluation_source.runtime.type
+                    == ModelDeploymentRuntimeType.CONTAINER
+                ):
+                    runtime = ModelDeploymentContainerRuntime.from_dict(
+                        evaluation_source.runtime.to_dict()
+                    )
+                    inference_config = AquaContainerConfig.from_container_index_json(
+                        enable_spec=True
+                    ).inference
+                    for container in inference_config.values():
+                        if container.name == runtime.image.split(":")[0]:
+                            eval_inference_configuration = (
+                                container.spec.evaluation_configuration
+                            )
+            except Exception:
+                logger.debug(
+                    f"Could not load inference config details for the evaluation id: "
+                    f"{create_aqua_evaluation_details.evaluation_source_id}. Please check if the container"
+                    f" runtime has the correct SMC image information."
+                )
         elif (
             DataScienceResource.MODEL
             in create_aqua_evaluation_details.evaluation_source_id
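The new `try` block above resolves the evaluation settings that match the deployment's serving container: it strips the tag from the deployment image (`runtime.image.split(":")[0]`) and compares the result against the `name` of each entry in the inference section of the container index; any failure is only logged at debug level so evaluation creation still proceeds. A hedged sketch of that matching step, using simplified stand-in types rather than the real `AquaContainerConfig` structures:

```python
from dataclasses import dataclass, field
from typing import Dict, Optional


@dataclass
class ContainerSpec:  # illustrative stand-in for the container spec
    evaluation_configuration: dict = field(default_factory=dict)


@dataclass
class ContainerEntry:  # illustrative stand-in for one inference-container entry
    name: str
    spec: ContainerSpec = field(default_factory=ContainerSpec)


def find_eval_configuration(
    image: str, inference: Dict[str, ContainerEntry]
) -> Optional[dict]:
    """Return the evaluation configuration of the container whose name matches the
    deployment image with its tag stripped, or None when nothing matches."""
    # e.g. "region.ocir.io/tenancy/odsc-vllm-serving:0.4.1" -> "region.ocir.io/tenancy/odsc-vllm-serving"
    image_name = image.split(":")[0]
    for container in inference.values():
        if container.name == image_name:
            return container.spec.evaluation_configuration
    return None
```

One small difference: the loop in the diff keeps iterating after a match (so the last matching entry wins), while the early return in this sketch takes the first match.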
@@ -390,6 +416,9 @@ def create(
                 report_path=create_aqua_evaluation_details.report_path,
                 model_parameters=create_aqua_evaluation_details.model_parameters,
                 metrics=create_aqua_evaluation_details.metrics,
+                inference_configuration=eval_inference_configuration.to_filtered_dict()
+                if eval_inference_configuration
+                else {},
             )
         ).create(**kwargs)  ## TODO: decide what parameters will be needed
         logger.debug(
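`to_filtered_dict()` is only invoked when a matching configuration was found; otherwise an empty dict is passed so the runtime builder can merge it unconditionally. Assuming (not verified here) that the method simply drops empty or unset entries, the pattern looks roughly like the following, with a hypothetical config class standing in for the real one:

```python
from dataclasses import asdict, dataclass


@dataclass
class EvalInferenceConfig:  # hypothetical, not the actual ADS class
    timeout: int = 0
    extra_args: str = ""

    def to_filtered_dict(self) -> dict:
        # Keep only fields that carry a meaningful value.
        return {k: v for k, v in asdict(self).items() if v not in (None, "", 0)}


eval_inference_configuration = EvalInferenceConfig(timeout=120)
payload = (
    eval_inference_configuration.to_filtered_dict()
    if eval_inference_configuration
    else {}
)
print(payload)  # {'timeout': 120}
```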
@@ -511,6 +540,7 @@ def _build_evaluation_runtime(
         report_path: str,
         model_parameters: dict,
         metrics: List = None,
+        inference_configuration: dict = None,
     ) -> Runtime:
         """Builds evaluation runtime for Job."""
         # TODO the image name needs to be extracted from the mapping index.json file.
@@ -520,16 +550,19 @@ def _build_evaluation_runtime(
             .with_environment_variable(
                 **{
                     "AIP_SMC_EVALUATION_ARGUMENTS": json.dumps(
-                        asdict(
-                            self._build_launch_cmd(
-                                evaluation_id=evaluation_id,
-                                evaluation_source_id=evaluation_source_id,
-                                dataset_path=dataset_path,
-                                report_path=report_path,
-                                model_parameters=model_parameters,
-                                metrics=metrics,
-                            )
-                        )
+                        {
+                            **asdict(
+                                self._build_launch_cmd(
+                                    evaluation_id=evaluation_id,
+                                    evaluation_source_id=evaluation_source_id,
+                                    dataset_path=dataset_path,
+                                    report_path=report_path,
+                                    model_parameters=model_parameters,
+                                    metrics=metrics,
+                                ),
+                            ),
+                            **(inference_configuration or {}),
+                        },
                     ),
                     "CONDA_BUCKET_NS": CONDA_BUCKET_NS,
                 },
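With this change the `AIP_SMC_EVALUATION_ARGUMENTS` payload is no longer just the serialized launch command: the container-specific inference configuration is unpacked into the same dict, and `or {}` keeps the merge safe when no configuration was resolved. For duplicate keys the configuration values would win, since they are unpacked last. A small standalone illustration (all values are made up):

```python
import json

launch_cmd = {  # illustrative output of _build_launch_cmd(), serialized via asdict()
    "evaluation_id": "ocid1.datasciencemodel.oc1..example",
    "metrics": ["bertscore"],
}
inference_configuration = {"inference_max_threads": 4}  # hypothetical container settings

payload = json.dumps(
    {
        **launch_cmd,                       # base evaluation job arguments
        **(inference_configuration or {}),  # optional extras; duplicate keys override
    }
)
print(payload)
```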