@@ -524,7 +524,7 @@ def test_all_series_failure(model):
     module_to_patch = {
         "arima": 'pmdarima.auto_arima',
         "autots": 'autots.AutoTS',
-        "automlx": 'automl.Pipeline',
+        "automlx": 'automlx.Pipeline',
         "prophet": 'prophet.Prophet',
         "neuralprophet": 'neuralprophet.NeuralProphet'
     }
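The `module_to_patch` mapping above pairs each model name with the import path of its fitting entry point, so a single test can force every series to fail regardless of backend. A rough sketch of how such a mapping is typically consumed is shown below; the `unittest.mock.patch` usage is standard, but the `run_operator` callable and the exception text are illustrative assumptions, not helpers from this repository.

```python
# Sketch only: force every series to fail by patching the model's entry point.
# The patch targets mirror the dict above; run_operator and the error message
# are made-up stand-ins for whatever actually drives the forecast run.
from unittest.mock import patch

module_to_patch = {
    "arima": "pmdarima.auto_arima",
    "autots": "autots.AutoTS",
    "automlx": "automlx.Pipeline",
    "prophet": "prophet.Prophet",
    "neuralprophet": "neuralprophet.NeuralProphet",
}

def run_with_forced_failure(model, run_operator):
    """Patch the model's fit entry point so every series raises, then run."""
    with patch(module_to_patch[model], side_effect=Exception("simulated model failure")):
        return run_operator()
```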
@@ -551,7 +551,7 @@ def test_all_series_failure(model):
     local_fn = f"{tmpdirname}/results/local_explanation.csv"
     assert os.path.exists(local_fn), f"Local explanation file not found at {report_path}"

-@pytest.mark.parametrize("model", ["arima", "automlx"])
+@pytest.mark.parametrize("model", MODELS)
 def test_arima_automlx_errors(operator_setup, model):
     tmpdirname = operator_setup
     historical_data_path, additional_data_path = setup_faulty_rossman()
@@ -572,14 +572,15 @@ def test_arima_automlx_errors(operator_setup, model):
     outputs get generated and that error is shown in errors.json
     """

+    """
+    Explanation generation fails when boolean columns are passed, so the data is
+    label-encoded (label_encode) before it is handed to the explainer.
+    """
+
     yaml_i['spec']['horizon'] = 10
     yaml_i['spec']['preprocessing'] = True
     yaml_i['spec']['generate_explanations'] = True
     yaml_i['spec']['model'] = model
-    if model == "automlx":
-        yaml_i['spec']['model_kwargs'] = {
-            'model_list': ['ProphetForecaster']
-        }

     run_yaml(tmpdirname=tmpdirname, yaml_i=yaml_i, output_data_path=output_data_path, test_metrics_check=False)
@@ -594,23 +595,24 @@ def test_arima_automlx_errors(operator_setup, model):
     error_path = f"{tmpdirname}/results/errors.json"
     if model == "arima":
         assert not os.path.exists(error_path), f"Error file not found at {error_path}"
-    else:
+    elif model == "automlx":
         assert os.path.exists(error_path), f"Error file not found at {error_path}"
         with open(error_path, 'r') as error_file:
             error_content = json.load(error_file)
             assert "Input data does not have a consistent (in terms of diff) DatetimeIndex." in error_content["13"][
                 "error"], "Error message mismatch"

-    global_fn = f"{tmpdirname}/results/global_explanation.csv"
-    assert os.path.exists(global_fn), f"Global explanation file not found at {report_path}"
+    if model != "autots":
+        global_fn = f"{tmpdirname}/results/global_explanation.csv"
+        assert os.path.exists(global_fn), f"Global explanation file not found at {report_path}"

-    local_fn = f"{tmpdirname}/results/local_explanation.csv"
-    assert os.path.exists(local_fn), f"Local explanation file not found at {report_path}"
+        local_fn = f"{tmpdirname}/results/local_explanation.csv"
+        assert os.path.exists(local_fn), f"Local explanation file not found at {report_path}"

-    glb_expl = pd.read_csv(global_fn, index_col=0)
-    loc_expl = pd.read_csv(local_fn)
-    assert not glb_expl.empty
-    assert not loc_expl.empty
+        glb_expl = pd.read_csv(global_fn, index_col=0)
+        loc_expl = pd.read_csv(local_fn)
+        assert not glb_expl.empty
+        assert not loc_expl.empty


 def test_smape_error():
@@ -631,11 +633,7 @@ def test_date_format(operator_setup, model):
     yaml_i["spec"]["model"] = model
     if model == "autots":
         yaml_i["spec"]["model_kwargs"] = {"model_list": "superfast"}
-    if model == "automlx":
-        yaml_i['spec']['model_kwargs'] = {
-            'model_list': ['ProphetForecaster'],
-            "time_budget": 1
-        }
+
     run_yaml(tmpdirname=tmpdirname, yaml_i=yaml_i, output_data_path=output_data_path, test_metrics_check=False)
     assert pd.read_csv(additional_data_path)['Date'].equals(pd.read_csv(f"{tmpdirname}/results/forecast.csv")['Date'])
