@@ -836,6 +836,7 @@ def test_predictor_with_component_name(sagemaker_session, component_name):
     assert predictor._get_component_name() == component_name
 
 
+@pytest.mark.skip(reason="Hyperpod recipe code unavailable")
 def test_training_recipe_for_cpu(sagemaker_session):
     container_log_level = '"logging.INFO"'
 
@@ -864,17 +865,18 @@ def test_training_recipe_for_cpu(sagemaker_session):
         instance_type=INSTANCE_TYPE,
         base_job_name="job",
         container_log_level=container_log_level,
-        training_recipe="training/llama/hf_llama3_8b_seq8192_gpu",
+        training_recipe="training/llama/hf_llama3_8b_seq8k_gpu_p5x16_pretrain",
         recipe_overrides=recipe_overrides,
     )
 
 
+@pytest.mark.skip(reason="Hyperpod recipe code unavailable")
 @pytest.mark.parametrize(
     "recipe, model",
     [
-        ("hf_llama3_8b_seq8192_gpu", "llama"),
-        ("hf_mistral_gpu", "mistral"),
-        ("hf_mixtral_gpu", "mixtral"),
+        ("hf_llama3_8b_seq8k_gpu_p5x16_pretrain", "llama"),
+        ("hf_mistral_7b_seq8k_gpu_p5x16_pretrain", "mistral"),
+        ("hf_mixtral_8x7b_seq8k_gpu_p5x16_pretrain", "mixtral"),
     ],
 )
 def test_training_recipe_for_gpu(sagemaker_session, recipe, model):
@@ -925,6 +927,7 @@ def test_training_recipe_for_gpu(sagemaker_session, recipe, model):
     assert pytorch.distribution.items() == expected_distribution.items()
 
 
+@pytest.mark.skip(reason="Hyperpod recipe code unavailable")
 def test_training_recipe_with_override(sagemaker_session):
     container_log_level = '"logging.INFO"'
 
@@ -953,7 +956,7 @@ def test_training_recipe_with_override(sagemaker_session):
         instance_type=INSTANCE_TYPE_GPU,
         base_job_name="job",
         container_log_level=container_log_level,
-        training_recipe="training/llama/hf_llama3_8b_seq8192_gpu",
+        training_recipe="training/llama/hf_llama3_8b_seq8k_gpu_p5x16_pretrain",
         recipe_overrides=recipe_overrides,
     )
 
@@ -962,6 +965,7 @@ def test_training_recipe_with_override(sagemaker_session):
     assert pytorch.image_uri == IMAGE_URI
 
 
+@pytest.mark.skip(reason="Hyperpod recipe code unavailable")
 def test_training_recipe_gpu_custom_source_dir(sagemaker_session):
     container_log_level = '"logging.INFO"'
 
@@ -992,7 +996,7 @@ def test_training_recipe_gpu_custom_source_dir(sagemaker_session):
         instance_type=INSTANCE_TYPE_GPU,
         base_job_name="job",
         container_log_level=container_log_level,
-        training_recipe="training/llama/hf_llama3_8b_seq8192_gpu",
+        training_recipe="training/llama/hf_llama3_8b_seq8k_gpu_p5x16_pretrain",
         recipe_overrides=recipe_overrides,
     )
 