|
6 | 6 | from ads.jobs import Job, DataScienceJob, PythonRuntime |
7 | 7 |
|
8 | 8 | job = ( |
9 | | - Job(name="My Job") |
10 | | - .with_infrastructure( |
11 | | - DataScienceJob() |
12 | | - .with_log_group_id("<log_group_ocid>") |
13 | | - .with_log_id("<log_ocid>") |
14 | | - # The following infrastructure configurations are optional |
15 | | - # if you are in an OCI data science notebook session. |
16 | | - # The configurations of the notebook session will be used as defaults. |
17 | | - .with_compartment_id("<compartment_ocid>") |
18 | | - .with_project_id("<project_ocid>") |
19 | | - # For default networking, no need to specify subnet ID |
20 | | - .with_subnet_id("<subnet_ocid>") |
21 | | - .with_shape_name("VM.Standard.E3.Flex") |
22 | | - # Shape config details are applicable only for the flexible shapes. |
23 | | - .with_shape_config_details(memory_in_gbs=16, ocpus=1) |
24 | | - .with_block_storage_size(50) |
25 | | - ) |
26 | | - .with_runtime( |
27 | | - PythonRuntime() |
28 | | - # Specify the service conda environment by slug name. |
29 | | - .with_service_conda("pytorch19_p37_cpu_v1") |
30 | | - # The job artifact can be a single Python script, a directory or a zip file. |
31 | | - .with_source("local/path/to/code_dir") |
32 | | - # Set the working directory |
33 | | - # When using a directory as source, the default working dir is the parent of code_dir. |
34 | | - # Working dir should be a relative path beginning from the source directory (code_dir) |
35 | | - .with_working_dir("code_dir") |
36 | | - # The entrypoint is applicable only to directory or zip file as source |
37 | | - # The entrypoint should be a path relative to the working dir. |
38 | | - # Here my_script.py is a file in the code_dir/my_package directory |
39 | | - .with_entrypoint("my_package/my_script.py") |
40 | | - # Add an additional Python path, relative to the working dir (code_dir/other_packages). |
41 | | - .with_python_path("other_packages") |
42 | | - # Copy files in "code_dir/output" to object storage after job finishes. |
43 | | - .with_output("output", "oci://bucket_name@namespace/path/to/dir") |
44 | | - ) |
| 9 | + Job(name="My Job") |
| 10 | + .with_infrastructure( |
| 11 | + DataScienceJob() |
| 12 | + .with_log_group_id("<log_group_ocid>") |
| 13 | + .with_log_id("<log_ocid>") |
| 14 | + # The following infrastructure configurations are optional |
| 15 | + # if you are in an OCI data science notebook session. |
| 16 | + # The configurations of the notebook session will be used as defaults. |
| 17 | + .with_compartment_id("<compartment_ocid>") |
| 18 | + .with_project_id("<project_ocid>") |
| 19 | + # For default networking, no need to specify subnet ID |
| 20 | + .with_subnet_id("<subnet_ocid>") |
| 21 | + .with_shape_name("VM.Standard.E3.Flex") |
| 22 | + # Shape config details are applicable only for the flexible shapes. |
| 23 | + .with_shape_config_details(memory_in_gbs=16, ocpus=1) |
| 24 | + .with_block_storage_size(50) |
| 25 | + ) |
| 26 | + .with_runtime( |
| 27 | + PythonRuntime() |
| 28 | + # Specify the service conda environment by slug name. |
| 29 | + .with_service_conda("pytorch19_p37_cpu_v1") |
| 30 | + # The job artifact can be a single Python script, a directory or a zip file. |
| 31 | + .with_source("local/path/to/code_dir") |
| 32 | + # Environment variable |
| 33 | + .with_environment_variable(NAME="Welcome to OCI Data Science.") |
| 34 | + # Command line argument, arg1 --key arg2 |
| 35 | + .with_argument("arg1", key="arg2") |
| 36 | + # Set the working directory |
| 37 | + # When using a directory as source, the default working dir is the parent of code_dir. |
| 38 | + # Working dir should be a relative path beginning from the source directory (code_dir) |
| 39 | + .with_working_dir("code_dir") |
| 40 | + # The entrypoint is applicable only to directory or zip file as source |
| 41 | + # The entrypoint should be a path relative to the working dir. |
| 42 | + # Here my_script.py is a file in the code_dir/my_package directory |
| 43 | + .with_entrypoint("my_package/my_script.py") |
| 44 | + # Add an additional Python path, relative to the working dir (code_dir/other_packages). |
| 45 | + .with_python_path("other_packages") |
| 46 | + # Copy files in "code_dir/output" to object storage after job finishes. |
| 47 | + .with_output("output", "oci://bucket_name@namespace/path/to/dir") |
| 48 | + ) |
45 | 49 | ) |
46 | 50 |
|
47 | | - # Create the job on OCI Data Science |
48 | | - job.create() |
49 | | - # Start a job run |
50 | | - run = job.run() |
51 | | - # Stream the job run outputs |
52 | | - run.watch() |
53 | | - |
54 | 51 | .. code-tab:: yaml |
55 | 52 | :caption: YAML |
56 | 53 |
|
|
77 | 74 | kind: runtime |
78 | 75 | type: python |
79 | 76 | spec: |
| 77 | + args: |
| 78 | + - arg1 |
| 79 | + - --key |
| 80 | + - arg2 |
80 | 81 | conda: |
81 | 82 | slug: pytorch19_p37_cpu_v1 |
82 | 83 | type: service |
83 | 84 | entrypoint: my_package/my_script.py |
| 85 | + env: |
| 86 | + - name: NAME |
| 87 | + value: Welcome to OCI Data Science. |
84 | 88 | outputDir: output |
85 | 89 | outputUri: oci://bucket_name@namespace/path/to/dir |
86 | 90 | pythonPath: |
87 | 91 | - other_packages |
88 | 92 | scriptPathURI: local/path/to/code_dir |
89 | | - workingDir: code_dir |
 | 93 | + workingDir: code_dir 
 | 94 | + 
 | 95 | + 
 | 96 | +.. code-block:: python 
 | 97 | +
 | 
 | 98 | + # Create the job on OCI Data Science 
 | 99 | + job.create() 
 | 100 | + # Start a job run 
 | 101 | + run = job.run() 
 | 102 | + # Stream the job run outputs 
 | 103 | + run.watch() 