@@ -161,6 +161,10 @@ You could submit a notebook using ADS SDK APIs. Here is an example to submit a n
     .with_executor_shape_config(ocpus=4, memory_in_gbs=64)
     .with_logs_bucket_uri("oci://mybucket@mytenancy/")
     .with_private_endpoint_id("ocid1.dataflowprivateendpoint.oc1.iad.<your private endpoint ocid>")
+    .with_configuration({
+        "spark.driverEnv.myEnvVariable": "value1",
+        "spark.executorEnv.myEnvVariable": "value2",
+    })
 )
 rt = (
     DataFlowNotebookRuntime()
@@ -169,7 +173,6 @@ You could submit a notebook using ADS SDK APIs. Here is an example to submit a n
     )  # This could be local path or http path to notebook ipynb file
     .with_script_bucket("<my-bucket>")
     .with_exclude_tag(["ignore", "remove"])  # Cells to Ignore
-    .with_environment_variable(env1="test", env2="test2")  # will be propagated to both driver and executor
 )
 job = Job(infrastructure=df, runtime=rt).create(overwrite=True)
 df_run = job.run(wait=True)
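For readers wondering what the two new keys do at run time, here is a minimal sketch (not part of the patch): it assumes that a `spark.driverEnv.*` entry surfaces the variable in the driver's `os.environ`, that a `spark.executorEnv.*` entry does the same on each executor, and that a `SparkSession` named `spark` is available in the Data Flow run.

import os

# Driver side: set via "spark.driverEnv.myEnvVariable" (assumption based on the keys above).
print(os.environ.get("myEnvVariable"))  # expected: "value1"

def read_executor_env(_):
    # Executor side: set via "spark.executorEnv.myEnvVariable".
    return os.environ.get("myEnvVariable")  # expected: "value2"

# `spark` is assumed to be an existing SparkSession inside the Data Flow run.
print(spark.sparkContext.parallelize(range(2), 2).map(read_executor_env).collect())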
@@ -213,7 +216,7 @@ The ``DataFlowRuntime`` properties are:
 - ``with_archive_uri`` (`doc <https://docs.oracle.com/en-us/iaas/data-flow/using/dfs_data_flow_library.htm#third-party-libraries>`__)
 - ``with_archive_bucket``
 - ``with_custom_conda``
-- ``with_environment_variable``
+- ``with_configuration``

 For more details, see the `runtime class documentation <../../ads.jobs.html#module-ads.jobs.builders.runtimes.python_runtime>`__.

@@ -272,7 +275,10 @@ accepted. In the next example, the prefix is given for ``script_bucket``.
     .with_script_uri(os.path.join(td, "script.py"))
     .with_script_bucket("oci://mybucket@namespace/prefix")
     .with_custom_conda("oci://<mybucket>@<mynamespace>/<path/to/conda_pack>")
-    .with_environment_variable(env1="test", env2="test2")  # will be propagated to both driver and executor
+    .with_configuration({
+        "spark.driverEnv.myEnvVariable": "value1",
+        "spark.executorEnv.myEnvVariable": "value2",
+    })
 )
 df = Job(name=name, infrastructure=dataflow_configs, runtime=runtime_config)
 df.create()
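For anyone migrating existing definitions: the removed `with_environment_variable` call propagated each variable to both driver and executor, so the equivalent configuration dict needs one `spark.driverEnv.` and one `spark.executorEnv.` entry per variable. A small sketch (the `env_vars` dict and the loop are illustrative helpers, not ADS API):

# Rebuild the old "propagate to both driver and executor" behaviour
# with the new with_configuration() dict.
env_vars = {"env1": "test", "env2": "test2"}
configuration = {}
for name, value in env_vars.items():
    configuration[f"spark.driverEnv.{name}"] = value
    configuration[f"spark.executorEnv.{name}"] = value

runtime_config = runtime_config.with_configuration(configuration)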
@@ -380,6 +386,10 @@ In the next example, ``archive_uri`` is given as an Object Storage location.
     .with_executor_shape("VM.Standard.E4.Flex")
     .with_executor_shape_config(ocpus=4, memory_in_gbs=64)
     .with_spark_version("3.0.2")
+    .with_configuration({
+        "spark.driverEnv.myEnvVariable": "value1",
+        "spark.executorEnv.myEnvVariable": "value2",
+    })
 )
 runtime_config = (
     DataFlowRuntime()
@@ -558,11 +568,11 @@ into the ``Job.from_yaml()`` function to build a Data Flow job:
   runtime:
     kind: runtime
     spec:
+      configuration:
+        spark.driverEnv.myEnvVariable: value1
+        spark.executorEnv.myEnvVariable: value2
       scriptBucket: bucket_name
       scriptPathURI: oci://<bucket_name>@<namespace>/<prefix>
-      env:
-        - name: env1
-          value: test1
     type: dataFlow

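The hunk above edits the YAML that the surrounding section feeds to ``Job.from_yaml()``. As a quick orientation (not part of the patch), loading and running that definition could look like the sketch below, assuming the YAML is saved locally as ``job.yaml`` and that ``Job.from_yaml()`` accepts a path through its ``uri`` argument.

from ads.jobs import Job

job = Job.from_yaml(uri="job.yaml")  # parse the Data Flow job definition above
job.create()                         # create the Data Flow application
run = job.run()                      # launch a run with the configured variables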
 **Data Flow Infrastructure YAML Schema**
@@ -631,6 +641,9 @@ into the ``Job.from_yaml()`` function to build a Data Flow job:
     privateEndpointId:
       required: false
       type: string
+    configuration:
+      required: false
+      type: dict
     type:
       allowed:
         - dataFlow
@@ -675,11 +688,9 @@ into the ``Job.from_yaml()`` function to build a Data Flow job:
         - service
       required: true
       type: string
-    env:
-      type: list
+    configuration:
       required: false
-      schema:
-        type: dict
+      type: dict
     freeform_tag:
       required: false
       type: dict