@@ -170,7 +170,7 @@
         update_mask=UPDATE_MASK,
         graceful_decommission_timeout=TIMEOUT,
         project_id=PROJECT_ID,
-        location=REGION,
+        region=REGION,
     )
     # [END how_to_cloud_dataproc_update_cluster_operator]

@@ -179,7 +179,7 @@
         task_id="create_workflow_template",
         template=WORKFLOW_TEMPLATE,
         project_id=PROJECT_ID,
-        location=REGION,
+        region=REGION,
     )
     # [END how_to_cloud_dataproc_create_workflow_template]

@@ -190,24 +190,24 @@
     # [END how_to_cloud_dataproc_trigger_workflow_template]

     pig_task = DataprocSubmitJobOperator(
-        task_id="pig_task", job=PIG_JOB, location=REGION, project_id=PROJECT_ID
+        task_id="pig_task", job=PIG_JOB, region=REGION, project_id=PROJECT_ID
     )
     spark_sql_task = DataprocSubmitJobOperator(
-        task_id="spark_sql_task", job=SPARK_SQL_JOB, location=REGION, project_id=PROJECT_ID
+        task_id="spark_sql_task", job=SPARK_SQL_JOB, region=REGION, project_id=PROJECT_ID
     )

     spark_task = DataprocSubmitJobOperator(
-        task_id="spark_task", job=SPARK_JOB, location=REGION, project_id=PROJECT_ID
+        task_id="spark_task", job=SPARK_JOB, region=REGION, project_id=PROJECT_ID
     )

     # [START cloud_dataproc_async_submit_sensor]
     spark_task_async = DataprocSubmitJobOperator(
-        task_id="spark_task_async", job=SPARK_JOB, location=REGION, project_id=PROJECT_ID, asynchronous=True
+        task_id="spark_task_async", job=SPARK_JOB, region=REGION, project_id=PROJECT_ID, asynchronous=True
     )

     spark_task_async_sensor = DataprocJobSensor(
         task_id='spark_task_async_sensor_task',
-        location=REGION,
+        region=REGION,
         project_id=PROJECT_ID,
         dataproc_job_id="{{task_instance.xcom_pull(task_ids='spark_task_async')}}",
         poke_interval=10,
@@ -216,20 +216,20 @@

     # [START how_to_cloud_dataproc_submit_job_to_cluster_operator]
     pyspark_task = DataprocSubmitJobOperator(
-        task_id="pyspark_task", job=PYSPARK_JOB, location=REGION, project_id=PROJECT_ID
+        task_id="pyspark_task", job=PYSPARK_JOB, region=REGION, project_id=PROJECT_ID
     )
     # [END how_to_cloud_dataproc_submit_job_to_cluster_operator]

     sparkr_task = DataprocSubmitJobOperator(
-        task_id="sparkr_task", job=SPARKR_JOB, location=REGION, project_id=PROJECT_ID
+        task_id="sparkr_task", job=SPARKR_JOB, region=REGION, project_id=PROJECT_ID
     )

     hive_task = DataprocSubmitJobOperator(
-        task_id="hive_task", job=HIVE_JOB, location=REGION, project_id=PROJECT_ID
+        task_id="hive_task", job=HIVE_JOB, region=REGION, project_id=PROJECT_ID
     )

     hadoop_task = DataprocSubmitJobOperator(
-        task_id="hadoop_task", job=HADOOP_JOB, location=REGION, project_id=PROJECT_ID
+        task_id="hadoop_task", job=HADOOP_JOB, region=REGION, project_id=PROJECT_ID
     )

     # [START how_to_cloud_dataproc_delete_cluster_operator]
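For context on the renamed argument, here is a minimal sketch of the asynchronous submit-plus-sensor pattern this diff touches, written against the new `region` keyword. It is not the file under review: the DAG id, PROJECT_ID, REGION, CLUSTER_NAME, and the SPARK_JOB definition below are placeholder assumptions chosen for illustration.

# Minimal sketch (assumed placeholder values, not the example DAG itself):
# submit a Dataproc Spark job asynchronously, then wait on it with
# DataprocJobSensor, passing `region=` rather than the old `location=`.
from datetime import datetime

from airflow import models
from airflow.providers.google.cloud.operators.dataproc import DataprocSubmitJobOperator
from airflow.providers.google.cloud.sensors.dataproc import DataprocJobSensor

PROJECT_ID = "my-project"          # assumed placeholder
REGION = "europe-west1"            # assumed placeholder
CLUSTER_NAME = "example-cluster"   # assumed placeholder

SPARK_JOB = {
    "reference": {"project_id": PROJECT_ID},
    "placement": {"cluster_name": CLUSTER_NAME},
    "spark_job": {
        "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
        "main_class": "org.apache.spark.examples.SparkPi",
    },
}

with models.DAG(
    "example_dataproc_region_arg",
    schedule_interval=None,
    start_date=datetime(2021, 1, 1),
    catchup=False,
) as dag:
    # Returns as soon as the job is submitted; the job id is pushed to XCom
    # because asynchronous=True.
    spark_task_async = DataprocSubmitJobOperator(
        task_id="spark_task_async",
        job=SPARK_JOB,
        region=REGION,
        project_id=PROJECT_ID,
        asynchronous=True,
    )

    # Polls the job id pulled from XCom until the job reaches a terminal state.
    spark_task_async_sensor = DataprocJobSensor(
        task_id="spark_task_async_sensor_task",
        region=REGION,
        project_id=PROJECT_ID,
        dataproc_job_id="{{ task_instance.xcom_pull(task_ids='spark_task_async') }}",
        poke_interval=10,
    )

    spark_task_async >> spark_task_async_sensor

The same substitution of `region=REGION` for `location=REGION` applies to the other operators in the diff (cluster update, workflow template creation, and the remaining submit-job tasks); only the keyword name changes, not the values passed.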