Commit 0419ff8

Remove local_rank arg from TrainingArguments (#41382)
1 parent 081391b commit 0419ff8
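
In practice the change is mechanical: the process rank that used to come from training_args.local_rank now comes from training_args.local_process_index, and "is this run distributed?" is answered by training_args.parallel_mode instead of checking local_rank != -1. A minimal sketch of the replacement pattern, not taken from the commit itself (the output_dir value is only a placeholder):

    from transformers import TrainingArguments
    from transformers.trainer_utils import is_main_process

    training_args = TrainingArguments(output_dir="out")  # "out" is a placeholder

    # Old pattern (removed by this commit):
    #   rank = training_args.local_rank
    #   distributed = training_args.local_rank != -1
    # New pattern:
    rank = training_args.local_process_index
    distributed = training_args.parallel_mode.value == "distributed"
    if is_main_process(rank):
        print(f"main process: rank={rank}, distributed={distributed}")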

File tree

36 files changed: +63 -72 lines changed


examples/legacy/multiple_choice/run_multiple_choice.py

Lines changed: 4 additions & 4 deletions
@@ -99,18 +99,18 @@ def main():
     logging.basicConfig(
         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
         datefmt="%m/%d/%Y %H:%M:%S",
-        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
+        level=logging.INFO if training_args.local_process_index in [-1, 0] else logging.WARN,
     )
     logger.warning(
         "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
-        training_args.local_rank,
+        training_args.local_process_index,
         training_args.device,
         training_args.n_gpu,
-        bool(training_args.local_rank != -1),
+        bool(training_args.parallel_mode.value == "distributed"),
         training_args.fp16,
     )
     # Set the verbosity to info of the Transformers logger (on main process only):
-    if is_main_process(training_args.local_rank):
+    if is_main_process(training_args.local_process_index):
         transformers.utils.logging.set_verbosity_info()
         transformers.utils.logging.enable_default_handler()
         transformers.utils.logging.enable_explicit_format()
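
The basicConfig edit above preserves the per-process verbosity gate: the main process logs at INFO, every other rank at WARN. A self-contained sketch of that gate, with process_index standing in for training_args.local_process_index:

    import logging

    process_index = 0  # stand-in for training_args.local_process_index

    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if process_index in [-1, 0] else logging.WARN,
    )
    logging.getLogger(__name__).info("visible only when process_index is -1 or 0")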

examples/legacy/question-answering/run_squad_trainer.py

Lines changed: 4 additions & 4 deletions
@@ -80,18 +80,18 @@ def main():
     logging.basicConfig(
         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
         datefmt="%m/%d/%Y %H:%M:%S",
-        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
+        level=logging.INFO if training_args.local_process_index in [-1, 0] else logging.WARN,
     )
     logger.warning(
         "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
-        training_args.local_rank,
+        training_args.local_process_index,
         training_args.device,
         training_args.n_gpu,
-        bool(training_args.local_rank != -1),
+        bool(training_args.parallel_mode.value == "distributed"),
         training_args.fp16,
     )
     # Set the verbosity to info of the Transformers logger (on main process only):
-    if is_main_process(training_args.local_rank):
+    if is_main_process(training_args.local_process_index):
         transformers.utils.logging.set_verbosity_info()
         transformers.utils.logging.enable_default_handler()
         transformers.utils.logging.enable_explicit_format()

examples/legacy/run_language_modeling.py

Lines changed: 4 additions & 4 deletions
@@ -212,18 +212,18 @@ def main():
     logging.basicConfig(
         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
         datefmt="%m/%d/%Y %H:%M:%S",
-        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
+        level=logging.INFO if training_args.local_process_index in [-1, 0] else logging.WARN,
     )
     logger.warning(
         "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
-        training_args.local_rank,
+        training_args.local_process_index,
         training_args.device,
         training_args.n_gpu,
-        bool(training_args.local_rank != -1),
+        bool(training_args.parallel_mode.value == "distributed"),
         training_args.fp16,
     )
     # Set the verbosity to info of the Transformers logger (on main process only):
-    if is_main_process(training_args.local_rank):
+    if is_main_process(training_args.local_process_index):
         transformers.utils.logging.set_verbosity_info()
         transformers.utils.logging.enable_default_handler()
         transformers.utils.logging.enable_explicit_format()

examples/legacy/seq2seq/finetune_trainer.py

Lines changed: 3 additions & 3 deletions
@@ -171,11 +171,11 @@ def main():
     logging.basicConfig(
         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
         datefmt="%m/%d/%Y %H:%M:%S",
-        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
+        level=logging.INFO if training_args.local_process_index in [-1, 0] else logging.WARN,
     )
     logger.warning(
         "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
-        training_args.local_rank,
+        training_args.local_process_index,
         training_args.device,
         training_args.n_gpu,
         bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
@@ -184,7 +184,7 @@ def main():
     transformers.utils.logging.enable_default_handler()
     transformers.utils.logging.enable_explicit_format()
     # Set the verbosity to info of the Transformers logger (on main process only):
-    if is_main_process(training_args.local_rank):
+    if is_main_process(training_args.local_process_index):
         transformers.utils.logging.set_verbosity_info()
     logger.info("Training/evaluation parameters %s", training_args)
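
The unchanged context line in this hunk compares training_args.parallel_mode against the ParallelMode enum directly, while the edited scripts compare its string value; assuming ParallelMode.DISTRIBUTED carries the value "distributed" (which the other diffs in this commit rely on), the two checks agree:

    from transformers.training_args import ParallelMode

    mode = ParallelMode.DISTRIBUTED  # stand-in for training_args.parallel_mode
    print(mode == ParallelMode.DISTRIBUTED)  # True: direct enum comparison
    print(mode.value == "distributed")       # True: string-value comparison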

examples/legacy/token-classification/run_ner.py

Lines changed: 4 additions & 4 deletions
@@ -125,18 +125,18 @@ def main():
     logging.basicConfig(
         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
         datefmt="%m/%d/%Y %H:%M:%S",
-        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
+        level=logging.INFO if training_args.local_process_index in [-1, 0] else logging.WARN,
     )
     logger.warning(
         "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
-        training_args.local_rank,
+        training_args.local_process_index,
         training_args.device,
         training_args.n_gpu,
-        bool(training_args.local_rank != -1),
+        bool(training_args.parallel_mode.value == "distributed"),
         training_args.fp16,
     )
     # Set the verbosity to info of the Transformers logger (on main process only):
-    if is_main_process(training_args.local_rank):
+    if is_main_process(training_args.local_process_index):
         transformers.utils.logging.set_verbosity_info()
         transformers.utils.logging.enable_default_handler()
         transformers.utils.logging.enable_explicit_format()

examples/pytorch/audio-classification/run_audio_classification.py

Lines changed: 1 addition & 1 deletion
@@ -236,7 +236,7 @@ def main():

     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+        f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")

examples/pytorch/contrastive-image-text/run_clip.py

Lines changed: 1 addition & 1 deletion
@@ -265,7 +265,7 @@ def main():

     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+        f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")

examples/pytorch/image-classification/run_image_classification.py

Lines changed: 1 addition & 1 deletion
@@ -219,7 +219,7 @@ def main():

     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+        f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")

examples/pytorch/image-pretraining/run_mae.py

Lines changed: 1 addition & 1 deletion
@@ -211,7 +211,7 @@ def main():

     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+        f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")

examples/pytorch/image-pretraining/run_mim.py

Lines changed: 1 addition & 1 deletion
@@ -275,7 +275,7 @@ def main():

     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+        f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
