
Commit 1d91a8a

Use max/min (#41280)
Signed-off-by: Yuanyuan Chen <cyyever@outlook.com>
1 parent f1b64c5 commit 1d91a8a

8 files changed: +9 -9 lines changed


src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py

Lines changed: 1 addition & 1 deletion
@@ -495,7 +495,7 @@ def checku2e(x):
                 candidates.append((self.vocab[wd], wd, e))
             if len(candidates) > 0:
                 # the smallest token_id is adopted
-                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
+                _, wd, e = min(candidates, key=lambda x: x[0])
                 result.append(wd)
                 pos = e
             else:
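
Note: the substitution is behavior-preserving here. For a non-empty sequence, sorted(seq, key=k)[0] and min(seq, key=k) return the same element, because sorted() is stable and min() keeps the first item with the smallest key, while min() avoids the O(n log n) sort and the temporary list. A minimal standalone sketch (with made-up candidate tuples, not taken from the tokenizer) illustrates the equivalence:

# Made-up (token_id, word, end_pos) candidates; both forms pick the first tuple
# with the smallest token_id, but min() does it in a single O(n) pass without copying.
candidates = [(7, "foo", 3), (2, "bar", 5), (2, "baz", 6)]

by_sort = sorted(candidates, key=lambda x: x[0])[0]
by_min = min(candidates, key=lambda x: x[0])
assert by_sort == by_min == (2, "bar", 5)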

src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py

Lines changed: 1 addition & 1 deletion
@@ -318,7 +318,7 @@ def checku2e(x):
                 candidates.append((self.vocab[wd], wd, e))
             if len(candidates) > 0:
                 # the smallest token_id is adopted
-                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
+                _, wd, e = min(candidates, key=lambda x: x[0])
                 result.append(wd)
                 pos = e
             else:

src/transformers/models/ovis2/image_processing_ovis2.py

Lines changed: 2 additions & 2 deletions
@@ -169,10 +169,10 @@ def get_min_tile_covering_grid(

     if sufficient_covering_grids:
         # Prefer fewer tiles and higher covering ratio
-        return sorted(sufficient_covering_grids, key=lambda x: (x[0][0] * x[0][1], -x[1]))[0][0]
+        return min(sufficient_covering_grids, key=lambda x: (x[0][0] * x[0][1], -x[1]))[0]
     else:
         # Fallback: prefer higher covering even if below threshold
-        return sorted(evaluated_grids, key=lambda x: (-x[1], x[0][0] * x[0][1]))[0][0]
+        return min(evaluated_grids, key=lambda x: (-x[1], x[0][0] * x[0][1]))[0]


 class Ovis2ImageProcessor(BaseImageProcessor):
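
Note: in the grid selection the key is a tuple, so Python compares candidates lexicographically: first by tile count (rows * cols, ascending), then by covering ratio (descending, via the negation). A small sketch with hypothetical (grid, covering_ratio) pairs, not real Ovis2 outputs, shows that min(...)[0] yields the same grid that sorted(...)[0][0] did:

# Hypothetical (grid, covering_ratio) pairs, purely illustrative.
# The tuple key prefers fewer tiles, then higher coverage; [0] extracts the grid.
grids = [((2, 2), 0.91), ((1, 3), 0.88), ((1, 3), 0.95)]

best = min(grids, key=lambda x: (x[0][0] * x[0][1], -x[1]))
assert best == ((1, 3), 0.95)  # 3 tiles beats 4; the tie on tile count goes to 0.95
assert best[0] == sorted(grids, key=lambda x: (x[0][0] * x[0][1], -x[1]))[0][0]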

src/transformers/tokenization_mistral_common.py

Lines changed: 1 addition & 1 deletion
@@ -1789,7 +1789,7 @@ def from_pretrained(
             if "tekken.json" in valid_tokenizer_files:
                 tokenizer_file = "tekken.json"
             else:
-                tokenizer_file = sorted(valid_tokenizer_files)[-1]
+                tokenizer_file = max(valid_tokenizer_files)
             logger.warning(
                 f"Multiple tokenizer files found in directory: {pretrained_model_name_or_path}. Using {tokenizer_file}."
             )
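
Note: there is no key function here, so the comparison is plain lexicographic string order, and max(valid_tokenizer_files) matches what sorted(valid_tokenizer_files)[-1] returned. A quick sketch with invented file names:

# Invented file names, only to show the equivalence for plain string comparison.
valid_tokenizer_files = ["tokenizer.model.v1", "tokenizer.model.v3", "tokenizer.model.v2"]
assert max(valid_tokenizer_files) == sorted(valid_tokenizer_files)[-1] == "tokenizer.model.v3"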

tests/trainer/test_trainer.py

Lines changed: 1 addition & 1 deletion
@@ -3478,7 +3478,7 @@ def test_resume_training_with_randomness(self):
         checkpoints = [d for d in os.listdir(tmp_dir) if d.startswith("checkpoint-")]
         # There should be one checkpoint per epoch.
         self.assertEqual(len(checkpoints), 3)
-        checkpoint_dir = sorted(checkpoints, key=lambda x: int(x.replace("checkpoint-", "")))[0]
+        checkpoint_dir = min(checkpoints, key=lambda x: int(x.replace("checkpoint-", "")))

         trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, checkpoint_dir))
         (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
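
Note: the key still matters in the test: it strips the "checkpoint-" prefix and compares the step as an int, so the earliest checkpoint is chosen numerically rather than lexicographically (as strings, "checkpoint-1000" would sort before "checkpoint-500"). A short sketch with invented directory names:

# Invented checkpoint directory names; the int key avoids string-order surprises.
checkpoints = ["checkpoint-1500", "checkpoint-500", "checkpoint-1000"]
earliest = min(checkpoints, key=lambda x: int(x.replace("checkpoint-", "")))
assert earliest == "checkpoint-500"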

utils/add_pipeline_model_mapping_to_test.py

Lines changed: 1 addition & 1 deletion
@@ -134,7 +134,7 @@ def find_test_class(test_file):
             break
     # Take the test class with the shortest name (just a heuristic)
     if target_test_class is None and len(test_classes) > 0:
-        target_test_class = sorted(test_classes, key=lambda x: (len(x.__name__), x.__name__))[0]
+        target_test_class = min(test_classes, key=lambda x: (len(x.__name__), x.__name__))

     return target_test_class

utils/create_dummy_models.py

Lines changed: 1 addition & 1 deletion
@@ -389,7 +389,7 @@ def get_tiny_config(config_class, model_class=None, **model_tester_kwargs):
         # This is to avoid `T5EncoderOnlyModelTest` is used instead of `T5ModelTest`, which has
         # `is_encoder_decoder=False` and causes some pipeline tests failing (also failures in `Optimum` CI).
         # TODO: More fine grained control of the desired tester class.
-        model_tester_class = sorted(tester_classes, key=lambda x: (len(x.__name__), x.__name__))[0]
+        model_tester_class = min(tester_classes, key=lambda x: (len(x.__name__), x.__name__))
     except ModuleNotFoundError:
         error = f"Tiny config not created for {model_type} - cannot find the testing module from the model name."
         raise ValueError(error)

utils/deprecate_models.py

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@ def get_last_stable_minor_release():
     last_stable_minor_releases = [
         release for release in release_data["releases"] if release.startswith(last_major_minor)
     ]
-    last_stable_release = sorted(last_stable_minor_releases, key=version.parse)[-1]
+    last_stable_release = max(last_stable_minor_releases, key=version.parse)

     return last_stable_release
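
Note: because the key is version.parse, releases are compared as versions rather than as strings, so max() returns the newest patch release just as sorted(...)[-1] did. A short sketch with invented release numbers, assuming the packaging library (which provides version.parse) is installed:

# Invented release strings; as plain strings "4.57.10" would sort before "4.57.2",
# but version.parse compares them as versions.
from packaging import version

last_stable_minor_releases = ["4.57.0", "4.57.2", "4.57.10"]
assert max(last_stable_minor_releases, key=version.parse) == "4.57.10"
assert sorted(last_stable_minor_releases, key=version.parse)[-1] == "4.57.10"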
