Commit f7392fa

Fix tqdm logging (#711)

* fix tqdm logging
* suppress stderr draws
* add env var for custom tqdm behavior

1 parent: 7477de0

File tree

2 files changed: +6 / -2 lines changed


src/lighteval/metrics/stderr.py
Lines changed: 4 additions & 0 deletions

@@ -26,6 +26,7 @@
 
 import logging
 import math
+import os
 import random
 from typing import Callable, Optional
 
@@ -36,6 +37,8 @@
 
 logger = logging.getLogger(__name__)
 
+LIGHTEVAL_DISABLE_TQDM = int(os.environ.get("LIGHTEVAL_DISABLE_TQDM", "0"))
+
 
 def _stddev(arr):
     mu = np.mean(arr)
@@ -66,6 +69,7 @@ def __call__(self, cur_experiment):
                 [(rnd.choices(population, k=len(population)),) for _ in range(self.number_draws)],
                 total=self.number_draws,
                 desc="Sampling bootstrap iterations",
+                disable=bool(LIGHTEVAL_DISABLE_TQDM),
             ),
         )
         return samplings
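
For context, the new LIGHTEVAL_DISABLE_TQDM constant simply feeds tqdm's standard disable flag, so the bootstrap loop runs unchanged but stays silent when the variable is set. The following is a minimal standalone sketch of that pattern; the function name sample_bootstrap_draws and its arguments are illustrative stand-ins, not code from this repository.

    import os
    import random

    from tqdm import tqdm

    # Read the switch once at import time, mirroring the constant added in stderr.py.
    # Any non-zero value (e.g. LIGHTEVAL_DISABLE_TQDM=1) hides the progress bar.
    LIGHTEVAL_DISABLE_TQDM = int(os.environ.get("LIGHTEVAL_DISABLE_TQDM", "0"))


    def sample_bootstrap_draws(population, number_draws=1000, seed=1234):
        """Return number_draws resamples (with replacement) of population."""
        rnd = random.Random(seed)
        return [
            rnd.choices(population, k=len(population))
            for _ in tqdm(
                range(number_draws),
                total=number_draws,
                desc="Sampling bootstrap iterations",
                disable=bool(LIGHTEVAL_DISABLE_TQDM),  # silent when the env var is set
            )
        ]


    if __name__ == "__main__":
        draws = sample_bootstrap_draws([0.2, 0.4, 0.9, 1.0], number_draws=10)
        print(f"collected {len(draws)} bootstrap draws")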

src/lighteval/models/transformers/transformers_model.py
Lines changed: 2 additions & 2 deletions

@@ -763,7 +763,7 @@ def _loglikelihood_tokens(
         starting_batch_size = STARTING_BATCH_SIZE
         res = []
 
-        for split in tqdm(dataset.splits_iterator()):
+        for split in tqdm(dataset.splits_iterator(), disable=self.disable_tqdm):
             context_enc = split[0].tokenized_context
             continuation_enc = split[0].tokenized_continuation
             if rolling:  # we take all the sequence in rolling mode
@@ -1007,7 +1007,7 @@ def _loglikelihood_single_token(
         starting_batch_size = STARTING_BATCH_SIZE
         res = []
 
-        for split in tqdm(dataset.splits_iterator()):
+        for split in tqdm(dataset.splits_iterator(), disable=self.disable_tqdm):
             context_enc = split[0].tokenized_context
             max_context = len(context_enc[-self.max_length :])
             batch_size = self._get_batch_size(override_bs=self.config.batch_size, max_input_length=max_context)
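
Taken together, the two files use two different switches: the bootstrap bars in stderr.py obey the new LIGHTEVAL_DISABLE_TQDM environment variable, while the transformers model reuses its existing self.disable_tqdm attribute. A minimal sketch of opting out of the bootstrap bars from Python, assuming only that the environment variable is read once at import time as shown above:

    import os

    # Must be set before any lighteval module is imported, because
    # LIGHTEVAL_DISABLE_TQDM is evaluated once when stderr.py is loaded.
    os.environ["LIGHTEVAL_DISABLE_TQDM"] = "1"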
