
Commit 5baa2c0

Remove print
1 parent b581e05 commit 5baa2c0

1 file changed: 0 additions, 6 deletions

bigcode_eval/generation.py

Lines changed: 0 additions & 6 deletions
@@ -122,14 +122,10 @@ def parallel_generations(
         has_encoder=args.modeltype == "seq2seq",
         instruction_tokens=instruction_tokens,
     )
-    print("TokenizedDataset Finished.")
 
-    print("DataLoader Loading...")
     # do not confuse args.batch_size, which is actually the num_return_sequences
     ds_loader = DataLoader(ds_tokenized, batch_size=1)
-    print("DataLoader Loaded.")
 
-    print("Accelerator preparing...")
     is_loaded_in_8bit = getattr(model, "is_loaded_in_8bit", False)
     is_loaded_in_4bit = getattr(model, "is_loaded_in_4bit", False)
     if args.max_memory_per_gpu is not None:
@@ -143,7 +139,6 @@ def parallel_generations(
         # model.to() is not supported for 8bit and 4bit models
         model, ds_loader = accelerator.prepare(model, ds_loader)
 
-    print("Complete_code...")
     generations = complete_code(
         task,
         accelerator,
@@ -162,5 +157,4 @@ def parallel_generations(
         intermediate_save_generations_path=intermediate_save_generations_path,
         **gen_kwargs,
     )
-
     return generations
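For context, the hunks above sit on the pipeline: tokenized dataset -> DataLoader -> accelerator.prepare -> complete_code. Below is a minimal sketch of that prepare pattern only, not the harness itself: a toy linear model and random tensors stand in for the real model and TokenizedDataset, and a plain forward pass stands in for complete_code.

# Minimal sketch, assuming a placeholder model/dataset instead of the
# real bigcode_eval objects (hypothetical stand-ins for illustration).
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()

model = torch.nn.Linear(8, 8)                    # placeholder for the real model
ds_tokenized = TensorDataset(torch.randn(4, 8))  # placeholder for TokenizedDataset

# batch_size=1 as in the diff: args.batch_size in the harness is the number of
# return sequences per prompt, not the DataLoader batch size.
ds_loader = DataLoader(ds_tokenized, batch_size=1)

# model.to() is not supported for 8bit/4bit models, so both the model and the
# dataloader are wrapped by accelerator.prepare(), which also handles device placement.
model, ds_loader = accelerator.prepare(model, ds_loader)

for (batch,) in ds_loader:
    with torch.no_grad():
        output = model(batch)  # stands in for the call into complete_code(...)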
