Skip to content
This repository was archived by the owner on Aug 28, 2025. It is now read-only.

Commit 5235ab9

Browse files
authored
Bump version of black to 22.3.0 (#153)
* Bump version of black to 22.3.0
* fix example
1 parent 0b955b5 commit 5235ab9

File tree

5 files changed

+8
-8
lines changed

5 files changed

+8
-8
lines changed

.pre-commit-config.yaml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -41,7 +41,7 @@ repos:
4141
- id: isort
4242

4343
- repo: https://github.com/psf/black
44-
rev: 21.12b0
44+
rev: 22.3.0
4545
hooks:
4646
- id: black
4747
name: Format code

course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -314,7 +314,7 @@
314314

315315
# %%
316316
a = x + 2
317-
b = a ** 2
317+
b = a**2
318318
c = b + 3
319319
y = c.mean()
320320
print("Y", y)

course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1042,8 +1042,8 @@ def train_curve(optimizer_func, curve_func=pathological_curve_loss, num_updates=
10421042
# %%
10431043
def bivar_gaussian(w1, w2, x_mean=0.0, y_mean=0.0, x_sig=1.0, y_sig=1.0):
10441044
norm = 1 / (2 * np.pi * x_sig * y_sig)
1045-
x_exp = (-1 * (w1 - x_mean) ** 2) / (2 * x_sig ** 2)
1046-
y_exp = (-1 * (w2 - y_mean) ** 2) / (2 * y_sig ** 2)
1045+
x_exp = (-1 * (w1 - x_mean) ** 2) / (2 * x_sig**2)
1046+
y_exp = (-1 * (w2 - y_mean) ** 2) / (2 * y_sig**2)
10471047
return norm * torch.exp(x_exp + y_exp)
10481048

10491049

course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -497,7 +497,7 @@ def training_step(self, batch, batch_idx):
497497
real_out, fake_out = self.cnn(inp_imgs).chunk(2, dim=0)
498498

499499
# Calculate losses
500-
reg_loss = self.hparams.alpha * (real_out ** 2 + fake_out ** 2).mean()
500+
reg_loss = self.hparams.alpha * (real_out**2 + fake_out**2).mean()
501501
cdiv_loss = fake_out.mean() - real_out.mean()
502502
loss = reg_loss + cdiv_loss
503503

course_UvA-DL/11-vision-transformer/Vision_Transformer.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -287,7 +287,7 @@ def __init__(
287287
self.patch_size = patch_size
288288

289289
# Layers/Networks
290-
self.input_layer = nn.Linear(num_channels * (patch_size ** 2), embed_dim)
290+
self.input_layer = nn.Linear(num_channels * (patch_size**2), embed_dim)
291291
self.transformer = nn.Sequential(
292292
*(AttentionBlock(embed_dim, hidden_dim, num_heads, dropout=dropout) for _ in range(num_layers))
293293
)
@@ -403,8 +403,8 @@ def train_model(**kwargs):
403403
model = ViT.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
404404

405405
# Test best model on validation and test set
406-
val_result = trainer.test(model, test_dataloaders=val_loader, verbose=False)
407-
test_result = trainer.test(model, test_dataloaders=test_loader, verbose=False)
406+
val_result = trainer.test(model, dataloaders=val_loader, verbose=False)
407+
test_result = trainer.test(model, dataloaders=test_loader, verbose=False)
408408
result = {"test": test_result[0]["test_acc"], "val": val_result[0]["test_acc"]}
409409

410410
return model, result

0 commit comments

Comments (0)