Skip to content

Commit 10c8f60

Browse files
amyreese
authored and facebook-github-bot committed
apply new formatting config
Summary: pyfmt now specifies a target Python version of 3.8 when formatting with black. With this new config, black adds trailing commas to all multiline function calls. This applies the new formatting as part of rolling out the linttool integration for pyfmt. Tags: paintitblack. Reviewed By: zertosh, lisroach. Differential Revision: D37084377. fbshipit-source-id: 781a1b883a381a172e54d6e447137657977876b4
1 parent a5b075b commit 10c8f60

File tree

5 files changed

+10
-10
lines changed

5 files changed

+10
-10
lines changed

botorch/models/kernels/categorical.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -28,7 +28,7 @@ def forward(
2828
x2: Tensor,
2929
diag: bool = False,
3030
last_dim_is_batch: bool = False,
31-
**kwargs
31+
**kwargs,
3232
) -> Tensor:
3333
delta = x1.unsqueeze(-2) != x2.unsqueeze(-3)
3434
dists = delta / self.lengthscale.unsqueeze(-2)

test/acquisition/multi_objective/test_multi_output_risk_measures.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -285,7 +285,7 @@ def set_equals(t1: Tensor, t2: Tensor) -> bool:
285285
[4, 1],
286286
[3.5, 3.5],
287287
],
288-
**tkwargs
288+
**tkwargs,
289289
)
290290
cpu_mvar = mvar.get_mvar_set_cpu(Y)
291291
gpu_mvar = mvar.get_mvar_set_gpu(Y)[0]
@@ -299,7 +299,7 @@ def set_equals(t1: Tensor, t2: Tensor) -> bool:
299299
[3, 3.5],
300300
[4, 2],
301301
],
302-
**tkwargs
302+
**tkwargs,
303303
)
304304
self.assertTrue(set_equals(cpu_mvar, expected_w_dominated))
305305
expected_non_dominated = expected_w_dominated[

test/acquisition/multi_objective/test_objective.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -76,7 +76,7 @@ def test_feasibility_weighted_mc_multi_output_objective(self):
7676
[4.0, 1.0],
7777
[5.0, 1.0],
7878
],
79-
**tkwargs
79+
**tkwargs,
8080
)
8181
variances = torch.zeros(5, 2, **tkwargs)
8282
mm = MockModel(MockPosterior(mean=means, variance=variances))

test/acquisition/test_objective.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -133,7 +133,7 @@ def test_expectation_posterior_transform(self):
133133
[0.2, 0.15, 0.2, 0.7, 1.0, 0.7],
134134
[0.1, 0.1, 0.05, 0.6, 0.7, 1.0],
135135
],
136-
**tkwargs
136+
**tkwargs,
137137
)
138138
org_mvn = MultivariateNormal(org_loc, lazify(org_covar))
139139
org_post = GPyTorchPosterior(mvn=org_mvn)
@@ -168,7 +168,7 @@ def test_expectation_posterior_transform(self):
168168
[0.0, 0.0, 0.0, 0.0, 0.4, 0.3, 1.4, 0.5],
169169
[0.0, 0.0, 0.0, 0.0, 0.3, 0.2, 0.5, 1.2],
170170
],
171-
**tkwargs
171+
**tkwargs,
172172
)
173173
# Making it batched by adding two more batches, mostly the same.
174174
org_loc = org_loc.repeat(3, 1)
@@ -217,7 +217,7 @@ def test_expectation_posterior_transform(self):
217217
[0.0, 0.0, 0.875, 0.35],
218218
[0.0, 0.0, 0.35, 1.05],
219219
],
220-
**tkwargs
220+
**tkwargs,
221221
).repeat(3, 1, 1)
222222
self.assertTrue(torch.allclose(tf_mvn.loc, expected_loc, atol=1e-3))
223223
self.assertTrue(

test/models/test_pairwise_gp.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -60,7 +60,7 @@ def test_pairwise_gp(self):
6060
batch_shape=batch_shape,
6161
X_dim=X_dim,
6262
likelihood_cls=likelihood_cls,
63-
**tkwargs
63+
**tkwargs,
6464
)
6565
train_X = model_kwargs["datapoints"]
6666
train_comp = model_kwargs["comparisons"]
@@ -201,7 +201,7 @@ def test_condition_on_observations(self):
201201
batch_shape=batch_shape,
202202
X_dim=X_dim,
203203
likelihood_cls=likelihood_cls,
204-
**tkwargs
204+
**tkwargs,
205205
)
206206
train_X = model_kwargs["datapoints"]
207207
train_comp = model_kwargs["comparisons"]
@@ -304,7 +304,7 @@ def test_fantasize(self):
304304
batch_shape=batch_shape,
305305
X_dim=X_dim,
306306
likelihood_cls=likelihood_cls,
307-
**tkwargs
307+
**tkwargs,
308308
)
309309

310310
# fantasize

0 commit comments

Comments
 (0)