Commit 89b6255: Lint and format changes
Parent: a4365ae

4 files changed (+20, -19 lines)

opacus/optimizers/__init__.py (6 additions, 6 deletions)

@@ -15,21 +15,21 @@
 from .adaclipoptimizer import AdaClipDPOptimizer
 from .ddp_perlayeroptimizer import SimpleDistributedPerLayerOptimizer
 from .ddpoptimizer import DistributedDPOptimizer
+from .ddpoptimizer_automatic_clipping import (
+    DistributedDPAutomaticClippingOptimizer,
+    DistributedDPPerLayerAutomaticClippingOptimizer,
+)
 from .ddpoptimizer_fast_gradient_clipping import (
     DistributedDPOptimizerFastGradientClipping,
 )
 from .fsdpoptimizer_fast_gradient_clipping import FSDPOptimizerFastGradientClipping
 from .optimizer import DPOptimizer
-from .optimizer_fast_gradient_clipping import DPOptimizerFastGradientClipping
-from .perlayeroptimizer import DPPerLayerOptimizer
 from .optimizer_automatic_clipping import (
     DPAutomaticClippingOptimizer,
     DPPerLayerAutomaticClippingOptimizer,
 )
-from .ddpoptimizer_automatic_clipping import (
-    DistributedDPAutomaticClippingOptimizer,
-    DistributedDPPerLayerAutomaticClippingOptimizer,
-)
+from .optimizer_fast_gradient_clipping import DPOptimizerFastGradientClipping
+from .perlayeroptimizer import DPPerLayerOptimizer
 
 
 __all__ = [
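This hunk only reorders the same imports into isort-style alphabetical order; nothing is added or removed. Because these classes are imported at package level in __init__.py, they remain importable from opacus.optimizers directly. A minimal usage sketch (illustrative only, not part of the commit):

    # Illustrative: the package-level re-exports above let callers import
    # the optimizers from opacus.optimizers rather than from submodules.
    from opacus.optimizers import (
        DistributedDPAutomaticClippingOptimizer,
        DPAutomaticClippingOptimizer,
    )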

opacus/optimizers/ddpoptimizer_automatic_clipping.py (1 addition, 2 deletions)

@@ -17,12 +17,11 @@
 from typing import Callable, List, Optional
 
 import torch
-from torch.optim import Optimizer
-
 from opacus.optimizers.optimizer_automatic_clipping import (
     DPAutomaticClippingOptimizer,
     DPPerLayerAutomaticClippingOptimizer,
 )
+from torch.optim import Optimizer
 
 
 class DistributedDPAutomaticClippingOptimizer(DPAutomaticClippingOptimizer):

opacus/optimizers/optimizer_automatic_clipping.py (8 additions, 4 deletions)

@@ -18,9 +18,12 @@
 from typing import List
 
 import torch
-from opacus.optimizers.optimizer import _check_processed_flag, _mark_as_processed
+from opacus.optimizers.optimizer import (
+    DPOptimizer,
+    _check_processed_flag,
+    _mark_as_processed,
+)
 from opacus.optimizers.perlayeroptimizer import DPPerLayerOptimizer
-from opacus.optimizers.optimizer import DPOptimizer
 
 
 class DPAutomaticClippingOptimizer(DPOptimizer):
@@ -102,8 +105,9 @@ class DPPerLayerAutomaticClippingOptimizer(DPPerLayerOptimizer):
     Per-layer variant of automatic clipping.
 
     For each parameter (layer), we compute the per-sample clip factor using the
-    corresponding per-layer ``max_grad_norm``:
-    ``per_sample_clip_factor = max_grad_norm / (per_sample_norms + 0.01)``
+    corresponding per-layer ``max_grad_norm``::
+
+        per_sample_clip_factor = max_grad_norm / (per_sample_norms + 0.01)
 
     This allows each layer to have different clipping behavior based on its own
     gradient magnitude distribution, which can improve training stability and
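The docstring formula in the second hunk is a one-line tensor computation. A minimal sketch of that arithmetic with made-up values (not the Opacus implementation, just the formula):

    import torch

    # Sketch of the per-layer automatic clipping factor from the docstring:
    # per_sample_clip_factor = max_grad_norm / (per_sample_norms + 0.01).
    # The +0.01 keeps the denominator positive, so samples with tiny
    # gradient norms get a large scale rather than a division blow-up.
    max_grad_norm = 1.0
    per_sample_norms = torch.tensor([0.5, 2.0, 10.0])  # one norm per sample
    per_sample_clip_factor = max_grad_norm / (per_sample_norms + 0.01)
    # tensor([1.9608, 0.4975, 0.0999]); each per-sample gradient for this
    # layer is scaled by its factor before aggregation.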

opacus/tests/multigpu_automatic_clipping_test.py (5 additions, 7 deletions)

@@ -115,7 +115,9 @@ def demo_basic(rank, weight, world_size, dp, clipping):
     if clipping == "automatic":
         assert isinstance(optimizer, DistributedDPAutomaticClippingOptimizer)
     elif clipping == "automatic_per_layer":
-        assert isinstance(optimizer, DistributedDPPerLayerAutomaticClippingOptimizer)
+        assert isinstance(
+            optimizer, DistributedDPPerLayerAutomaticClippingOptimizer
+        )
 
     for x, y in data_loader:
         outputs = ddp_model(x.to(rank))
@@ -167,9 +169,7 @@ def test_automatic_clipping_gradient_correct(self) -> None:
             clipping=None,
         )
 
-        self.assertTrue(
-            torch.allclose(weight_dp, weight_nodp, atol=1e-5, rtol=1e-3)
-        )
+        self.assertTrue(torch.allclose(weight_dp, weight_nodp, atol=1e-5, rtol=1e-3))
 
     @unittest.skipIf(torch.cuda.device_count() < 2, "Need at least 2 GPUs")
     def test_automatic_per_layer_clipping_gradient_correct(self) -> None:
@@ -199,9 +199,7 @@ def test_automatic_per_layer_clipping_gradient_correct(self) -> None:
             clipping=None,
         )
 
-        self.assertTrue(
-            torch.allclose(weight_dp, weight_nodp, atol=1e-5, rtol=1e-3)
-        )
+        self.assertTrue(torch.allclose(weight_dp, weight_nodp, atol=1e-5, rtol=1e-3))
 
     @unittest.skipIf(torch.cuda.device_count() < 2, "Need at least 2 GPUs")
     def test_automatic_clipping_optimizer_type(self) -> None:
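The torch.allclose assertions above compare the DP and non-DP weights elementwise: the check passes when |a - b| <= atol + rtol * |b| for every element. A small self-contained illustration with made-up tensors:

    import torch

    # torch.allclose(a, b, atol, rtol) is an elementwise tolerance check:
    # it returns True when |a - b| <= atol + rtol * |b| everywhere.
    a = torch.tensor([1.00000, 2.00010])
    b = torch.tensor([1.00001, 2.00000])
    print(torch.allclose(a, b, atol=1e-5, rtol=1e-3))  # True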
