
Commit c191b37

Change functional surgery method return values to None (#1543)
Some functional surgery methods previously returned the model object; they now modify the model in place and return None, and call sites are updated accordingly.
1 parent 48d40f9 commit c191b37
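Migration note, as a hedged sketch (not part of the diff itself): the functional surgery helpers in composer.functional now modify the model in place, so callers should drop the reassignment of the return value.

    import composer.functional as cf
    from torchvision import models

    my_model = models.resnet18()

    # old pattern, relied on the helper returning the model:
    #   my_model = cf.apply_blurpool(my_model)
    # new pattern, call purely for the in-place side effect:
    cf.apply_blurpool(my_model)
    cf.apply_squeeze_excite(my_model)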

11 files changed: +16 -38 lines


.ci/release_tests/example_1.py

Lines changed: 2 additions & 2 deletions
@@ -4,7 +4,7 @@
 my_model = models.resnet18()
 
 # add blurpool and squeeze excite layers
-my_model = cf.apply_blurpool(my_model)
-my_model = cf.apply_squeeze_excite(my_model)
+cf.apply_blurpool(my_model)
+cf.apply_squeeze_excite(my_model)
 
 # your own training code starts here

README.md

Lines changed: 2 additions & 2 deletions
@@ -112,8 +112,8 @@ from torchvision import models
 my_model = models.resnet18()
 
 # add blurpool and squeeze excite layers
-my_model = cf.apply_blurpool(my_model)
-my_model = cf.apply_squeeze_excite(my_model)
+cf.apply_blurpool(my_model)
+cf.apply_squeeze_excite(my_model)
 
 # your own training code starts here
 ```

composer/algorithms/blurpool/blurpool.py

Lines changed: 1 addition & 6 deletions
@@ -26,7 +26,7 @@ def apply_blurpool(model: torch.nn.Module,
                    replace_maxpools: bool = True,
                    blur_first: bool = True,
                    min_channels: int = 16,
-                   optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None) -> torch.nn.Module:
+                   optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None) -> None:
     """Add anti-aliasing filters to strided :class:`torch.nn.Conv2d` and/or :class:`torch.nn.MaxPool2d` modules.
 
     These filters increase invariance to small spatial shifts in the input
@@ -55,9 +55,6 @@ def apply_blurpool(model: torch.nn.Module,
             then it is safe to omit this parameter. These optimizers will see
             the correct model parameters.
 
-    Returns:
-        The modified model
-
     Example:
         .. testcode::
 
@@ -78,8 +75,6 @@ def apply_blurpool(model: torch.nn.Module,
     module_surgery.replace_module_classes(model, optimizers=optimizers, policies=transforms)
     _log_surgery_result(model)
 
-    return model
-
 
 class BlurPool(Algorithm):
     """`BlurPool <http://proceedings.mlr.press/v97/zhang19a.html>`_ adds anti-aliasing filters to convolutional layers.

composer/algorithms/ema/ema.py

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@
 __all__ = ['EMA', 'compute_ema']
 
 
-def compute_ema(model: torch.nn.Module, ema_model: torch.nn.Module, smoothing: float = 0.99):
+def compute_ema(model: torch.nn.Module, ema_model: torch.nn.Module, smoothing: float = 0.99) -> None:
     r"""Updates the weights of ``ema_model`` to be closer to the weights of ``model``
     according to an exponential weighted average. Weights are updated according to
 
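A hedged usage sketch for the annotated signature (copy.deepcopy and the torchvision backbone are assumptions, not taken from this diff): compute_ema updates ema_model toward model in place and returns nothing.

    import copy
    import composer.functional as cf
    from torchvision import models

    model = models.resnet18()
    ema_model = copy.deepcopy(model)  # shadow copy that accumulates the average

    # called after each optimizer step; ema_model is modified in place
    cf.compute_ema(model, ema_model, smoothing=0.99)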

composer/algorithms/factorize/factorize.py

Lines changed: 1 addition & 5 deletions
@@ -28,7 +28,7 @@ def apply_factorization(model: torch.nn.Module,
                         latent_channels: Union[int, float] = 0.25,
                         min_features: int = 512,
                         latent_features: Union[int, float] = 0.25,
-                        optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None) -> torch.nn.Module:
+                        optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None) -> None:
     """Replaces :class:`torch.nn.Linear` and :class:`torch.nn.Conv2d` modules with
     :class:`.FactorizedLinear` and :class:`.FactorizedConv2d` modules.
 
@@ -71,9 +71,6 @@ def apply_factorization(model: torch.nn.Module,
             then it is safe to omit this parameter. These optimizers will see
             the correct model parameters.
 
-    Returns:
-        The modified model
-
     Example:
         .. testcode::
 
@@ -92,7 +89,6 @@ def apply_factorization(model: torch.nn.Module,
                           min_features=min_features,
                           latent_features=latent_features,
                           optimizers=optimizers)
-    return model
 
 
 class Factorize(Algorithm):
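For illustration, a sketch of the in-place call using only parameters that appear in this hunk (the resnet50 stand-in is an assumption):

    import composer.functional as cf
    from torchvision import models

    model = models.resnet50()
    # eligible Conv2d/Linear modules are swapped for factorized versions in place
    cf.apply_factorization(model, latent_channels=0.25, min_features=512, latent_features=0.25)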

composer/algorithms/gated_linear_units/gated_linear_units.py

Lines changed: 0 additions & 1 deletion
@@ -96,7 +96,6 @@ def apply_gated_linear_units(model: torch.nn.Module,
             NoEffectWarning('No instances of BertIntermediate were found so Gated Linear Units will be skipped '
                             'as no modules can be replaced. This is likely because Gated Linear Units has already '
                             'been applied to this model.'))
-        return
 
     # get the activation functions used
     act_fns = {module.intermediate_act_fn for module in intermediate_modules}
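A hedged sketch of the corresponding call (the transformers import and checkpoint name are assumptions; the diff only shows that BertIntermediate modules are the surgery target):

    import composer.functional as cf
    from transformers import BertForSequenceClassification  # assumed example model

    model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
    # BertIntermediate blocks are rewritten in place; nothing is returned
    cf.apply_gated_linear_units(model)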

composer/algorithms/ghost_batchnorm/ghost_batchnorm.py

Lines changed: 2 additions & 3 deletions
@@ -19,7 +19,7 @@
 
 def apply_ghost_batchnorm(model: torch.nn.Module,
                           ghost_batch_size: int = 32,
-                          optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None) -> torch.nn.Module:
+                          optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None) -> None:
     """Replace batch normalization modules with ghost batch normalization modules.
 
     Ghost batch normalization modules split their input into chunks of
@@ -39,7 +39,7 @@ def apply_ghost_batchnorm(model: torch.nn.Module,
             model parameters.
 
     Returns:
-        The modified model
+        The number of modules modified.
 
     Example:
         .. testcode::
@@ -59,7 +59,6 @@ def maybe_replace(module: torch.nn.Module, module_index: int) -> Optional[torch.
     # now checks if `module.__class__ == cls`, rather than `isinstance(module, cls)`
     transforms = {cls: maybe_replace for cls in [torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d]}
     module_surgery.replace_module_classes(model, optimizers=optimizers, policies=transforms)
-    return model
 
 
 class GhostBatchNorm(Algorithm):
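As a brief sketch under the same assumptions as the README example (a torchvision resnet18 stand-in), apply_ghost_batchnorm also operates in place:

    import composer.functional as cf
    from torchvision import models

    model = models.resnet18()
    # BatchNorm modules are replaced with ghost variants that normalize 32-sample chunks
    cf.apply_ghost_batchnorm(model, ghost_batch_size=32)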

composer/algorithms/low_precision_layernorm/low_precision_layernorm.py

Lines changed: 0 additions & 2 deletions
@@ -103,8 +103,6 @@ def apply_low_precision_layernorm(model, optimizers: Union[torch.optim.Optimizer
         warnings.warn(NoEffectWarning('No instances of torch.nn.LayerNorm found.'))
     log.info(f'Successfully replaced {len(replaced_instances)} instances of LayerNorm with LowPrecisionLayerNorm')
 
-    return model
-
 
 class LowPrecisionLayerNorm(Algorithm):
     """

composer/algorithms/squeeze_excite/squeeze_excite.py

Lines changed: 5 additions & 10 deletions
@@ -21,7 +21,7 @@ def apply_squeeze_excite(
     latent_channels: float = 64,
     min_channels: int = 128,
     optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None,
-):
+) -> None:
     """Adds Squeeze-and-Excitation blocks (`Hu et al, 2019 <https://arxiv.org/abs/1709.01507>`_) after
     :class:`torch.nn.Conv2d` layers.
 
@@ -50,9 +50,6 @@ def apply_squeeze_excite(
             then it is safe to omit this parameter. These optimizers will see the correct
             model parameters.
 
-    Returns:
-        The modified model
-
     Example:
         .. testcode::
 
@@ -73,8 +70,6 @@ def convert_module(module: torch.nn.Module, module_index: int):
 
     module_surgery.replace_module_classes(model, optimizers=optimizers, policies={torch.nn.Conv2d: convert_module})
 
-    return model
-
 
 class SqueezeExcite2d(torch.nn.Module):
     """Squeeze-and-Excitation block from (`Hu et al, 2019 <https://arxiv.org/abs/1709.01507>`_)
@@ -164,10 +159,10 @@ def match(self, event: Event, state: State) -> bool:
         return event == Event.INIT
 
     def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
-        state.model = apply_squeeze_excite(state.model,
-                                           optimizers=state.optimizers,
-                                           latent_channels=self.latent_channels,
-                                           min_channels=self.min_channels)
+        apply_squeeze_excite(state.model,
+                             optimizers=state.optimizers,
+                             latent_channels=self.latent_channels,
+                             min_channels=self.min_channels)
         layer_count = module_surgery.count_module_instances(state.model, SqueezeExciteConv2d)
 
         log.info(f'Applied SqueezeExcite to model {state.model.__class__.__name__} '
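The same in-place pattern outside the Algorithm class, as a hedged sketch (the names mirror this hunk, but the exact import paths and the resnet18 stand-in are assumptions):

    import composer.functional as cf
    from composer.utils import module_surgery
    from composer.algorithms.squeeze_excite import SqueezeExciteConv2d
    from torchvision import models

    model = models.resnet18()
    cf.apply_squeeze_excite(model, latent_channels=64, min_channels=128)

    # the model object itself was mutated, so the new blocks can be counted on it directly
    n_se = module_surgery.count_module_instances(model, SqueezeExciteConv2d)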

composer/algorithms/stochastic_depth/stochastic_depth.py

Lines changed: 1 addition & 5 deletions
@@ -33,7 +33,7 @@ def apply_stochastic_depth(model: torch.nn.Module,
                            target_layer_name: str,
                            stochastic_method: str = 'block',
                            drop_rate: float = 0.2,
-                           drop_distribution: str = 'linear') -> torch.nn.Module:
+                           drop_distribution: str = 'linear') -> None:
     """Applies Stochastic Depth (`Huang et al, 2016 <https://arxiv.org/abs/1603.09382>`_) to the specified model.
 
     The algorithm replaces the specified target layer with a stochastic version
@@ -67,9 +67,6 @@ def apply_stochastic_depth(model: torch.nn.Module,
             starting with 0 drop rate and ending with ``drop_rate``.
             Default: ``"linear"``.
 
-    Returns:
-        The modified model
-
     Example:
         .. testcode::
 
@@ -95,7 +92,6 @@ def apply_stochastic_depth(model: torch.nn.Module,
                                                       stochastic_method=stochastic_method)
     transforms[target_layer] = stochastic_from_target_layer
     module_surgery.replace_module_classes(model, policies=transforms)
-    return model
 
 
 class StochasticDepth(Algorithm):
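A hedged sketch of the updated call (the 'ResNetBottleneck' target name and the resnet50 stand-in are assumptions, not shown in this diff):

    import composer.functional as cf
    from torchvision import models

    model = models.resnet50()
    # target blocks are replaced with stochastic versions in place; nothing is returned
    cf.apply_stochastic_depth(model, target_layer_name='ResNetBottleneck',
                              stochastic_method='block', drop_rate=0.2,
                              drop_distribution='linear')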
