ParamGroupScheduler

class ignite.handlers.param_scheduler.ParamGroupScheduler(schedulers, names=None, save_history=False)

Scheduler helper to group multiple schedulers into one.

Parameters
  • schedulers (List[ParamScheduler]) – list/tuple of parameter schedulers.

  • names (Optional[List[str]]) – list of names of schedulers.

  • save_history (bool) – whether to log the parameter values to engine.state.param_history (default: False).

Examples

from collections import OrderedDict

import torch
from torch import nn, optim

from ignite.engine import *
from ignite.handlers import *
from ignite.metrics import *
from ignite.metrics.regression import *
from ignite.utils import *

# create default evaluator for doctests

def eval_step(engine, batch):
    return batch

default_evaluator = Engine(eval_step)

# create default optimizer for doctests

param_tensor = torch.zeros([1], requires_grad=True)
default_optimizer = torch.optim.SGD([param_tensor], lr=0.1)

# create default trainer for doctests
# as handlers could be attached to the trainer,
# each test must define its own trainer using `.. testsetup::`

def get_default_trainer():

    def train_step(engine, batch):
        return batch

    return Engine(train_step)

# create default model for doctests

default_model = nn.Sequential(OrderedDict([
    ('base', nn.Linear(4, 2)),
    ('fc', nn.Linear(2, 1))
]))

manual_seed(666)
default_trainer = get_default_trainer()

optimizer = torch.optim.SGD(
    [
        {"params": default_model.base.parameters(), "lr": 0.001},
        {"params": default_model.fc.parameters(), "lr": 0.01},
    ]
)

# CosineAnnealing increases the learning rate from 0.0 to 1.0
# over a cycle of 4 iterations
scheduler_1 = CosineAnnealingScheduler(optimizer, "lr", 0.0, 1.0, 4, param_group_index=0)

# CosineAnnealing increases the learning rate from 0.0 to 0.1
# over a cycle of 4 iterations
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", 0.0, 0.1, 4, param_group_index=1)

scheduler = ParamGroupScheduler(schedulers=[scheduler_1, scheduler_2],
                                names=["lr (base)", "lr (fc)"])

default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)

@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
    print(optimizer.param_groups[0]["lr"],
          optimizer.param_groups[1]["lr"])

default_trainer.run([0] * 8, max_epochs=1)
0.0 0.0
0.1464... 0.01464...
0.4999... 0.04999...
0.8535... 0.08535...
...
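
If save_history=True is passed at construction, the scheduler logs the applied values under the names given above. The following sketch assumes (as with Ignite's other parameter schedulers) that they end up in engine.state.param_history; it is not part of the doctest output above:

# Sketch: record the scheduled values while training
# (assumes values are stored in engine.state.param_history, keyed by the scheduler names).
history_trainer = get_default_trainer()
history_scheduler = ParamGroupScheduler(
    schedulers=[
        CosineAnnealingScheduler(optimizer, "lr", 0.0, 1.0, 4, param_group_index=0),
        CosineAnnealingScheduler(optimizer, "lr", 0.0, 0.1, 4, param_group_index=1),
    ],
    names=["lr (base)", "lr (fc)"],
    save_history=True,
)
history_trainer.add_event_handler(Events.ITERATION_STARTED, history_scheduler)
history_trainer.run([0] * 8, max_epochs=1)

print(sorted(history_trainer.state.param_history.keys()))
# expected: ['lr (base)', 'lr (fc)'], each mapping to the value applied at every iteration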

New in version 0.4.5.

Methods

  • get_param – Method to get current schedulers' parameter values.

  • load_state_dict – Copies parameters from state_dict into this ParamScheduler.

  • simulate_values – Method to simulate scheduled values during num_events events.

  • state_dict – Returns a dictionary containing a whole state of ParamGroupScheduler.

get_param()

Method to get current schedulers' parameter values.

Return type

List[Union[float, List[float]]]

New in version 0.4.11.
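
With the scheduler built in the Examples section above, a quick check might look like this (a sketch; the concrete numbers depend on how far each scheduler has advanced in its cycle):

# Sketch: current value of each grouped scheduler, in the order the
# schedulers were passed to ParamGroupScheduler.
current_values = scheduler.get_param()
print(current_values)  # e.g. [0.0, 0.0] for freshly constructed CosineAnnealing schedulers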

load_state_dict(state_dict)

Copies parameters from state_dict into this ParamScheduler.

Parameters

state_dict (Mapping) – a dict containing parameters.

Return type

None

classmethod simulate_values(num_events, schedulers, **kwargs)

Method to simulate scheduled values during num_events events.

Parameters

  • num_events (int) – number of events during the simulation.

  • schedulers (List[ParamScheduler]) – list/tuple of parameter schedulers.

Returns

list of [event_index, scheduler_0_value, scheduler_1_value, …], where scheduler_i_value corresponds to the simulated param of scheduler i at ‘event_index’th event.

Return type

list
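
This makes it possible to preview a schedule without attaching anything to an engine; a minimal sketch reusing scheduler_1 and scheduler_2 from the Examples section above:

# Sketch: simulate 8 events for the two schedulers defined earlier.
# Each row is [event_index, scheduler_1_value, scheduler_2_value].
simulated = ParamGroupScheduler.simulate_values(num_events=8, schedulers=[scheduler_1, scheduler_2])
for event_index, lr_base, lr_fc in simulated:
    print(event_index, lr_base, lr_fc)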

state_dict()

Returns a dictionary containing a whole state of ParamGroupScheduler.

Returns

a dictionary containing a whole state of ParamGroupScheduler

Return type

dict
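
Together with load_state_dict, this allows the grouped scheduler to be checkpointed and restored; a minimal sketch reusing the objects from the Examples section above:

# Sketch: save the grouped scheduler's state and restore it into a fresh
# instance built with the same schedulers and names.
saved_state = scheduler.state_dict()

restored_scheduler = ParamGroupScheduler(
    schedulers=[
        CosineAnnealingScheduler(optimizer, "lr", 0.0, 1.0, 4, param_group_index=0),
        CosineAnnealingScheduler(optimizer, "lr", 0.0, 0.1, 4, param_group_index=1),
    ],
    names=["lr (base)", "lr (fc)"],
)
restored_scheduler.load_state_dict(saved_state)  # continues from the saved cycle position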