create_lr_scheduler_with_warmup

ignite.handlers.param_scheduler.create_lr_scheduler_with_warmup(lr_scheduler, warmup_start_value, warmup_duration, warmup_end_value=None, save_history=False, output_simulated_values=None)

Helper method to create a learning rate scheduler with a linear warm-up.

Parameters
  • lr_scheduler (Union[ParamScheduler, _LRScheduler]) – learning rate scheduler after the warm-up.

  • warmup_start_value (float) – learning rate start value of the warm-up phase.

  • warmup_duration (int) – warm-up phase duration, number of events.

  • warmup_end_value (Optional[float]) – learning rate end value of the warm-up phase (default: None). If None, warmup_end_value is set to the optimizer's initial lr.

  • save_history (bool) – whether to log the parameter values to engine.state.param_history (default: False).

  • output_simulated_values (Optional[List]) – optional output of simulated learning rate values. If output_simulated_values is a list of None values, e.g. [None] * 100, it will be filled with 100 simulated learning rate values after the call (see the sketch just below).
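
For example, here is a minimal sketch (not part of the official docstring) that previews the combined schedule without running a trainer, by passing a list of None placeholders as output_simulated_values; each filled entry is expected to be an (event index, lr value) pair:

import torch
from torch.optim.lr_scheduler import ExponentialLR

from ignite.handlers.param_scheduler import create_lr_scheduler_with_warmup

param_tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([param_tensor], lr=0.1)
torch_lr_scheduler = ExponentialLR(optimizer=optimizer, gamma=0.98)

# pass a list of None placeholders; it is filled in place with the simulated schedule
simulated_values = [None] * 8
scheduler = create_lr_scheduler_with_warmup(torch_lr_scheduler,
                                            warmup_start_value=0.0,
                                            warmup_end_value=0.1,
                                            warmup_duration=3,
                                            output_simulated_values=simulated_values)

# each filled entry is expected to be an (event index, lr value) pair
for event_index, lr in simulated_values:
    print(event_index, lr)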

Returns

A ConcatScheduler that applies the linear warm-up and then defers to lr_scheduler.

Return type

ConcatScheduler

Note

If the first learning rate value provided by lr_scheduler differs from warmup_end_value, an additional event is added after the warm-up phase so that the warm-up ends at warmup_end_value, after which lr_scheduler provides its learning rate values as usual (illustrated in the sketch below).
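
As a hedged illustration of this extra event, the snippet below continues the preview sketch above (same optimizer and torch_lr_scheduler, initial lr = 0.1) and picks a warmup_end_value that no longer matches the initial lr; the value 0.05 is chosen purely for illustration:

# continuing the preview sketch above (same optimizer and torch_lr_scheduler)
simulated_values = [None] * 8
scheduler = create_lr_scheduler_with_warmup(torch_lr_scheduler,
                                            warmup_start_value=0.0,
                                            warmup_end_value=0.05,  # no longer matches the initial lr of 0.1
                                            warmup_duration=3,
                                            output_simulated_values=simulated_values)

# the warm-up is expected to end at 0.05, followed by one extra event at the
# optimizer's initial lr before ExponentialLR decays the value as usual
for event_index, lr in simulated_values:
    print(event_index, lr)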

Examples

from collections import OrderedDict

import torch
from torch import nn, optim

from ignite.engine import *
from ignite.handlers import *
from ignite.metrics import *
from ignite.utils import *
from ignite.contrib.metrics.regression import *
from ignite.contrib.metrics import *

# create default evaluator for doctests

def eval_step(engine, batch):
    return batch

default_evaluator = Engine(eval_step)

# create default optimizer for doctests

param_tensor = torch.zeros([1], requires_grad=True)
default_optimizer = torch.optim.SGD([param_tensor], lr=0.1)

# create default trainer for doctests
# as handlers can be attached to the trainer,
# each test must define its own trainer using `.. testsetup:`

def get_default_trainer():

    def train_step(engine, batch):
        return batch

    return Engine(train_step)

# create default model for doctests

default_model = nn.Sequential(OrderedDict([
    ('base', nn.Linear(4, 2)),
    ('fc', nn.Linear(2, 1))
]))

manual_seed(666)
from torch.optim.lr_scheduler import ExponentialLR

torch_lr_scheduler = ExponentialLR(optimizer=default_optimizer, gamma=0.98)

default_trainer = get_default_trainer()

scheduler = create_lr_scheduler_with_warmup(torch_lr_scheduler,
                                            warmup_start_value=0.0,
                                            warmup_end_value=0.1,
                                            warmup_duration=3)

default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)

@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
    print(default_optimizer.param_groups[0]["lr"])

default_trainer.run([0] * 8, max_epochs=1)
0.0
0.05
0.1
0.098
0.09604
0.09411...
0.09223...
0.09039...
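
The wrapped scheduler does not have to be a PyTorch _LRScheduler; an ignite ParamScheduler is accepted as well. Below is a minimal sketch, not a doctest from the docstring, that wraps a CosineAnnealingScheduler and enables save_history, reusing the default_optimizer, get_default_trainer and Events set up above; the cosine values and cycle size are illustrative only:

from ignite.handlers import CosineAnnealingScheduler

default_trainer = get_default_trainer()

# an ignite ParamScheduler can be wrapped instead of a torch lr scheduler
cosine_scheduler = CosineAnnealingScheduler(default_optimizer, "lr",
                                            start_value=0.1, end_value=0.001,
                                            cycle_size=10)

scheduler = create_lr_scheduler_with_warmup(cosine_scheduler,
                                            warmup_start_value=0.0,
                                            warmup_end_value=0.1,
                                            warmup_duration=3,
                                            save_history=True)

default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)

default_trainer.run([0] * 8, max_epochs=1)

# with save_history=True, the applied values are expected to be logged under
# engine.state.param_history, keyed by the parameter name ("lr")
print(default_trainer.state.param_history["lr"])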

New in version 0.4.5.