PiecewiseLinearStateScheduler
- class ignite.handlers.state_param_scheduler.PiecewiseLinearStateScheduler(milestones_values, param_name, save_history=False, create_new=False)
Piecewise linear state parameter scheduler.
- Parameters
milestones_values (List[Tuple[int, float]]) – list of tuples (event index, parameter value) representing the milestones and parameter values. Milestones should be increasing integers.
param_name (str) – name of parameter to update.
save_history (bool) – whether to log the parameter values to engine.state.param_history (default=False); see the sketch after this parameter list.
create_new (bool) – whether to create param_name on engine.state, taking into account whether a param_name attribute already exists or not. Overrides an existing attribute by default (default=False).
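With save_history=True, each applied value is logged on the engine. A minimal sketch of reading that history, reusing the attach pattern from the example below; the shortened milestones are chosen purely for illustration, and the exact shape of the stored entries is an assumption rather than a documented guarantee:

from ignite.engine import Engine, Events
from ignite.handlers import PiecewiseLinearStateScheduler

trainer = Engine(lambda engine, batch: batch)

scheduler = PiecewiseLinearStateScheduler(
    param_name="param",
    milestones_values=[(2, 1.0), (4, 0.5)],  # illustrative milestones
    save_history=True,
    create_new=True,
)
scheduler.attach(trainer, Events.EPOCH_COMPLETED)

trainer.run([0], max_epochs=4)

# param_history maps the parameter name to the values applied so far;
# with these milestones the schedule is 1.0, 1.0, 0.75, 0.5
print(trainer.state.param_history["param"])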
Examples
from collections import OrderedDict

import torch
from torch import nn, optim

from ignite.engine import *
from ignite.handlers import *
from ignite.metrics import *
from ignite.metrics.clustering import *
from ignite.metrics.regression import *
from ignite.utils import *

# create default evaluator for doctests
def eval_step(engine, batch):
    return batch

default_evaluator = Engine(eval_step)

# create default optimizer for doctests
param_tensor = torch.zeros([1], requires_grad=True)
default_optimizer = torch.optim.SGD([param_tensor], lr=0.1)

# create default trainer for doctests
# as handlers could be attached to the trainer,
# each test must define its own trainer using `.. testsetup:`
def get_default_trainer():
    def train_step(engine, batch):
        return batch
    return Engine(train_step)

# create default model for doctests
default_model = nn.Sequential(OrderedDict([
    ('base', nn.Linear(4, 2)),
    ('fc', nn.Linear(2, 1))
]))

manual_seed(666)
default_trainer = get_default_trainer()

param_scheduler = PiecewiseLinearStateScheduler(
    param_name="param",
    milestones_values=[(5, 1.0), (10, 0.8), (15, 0.6)],
    create_new=True
)

# first milestone is (5, 1.0): param stays at 1.0 for epochs 1 to 5
# next milestone is (10, 0.8): param linearly decreases from 1.0 to 0.8,
# reaching 0.8 at epoch 10
# last milestone is (15, 0.6): param linearly decreases from 0.8 to 0.6,
# reaching 0.6 at epoch 15
param_scheduler.attach(default_trainer, Events.EPOCH_COMPLETED)

@default_trainer.on(Events.EPOCH_COMPLETED)
def print_param():
    print(default_trainer.state.param)

default_trainer.run([0], max_epochs=15)
1.0
1.0
1.0
1.0
1.0
0.96
0.92
0.88
0.8400...
0.8
0.76
0.72
0.68
0.64
0.6
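The printed values follow from straight-line interpolation between consecutive milestones. A minimal plain-Python sketch of that computation, as an illustration of the formula rather than the library's internal implementation:

def piecewise_linear(event_index, milestones_values):
    # before the first milestone: hold the first value
    if event_index <= milestones_values[0][0]:
        return milestones_values[0][1]
    # after the last milestone: hold the last value
    if event_index >= milestones_values[-1][0]:
        return milestones_values[-1][1]
    # otherwise interpolate linearly between the surrounding milestones
    for (e0, v0), (e1, v1) in zip(milestones_values, milestones_values[1:]):
        if e0 <= event_index <= e1:
            return v0 + (v1 - v0) * (event_index - e0) / (e1 - e0)

milestones = [(5, 1.0), (10, 0.8), (15, 0.6)]
print([round(piecewise_linear(e, milestones), 4) for e in range(1, 16)])
# [1.0, 1.0, 1.0, 1.0, 1.0, 0.96, 0.92, 0.88, 0.84, 0.8, 0.76, 0.72, 0.68, 0.64, 0.6]
# (rounded; the raw float at epoch 9 prints as 0.8400... above)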
New in version 0.4.7.
Methods
get_param() – Method to get current parameter values.
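get_param can also be queried on a scheduler that is not yet attached to an engine; a hedged sketch, assuming the internal event counter starts before the first milestone so that the first milestone value is returned:

from ignite.handlers import PiecewiseLinearStateScheduler

scheduler = PiecewiseLinearStateScheduler(
    param_name="param",
    milestones_values=[(5, 1.0), (10, 0.8), (15, 0.6)],
    create_new=True,
)

# before any engine events have fired, the event counter sits before the
# first milestone, so the first milestone value is expected
print(scheduler.get_param())  # 1.0 (assumed; depends on the internal counter)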