DiceCoefficient
ignite.metrics.DiceCoefficient(cm, ignore_index=None)

Calculates the Dice coefficient for a given ConfusionMatrix metric.

- Parameters
  - cm (ConfusionMatrix) – instance of confusion matrix metric
  - ignore_index (Optional[int]) – index to ignore, e.g. background index
- Return type
  - MetricsLambda
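The per-class Dice values are derived from the confusion matrix counts. The following is a minimal sketch of that computation, not the library implementation; the function name dice_from_confusion_matrix and the eps smoothing term are assumptions for illustration:

import torch

def dice_from_confusion_matrix(cm_tensor, ignore_index=None, eps=1e-15):
    # cm_tensor: (num_classes, num_classes) confusion matrix,
    # rows = ground-truth classes, columns = predicted classes
    tp = cm_tensor.diag().float()            # true positives per class
    fp = cm_tensor.sum(dim=0).float() - tp   # predicted as the class but wrong
    fn = cm_tensor.sum(dim=1).float() - tp   # instances of the class that were missed
    dice = 2.0 * tp / (2.0 * tp + fp + fn + eps)
    if ignore_index is not None:             # drop e.g. the background class
        keep = [c for c in range(dice.numel()) if c != ignore_index]
        dice = dice[keep]
    return dice

With three classes and ignore_index=0, such a computation yields a two-element tensor, matching the example output below.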
Examples
For more information on how the metric works with Engine, visit Attach Engine API.

from collections import OrderedDict

import torch
from torch import nn, optim

from ignite.engine import *
from ignite.handlers import *
from ignite.metrics import *
from ignite.utils import *
from ignite.contrib.metrics.regression import *
from ignite.contrib.metrics import *

# create default evaluator for doctests
def eval_step(engine, batch):
    return batch

default_evaluator = Engine(eval_step)

# create default optimizer for doctests
param_tensor = torch.zeros([1], requires_grad=True)
default_optimizer = torch.optim.SGD([param_tensor], lr=0.1)

# create default trainer for doctests
# as handlers could be attached to the trainer,
# each test must define its own trainer using `.. testsetup:`
def get_default_trainer():
    def train_step(engine, batch):
        return batch
    return Engine(train_step)

# create default model for doctests
default_model = nn.Sequential(OrderedDict([
    ('base', nn.Linear(4, 2)),
    ('fc', nn.Linear(2, 1))
]))

manual_seed(666)
cm = ConfusionMatrix(num_classes=3)
metric = DiceCoefficient(cm, ignore_index=0)
metric.attach(default_evaluator, 'dice')

y_true = torch.tensor([0, 1, 0, 1, 2])
y_pred = torch.tensor([
    [0.0, 1.0, 0.0],
    [0.0, 1.0, 0.0],
    [1.0, 0.0, 0.0],
    [0.0, 1.0, 0.0],
    [0.0, 1.0, 0.0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['dice'])
tensor([0.6667, 0.0000], dtype=torch.float64)
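Because the returned metric is a MetricsLambda, it can be composed further, for example into a scalar mean Dice over the non-ignored classes. A minimal sketch, reusing y_pred and y_true from the example above; the metric name 'mean_dice' is just an illustration:

cm = ConfusionMatrix(num_classes=3)
# average the per-class Dice values into a single scalar metric
mean_dice = DiceCoefficient(cm, ignore_index=0).mean()
mean_dice.attach(default_evaluator, 'mean_dice')

state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mean_dice'])  # scalar mean over the non-ignored classes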