from typing import Callable, Dict, Sequence, Tuple, Union, cast

import torch

from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce


class Loss(Metric):
    """Calculates the average loss according to the passed loss_fn.

    Args:
        loss_fn: a callable taking a prediction tensor, a target tensor, optionally other arguments,
            and returns the average loss over all observations in the batch.
        output_transform: a callable that is used to transform the
            :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
            form expected by the metric.
            This can be useful if, for example, you have a multi-output model and
            you want to compute the metric with respect to one of the outputs.
            The output is expected to be a tuple ``(prediction, target)`` or
            ``(prediction, target, kwargs)`` where ``kwargs`` is a dictionary of extra keyword
            arguments. If extra keyword arguments are provided, they are passed to ``loss_fn``.
        batch_size: a callable taking a target tensor that returns the
            first dimension size (usually the batch size).
        device: specifies which device updates are accumulated on. Setting the
            metric's device to be the same as your ``update`` arguments ensures the ``update``
            method is non-blocking. By default, CPU.

    Attributes:
        required_output_keys: defines the required keys to be found in ``engine.state.output``
            if the latter is a dictionary. Default, ``("y_pred", "y", "criterion_kwargs")``.
            This is useful when the criterion function requires additional arguments, which can
            be passed using ``criterion_kwargs``. See an example below.

    Examples:
        Let's implement a Loss metric that requires ``x``, ``y_pred``, ``y`` and ``criterion_kwargs``
        as input for the ``criterion`` function. In the example below we show how to set up the Loss
        metric (alongside standard metrics such as Accuracy) using an ``evaluator`` created with the
        :meth:`~ignite.engine.create_supervised_evaluator` method.

        For more information on how metrics work with :class:`~ignite.engine.engine.Engine`,
        visit :ref:`attach-engine`.

        .. include:: defaults.rst
            :start-after: :orphan:

        .. testcode::

            model = default_model
            criterion = nn.NLLLoss()
            metric = Loss(criterion)
            metric.attach(default_evaluator, 'loss')
            y_pred = torch.tensor([[0.1, 0.4, 0.5], [0.1, 0.7, 0.2]])
            y_true = torch.tensor([2, 2]).long()
            state = default_evaluator.run([[y_pred, y_true]])
            print(state.metrics['loss'])

        .. testoutput::

            -0.3499999...
    """

    required_output_keys = ("y_pred", "y", "criterion_kwargs")

    def __init__(
        self,
        loss_fn: Callable,
        output_transform: Callable = lambda x: x,
        batch_size: Callable = len,
        device: Union[str, torch.device] = torch.device("cpu"),
    ):
        super(Loss, self).__init__(output_transform, device=device)
        self._loss_fn = loss_fn
        self._batch_size = batch_size
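    # Note: the extracted snippet omits the accumulator initialization that ``update`` relies on.
    # A minimal sketch of the ``reset`` method, assuming the base ``Metric`` contract (it is
    # invoked by ``Metric.__init__`` and before each run to zero the accumulators):
    @reinit__is_reduced
    def reset(self) -> None:
        self._sum = torch.tensor(0.0, device=self._device)
        self._num_examples = 0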
    @reinit__is_reduced
    def update(self, output: Sequence[Union[torch.Tensor, Dict]]) -> None:
        # ``output`` is either ``(y_pred, y)`` or ``(y_pred, y, kwargs)``; the optional kwargs
        # dictionary is forwarded to ``loss_fn``.
        if len(output) == 2:
            y_pred, y = cast(Tuple[torch.Tensor, torch.Tensor], output)
            kwargs: Dict = {}
        else:
            y_pred, y, kwargs = cast(Tuple[torch.Tensor, torch.Tensor, Dict], output)
        average_loss = self._loss_fn(y_pred, y, **kwargs).detach()

        if len(average_loss.shape) != 0:
            raise ValueError("loss_fn did not return the average loss.")

        # Accumulate the batch-size-weighted loss so ``compute`` can return a per-example average.
        n = self._batch_size(y)
        self._sum += average_loss.to(self._device) * n
        self._num_examples += n
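    # Example of the three-element ``update`` form (the ``weight`` keyword below only illustrates
    # a hypothetical ``loss_fn`` signature; it is not part of this class's API):
    #     metric.update((y_pred, y, {"weight": w}))
    # which is equivalent to calling ``loss_fn(y_pred, y, weight=w)`` for this batch.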
    @sync_all_reduce("_sum", "_num_examples")
    def compute(self) -> float:
        if self._num_examples == 0:
            raise NotComputableError("Loss must have at least one example before it can be computed.")
        return self._sum.item() / self._num_examples
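# A minimal standalone usage sketch (outside an Engine), assuming the ``reset`` sketch above.
# ``nn.NLLLoss`` expects log-probabilities; the inputs simply mirror the docstring example.
if __name__ == "__main__":
    import torch.nn as nn

    metric = Loss(nn.NLLLoss())
    metric.reset()
    y_pred = torch.tensor([[0.1, 0.4, 0.5], [0.1, 0.7, 0.2]])
    y_true = torch.tensor([2, 2]).long()
    metric.update((y_pred, y_true))
    print(metric.compute())  # approximately -0.35, i.e. -(0.5 + 0.2) / 2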