JaccardIndex

ignite.metrics.JaccardIndex(cm, ignore_index=None)

Calculates the Jaccard Index using the ConfusionMatrix metric. The implementation is based on IoU().

\text{J}(A, B) = \frac{\lvert A \cap B \rvert}{\lvert A \cup B \rvert}
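
To make the confusion-matrix formulation concrete, here is a minimal sketch in plain PyTorch (jaccard_from_cm is a hypothetical helper, not part of the ignite API): the diagonal of the confusion matrix gives the per-class intersection, and row sum plus column sum minus the diagonal gives the union.

import torch

# Hypothetical helper (not part of ignite): per-class Jaccard index from a
# confusion matrix whose rows are true classes and columns are predictions.
def jaccard_from_cm(cm: torch.Tensor) -> torch.Tensor:
    intersection = cm.diag()                              # |A ∩ B| per class
    union = cm.sum(dim=1) + cm.sum(dim=0) - intersection  # |A ∪ B| per class
    return intersection / (union + 1e-15)                 # guard against empty classes

cm = torch.tensor([[1., 1., 0.],
                   [0., 2., 0.],
                   [0., 1., 0.]])
print(jaccard_from_cm(cm))  # tensor([0.5000, 0.5000, 0.0000])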
Parameters

    cm (ConfusionMatrix) – instance of confusion matrix metric
    ignore_index (Optional[int]) – index to ignore, e.g. background index

Returns

MetricsLambda

Return type

ignite.metrics.metrics_lambda.MetricsLambda
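
Since the return value is a MetricsLambda, it composes with further operations like any other metric. The sketch below (mirroring how ignite builds mIoU from IoU) reduces the per-class vector to a single scalar:

from ignite.metrics import ConfusionMatrix, JaccardIndex

cm = ConfusionMatrix(num_classes=3)
jac = JaccardIndex(cm, ignore_index=0)  # a MetricsLambda over the confusion matrix
# MetricsLambda composes: average the per-class values into one scalar.
mean_jac = jac.mean()

The composed metric attaches to an engine exactly like the original, e.g. mean_jac.attach(default_evaluator, 'mean_jac').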

Examples

For more information on how the metric works with Engine, visit Attach Engine API.

from collections import OrderedDict

import torch
from torch import nn, optim

from ignite.engine import *
from ignite.handlers import *
from ignite.metrics import *
from ignite.utils import *
from ignite.contrib.metrics.regression import *
from ignite.contrib.metrics import *

# create default evaluator for doctests

def eval_step(engine, batch):
    return batch

default_evaluator = Engine(eval_step)

# create default optimizer for doctests

param_tensor = torch.zeros([1], requires_grad=True)
default_optimizer = torch.optim.SGD([param_tensor], lr=0.1)

# create default trainer for doctests
# as handlers could be attached to the trainer,
# each test must define its own trainer using `.. testsetup::`

def get_default_trainer():

    def train_step(engine, batch):
        return batch

    return Engine(train_step)

# create default model for doctests

default_model = nn.Sequential(OrderedDict([
    ('base', nn.Linear(4, 2)),
    ('fc', nn.Linear(2, 1))
]))

manual_seed(666)
# build the metric on top of a confusion matrix, excluding class 0 from the result
cm = ConfusionMatrix(num_classes=3)
metric = JaccardIndex(cm, ignore_index=0)
metric.attach(default_evaluator, 'jac')
# ground-truth class indices and per-class scores (predicted class = row-wise argmax)
y_true = torch.tensor([0, 1, 0, 1, 2])
y_pred = torch.tensor([
    [0.0, 1.0, 0.0],
    [0.0, 1.0, 0.0],
    [1.0, 0.0, 0.0],
    [0.0, 1.0, 0.0],
    [0.0, 1.0, 0.0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['jac'])
tensor([0.5000, 0.0000], dtype=torch.float64)
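
To see where these numbers come from: the predicted classes are the row-wise argmax of y_pred, i.e. [1, 1, 0, 1, 1]. With ignore_index=0 the background class is dropped from the output, so the tensor holds classes 1 and 2. Class 1 is predicted at four positions and is true at two of them (intersection 2, union 4, J = 0.5), while class 2 is never predicted (intersection 0, union 1, J = 0.0).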