pydvl.influence.torch.influence_function_model

This module provides several implementations of InfluenceFunctionModel built on PyTorch.

TorchInfluenceFunctionModel

TorchInfluenceFunctionModel(
    model: Module, loss: Callable[[Tensor, Tensor], Tensor]
)

Bases: InfluenceFunctionModel[Tensor, DataLoader], ABC

Abstract base class for influence computation with torch models.

Source code in src/pydvl/influence/torch/influence_function_model.py
def __init__(
    self,
    model: nn.Module,
    loss: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
):
    self.loss = loss
    self.model = model
    self._n_parameters = sum(
        [p.numel() for p in model.parameters() if p.requires_grad]
    )
    self._model_device = next(
        (p.device for p in model.parameters() if p.requires_grad)
    )
    self._model_params = {
        k: p.detach() for k, p in self.model.named_parameters() if p.requires_grad
    }
    self._model_dtype = next(
        (p.dtype for p in model.parameters() if p.requires_grad)
    )
    super().__init__()
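
As the constructor shows, only parameters with requires_grad=True enter the influence computation, so individual layers can be excluded by freezing them before constructing the influence model. A minimal sketch in plain PyTorch (the network is illustrative):

import torch

net = torch.nn.Sequential(
    torch.nn.Linear(10, 32), torch.nn.ReLU(), torch.nn.Linear(32, 1)
)

# Freeze the first linear layer: its parameters have requires_grad=False
# and are thus ignored both for the parameter count and the gradients.
for p in net[0].parameters():
    p.requires_grad = False

n_params = sum(p.numel() for p in net.parameters() if p.requires_grad)  # 33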

is_fitted abstractmethod property

is_fitted

Override this to expose the fitting status of the instance.

fit abstractmethod

fit(data: DataLoaderType) -> InfluenceFunctionModel

Override this method to fit the influence function model to training data, e.g. to pre-compute the Hessian matrix or matrix decompositions.

PARAMETER DESCRIPTION
data

The training data to fit the influence function model with.

TYPE: DataLoaderType

RETURNS DESCRIPTION
InfluenceFunctionModel

The fitted instance

Source code in src/pydvl/influence/base_influence_function_model.py
@abstractmethod
def fit(self, data: DataLoaderType) -> InfluenceFunctionModel:
    """
    Override this method to fit the influence function model to training data,
    e.g. pre-compute hessian matrix or matrix decompositions

    Args:
        data:

    Returns:
        The fitted instance
    """

influences

influences(
    x_test: Tensor,
    y_test: Tensor,
    x: Optional[Tensor] = None,
    y: Optional[Tensor] = None,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Tensor

Compute the approximation of

\[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})), \nabla_{\theta} \ell(y, f_{\theta}(x))\rangle \]

for the case of up-weighting influence, resp.

\[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})), \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the perturbation-type influence case. For all input tensors, the first dimension is assumed to be the batch dimension (to provide a single sample z without a batch dimension, call z.unsqueeze(0)).

PARAMETER DESCRIPTION
x_test

model input to use in the gradient computations of \(H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}}))\)

TYPE: Tensor

y_test

label tensor to compute gradients

TYPE: Tensor

x

optional model input to use in the gradient computations \(\nabla_{\theta}\ell(y, f_{\theta}(x))\), resp. \(\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))\); if None, \(x=x_{\text{test}}\) is used

TYPE: Optional[Tensor] DEFAULT: None

y

optional label tensor to compute gradients

TYPE: Optional[Tensor] DEFAULT: None

mode

enum value of InfluenceMode

TYPE: InfluenceMode DEFAULT: Up

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise scalar products for the provided batch

Source code in src/pydvl/influence/torch/influence_function_model.py
def influences(
    self,
    x_test: torch.Tensor,
    y_test: torch.Tensor,
    x: Optional[torch.Tensor] = None,
    y: Optional[torch.Tensor] = None,
    mode: InfluenceMode = InfluenceMode.Up,
) -> torch.Tensor:
    r"""
    Compute the approximation of

    \[
    \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
    f_{\theta}(x_{\text{test}})), \nabla_{\theta} \ell(y, f_{\theta}(x))\rangle
    \]

    for the case of up-weighting influence, resp.

    \[
    \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})),
        \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle
    \]

    for the perturbation type influence case. For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        x_test: model input to use in the gradient computations
            of $H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
                f_{\theta}(x_{\text{test}}))$
        y_test: label tensor to compute gradients
        x: optional model input to use in the gradient computations
            $\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            resp. $\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            if None, use $x=x_{\text{test}}$
        y: optional label tensor to compute gradients
        mode: enum value of [InfluenceMode]
            [pydvl.influence.base_influence_function_model.InfluenceMode]

    Returns:
        Tensor representing the element-wise scalar products for the provided batch

    """
    t: torch.Tensor = super().influences(x_test, y_test, x, y, mode=mode)
    return t
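
For illustration, a minimal end-to-end sketch with a toy regression problem. The import paths and the InfluenceMode export are assumed to match pyDVL's public API; all numbers are arbitrary:

import torch
from torch.utils.data import DataLoader, TensorDataset
from pydvl.influence import InfluenceMode
from pydvl.influence.torch import DirectInfluence

model = torch.nn.Linear(5, 1)
loss = torch.nn.functional.mse_loss

train = TensorDataset(torch.randn(100, 5), torch.randn(100, 1))
loader = DataLoader(train, batch_size=32)

# Small regularization to keep the Hessian well-conditioned.
if_model = DirectInfluence(model, loss, hessian_regularization=1e-3).fit(loader)

x_test, y_test = torch.randn(10, 5), torch.randn(10, 1)
x_train, y_train = train.tensors

# Up-weighting influences, shape (n_test, n_train) = (10, 100).
up = if_model.influences(x_test, y_test, x_train, y_train, mode=InfluenceMode.Up)

# Perturbation influences, one score per input feature,
# shape (n_test, n_train, *input_dims) = (10, 100, 5).
pert = if_model.influences(
    x_test, y_test, x_train, y_train, mode=InfluenceMode.Perturbation
)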

influence_factors

influence_factors(x: Tensor, y: Tensor) -> Tensor

Compute approximation of

\[ H^{-1}\nabla_{\theta} \ell(y, f_{\theta}(x)) \]

where the gradient is computed per sample of the batch \((x, y)\). For all input tensors, the first dimension is assumed to be the batch dimension (to provide a single sample z without a batch dimension, call z.unsqueeze(0)).

PARAMETER DESCRIPTION
x

model input to use in the gradient computations

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise inverse Hessian matrix vector products

Source code in src/pydvl/influence/torch/influence_function_model.py
def influence_factors(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    r"""
    Compute approximation of

    \[ H^{-1}\nabla_{\theta} \ell(y, f_{\theta}(x)) \]

    where the gradient is meant to be per sample of the batch $(x, y)$.
    For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        x: model input to use in the gradient computations
        y: label tensor to compute gradients

    Returns:
        Tensor representing the element-wise inverse Hessian matrix vector products

    """
    return super().influence_factors(x, y)

influences_from_factors

influences_from_factors(
    z_test_factors: Tensor,
    x: Tensor,
    y: Tensor,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Tensor

Computation of

\[ \langle z_{\text{test_factors}}, \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the case of up-weighting influence, resp.

\[ \langle z_{\text{test_factors}}, \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the perturbation-type influence case. The gradient is computed per sample of the batch \((x, y)\). For all input tensors, the first dimension is assumed to be the batch dimension (to provide a single sample z without a batch dimension, call z.unsqueeze(0)).

PARAMETER DESCRIPTION
z_test_factors

pre-computed tensor approximating \(H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}}))\)

TYPE: Tensor

x

model input to use in the gradient computations \(\nabla_{\theta}\ell(y, f_{\theta}(x))\), resp. \(\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))\)

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

mode

enum value of InfluenceMode

TYPE: InfluenceMode DEFAULT: Up

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise scalar products for the provided batch

Source code in src/pydvl/influence/torch/influence_function_model.py
def influences_from_factors(
    self,
    z_test_factors: torch.Tensor,
    x: torch.Tensor,
    y: torch.Tensor,
    mode: InfluenceMode = InfluenceMode.Up,
) -> torch.Tensor:
    r"""
    Computation of

    \[ \langle z_{\text{test_factors}},
        \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the case of up-weighting influence, resp.

    \[ \langle z_{\text{test_factors}},
        \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the perturbation type influence case. The gradient is meant to be per sample
    of the batch $(x, y)$. For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        z_test_factors: pre-computed tensor, approximating
            $H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
            f_{\theta}(x_{\text{test}}))$
        x: model input to use in the gradient computations
            $\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            resp. $\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))$
        y: label tensor to compute gradients
        mode: enum value of [InfluenceMode]
            [pydvl.influence.base_influence_function_model.InfluenceMode]

    Returns:
        Tensor representing the element-wise scalar products for the provided batch

    """
    if mode == InfluenceMode.Up:
        return (
            z_test_factors.to(self.model_device)
            @ self._loss_grad(x.to(self.model_device), y.to(self.model_device)).T
        )
    elif mode == InfluenceMode.Perturbation:
        return torch.einsum(
            "ia,j...a->ij...",
            z_test_factors.to(self.model_device),
            self._flat_loss_mixed_grad(
                x.to(self.model_device), y.to(self.model_device)
            ),
        )
    else:
        raise UnsupportedInfluenceModeException(mode)
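
The factor-based methods exist to split the expensive inverse-Hessian application from the cheap scalar products, so that test factors are computed once and reused across many training batches. A self-contained sketch, again assuming the pyDVL import path and using DirectInfluence as a concrete fitted subclass:

import torch
from torch.utils.data import DataLoader, TensorDataset
from pydvl.influence.torch import DirectInfluence

model = torch.nn.Linear(5, 1)
data = TensorDataset(torch.randn(64, 5), torch.randn(64, 1))
loader = DataLoader(data, batch_size=16)
if_model = DirectInfluence(model, torch.nn.functional.mse_loss).fit(loader)

x_test, y_test = torch.randn(4, 5), torch.randn(4, 1)

# Compute the test factors (the inverse-Hessian vector products) once ...
factors = if_model.influence_factors(x_test, y_test)

# ... then reuse them for any number of training batches.
for x_batch, y_batch in loader:
    scores = if_model.influences_from_factors(factors, x_batch, y_batch)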

DirectInfluence

DirectInfluence(
    model: Module,
    loss: Callable[[Tensor, Tensor], Tensor],
    hessian_regularization: float = 0.0,
)

Bases: TorchInfluenceFunctionModel

Given a model and training data, it finds \(x\) such that \(Hx = b\), with \(H\) being the model's Hessian.

PARAMETER DESCRIPTION
model

A PyTorch model. The Hessian will be calculated with respect to this model's parameters.

TYPE: Module

loss

A callable that takes the model's output and target as input and returns the scalar loss.

TYPE: Callable[[Tensor, Tensor], Tensor]

hessian_regularization

Regularization of the Hessian.

TYPE: float DEFAULT: 0.0

Source code in src/pydvl/influence/torch/influence_function_model.py
def __init__(
    self,
    model: nn.Module,
    loss: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
    hessian_regularization: float = 0.0,
):
    super().__init__(model, loss)
    self.hessian_regularization = hessian_regularization
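
Note that fitting materializes the full Hessian as a dense matrix of size n_parameters × n_parameters (see fit below), so this class is only practical for small models. A rough memory estimate as a sketch:

import torch

model = torch.nn.Linear(100, 10)  # 1010 trainable parameters
n = sum(p.numel() for p in model.parameters() if p.requires_grad)

# Dense Hessian storage in float32, in mebibytes: about 3.9 MiB here,
# but it grows quadratically with the number of parameters.
hessian_mib = n * n * 4 / 2**20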

influence_factors

influence_factors(x: Tensor, y: Tensor) -> Tensor

Compute approximation of

\[ H^{-1}\nabla_{\theta} \ell(y, f_{\theta}(x)) \]

where the gradient is computed per sample of the batch \((x, y)\). For all input tensors, the first dimension is assumed to be the batch dimension (to provide a single sample z without a batch dimension, call z.unsqueeze(0)).

PARAMETER DESCRIPTION
x

model input to use in the gradient computations

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise inverse Hessian matrix vector products

Source code in src/pydvl/influence/torch/influence_function_model.py
def influence_factors(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    r"""
    Compute approximation of

    \[ H^{-1}\nabla_{\theta} \ell(y, f_{\theta}(x)) \]

    where the gradient is meant to be per sample of the batch $(x, y)$.
    For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        x: model input to use in the gradient computations
        y: label tensor to compute gradients

    Returns:
        Tensor representing the element-wise inverse Hessian matrix vector products

    """
    return super().influence_factors(x, y)

influences_from_factors

influences_from_factors(
    z_test_factors: Tensor,
    x: Tensor,
    y: Tensor,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Tensor

Computation of

\[ \langle z_{\text{test_factors}}, \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the case of up-weighting influence, resp.

\[ \langle z_{\text{test_factors}}, \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the perturbation-type influence case. The gradient is computed per sample of the batch \((x, y)\). For all input tensors, the first dimension is assumed to be the batch dimension (to provide a single sample z without a batch dimension, call z.unsqueeze(0)).

PARAMETER DESCRIPTION
z_test_factors

pre-computed tensor approximating \(H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}}))\)

TYPE: Tensor

x

model input to use in the gradient computations \(\nabla_{\theta}\ell(y, f_{\theta}(x))\), resp. \(\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))\)

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

mode

enum value of InfluenceMode

TYPE: InfluenceMode DEFAULT: Up

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise scalar products for the provided batch

Source code in src/pydvl/influence/torch/influence_function_model.py
def influences_from_factors(
    self,
    z_test_factors: torch.Tensor,
    x: torch.Tensor,
    y: torch.Tensor,
    mode: InfluenceMode = InfluenceMode.Up,
) -> torch.Tensor:
    r"""
    Computation of

    \[ \langle z_{\text{test_factors}},
        \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the case of up-weighting influence, resp.

    \[ \langle z_{\text{test_factors}},
        \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the perturbation type influence case. The gradient is meant to be per sample
    of the batch $(x, y)$. For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        z_test_factors: pre-computed tensor, approximating
            $H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
            f_{\theta}(x_{\text{test}}))$
        x: model input to use in the gradient computations
            $\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            resp. $\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))$
        y: label tensor to compute gradients
        mode: enum value of [InfluenceMode]
            [pydvl.influence.base_influence_function_model.InfluenceMode]

    Returns:
        Tensor representing the element-wise scalar products for the provided batch

    """
    if mode == InfluenceMode.Up:
        return (
            z_test_factors.to(self.model_device)
            @ self._loss_grad(x.to(self.model_device), y.to(self.model_device)).T
        )
    elif mode == InfluenceMode.Perturbation:
        return torch.einsum(
            "ia,j...a->ij...",
            z_test_factors.to(self.model_device),
            self._flat_loss_mixed_grad(
                x.to(self.model_device), y.to(self.model_device)
            ),
        )
    else:
        raise UnsupportedInfluenceModeException(mode)

fit

Compute the Hessian matrix based on the provided dataloader.

PARAMETER DESCRIPTION
data

The data to compute the Hessian with.

TYPE: DataLoader

RETURNS DESCRIPTION
DirectInfluence

The fitted instance.

Source code in src/pydvl/influence/torch/influence_function_model.py
@log_duration(log_level=logging.INFO)
def fit(self, data: DataLoader) -> DirectInfluence:
    """
    Compute the hessian matrix based on a provided dataloader.

    Args:
        data: The data to compute the Hessian with.

    Returns:
        The fitted instance.
    """
    self.hessian = hessian(self.model, self.loss, data)
    return self

influences

influences(
    x_test: Tensor,
    y_test: Tensor,
    x: Optional[Tensor] = None,
    y: Optional[Tensor] = None,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Tensor

Compute approximation of

\[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})), \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle, \]

for the case of up-weighting influence, resp.

\[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})), \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the perturbation-type influence case. The action of \(H^{-1}\) is achieved via a direct solver using torch.linalg.solve.

PARAMETER DESCRIPTION
x_test

model input to use in the gradient computations of \(H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}}))\)

TYPE: Tensor

y_test

label tensor to compute gradients

TYPE: Tensor

x

optional model input to use in the gradient computations \(\nabla_{\theta}\ell(y, f_{\theta}(x))\), resp. \(\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))\); if None, \(x=x_{\text{test}}\) is used

TYPE: Optional[Tensor] DEFAULT: None

y

optional label tensor to compute gradients

TYPE: Optional[Tensor] DEFAULT: None

mode

enum value of InfluenceMode

TYPE: InfluenceMode DEFAULT: Up

RETURNS DESCRIPTION
Tensor

A tensor representing the element-wise scalar products for the provided batch.

Source code in src/pydvl/influence/torch/influence_function_model.py
@log_duration
def influences(
    self,
    x_test: torch.Tensor,
    y_test: torch.Tensor,
    x: Optional[torch.Tensor] = None,
    y: Optional[torch.Tensor] = None,
    mode: InfluenceMode = InfluenceMode.Up,
) -> torch.Tensor:
    r"""
    Compute approximation of

    \[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
        f_{\theta}(x_{\text{test}})),
        \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle, \]

    for the case of up-weighting influence, resp.

    \[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
        f_{\theta}(x_{\text{test}})),
        \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the perturbation type influence case. The action of $H^{-1}$ is achieved
    via a direct solver using [torch.linalg.solve][torch.linalg.solve].

    Args:
        x_test: model input to use in the gradient computations of
            $H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
                f_{\theta}(x_{\text{test}}))$
        y_test: label tensor to compute gradients
        x: optional model input to use in the gradient computations
            $\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            resp. $\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            if None, use $x=x_{\text{test}}$
        y: optional label tensor to compute gradients
        mode: enum value of [InfluenceMode]
            [pydvl.influence.base_influence_function_model.InfluenceMode]

    Returns:
        A tensor representing the element-wise scalar products for the
            provided batch.

    """
    return super().influences(x_test, y_test, x, y, mode=mode)

CgInfluence

CgInfluence(
    model: Module,
    loss: Callable[[Tensor, Tensor], Tensor],
    hessian_regularization: float = 0.0,
    x0: Optional[Tensor] = None,
    rtol: float = 1e-07,
    atol: float = 1e-07,
    maxiter: Optional[int] = None,
    progress: bool = False,
    precompute_grad: bool = False,
    pre_conditioner: Optional[PreConditioner] = None,
    use_block_cg: bool = False,
    warn_on_max_iteration: bool = True,
)

Bases: TorchInfluenceFunctionModel

Given a model and training data, it uses the conjugate gradient method to approximate inverse-Hessian vector products. More precisely, it finds \(x\) such that \(Hx = b\), with \(H\) being the model's Hessian. For more info, see Conjugate Gradient.

PARAMETER DESCRIPTION
model

A PyTorch model. The Hessian will be calculated with respect to this model's parameters.

TYPE: Module

loss

A callable that takes the model's output and target as input and returns the scalar loss.

TYPE: Callable[[Tensor, Tensor], Tensor]

hessian_regularization

Optional regularization parameter added to the Hessian-vector product for numerical stability.

TYPE: float DEFAULT: 0.0

x0

Initial guess for the solution of \(Hx = b\). If None, defaults to b.

TYPE: Optional[Tensor] DEFAULT: None

rtol

Maximum relative tolerance of result.

TYPE: float DEFAULT: 1e-07

atol

Absolute tolerance of result.

TYPE: float DEFAULT: 1e-07

maxiter

Maximum number of iterations. If None, defaults to 10*len(b).

TYPE: Optional[int] DEFAULT: None

progress

If True, display progress bars when computing in non-block mode (use_block_cg=False).

TYPE: bool DEFAULT: False

precompute_grad

If True, the full data gradient is precomputed and kept in memory, which can speed up the Hessian-vector product computation. Set this to False if you cannot afford to keep the full computation graph in memory.

TYPE: bool DEFAULT: False

pre_conditioner

Optional pre-conditioner to improve convergence of the conjugate gradient method.

TYPE: Optional[PreConditioner] DEFAULT: None

use_block_cg

If True, use the block variant of the conjugate gradient method, which solves several right-hand sides simultaneously.

TYPE: bool DEFAULT: False

warn_on_max_iteration

If True, logs a warning if the desired tolerance is not achieved within maxiter iterations. If False, this information is logged at level logging.DEBUG.

TYPE: bool DEFAULT: True

Source code in src/pydvl/influence/torch/influence_function_model.py
def __init__(
    self,
    model: nn.Module,
    loss: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
    hessian_regularization: float = 0.0,
    x0: Optional[torch.Tensor] = None,
    rtol: float = 1e-7,
    atol: float = 1e-7,
    maxiter: Optional[int] = None,
    progress: bool = False,
    precompute_grad: bool = False,
    pre_conditioner: Optional[PreConditioner] = None,
    use_block_cg: bool = False,
    warn_on_max_iteration: bool = True,
):
    super().__init__(model, loss)
    self.warn_on_max_iteration = warn_on_max_iteration
    self.use_block_cg = use_block_cg
    self.pre_conditioner = pre_conditioner
    self.precompute_grad = precompute_grad
    self.progress = progress
    self.maxiter = maxiter
    self.atol = atol
    self.rtol = rtol
    self.x0 = x0
    self.hessian_regularization = hessian_regularization
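
A configuration sketch with illustrative values; the keyword arguments mirror the signature above, and the import path is assumed to match pyDVL's public API:

import torch
from pydvl.influence.torch import CgInfluence

model = torch.nn.Linear(5, 1)
loss = torch.nn.functional.mse_loss

cg_if = CgInfluence(
    model,
    loss,
    hessian_regularization=1e-3,  # stabilizes an ill-conditioned Hessian
    rtol=1e-5,
    maxiter=200,
    use_block_cg=True,  # solve all right-hand sides simultaneously
)
# After cg_if.fit(<training DataLoader>), the same influences /
# influence_factors API as above is available.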

influence_factors

influence_factors(x: Tensor, y: Tensor) -> Tensor

Compute approximation of

\[ H^{-1}\nabla_{\theta} \ell(y, f_{\theta}(x)) \]

where the gradient is computed per sample of the batch \((x, y)\). For all input tensors, the first dimension is assumed to be the batch dimension (to provide a single sample z without a batch dimension, call z.unsqueeze(0)).

PARAMETER DESCRIPTION
x

model input to use in the gradient computations

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise inverse Hessian matrix vector products

Source code in src/pydvl/influence/torch/influence_function_model.py
def influence_factors(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    r"""
    Compute approximation of

    \[ H^{-1}\nabla_{\theta} \ell(y, f_{\theta}(x)) \]

    where the gradient is meant to be per sample of the batch $(x, y)$.
    For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        x: model input to use in the gradient computations
        y: label tensor to compute gradients

    Returns:
        Tensor representing the element-wise inverse Hessian matrix vector products

    """
    return super().influence_factors(x, y)

influences_from_factors

influences_from_factors(
    z_test_factors: Tensor,
    x: Tensor,
    y: Tensor,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Tensor

Computation of

\[ \langle z_{\text{test_factors}}, \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the case of up-weighting influence, resp.

\[ \langle z_{\text{test_factors}}, \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the perturbation-type influence case. The gradient is computed per sample of the batch \((x, y)\). For all input tensors, the first dimension is assumed to be the batch dimension (to provide a single sample z without a batch dimension, call z.unsqueeze(0)).

PARAMETER DESCRIPTION
z_test_factors

pre-computed tensor approximating \(H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}}))\)

TYPE: Tensor

x

model input to use in the gradient computations \(\nabla_{\theta}\ell(y, f_{\theta}(x))\), resp. \(\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))\)

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

mode

enum value of InfluenceMode

TYPE: InfluenceMode DEFAULT: Up

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise scalar products for the provided batch

Source code in src/pydvl/influence/torch/influence_function_model.py
def influences_from_factors(
    self,
    z_test_factors: torch.Tensor,
    x: torch.Tensor,
    y: torch.Tensor,
    mode: InfluenceMode = InfluenceMode.Up,
) -> torch.Tensor:
    r"""
    Computation of

    \[ \langle z_{\text{test_factors}},
        \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the case of up-weighting influence, resp.

    \[ \langle z_{\text{test_factors}},
        \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the perturbation type influence case. The gradient is meant to be per sample
    of the batch $(x, y)$. For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        z_test_factors: pre-computed tensor, approximating
            $H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
            f_{\theta}(x_{\text{test}}))$
        x: model input to use in the gradient computations
            $\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            resp. $\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))$
        y: label tensor to compute gradients
        mode: enum value of [InfluenceMode]
            [pydvl.influence.base_influence_function_model.InfluenceMode]

    Returns:
        Tensor representing the element-wise scalar products for the provided batch

    """
    if mode == InfluenceMode.Up:
        return (
            z_test_factors.to(self.model_device)
            @ self._loss_grad(x.to(self.model_device), y.to(self.model_device)).T
        )
    elif mode == InfluenceMode.Perturbation:
        return torch.einsum(
            "ia,j...a->ij...",
            z_test_factors.to(self.model_device),
            self._flat_loss_mixed_grad(
                x.to(self.model_device), y.to(self.model_device)
            ),
        )
    else:
        raise UnsupportedInfluenceModeException(mode)

influences

influences(
    x_test: Tensor,
    y_test: Tensor,
    x: Optional[Tensor] = None,
    y: Optional[Tensor] = None,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Tensor

Compute an approximation of

\[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})), \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle, \]

for the case of up-weighting influence, resp.

\[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})), \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the case of perturbation-type influence. The approximate action of \(H^{-1}\) is achieved via the conjugate gradient method.

PARAMETER DESCRIPTION
x_test

model input to use in the gradient computations of \(H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}}))\)

TYPE: Tensor

y_test

label tensor to compute gradients

TYPE: Tensor

x

optional model input to use in the gradient computations \(\nabla_{\theta}\ell(y, f_{\theta}(x))\), resp. \(\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))\); if None, \(x=x_{\text{test}}\) is used

TYPE: Optional[Tensor] DEFAULT: None

y

optional label tensor to compute gradients

TYPE: Optional[Tensor] DEFAULT: None

mode

enum value of InfluenceMode

TYPE: InfluenceMode DEFAULT: Up

RETURNS DESCRIPTION
Tensor

A tensor representing the element-wise scalar products for the provided batch.

Source code in src/pydvl/influence/torch/influence_function_model.py
@log_duration
def influences(
    self,
    x_test: torch.Tensor,
    y_test: torch.Tensor,
    x: Optional[torch.Tensor] = None,
    y: Optional[torch.Tensor] = None,
    mode: InfluenceMode = InfluenceMode.Up,
) -> torch.Tensor:
    r"""
    Compute an approximation of

    \[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
        f_{\theta}(x_{\text{test}})),
        \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle, \]

    for the case of up-weighting influence, resp.

    \[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
        f_{\theta}(x_{\text{test}})),
        \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the case of perturbation-type influence. The approximate action of
    $H^{-1}$ is achieved via the [conjugate gradient
    method](https://en.wikipedia.org/wiki/Conjugate_gradient_method).

    Args:
        x_test: model input to use in the gradient computations of
            $H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
                f_{\theta}(x_{\text{test}}))$
        y_test: label tensor to compute gradients
        x: optional model input to use in the gradient computations
            $\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            resp. $\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            if None, use $x=x_{\text{test}}$
        y: optional label tensor to compute gradients
        mode: enum value of [InfluenceMode]
            [pydvl.influence.base_influence_function_model.InfluenceMode]

    Returns:
        A tensor representing the element-wise scalar products for the
            provided batch.

    """
    return super().influences(x_test, y_test, x, y, mode=mode)

LissaInfluence

LissaInfluence(
    model: Module,
    loss: Callable[[Tensor, Tensor], Tensor],
    hessian_regularization: float = 0.0,
    maxiter: int = 1000,
    dampen: float = 0.0,
    scale: float = 10.0,
    h0: Optional[Tensor] = None,
    rtol: float = 0.0001,
    progress: bool = False,
    warn_on_max_iteration: bool = True,
)

Bases: TorchInfluenceFunctionModel

Uses LiSSA, the Linear-time Stochastic Second-order Algorithm, to iteratively approximate the inverse Hessian. More precisely, it finds \(x\) such that \(Hx = b\), with \(H\) being the Hessian of the loss with respect to the model's parameters. This is done with the update

\[ H^{-1}_{j+1} b = b + \left((1 - d)\, I - \frac{H}{s}\right) H^{-1}_j b, \]

where \(I\) is the identity matrix, \(d\) is a dampening term and \(s\) a scaling factor, both applied to aid convergence. For details, see Linear time Stochastic Second-Order Approximation (LiSSA).

PARAMETER DESCRIPTION
model

A PyTorch model. The Hessian will be calculated with respect to this model's parameters.

TYPE: Module

loss

A callable that takes the model's output and target as input and returns the scalar loss.

TYPE: Callable[[Tensor, Tensor], Tensor]

hessian_regularization

Optional regularization parameter added to the Hessian-vector product for numerical stability.

TYPE: float DEFAULT: 0.0

maxiter

Maximum number of iterations.

TYPE: int DEFAULT: 1000

dampen

Dampening factor, defaults to 0 for no dampening.

TYPE: float DEFAULT: 0.0

scale

Scaling factor, defaults to 10.

TYPE: float DEFAULT: 10.0

h0

Initial guess for the solution of \(Hx = b\).

TYPE: Optional[Tensor] DEFAULT: None

rtol

Tolerance to use for early stopping.

TYPE: float DEFAULT: 0.0001

progress

If True, display progress bars.

TYPE: bool DEFAULT: False

warn_on_max_iteration

If True, logs a warning, if the desired tolerance is not achieved within maxiter iterations. If False, the log level for this information is logging.DEBUG

TYPE: bool DEFAULT: True

Source code in src/pydvl/influence/torch/influence_function_model.py
def __init__(
    self,
    model: nn.Module,
    loss: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
    hessian_regularization: float = 0.0,
    maxiter: int = 1000,
    dampen: float = 0.0,
    scale: float = 10.0,
    h0: Optional[torch.Tensor] = None,
    rtol: float = 1e-4,
    progress: bool = False,
    warn_on_max_iteration: bool = True,
):
    super().__init__(model, loss)
    self.warn_on_max_iteration = warn_on_max_iteration
    self.maxiter = maxiter
    self.hessian_regularization = hessian_regularization
    self.progress = progress
    self.rtol = rtol
    self.h0 = h0
    self.scale = scale
    self.dampen = dampen
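
To make the update rule concrete, here is a schematic version of the iteration in plain PyTorch. This is an illustration of the recursion, not pyDVL's implementation; hvp stands for any Hessian-vector product callable:

import torch

def lissa_solve(hvp, b, maxiter=1000, dampen=0.0, scale=10.0, rtol=1e-4):
    # Iteratively approximate x = H^{-1} b via
    # x_{j+1} = b + ((1 - dampen) I - H / scale) x_j
    x = b.clone()
    for _ in range(maxiter):
        x_new = b + (1 - dampen) * x - hvp(x) / scale
        if torch.norm(x_new - x) <= rtol * torch.norm(x):
            return x_new
        x = x_new
    return x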

influence_factors

influence_factors(x: Tensor, y: Tensor) -> Tensor

Compute approximation of

\[ H^{-1}\nabla_{\theta} \ell(y, f_{\theta}(x)) \]

where the gradient is computed per sample of the batch \((x, y)\). For all input tensors, the first dimension is assumed to be the batch dimension (to provide a single sample z without a batch dimension, call z.unsqueeze(0)).

PARAMETER DESCRIPTION
x

model input to use in the gradient computations

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise inverse Hessian matrix vector products

Source code in src/pydvl/influence/torch/influence_function_model.py
def influence_factors(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    r"""
    Compute approximation of

    \[ H^{-1}\nabla_{\theta} \ell(y, f_{\theta}(x)) \]

    where the gradient is meant to be per sample of the batch $(x, y)$.
    For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        x: model input to use in the gradient computations
        y: label tensor to compute gradients

    Returns:
        Tensor representing the element-wise inverse Hessian matrix vector products

    """
    return super().influence_factors(x, y)

influences

influences(
    x_test: Tensor,
    y_test: Tensor,
    x: Optional[Tensor] = None,
    y: Optional[Tensor] = None,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Tensor

Compute the approximation of

\[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})), \nabla_{\theta} \ell(y, f_{\theta}(x))\rangle \]

for the case of up-weighting influence, resp.

\[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})), \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the perturbation-type influence case. For all input tensors, the first dimension is assumed to be the batch dimension (to provide a single sample z without a batch dimension, call z.unsqueeze(0)).

PARAMETER DESCRIPTION
x_test

model input to use in the gradient computations of \(H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}}))\)

TYPE: Tensor

y_test

label tensor to compute gradients

TYPE: Tensor

x

optional model input to use in the gradient computations \(\nabla_{\theta}\ell(y, f_{\theta}(x))\), resp. \(\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))\); if None, \(x=x_{\text{test}}\) is used

TYPE: Optional[Tensor] DEFAULT: None

y

optional label tensor to compute gradients

TYPE: Optional[Tensor] DEFAULT: None

mode

enum value of InfluenceMode

TYPE: InfluenceMode DEFAULT: Up

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise scalar products for the provided batch

Source code in src/pydvl/influence/torch/influence_function_model.py
def influences(
    self,
    x_test: torch.Tensor,
    y_test: torch.Tensor,
    x: Optional[torch.Tensor] = None,
    y: Optional[torch.Tensor] = None,
    mode: InfluenceMode = InfluenceMode.Up,
) -> torch.Tensor:
    r"""
    Compute the approximation of

    \[
    \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
    f_{\theta}(x_{\text{test}})), \nabla_{\theta} \ell(y, f_{\theta}(x))\rangle
    \]

    for the case of up-weighting influence, resp.

    \[
    \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})),
        \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle
    \]

    for the perturbation type influence case. For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        x_test: model input to use in the gradient computations
            of $H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
                f_{\theta}(x_{\text{test}}))$
        y_test: label tensor to compute gradients
        x: optional model input to use in the gradient computations
            $\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            resp. $\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            if None, use $x=x_{\text{test}}$
        y: optional label tensor to compute gradients
        mode: enum value of [InfluenceMode]
            [pydvl.influence.base_influence_function_model.InfluenceMode]

    Returns:
        Tensor representing the element-wise scalar products for the provided batch

    """
    t: torch.Tensor = super().influences(x_test, y_test, x, y, mode=mode)
    return t

influences_from_factors

influences_from_factors(
    z_test_factors: Tensor,
    x: Tensor,
    y: Tensor,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Tensor

Computation of

\[ \langle z_{\text{test_factors}}, \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the case of up-weighting influence, resp.

\[ \langle z_{\text{test_factors}}, \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the perturbation-type influence case. The gradient is computed per sample of the batch \((x, y)\). For all input tensors, the first dimension is assumed to be the batch dimension (to provide a single sample z without a batch dimension, call z.unsqueeze(0)).

PARAMETER DESCRIPTION
z_test_factors

pre-computed tensor approximating \(H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}}))\)

TYPE: Tensor

x

model input to use in the gradient computations \(\nabla_{\theta}\ell(y, f_{\theta}(x))\), resp. \(\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))\)

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

mode

enum value of InfluenceMode

TYPE: InfluenceMode DEFAULT: Up

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise scalar products for the provided batch

Source code in src/pydvl/influence/torch/influence_function_model.py
def influences_from_factors(
    self,
    z_test_factors: torch.Tensor,
    x: torch.Tensor,
    y: torch.Tensor,
    mode: InfluenceMode = InfluenceMode.Up,
) -> torch.Tensor:
    r"""
    Computation of

    \[ \langle z_{\text{test_factors}},
        \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the case of up-weighting influence, resp.

    \[ \langle z_{\text{test_factors}},
        \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the perturbation type influence case. The gradient is meant to be per sample
    of the batch $(x, y)$. For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        z_test_factors: pre-computed tensor, approximating
            $H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
            f_{\theta}(x_{\text{test}}))$
        x: model input to use in the gradient computations
            $\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            resp. $\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))$
        y: label tensor to compute gradients
        mode: enum value of [InfluenceMode]
            [pydvl.influence.base_influence_function_model.InfluenceMode]

    Returns:
        Tensor representing the element-wise scalar products for the provided batch

    """
    if mode == InfluenceMode.Up:
        return (
            z_test_factors.to(self.model_device)
            @ self._loss_grad(x.to(self.model_device), y.to(self.model_device)).T
        )
    elif mode == InfluenceMode.Perturbation:
        return torch.einsum(
            "ia,j...a->ij...",
            z_test_factors.to(self.model_device),
            self._flat_loss_mixed_grad(
                x.to(self.model_device), y.to(self.model_device)
            ),
        )
    else:
        raise UnsupportedInfluenceModeException(mode)

ArnoldiInfluence

ArnoldiInfluence(
    model: Module,
    loss: Callable[[Tensor, Tensor], Tensor],
    hessian_regularization: float = 0.0,
    rank_estimate: int = 10,
    krylov_dimension: Optional[int] = None,
    tol: float = 1e-06,
    max_iter: Optional[int] = None,
    eigen_computation_on_gpu: bool = False,
    precompute_grad: bool = False,
)

Bases: TorchInfluenceFunctionModel

Solves the linear system Hx = b, where H is the Hessian of the model's loss function and b is the given right-hand side vector. It employs the [implicitly restarted Arnoldi method](https://en.wikipedia.org/wiki/Arnoldi_iteration) for computing a partial eigendecomposition, which is used for the inversion, i.e.

\[x = V D^{-1} V^T b\]

where \(D\) is a diagonal matrix with the top (in absolute value) rank_estimate eigenvalues of the Hessian and \(V\) contains the corresponding eigenvectors. For more information, see Arnoldi.

PARAMETER DESCRIPTION
model

A PyTorch model. The Hessian will be calculated with respect to this model's parameters.

TYPE: Module

loss

A callable that takes the model's output and target as input and returns the scalar loss.

TYPE: Callable[[Tensor, Tensor], Tensor]

hessian_regularization

Optional regularization parameter added to the Hessian-vector product for numerical stability.

TYPE: float DEFAULT: 0.0

rank_estimate

The number of eigenvalues and corresponding eigenvectors to compute. Represents the desired rank of the Hessian approximation.

TYPE: int DEFAULT: 10

krylov_dimension

The number of Krylov vectors to use for the Lanczos method. Defaults to min(model's number of parameters, max(2 times rank_estimate + 1, 20)).

TYPE: Optional[int] DEFAULT: None

tol

The stopping criterion for the Lanczos algorithm. Ignored if low_rank_representation is provided.

TYPE: float DEFAULT: 1e-06

max_iter

The maximum number of iterations for the Lanczos method. Ignored if low_rank_representation is provided.

TYPE: Optional[int] DEFAULT: None

eigen_computation_on_gpu

If True, tries to execute the eigenpair approximation on the model's device via a cupy implementation. Ensure the model size or rank_estimate is appropriate for device memory. If False, the eigenpair approximation is executed on the CPU via the scipy wrapper to ARPACK.

TYPE: bool DEFAULT: False

precompute_grad

If True, the full data gradient is precomputed and kept in memory, which can speed up the hessian vector product computation. Set this to False, if you can't afford to keep the full computation graph in memory.

TYPE: bool DEFAULT: False

Source code in src/pydvl/influence/torch/influence_function_model.py
def __init__(
    self,
    model: nn.Module,
    loss: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
    hessian_regularization: float = 0.0,
    rank_estimate: int = 10,
    krylov_dimension: Optional[int] = None,
    tol: float = 1e-6,
    max_iter: Optional[int] = None,
    eigen_computation_on_gpu: bool = False,
    precompute_grad: bool = False,
):
    super().__init__(model, loss)
    self.hessian_regularization = hessian_regularization
    self.rank_estimate = rank_estimate
    self.tol = tol
    self.max_iter = max_iter
    self.krylov_dimension = krylov_dimension
    self.eigen_computation_on_gpu = eigen_computation_on_gpu
    self.precompute_grad = precompute_grad

influence_factors

influence_factors(x: Tensor, y: Tensor) -> Tensor

Compute approximation of

\[ H^{-1}\nabla_{\theta} \ell(y, f_{\theta}(x)) \]

where the gradient is computed per sample of the batch \((x, y)\). For all input tensors, the first dimension is assumed to be the batch dimension (to provide a single sample z without a batch dimension, call z.unsqueeze(0)).

PARAMETER DESCRIPTION
x

model input to use in the gradient computations

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise inverse Hessian matrix vector products

Source code in src/pydvl/influence/torch/influence_function_model.py
def influence_factors(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    r"""
    Compute approximation of

    \[ H^{-1}\nabla_{\theta} \ell(y, f_{\theta}(x)) \]

    where the gradient is meant to be per sample of the batch $(x, y)$.
    For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        x: model input to use in the gradient computations
        y: label tensor to compute gradients

    Returns:
        Tensor representing the element-wise inverse Hessian matrix vector products

    """
    return super().influence_factors(x, y)

influences

influences(
    x_test: Tensor,
    y_test: Tensor,
    x: Optional[Tensor] = None,
    y: Optional[Tensor] = None,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Tensor

Compute the approximation of

\[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})), \nabla_{\theta} \ell(y, f_{\theta}(x))\rangle \]

for the case of up-weighting influence, resp.

\[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})), \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the perturbation-type influence case. For all input tensors, the first dimension is assumed to be the batch dimension (to provide a single sample z without a batch dimension, call z.unsqueeze(0)).

PARAMETER DESCRIPTION
x_test

model input to use in the gradient computations of \(H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}}))\)

TYPE: Tensor

y_test

label tensor to compute gradients

TYPE: Tensor

x

optional model input to use in the gradient computations \(\nabla_{\theta}\ell(y, f_{\theta}(x))\), resp. \(\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))\); if None, \(x=x_{\text{test}}\) is used

TYPE: Optional[Tensor] DEFAULT: None

y

optional label tensor to compute gradients

TYPE: Optional[Tensor] DEFAULT: None

mode

enum value of InfluenceMode

TYPE: InfluenceMode DEFAULT: Up

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise scalar products for the provided batch

Source code in src/pydvl/influence/torch/influence_function_model.py
def influences(
    self,
    x_test: torch.Tensor,
    y_test: torch.Tensor,
    x: Optional[torch.Tensor] = None,
    y: Optional[torch.Tensor] = None,
    mode: InfluenceMode = InfluenceMode.Up,
) -> torch.Tensor:
    r"""
    Compute the approximation of

    \[
    \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
    f_{\theta}(x_{\text{test}})), \nabla_{\theta} \ell(y, f_{\theta}(x))\rangle
    \]

    for the case of up-weighting influence, resp.

    \[
    \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})),
        \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle
    \]

    for the perturbation type influence case. For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        x_test: model input to use in the gradient computations
            of $H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
                f_{\theta}(x_{\text{test}}))$
        y_test: label tensor to compute gradients
        x: optional model input to use in the gradient computations
            $\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            resp. $\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            if None, use $x=x_{\text{test}}$
        y: optional label tensor to compute gradients
        mode: enum value of [InfluenceMode]
            [pydvl.influence.base_influence_function_model.InfluenceMode]

    Returns:
        Tensor representing the element-wise scalar products for the provided batch

    """
    t: torch.Tensor = super().influences(x_test, y_test, x, y, mode=mode)
    return t

influences_from_factors

influences_from_factors(
    z_test_factors: Tensor,
    x: Tensor,
    y: Tensor,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Tensor

Computation of

\[ \langle z_{\text{test_factors}}, \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the case of up-weighting influence, resp.

\[ \langle z_{\text{test_factors}}, \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the perturbation-type influence case. The gradient is computed per sample of the batch \((x, y)\). For all input tensors, the first dimension is assumed to be the batch dimension (to provide a single sample z without a batch dimension, call z.unsqueeze(0)).

PARAMETER DESCRIPTION
z_test_factors

pre-computed tensor approximating \(H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}}))\)

TYPE: Tensor

x

model input to use in the gradient computations \(\nabla_{\theta}\ell(y, f_{\theta}(x))\), resp. \(\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))\)

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

mode

enum value of InfluenceMode

TYPE: InfluenceMode DEFAULT: Up

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise scalar products for the provided batch

Source code in src/pydvl/influence/torch/influence_function_model.py
def influences_from_factors(
    self,
    z_test_factors: torch.Tensor,
    x: torch.Tensor,
    y: torch.Tensor,
    mode: InfluenceMode = InfluenceMode.Up,
) -> torch.Tensor:
    r"""
    Computation of

    \[ \langle z_{\text{test_factors}},
        \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the case of up-weighting influence, resp.

    \[ \langle z_{\text{test_factors}},
        \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the perturbation type influence case. The gradient is meant to be per sample
    of the batch $(x, y)$. For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        z_test_factors: pre-computed tensor, approximating
            $H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
            f_{\theta}(x_{\text{test}}))$
        x: model input to use in the gradient computations
            $\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            resp. $\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))$
        y: label tensor to compute gradients
        mode: enum value of [InfluenceMode]
            [pydvl.influence.base_influence_function_model.InfluenceMode]

    Returns:
        Tensor representing the element-wise scalar products for the provided batch

    """
    if mode == InfluenceMode.Up:
        return (
            z_test_factors.to(self.model_device)
            @ self._loss_grad(x.to(self.model_device), y.to(self.model_device)).T
        )
    elif mode == InfluenceMode.Perturbation:
        return torch.einsum(
            "ia,j...a->ij...",
            z_test_factors.to(self.model_device),
            self._flat_loss_mixed_grad(
                x.to(self.model_device), y.to(self.model_device)
            ),
        )
    else:
        raise UnsupportedInfluenceModeException(mode)

fit

Fitting corresponds to the computation of the low-rank decomposition

\[ V D^{-1} V^T \]

of the Hessian defined by the provided data loader.

PARAMETER DESCRIPTION
data

The data to compute the Hessian with.

TYPE: DataLoader

RETURNS DESCRIPTION
ArnoldiInfluence

The fitted instance.

Source code in src/pydvl/influence/torch/influence_function_model.py
@log_duration(log_level=logging.INFO)
def fit(self, data: DataLoader) -> ArnoldiInfluence:
    r"""
    Fitting corresponds to the computation of the low rank decomposition

    \[ V D^{-1} V^T \]

    of the Hessian defined by the provided data loader.

    Args:
        data: The data to compute the Hessian with.

    Returns:
        The fitted instance.

    """
    low_rank_representation = model_hessian_low_rank(
        self.model,
        self.loss,
        data,
        hessian_perturbation=0.0,  # regularization is applied, when computing values
        rank_estimate=self.rank_estimate,
        krylov_dimension=self.krylov_dimension,
        tol=self.tol,
        max_iter=self.max_iter,
        eigen_computation_on_gpu=self.eigen_computation_on_gpu,
        precompute_grad=self.precompute_grad,
    )
    self.low_rank_representation = low_rank_representation.to(self.model_device)
    return self
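
Once fitted, applying the approximate inverse reduces to two thin matrix products. A schematic sketch (names are illustrative; V of shape (n, r) holds the eigenvectors, d of shape (r,) the eigenvalues, and b is a single vector):

import torch

def low_rank_inverse_apply(V, d, b, regularization=0.0):
    # x = V (D + lambda I)^{-1} V^T b, the regularized form of x = V D^{-1} V^T b
    projected = V.T @ b  # coordinates of b in the eigenbasis
    scaled = projected / (d + regularization)
    return V @ scaled

This also shows why the regularization can be applied at evaluation time instead of during fit (note the hessian_perturbation=0.0 comment above): it merely shifts the eigenvalues in the denominator.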

EkfacInfluence

EkfacInfluence(
    model: Module,
    update_diagonal: bool = False,
    hessian_regularization: float = 0.0,
    progress: bool = False,
)

Bases: TorchInfluenceFunctionModel

Approximately solves the linear system Hx = b, where H is the Hessian of a model with the empirical categorical cross-entropy as loss function and b is the given right-hand side vector. It employs the EK-FAC method, which is based on the Kronecker factorization of the Hessian.

Unlike the other influence function methods, this implementation can only be used for classification tasks with a cross-entropy loss function. However, it is much faster than the other methods and can be used efficiently for very large datasets and models. For more information, see Eigenvalue Corrected K-FAC.

PARAMETER DESCRIPTION
model

A PyTorch model. The Hessian will be calculated with respect to this model's parameters.

TYPE: Module

update_diagonal

If True, the diagonal values in the EK-FAC representation are refitted from the training data after calculating the K-FAC blocks. This provides a more accurate approximation of the Hessian, but it is computationally more expensive.

TYPE: bool DEFAULT: False

hessian_regularization

Regularization of the Hessian.

TYPE: float DEFAULT: 0.0

progress

If True, display progress bars.

TYPE: bool DEFAULT: False

Source code in src/pydvl/influence/torch/influence_function_model.py
def __init__(
    self,
    model: nn.Module,
    update_diagonal: bool = False,
    hessian_regularization: float = 0.0,
    progress: bool = False,
):
    super().__init__(model, torch.nn.functional.cross_entropy)
    self.hessian_regularization = hessian_regularization
    self.update_diagonal = update_diagonal
    self.active_layers = self._parse_active_layers()
    self.progress = progress
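
Since the loss is fixed internally to cross-entropy (note the call to super().__init__ above), only the model is passed. A usage sketch for a small classifier, with illustrative values and the import path assumed from pyDVL's public API:

import torch
from pydvl.influence.torch import EkfacInfluence

classifier = torch.nn.Sequential(
    torch.nn.Linear(20, 64),
    torch.nn.ReLU(),
    torch.nn.Linear(64, 3),
)

ekfac_if = EkfacInfluence(
    classifier,
    update_diagonal=True,  # refit the diagonal for a better approximation
    hessian_regularization=1e-3,
)
# After ekfac_if.fit(<classification DataLoader with integer labels>),
# the usual influences API is available.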

influence_factors

influence_factors(x: Tensor, y: Tensor) -> Tensor

Compute approximation of

\[ H^{-1}\nabla_{\theta} \ell(y, f_{\theta}(x)) \]

where the gradient is computed per sample of the batch \((x, y)\). For all input tensors, the first dimension is assumed to be the batch dimension (to provide a single sample z without a batch dimension, call z.unsqueeze(0)).

PARAMETER DESCRIPTION
x

model input to use in the gradient computations

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise inverse Hessian matrix vector products

Source code in src/pydvl/influence/torch/influence_function_model.py
def influence_factors(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    r"""
    Compute approximation of

    \[ H^{-1}\nabla_{\theta} \ell(y, f_{\theta}(x)) \]

    where the gradient is meant to be per sample of the batch $(x, y)$.
    For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        x: model input to use in the gradient computations
        y: label tensor to compute gradients

    Returns:
        Tensor representing the element-wise inverse Hessian matrix vector products

    """
    return super().influence_factors(x, y)

influences

influences(
    x_test: Tensor,
    y_test: Tensor,
    x: Optional[Tensor] = None,
    y: Optional[Tensor] = None,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Tensor

Compute the approximation of

\[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})), \nabla_{\theta} \ell(y, f_{\theta}(x))\rangle \]

for the case of up-weighting influence, resp.

\[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})), \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the perturbation type influence case. For all input tensors it is assumed that the first dimension is the batch dimension (if you want to provide a single sample z, call z.unsqueeze(0) to add the missing batch dimension).
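
As a sketch of the two modes, continuing the fitted if_model and the tensors from the usage example above (InfluenceMode is imported from the module path documented below):

from pydvl.influence.base_influence_function_model import InfluenceMode

# Up-weighting influence: one scalar per (test sample, train sample) pair.
up = if_model.influences(x_test, y_test, x_train, y_train, mode=InfluenceMode.Up)
# Perturbation influence: additionally carries the input dimensions of x.
pert = if_model.influences(
    x_test, y_test, x_train, y_train, mode=InfluenceMode.Perturbation
)
# up.shape == (5, 100); pert.shape == (5, 100, 10)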

PARAMETER DESCRIPTION
x_test

model input to use in the gradient computations of \(H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}}))\)

TYPE: Tensor

y_test

label tensor to compute gradients

TYPE: Tensor

x

optional model input to use in the gradient computations \(\nabla_{\theta}\ell(y, f_{\theta}(x))\), resp. \(\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))\), if None, use \(x=x_{\text{test}}\)

TYPE: Optional[Tensor] DEFAULT: None

y

optional label tensor to compute gradients

TYPE: Optional[Tensor] DEFAULT: None

mode

enum value of InfluenceMode

TYPE: InfluenceMode DEFAULT: Up

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise scalar products for the provided batch

Source code in src/pydvl/influence/torch/influence_function_model.py
def influences(
    self,
    x_test: torch.Tensor,
    y_test: torch.Tensor,
    x: Optional[torch.Tensor] = None,
    y: Optional[torch.Tensor] = None,
    mode: InfluenceMode = InfluenceMode.Up,
) -> torch.Tensor:
    r"""
    Compute the approximation of

    \[
    \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
    f_{\theta}(x_{\text{test}})), \nabla_{\theta} \ell(y, f_{\theta}(x))\rangle
    \]

    for the case of up-weighting influence, resp.

    \[
    \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})),
        \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle
    \]

    for the perturbation type influence case. For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        x_test: model input to use in the gradient computations
            of $H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
                f_{\theta}(x_{\text{test}}))$
        y_test: label tensor to compute gradients
        x: optional model input to use in the gradient computations
            $\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            resp. $\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            if None, use $x=x_{\text{test}}$
        y: optional label tensor to compute gradients
        mode: enum value of [InfluenceMode]
            [pydvl.influence.base_influence_function_model.InfluenceMode]

    Returns:
        Tensor representing the element-wise scalar products for the provided batch

    """
    t: torch.Tensor = super().influences(x_test, y_test, x, y, mode=mode)
    return t

influences_from_factors

influences_from_factors(
    z_test_factors: Tensor,
    x: Tensor,
    y: Tensor,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Tensor

Computation of

\[ \langle z_{\text{test_factors}}, \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the case of up-weighting influence, resp.

\[ \langle z_{\text{test_factors}}, \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the perturbation type influence case. The gradient is meant to be per sample of the batch \((x, y)\). For all input tensors it is assumed that the first dimension is the batch dimension (if you want to provide a single sample z, call z.unsqueeze(0) to add the missing batch dimension).
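
A sketch of the intended two-step workflow, continuing the fitted if_model from the usage example above; the factors are computed once for the test batch and can then be reused against many training batches:

# Expensive step: solve the inverse Hessian-vector products once.
z_test = if_model.influence_factors(x_test, y_test)
# Cheap step: contract the factors with per-sample training gradients.
scores = if_model.influences_from_factors(z_test, x_train, y_train)
# Equivalent to if_model.influences(x_test, y_test, x_train, y_train)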

PARAMETER DESCRIPTION
z_test_factors

pre-computed tensor, approximating \(H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}}))\)

TYPE: Tensor

x

model input to use in the gradient computations \(\nabla_{\theta}\ell(y, f_{\theta}(x))\), resp. \(\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))\)

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

mode

enum value of InfluenceMode

TYPE: InfluenceMode DEFAULT: Up

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise scalar products for the provided batch

Source code in src/pydvl/influence/torch/influence_function_model.py
def influences_from_factors(
    self,
    z_test_factors: torch.Tensor,
    x: torch.Tensor,
    y: torch.Tensor,
    mode: InfluenceMode = InfluenceMode.Up,
) -> torch.Tensor:
    r"""
    Computation of

    \[ \langle z_{\text{test_factors}},
        \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the case of up-weighting influence, resp.

    \[ \langle z_{\text{test_factors}},
        \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the perturbation type influence case. The gradient is meant to be per sample
    of the batch $(x, y)$. For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        z_test_factors: pre-computed tensor, approximating
            $H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
            f_{\theta}(x_{\text{test}}))$
        x: model input to use in the gradient computations
            $\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            resp. $\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))$
        y: label tensor to compute gradients
        mode: enum value of [InfluenceMode]
            [pydvl.influence.base_influence_function_model.InfluenceMode]

    Returns:
        Tensor representing the element-wise scalar products for the provided batch

    """
    if mode == InfluenceMode.Up:
        return (
            z_test_factors.to(self.model_device)
            @ self._loss_grad(x.to(self.model_device), y.to(self.model_device)).T
        )
    elif mode == InfluenceMode.Perturbation:
        return torch.einsum(
            "ia,j...a->ij...",
            z_test_factors.to(self.model_device),
            self._flat_loss_mixed_grad(
                x.to(self.model_device), y.to(self.model_device)
            ),
        )
    else:
        raise UnsupportedInfluenceModeException(mode)

fit

fit(data: DataLoader) -> EkfacInfluence

Compute the KFAC blocks for each layer of the model using the provided data, then create an EkfacRepresentation object that stores, for each layer, the KFAC blocks, their eigenvalue decompositions, and the diagonal values.

Source code in src/pydvl/influence/torch/influence_function_model.py
@log_duration(log_level=logging.INFO)
def fit(self, data: DataLoader) -> EkfacInfluence:
    """
    Compute the KFAC blocks for each layer of the model, using the provided data.
    It then creates an EkfacRepresentation object that stores the KFAC blocks for
    each layer, their eigenvalue decomposition and diagonal values.
    """
    forward_x, grad_y = self._get_kfac_blocks(data)
    layers_evecs_a = {}
    layers_evect_g = {}
    layers_diags = {}
    for key in self.active_layers.keys():
        evals_a, evecs_a = safe_torch_linalg_eigh(forward_x[key])
        evals_g, evecs_g = safe_torch_linalg_eigh(grad_y[key])
        layers_evecs_a[key] = evecs_a
        layers_evect_g[key] = evecs_g
        layers_diags[key] = torch.kron(evals_g.view(-1, 1), evals_a.view(-1, 1))

    self.ekfac_representation = EkfacRepresentation(
        self.active_layers.keys(),
        self.active_layers.values(),
        layers_evecs_a.values(),
        layers_evect_g.values(),
        layers_diags.values(),
    )
    if self.update_diagonal:
        self._update_diag(data)
    return self

influences_by_layer

influences_by_layer(
    x_test: Tensor,
    y_test: Tensor,
    x: Optional[Tensor] = None,
    y: Optional[Tensor] = None,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Dict[str, Tensor]

Compute the influence of the data on the test data for each layer of the model.
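
A short sketch, continuing the fitted if_model and the tensors from the usage example above:

by_layer = if_model.influences_by_layer(x_test, y_test, x_train, y_train)
for layer_name, layer_values in by_layer.items():
    print(layer_name, layer_values.shape)  # each of shape (5, 100)
# For up-weighting influence, the per-layer inner products sum to the total.
total = sum(by_layer.values())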

PARAMETER DESCRIPTION
x_test

model input to use in the gradient computations of \(H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}}))\)

TYPE: Tensor

y_test

label tensor to compute gradients

TYPE: Tensor

x

optional model input to use in the gradient computations \(\nabla_{\theta}\ell(y, f_{\theta}(x))\), resp. \(\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))\), if None, use \(x=x_{\text{test}}\)

TYPE: Optional[Tensor] DEFAULT: None

y

optional label tensor to compute gradients

TYPE: Optional[Tensor] DEFAULT: None

mode

enum value of InfluenceMode

TYPE: InfluenceMode DEFAULT: Up

RETURNS DESCRIPTION
Dict[str, Tensor]

A dictionary containing the influence of the data on the test data for each layer of the model, with the layer name as key.

Source code in src/pydvl/influence/torch/influence_function_model.py
def influences_by_layer(
    self,
    x_test: torch.Tensor,
    y_test: torch.Tensor,
    x: Optional[torch.Tensor] = None,
    y: Optional[torch.Tensor] = None,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Dict[str, torch.Tensor]:
    r"""
    Compute the influence of the data on the test data for each layer of the model.

    Args:
        x_test: model input to use in the gradient computations of
            $H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
                f_{\theta}(x_{\text{test}}))$
        y_test: label tensor to compute gradients
        x: optional model input to use in the gradient computations
            $\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            resp. $\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            if None, use $x=x_{\text{test}}$
        y: optional label tensor to compute gradients
        mode: enum value of [InfluenceMode]
            [pydvl.influence.base_influence_function_model.InfluenceMode]

    Returns:
        A dictionary containing the influence of the data on the test data for each
        layer of the model, with the layer name as key.
    """
    if not self.is_fitted:
        raise ValueError(
            "Instance must be fitted before calling influence methods on it"
        )

    if x is None:
        if y is not None:
            raise ValueError(
                "Providing labels y, without providing model input x "
                "is not supported"
            )

        return self._symmetric_values_by_layer(
            x_test.to(self.model_device),
            y_test.to(self.model_device),
            mode,
        )

    if y is None:
        raise ValueError(
            "Providing model input x without providing labels y is not supported"
        )

    return self._non_symmetric_values_by_layer(
        x_test.to(self.model_device),
        y_test.to(self.model_device),
        x.to(self.model_device),
        y.to(self.model_device),
        mode,
    )

influence_factors_by_layer

influence_factors_by_layer(x: Tensor, y: Tensor) -> Dict[str, Tensor]

Computes the approximation of

\[ H^{-1}\nabla_{\theta} \ell(y, f_{\theta}(x)) \]

for each layer of the model separately.

PARAMETER DESCRIPTION
x

model input to use in the gradient computations

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

RETURNS DESCRIPTION
Dict[str, Tensor]

A dictionary containing the influence factors for each layer of the model, with the layer name as key.

Source code in src/pydvl/influence/torch/influence_function_model.py
def influence_factors_by_layer(
    self,
    x: torch.Tensor,
    y: torch.Tensor,
) -> Dict[str, torch.Tensor]:
    r"""
    Computes the approximation of

    \[ H^{-1}\nabla_{\theta} \ell(y, f_{\theta}(x)) \]

    for each layer of the model separately.

    Args:
        x: model input to use in the gradient computations
        y: label tensor to compute gradients

    Returns:
        A dictionary containing the influence factors for each layer of the model,
        with the layer name as key.
    """
    if not self.is_fitted:
        raise ValueError(
            "Instance must be fitted before calling influence methods on it"
        )

    return self._solve_hvp_by_layer(
        self._loss_grad(x.to(self.model_device), y.to(self.model_device)),
        self.ekfac_representation,
        self.hessian_regularization,
    )

influences_from_factors_by_layer

influences_from_factors_by_layer(
    z_test_factors: Dict[str, Tensor],
    x: Tensor,
    y: Tensor,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Dict[str, Tensor]

Computation of

\[ \langle z_{\text{test_factors}}, \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the case of up-weighting influence, resp.

\[ \langle z_{\text{test_factors}}, \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the perturbation type influence case for each layer of the model separately. The gradients are meant to be per sample of the batch \((x, y)\).
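
The per-layer analogue of the two-step workflow sketched earlier, continuing the fitted if_model from the usage example above:

z_by_layer = if_model.influence_factors_by_layer(x_test, y_test)
scores_by_layer = if_model.influences_from_factors_by_layer(
    z_by_layer, x_train, y_train
)
for layer_name, layer_values in scores_by_layer.items():
    print(layer_name, layer_values.shape)  # each of shape (5, 100)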

PARAMETER DESCRIPTION
z_test_factors

pre-computed tensor, approximating \(H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}}))\)

TYPE: Dict[str, Tensor]

x

model input to use in the gradient computations \(\nabla_{\theta}\ell(y, f_{\theta}(x))\), resp. \(\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))\)

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

mode

enum value of InfluenceMode

TYPE: InfluenceMode DEFAULT: Up

RETURNS DESCRIPTION
Dict[str, Tensor]

A dictionary containing the influence of the data on the test data for each layer of the model, with the layer name as key.

Source code in src/pydvl/influence/torch/influence_function_model.py
def influences_from_factors_by_layer(
    self,
    z_test_factors: Dict[str, torch.Tensor],
    x: torch.Tensor,
    y: torch.Tensor,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Dict[str, torch.Tensor]:
    r"""
    Computation of

    \[ \langle z_{\text{test_factors}},
        \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the case of up-weighting influence, resp.

    \[ \langle z_{\text{test_factors}},
        \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the perturbation type influence case for each layer of the model
    separately. The gradients are meant to be per sample of the batch $(x,
    y)$.

    Args:
        z_test_factors: pre-computed tensor, approximating
            $H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
            f_{\theta}(x_{\text{test}}))$
        x: model input to use in the gradient computations
            $\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            resp. $\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))$
        y: label tensor to compute gradients
        mode: enum value of [InfluenceMode]
            [pydvl.influence.base_influence_function_model.InfluenceMode]

    Returns:
        A dictionary containing the influence of the data on the test data
        for each layer of the model, with the layer name as key.
    """
    if mode == InfluenceMode.Up:
        total_grad = self._loss_grad(
            x.to(self.model_device), y.to(self.model_device)
        )
        start_idx = 0
        influences = {}
        for layer_id, layer_z_test in z_test_factors.items():
            end_idx = start_idx + layer_z_test.shape[1]
            influences[layer_id] = (
                layer_z_test.to(self.model_device)
                @ total_grad[:, start_idx:end_idx].T
            )
            start_idx = end_idx
        return influences
    elif mode == InfluenceMode.Perturbation:
        total_mixed_grad = self._flat_loss_mixed_grad(
            x.to(self.model_device), y.to(self.model_device)
        )
        start_idx = 0
        influences = {}
        for layer_id, layer_z_test in z_test_factors.items():
            end_idx = start_idx + layer_z_test.shape[1]
            influences[layer_id] = torch.einsum(
                "ia,j...a->ij...",
                layer_z_test.to(self.model_device),
                total_mixed_grad[:, start_idx:end_idx],
            )
            start_idx = end_idx
        return influences
    else:
        raise UnsupportedInfluenceModeException(mode)

explore_hessian_regularization

explore_hessian_regularization(
    x: Tensor, y: Tensor, regularization_values: List[float]
) -> Dict[float, Dict[str, Tensor]]

Efficiently computes the influences for input x and label y for each layer of the model, for different values of the Hessian regularization parameter. The gradient of the loss for x and y is computed only once, and the inverse Hessian-vector product is then solved for each regularization value. This is useful for finding a good regularization value and for exploring how robust the influence values are to changes in it.
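
A sketch of such a sweep, continuing the fitted if_model from the usage example above (the regularization values are arbitrary):

sweep = if_model.explore_hessian_regularization(
    x_train, y_train, regularization_values=[1e-4, 1e-2, 1.0]
)
for reg_value, by_layer in sweep.items():
    for layer_name, values in by_layer.items():
        print(reg_value, layer_name, values.shape)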

PARAMETER DESCRIPTION
x

model input to use in the gradient computations

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

regularization_values

list of regularization values to use

TYPE: List[float]

RETURNS DESCRIPTION
Dict[float, Dict[str, Tensor]]

A dictionary with keys being the regularization values and values being dictionaries containing the influences for each layer of the model, with the layer name as key.

Source code in src/pydvl/influence/torch/influence_function_model.py
def explore_hessian_regularization(
    self,
    x: torch.Tensor,
    y: torch.Tensor,
    regularization_values: List[float],
) -> Dict[float, Dict[str, torch.Tensor]]:
    """
    Efficiently computes the influence for input x and label y for each layer of the
    model, for different values of the hessian regularization parameter. This is done
    by computing the gradient of the loss function for the input x and label y only once
    and then solving the Hessian Vector Product for each regularization value. This is
    useful for finding the optimal regularization value and for exploring
    how robust the influence values are to changes in the regularization value.

    Args:
        x: model input to use in the gradient computations
        y: label tensor to compute gradients
        regularization_values: list of regularization values to use

    Returns:
        A dictionary with keys being the regularization values and values
        being dictionaries containing the influences for each layer of the model,
        with the layer name as key.
    """
    grad = self._loss_grad(x.to(self.model_device), y.to(self.model_device))
    influences_by_reg_value = {}
    for reg_value in regularization_values:
        reg_factors = self._solve_hvp_by_layer(
            grad, self.ekfac_representation, reg_value
        )
        values = {}
        start_idx = 0
        for layer_id, layer_fac in reg_factors.items():
            end_idx = start_idx + layer_fac.shape[1]
            values[layer_id] = layer_fac @ grad[:, start_idx:end_idx].T
            start_idx = end_idx
        influences_by_reg_value[reg_value] = values
    return influences_by_reg_value

NystroemSketchInfluence

NystroemSketchInfluence(
    model: Module,
    loss: Callable[[Tensor, Tensor], Tensor],
    hessian_regularization: float,
    rank: int,
)

Bases: TorchInfluenceFunctionModel

Given a model and training data, it uses a low-rank approximation of the Hessian (obtained via a randomized Nyström approximation) in combination with the Sherman–Morrison–Woodbury formula to compute inverse Hessian-vector products. More concretely, it computes a low-rank approximation

\[\begin{align*} H_{\text{nys}} &= (H\Omega)(\Omega^T H \Omega)^{+}(H\Omega)^T \\ &= U \Lambda U^T \end{align*}\]

in factorized form and approximates the action of the inverse Hessian via

\[ (H_{\text{nys}} + \lambda I)^{-1} = U(\Lambda+\lambda I)^{-1}U^T + \frac{1}{\lambda}(I-UU^T). \]
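
A minimal usage sketch (the regression model and the random data are made up for illustration; the constructor arguments are the ones documented below):

import torch
from torch.utils.data import DataLoader, TensorDataset

from pydvl.influence.torch import NystroemSketchInfluence

model = torch.nn.Sequential(
    torch.nn.Linear(10, 32), torch.nn.ReLU(), torch.nn.Linear(32, 1)
)
data = DataLoader(
    TensorDataset(torch.randn(128, 10), torch.randn(128, 1)), batch_size=32
)

# rank trades approximation quality against memory and compute
if_model = NystroemSketchInfluence(
    model, torch.nn.functional.mse_loss, hessian_regularization=1e-3, rank=10
).fit(data)
values = if_model.influences(torch.randn(4, 10), torch.randn(4, 1))  # shape (4, 4)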
PARAMETER DESCRIPTION
model

A PyTorch model. The Hessian will be calculated with respect to this model's parameters.

TYPE: Module

loss

A callable that takes the model's output and target as input and returns the scalar loss.

TYPE: Callable[[Tensor, Tensor], Tensor]

hessian_regularization

Regularization parameter added to the Hessian for numerical stability.

TYPE: float

rank

rank of the low-rank approximation

TYPE: int

Source code in src/pydvl/influence/torch/influence_function_model.py
def __init__(
    self,
    model: torch.nn.Module,
    loss: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
    hessian_regularization: float,
    rank: int,
):
    super().__init__(model, loss)
    self.hessian_regularization = hessian_regularization
    self.rank = rank

influence_factors

influence_factors(x: Tensor, y: Tensor) -> Tensor

Compute approximation of

\[ H^{-1}\nabla_{\theta} \ell(y, f_{\theta}(x)) \]

where the gradient is meant to be per sample of the batch \((x, y)\). For all input tensors it is assumed that the first dimension is the batch dimension (if you want to provide a single sample z, call z.unsqueeze(0) to add the missing batch dimension).

PARAMETER DESCRIPTION
x

model input to use in the gradient computations

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise inverse Hessian matrix vector products

Source code in src/pydvl/influence/torch/influence_function_model.py
def influence_factors(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    r"""
    Compute approximation of

    \[ H^{-1}\nabla_{\theta} \ell(y, f_{\theta}(x)) \]

    where the gradient is meant to be per sample of the batch $(x, y)$.
    For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        x: model input to use in the gradient computations
        y: label tensor to compute gradients

    Returns:
        Tensor representing the element-wise inverse Hessian matrix vector products

    """
    return super().influence_factors(x, y)

influences

influences(
    x_test: Tensor,
    y_test: Tensor,
    x: Optional[Tensor] = None,
    y: Optional[Tensor] = None,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Tensor

Compute the approximation of

\[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})), \nabla_{\theta} \ell(y, f_{\theta}(x))\rangle \]

for the case of up-weighting influence, resp.

\[ \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})), \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the perturbation type influence case. For all input tensors it is assumed that the first dimension is the batch dimension (if you want to provide a single sample z, call z.unsqueeze(0) to add the missing batch dimension).

PARAMETER DESCRIPTION
x_test

model input to use in the gradient computations of \(H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}}))\)

TYPE: Tensor

y_test

label tensor to compute gradients

TYPE: Tensor

x

optional model input to use in the gradient computations \(\nabla_{\theta}\ell(y, f_{\theta}(x))\), resp. \(\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))\), if None, use \(x=x_{\text{test}}\)

TYPE: Optional[Tensor] DEFAULT: None

y

optional label tensor to compute gradients

TYPE: Optional[Tensor] DEFAULT: None

mode

enum value of InfluenceMode

TYPE: InfluenceMode DEFAULT: Up

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise scalar products for the provided batch

Source code in src/pydvl/influence/torch/influence_function_model.py
def influences(
    self,
    x_test: torch.Tensor,
    y_test: torch.Tensor,
    x: Optional[torch.Tensor] = None,
    y: Optional[torch.Tensor] = None,
    mode: InfluenceMode = InfluenceMode.Up,
) -> torch.Tensor:
    r"""
    Compute the approximation of

    \[
    \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
    f_{\theta}(x_{\text{test}})), \nabla_{\theta} \ell(y, f_{\theta}(x))\rangle
    \]

    for the case of up-weighting influence, resp.

    \[
    \langle H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}})),
        \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle
    \]

    for the perturbation type influence case. For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        x_test: model input to use in the gradient computations
            of $H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
                f_{\theta}(x_{\text{test}}))$
        y_test: label tensor to compute gradients
        x: optional model input to use in the gradient computations
            $\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            resp. $\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            if None, use $x=x_{\text{test}}$
        y: optional label tensor to compute gradients
        mode: enum value of [InfluenceMode]
            [pydvl.influence.base_influence_function_model.InfluenceMode]

    Returns:
        Tensor representing the element-wise scalar products for the provided batch

    """
    t: torch.Tensor = super().influences(x_test, y_test, x, y, mode=mode)
    return t

influences_from_factors

influences_from_factors(
    z_test_factors: Tensor,
    x: Tensor,
    y: Tensor,
    mode: InfluenceMode = InfluenceMode.Up,
) -> Tensor

Computation of

\[ \langle z_{\text{test_factors}}, \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the case of up-weighting influence, resp.

\[ \langle z_{\text{test_factors}}, \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

for the perturbation type influence case. The gradient is meant to be per sample of the batch \((x, y)\). For all input tensors it is assumed that the first dimension is the batch dimension (if you want to provide a single sample z, call z.unsqueeze(0) to add the missing batch dimension).

PARAMETER DESCRIPTION
z_test_factors

pre-computed tensor, approximating \(H^{-1}\nabla_{\theta} \ell(y_{\text{test}}, f_{\theta}(x_{\text{test}}))\)

TYPE: Tensor

x

model input to use in the gradient computations \(\nabla_{\theta}\ell(y, f_{\theta}(x))\), resp. \(\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))\)

TYPE: Tensor

y

label tensor to compute gradients

TYPE: Tensor

mode

enum value of InfluenceMode

TYPE: InfluenceMode DEFAULT: Up

RETURNS DESCRIPTION
Tensor

Tensor representing the element-wise scalar products for the provided batch

Source code in src/pydvl/influence/torch/influence_function_model.py
def influences_from_factors(
    self,
    z_test_factors: torch.Tensor,
    x: torch.Tensor,
    y: torch.Tensor,
    mode: InfluenceMode = InfluenceMode.Up,
) -> torch.Tensor:
    r"""
    Computation of

    \[ \langle z_{\text{test_factors}},
        \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the case of up-weighting influence, resp.

    \[ \langle z_{\text{test_factors}},
        \nabla_{x} \nabla_{\theta} \ell(y, f_{\theta}(x)) \rangle \]

    for the perturbation type influence case. The gradient is meant to be per sample
    of the batch $(x, y)$. For all input tensors it is assumed,
    that the first dimension is the batch dimension (in case, you want to provide
    a single sample z, call z.unsqueeze(0) if no batch dimension is present).

    Args:
        z_test_factors: pre-computed tensor, approximating
            $H^{-1}\nabla_{\theta} \ell(y_{\text{test}},
            f_{\theta}(x_{\text{test}}))$
        x: model input to use in the gradient computations
            $\nabla_{\theta}\ell(y, f_{\theta}(x))$,
            resp. $\nabla_{x}\nabla_{\theta}\ell(y, f_{\theta}(x))$
        y: label tensor to compute gradients
        mode: enum value of [InfluenceMode]
            [pydvl.influence.base_influence_function_model.InfluenceMode]

    Returns:
        Tensor representing the element-wise scalar products for the provided batch

    """
    if mode == InfluenceMode.Up:
        return (
            z_test_factors.to(self.model_device)
            @ self._loss_grad(x.to(self.model_device), y.to(self.model_device)).T
        )
    elif mode == InfluenceMode.Perturbation:
        return torch.einsum(
            "ia,j...a->ij...",
            z_test_factors.to(self.model_device),
            self._flat_loss_mixed_grad(
                x.to(self.model_device), y.to(self.model_device)
            ),
        )
    else:
        raise UnsupportedInfluenceModeException(mode)