|
| 1 | +from typing import Literal, cast, overload |
| 2 | + |
| 3 | +import torch |
| 4 | +from torch import Tensor |
| 5 | + |
| 6 | +from ._matrix import Matrix, PSDMatrix, PSDTensor |
| 7 | + |
| 8 | + |
@overload
def compute_gramian(t: Tensor) -> PSDMatrix: ...


@overload
def compute_gramian(t: Tensor, contracted_dims: Literal[-1]) -> PSDMatrix: ...


@overload
def compute_gramian(t: Matrix, contracted_dims: Literal[1]) -> PSDMatrix: ...


def compute_gramian(t: Tensor, contracted_dims: int = -1) -> PSDTensor:
    """
    Compute the `Gramian <https://en.wikipedia.org/wiki/Gram_matrix>`_ of ``t``.

    ``contracted_dims`` is the number of trailing dimensions to contract. A negative value
    instead gives the number of leading dimensions to preserve (so the default ``-1``
    preserves the first dimension and contracts all remaining ones).
    """

    # Turn a negative value into the equivalent non-negative count of trailing dims.
    n_contracted = contracted_dims if contracted_dims >= 0 else contracted_dims + t.ndim
    n_preserved = t.ndim - n_contracted

    # Build a "mirrored" view of t: each preserved leading dim is sent to the end in
    # reversed order, which leaves the contracted dims first, ready for tensordot.
    source = list(range(n_preserved))
    destination = [t.ndim - 1 - k for k in range(n_preserved)]
    mirrored = t.movedim(source, destination)

    # Contract the last n_contracted dims of t with the first n_contracted dims of the
    # mirrored view; the result has the preserved dims followed by their reversed copies.
    gramian = torch.tensordot(t, mirrored, dims=n_contracted)
    return cast(PSDTensor, gramian)
| 39 | + |
| 40 | + |
def normalize(gramian: PSDMatrix, eps: float) -> PSDMatrix:
    """
    Rescale the gramian ``G = A A^T`` with respect to the Frobenius norm of ``A``.

    If ``G = A A^T``, the Frobenius norm of ``A`` is the square root of the trace of ``G``
    (the sum of its diagonal elements). The gramian of the Frobenius-normalized ``A`` is
    therefore ``G`` divided by that sum. When the sum falls below ``eps``, a zero matrix
    is returned instead of risking a division by a (near-)zero value.
    """

    # Sum of diagonal elements == trace(G) == squared Frobenius norm of A.
    trace = gramian.diagonal().sum()
    result = torch.zeros_like(gramian) if trace < eps else gramian / trace
    return cast(PSDMatrix, result)
| 55 | + |
| 56 | + |
def regularize(gramian: PSDMatrix, eps: float) -> PSDMatrix:
    """
    Return ``gramian`` plus ``eps`` times the identity, to enforce positive definiteness.

    Because of numerical errors, ``gramian`` might have slightly negative eigenvalue(s).
    Adding a small multiple of the identity matrix shifts the spectrum up by ``eps``,
    ensuring that the result is positive definite.
    """

    # Identity of matching size, dtype and device so the addition stays on-device.
    identity = torch.eye(gramian.shape[0], dtype=gramian.dtype, device=gramian.device)
    return cast(PSDMatrix, gramian + eps * identity)
0 commit comments