diff --git a/pyrecest/distributions/__init__.py b/pyrecest/distributions/__init__.py
index 818450ca5..165585869 100644
--- a/pyrecest/distributions/__init__.py
+++ b/pyrecest/distributions/__init__.py
@@ -74,6 +74,9 @@
     PartiallyWrappedNormalDistribution,
 )
 from .cart_prod.se2_bingham_distribution import SE2BinghamDistribution
+from .complex_hypersphere.complex_bingham_distribution import (
+    ComplexBinghamDistribution,
+)
 from .cart_prod.state_space_subdivision_distribution import (
     StateSpaceSubdivisionDistribution,
 )
@@ -390,4 +393,5 @@
     "SE3CartProdStackedDistribution",
     "SE3DiracDistribution",
     "SE2BinghamDistribution",
+    "ComplexBinghamDistribution",
 ]
diff --git a/pyrecest/distributions/complex_hypersphere/__init__.py b/pyrecest/distributions/complex_hypersphere/__init__.py
new file mode 100644
index 000000000..57e4787ce
--- /dev/null
+++ b/pyrecest/distributions/complex_hypersphere/__init__.py
@@ -0,0 +1,9 @@
+from .abstract_complex_hyperspherical_distribution import (
+    AbstractComplexHypersphericalDistribution,
+)
+from .complex_bingham_distribution import ComplexBinghamDistribution
+
+__all__ = [
+    "AbstractComplexHypersphericalDistribution",
+    "ComplexBinghamDistribution",
+]
diff --git a/pyrecest/distributions/complex_hypersphere/abstract_complex_hyperspherical_distribution.py b/pyrecest/distributions/complex_hypersphere/abstract_complex_hyperspherical_distribution.py
new file mode 100644
index 000000000..fb185e84e
--- /dev/null
+++ b/pyrecest/distributions/complex_hypersphere/abstract_complex_hyperspherical_distribution.py
@@ -0,0 +1,58 @@
+import math
+from abc import abstractmethod
+
+import numpy as np
+
+from pyrecest.distributions.abstract_manifold_specific_distribution import (
+    AbstractManifoldSpecificDistribution,
+)
+
+
+class AbstractComplexHypersphericalDistribution(AbstractManifoldSpecificDistribution):
+    """
+    Abstract base class for distributions on the complex unit hypersphere in C^d.
+
+    The complex unit hypersphere in C^d is the set
+        {z ∈ C^d : ||z|| = 1}
+    which is isomorphic to the real sphere S^{2d-1} in R^{2d}.
+
+    The complex dimension d is stored as ``complex_dim``.
+    The underlying manifold dimension (as a real manifold) is 2*d - 1.
+    """
+
+    def __init__(self, complex_dim: int):
+        """
+        Parameters
+        ----------
+        complex_dim : int
+            Complex dimension d of the ambient space C^d (d >= 1).
+        """
+        if complex_dim < 1:
+            raise ValueError("complex_dim must be >= 1.")
+        # The real manifold dimension is 2*d - 1
+        super().__init__(2 * complex_dim - 1)
+        self._complex_dim = complex_dim
+
+    @property
+    def complex_dim(self) -> int:
+        """Complex dimension d of the ambient space C^d."""
+        return self._complex_dim
+
+    @property
+    def input_dim(self) -> int:
+        """Number of complex coordinates of a point on the sphere (= complex_dim)."""
+        return self._complex_dim
+
+    def get_manifold_size(self) -> float:
+        """Surface area of the real sphere S^{2d-1} = 2π^d / (d-1)!"""
+        d = self._complex_dim
+        return float(2 * np.pi**d / math.factorial(d - 1))
+
+    @abstractmethod
+    def pdf(self, za):
+        """Probability density at the given point(s) on the complex unit sphere."""
+
+    def mean(self):
+        raise NotImplementedError(
+            "mean() is not defined for this complex hyperspherical distribution."
+        )
diff --git a/pyrecest/distributions/complex_hypersphere/complex_bingham_distribution.py b/pyrecest/distributions/complex_hypersphere/complex_bingham_distribution.py
new file mode 100644
index 000000000..d9283b5df
--- /dev/null
+++ b/pyrecest/distributions/complex_hypersphere/complex_bingham_distribution.py
@@ -0,0 +1,378 @@
+import math
+import numpy as np
+from scipy.optimize import least_squares
+
+from .abstract_complex_hyperspherical_distribution import (
+    AbstractComplexHypersphericalDistribution,
+)
+
+
+class ComplexBinghamDistribution(AbstractComplexHypersphericalDistribution):
+    """
+    Complex Bingham distribution on the complex unit hypersphere in C^d.
+
+    The pdf is
+        f(z) = exp(log_c + Re(z^H B z)),    ||z|| = 1
+    where B is a Hermitian d×d parameter matrix and log_c = -log C(B) is the
+    (negative) log of the normalization integral C(B).
+
+    References
+    ----------
+    Kent, J. T.
+    The Complex Bingham Distribution and Shape Analysis.
+    J. Royal Statistical Society B, 56(2):285-299, 1994.
+
+    Kent, J. T.; Constable, P. D. L. & Er, F.
+    Simulation for the complex Bingham distribution.
+    Statistics and Computing, 14:53-57, 2004.
+    """
+
+    def __init__(self, B):
+        """
+        Parameters
+        ----------
+        B : array_like, shape (d, d), complex or real
+            Hermitian parameter matrix. The complex dimension d is inferred
+            from B.
+        """
+        B = np.asarray(B, dtype=complex)
+        if B.ndim != 2 or B.shape[0] != B.shape[1]:
+            raise ValueError("B must be a square matrix.")
+        if not np.allclose(B, B.conj().T, atol=1e-10):
+            raise ValueError("B must be Hermitian (B == B^H).")
+
+        d = B.shape[0]
+        super().__init__(d)
+
+        self.B = B
+        self.log_c = ComplexBinghamDistribution.log_norm(self.B)
+
+    # ------------------------------------------------------------------
+    # PDF
+    # ------------------------------------------------------------------
+
+    def pdf(self, za):
+        """
+        Evaluate the pdf at the given point(s) on the complex unit sphere.
+
+        Parameters
+        ----------
+        za : array_like, shape (d,) or (d, n)
+            Each column is a unit vector in C^d.
+
+        Returns
+        -------
+        p : ndarray, shape () or (n,)
+            Real-valued probability densities.
+        """
+        za = np.asarray(za, dtype=complex)
+        single = za.ndim == 1
+        if single:
+            za = za[:, np.newaxis]
+
+        # z^H B z is real for Hermitian B and unit vectors z; taking .real
+        # drops tiny imaginary residuals from floating-point arithmetic
+        val = np.einsum("id,ij,jd->d", za.conj(), self.B, za).real
+        p = np.exp(self.log_c + val)
+        return p[0] if single else p
+
+    # ------------------------------------------------------------------
+    # Sampling (Kent, Constable & Er, 2004)
+    # ------------------------------------------------------------------
+
+    def sample(self, n):
+        """
+        Draw n i.i.d. samples from the complex Bingham distribution.
+
+        Parameters
+        ----------
+        n : int
+            Number of samples.
+
+        Returns
+        -------
+        Z : ndarray, shape (d, n), complex
+            Columns are unit vectors in C^d.
+        """
+        d = self.complex_dim
+        if d < 2:
+            raise ValueError("B must be at least 2×2 for sampling.")
+
+        # Eigendecomposition of -B; eigh returns ascending eigenvalues
+        eigenvalues, V = np.linalg.eigh(-self.B)
+
+        # Sort eigenvalues in *descending* order
+        idx = np.argsort(eigenvalues)[::-1]
+        eigenvalues = eigenvalues[idx]
+        V = V[:, idx]
+
+        # Eigenvalue shift so that the smallest (last) eigenvalue is 0
+        eigenvalues = eigenvalues - eigenvalues[-1]
+
+        # Pre-compute constants for the truncated-exponential sampler
+        lam = eigenvalues[:-1]  # length d-1, all >= 0
+        with np.errstate(divide="ignore", invalid="ignore"):
+            temp1 = np.where(lam > 0, -1.0 / lam, 0.0)
+            temp2 = np.where(lam > 0, 1.0 - np.exp(-lam), 0.0)
+
+        Z = np.empty((d, n), dtype=complex)
+        rng = np.random.default_rng()
+
+        for k in range(n):
+            # Sample simplex coordinate S via rejection
+            while True:
+                U = rng.uniform(0.0, 1.0, d - 1)
+                S = np.where(
+                    lam < 0.03,
+                    U,
+                    temp1 * np.log(1.0 - U * temp2),
+                )
+                if S.sum() < 1.0:
+                    break
+
+            S_full = np.empty(d)
+            S_full[:-1] = S
+            S_full[-1] = 1.0 - S.sum()
+
+            # Independent uniform phases
+            theta = 2.0 * np.pi * rng.uniform(0.0, 1.0, d)
+            W = np.sqrt(S_full) * np.exp(1j * theta)
+
+            Z[:, k] = V @ W
+
+        return Z
+
+    # ------------------------------------------------------------------
+    # Static helpers
+    # ------------------------------------------------------------------
+
+    @staticmethod
+    def fit(Z):
+        """
+        Fit a ComplexBinghamDistribution to complex samples.
+
+        Parameters
+        ----------
+        Z : array_like, shape (d, N), complex
+            N sample unit vectors in C^d.
+
+        Returns
+        -------
+        ComplexBinghamDistribution
+        """
+        Z = np.asarray(Z, dtype=complex)
+        N = Z.shape[1]
+        S = Z @ Z.conj().T / N
+        B = ComplexBinghamDistribution.estimate_parameter_matrix(S)
+        return ComplexBinghamDistribution(B)
+
+    @staticmethod
+    def estimate_parameter_matrix(S):
+        """
+        Maximum-likelihood estimate of B from a scatter matrix S.
+
+        The ML eigenvectors of B are the eigenvectors of S. The eigenvalues
+        κ_1 ≤ ... ≤ κ_{d-1} < κ_d = 0 are found by solving the score
+        equations E_κ[|z_k|²] = s_k (where s_k = eigenvalue_k(S)),
+        i.e. the expected scatter equals the sample scatter.
+
+        Parameters
+        ----------
+        S : array_like, shape (d, d), Hermitian positive semi-definite
+            Sample scatter matrix S = (1/N) Σ z_i z_i^H.
+
+        Returns
+        -------
+        B : ndarray, shape (d, d), complex Hermitian
+        """
+        S = np.asarray(S, dtype=complex)
+        d = S.shape[0]
+
+        # Eigendecompose S (eigh gives ascending order, real eigenvalues)
+        s_vals, V_S = np.linalg.eigh(S)
+        # s_vals are in ascending order; target proportions
+        s_total = s_vals.sum()
+        target = s_vals / s_total if s_total > 0 else np.ones(d) / d
+
+        def _expected_scatter(kappa):
+            """E_κ[|z_k|²] via numerical derivative of log-integral."""
+            h = 1e-5
+            grad = np.zeros(d)
+            for i in range(d):
+                kp = kappa.copy()
+                km = kappa.copy()
+                kp[i] += h
+                km[i] -= h
+                B_p = V_S @ np.diag(kp.astype(complex)) @ V_S.conj().T
+                B_m = V_S @ np.diag(km.astype(complex)) @ V_S.conj().T
+                # log_norm returns -log(I), so -log_norm = log(I)
+                log_I_p = -ComplexBinghamDistribution.log_norm(B_p)
+                log_I_m = -ComplexBinghamDistribution.log_norm(B_m)
+                grad[i] = (log_I_p - log_I_m) / (2 * h)
+            return grad
+
+        def residual(kappa_free):
+            kappa = np.append(kappa_free, 0.0)
+            expected = _expected_scatter(kappa)
+            exp_total = expected.sum()
+            if exp_total > 0:
+                expected = expected / exp_total
+            return expected[:-1] - target[:-1]
+
+        # Initial guess: push smaller eigenvalues negative; clip into bounds
+        x0 = np.clip(-(target[-1] - target[:-1]) * 10.0, -1000.0, -1e-2)
+
+        try:
+            result = least_squares(
+                residual,
+                x0,
+                bounds=(np.full(d - 1, -1000.0), np.full(d - 1, -1e-2)),
+                method="trf",
+                ftol=1e-12,
+                xtol=1e-12,
+                max_nfev=int(1e5),
+            )
+            kappa = np.append(result.x, 0.0)
+        except (ValueError, RuntimeError, OverflowError):
+            kappa = np.zeros(d)
+
+        # Sort and construct B
+        idx = np.argsort(kappa)
+        kappa = kappa[idx]
+        V = V_S[:, idx]
+        B = V @ np.diag(kappa.astype(complex)) @ V.conj().T
+        B = 0.5 * (B + B.conj().T)
+        return B
+
+    @staticmethod
+    def log_norm(B, variant="analytical", n_mc=100_000):
+        """
+        Compute the (negative) log normalization constant.
+
+        ``log_c = -log C(B)`` where
+        ``C(B) = ∫_{||z||=1} exp(z^H B z) dz``.
+
+        The pdf satisfies f(z) = exp(log_c + Re(z^H B z)).
+
+        Parameters
+        ----------
+        B : array_like, shape (d, d), Hermitian
+        variant : {'analytical', 'monte_carlo'}
+            'analytical' (default): uses the divided-differences formula
+            (exact for distinct eigenvalues; near-duplicate eigenvalues are
+            perturbed slightly).
+            'monte_carlo': Monte Carlo estimate.
+        n_mc : int
+            Number of Monte Carlo samples (only used when variant='monte_carlo').
+
+        Returns
+        -------
+        float
+            Negative log of the normalization integral.
+        """
+        B = np.asarray(B, dtype=complex)
+        d = B.shape[0]
+
+        if variant == "monte_carlo":
+            log_int = ComplexBinghamDistribution._log_norm_monte_carlo(B, n_mc)
+            return -log_int
+
+        # Eigenvalue shift for numerical stability
+        eigenvalues = np.linalg.eigvalsh(B).real
+        shift = float(eigenvalues.max())
+        eigenvalues = eigenvalues - shift
+
+        # Nearly uniform case: all eigenvalues ≈ 0
+        if np.all(np.abs(eigenvalues) < 1e-3):
+            log_int = np.log(2.0 * np.pi**d / math.factorial(d - 1))
+            return -(log_int + shift)
+
+        eigenvalues = ComplexBinghamDistribution._make_distinct(eigenvalues)
+        log_int_shifted = ComplexBinghamDistribution._log_norm_analytical(
+            eigenvalues, d
+        )
+        # Correct for eigenvalue shift: C(B) = exp(shift) * C(B - shift*I)
+        log_int = log_int_shifted + shift
+        return -log_int
+
+    # ------------------------------------------------------------------
+    # Internal helpers for log_norm
+    # ------------------------------------------------------------------
+
+    @staticmethod
+    def _log_norm_analytical(eigenvalues, d):
+        """
+        log C from the divided-differences (partial-fractions) formula:
+
+            C(κ) = 2π^d · Σ_k exp(κ_k) / ∏_{j≠k} (κ_k - κ_j)
+
+        All eigenvalues must be distinct.
+        """
+        total = 0.0
+        for k in range(d):
+            denom = 1.0
+            for j in range(d):
+                if j != k:
+                    denom *= eigenvalues[k] - eigenvalues[j]
+            total += np.exp(float(eigenvalues[k])) / denom
+
+        log_c = np.log(2.0 * np.pi**d * total)
+        return float(log_c)
+
+    @staticmethod
+    def _make_distinct(eigenvalues, min_gap=1e-2):
+        """
+        Perturb eigenvalues (in descending order) so that consecutive pairs
+        differ by at least ``min_gap``. Mirrors the MATLAB workaround
+        ``makeSureEigenvaluesAreNotTooClose``.
+        """
+        lam = np.sort(eigenvalues)[::-1].copy()  # descending
+        for i in range(len(lam) - 1):
+            if lam[i] - lam[i + 1] < min_gap:
+                lam[i + 1] = lam[i] - min_gap
+        return lam  # keep descending order (sign of products handled in formula)
+
+    @staticmethod
+    def _log_norm_monte_carlo(B, n=100_000):
+        """Monte Carlo estimate of log C(B)."""
+        d = B.shape[0]
+        volume = 2.0 * np.pi**d / math.factorial(d - 1)
+        rng = np.random.default_rng()
+        Z = rng.standard_normal((d, n)) + 1j * rng.standard_normal((d, n))
+        Z /= np.linalg.norm(Z, axis=0, keepdims=True)
+        val = np.einsum("id,ij,jd->d", Z.conj(), B, Z).real
+        return float(np.log(np.mean(np.exp(val)) * volume))
+
+    # ------------------------------------------------------------------
+    # Cauchy-Schwarz divergence
+    # ------------------------------------------------------------------
+
+    @staticmethod
+    def cauchy_schwarz_divergence(cB1, cB2):
+        """
+        Cauchy-Schwarz divergence between two complex Bingham distributions.
+
+            D_CS = log_c(B1+B2) - ½ (log_c(2·B1) + log_c(2·B2))
+
+        where log_c uses the sign convention log_c = -log C(B).
+
+        Parameters
+        ----------
+        cB1, cB2 : ComplexBinghamDistribution or array_like
+            Distributions (or parameter matrices) to compare.
+
+        Returns
+        -------
+        float
+        """
+        # Coerce each argument independently: a single isinstance check on
+        # cB1 would break mixed calls (distribution, matrix) with an
+        # AttributeError on cB2.B.
+        B1 = cB1.B if isinstance(cB1, ComplexBinghamDistribution) else np.asarray(cB1, dtype=complex)
+        B2 = cB2.B if isinstance(cB2, ComplexBinghamDistribution) else np.asarray(cB2, dtype=complex)
+
+        log_c1 = ComplexBinghamDistribution.log_norm(2.0 * B1)
+        log_c2 = ComplexBinghamDistribution.log_norm(2.0 * B2)
+        log_c3 = ComplexBinghamDistribution.log_norm(B1 + B2)
+
+        return float(log_c3 - 0.5 * (log_c1 + log_c2))
diff --git a/pyrecest/tests/distributions/test_complex_bingham_distribution.py b/pyrecest/tests/distributions/test_complex_bingham_distribution.py
new file mode 100644
index 000000000..2cd0fb18a
--- /dev/null
+++ b/pyrecest/tests/distributions/test_complex_bingham_distribution.py
@@ -0,0 +1,218 @@
+import unittest
+
+import math
+import numpy as np
+import numpy.testing as npt
+
+from pyrecest.distributions import ComplexBinghamDistribution
+
+
+class TestComplexBinghamDistribution(unittest.TestCase):
+    """Tests for ComplexBinghamDistribution."""
+
+    def _make_dist_2d(self):
+        """Return a 2D complex Bingham distribution with known parameters."""
+        # Diagonal B in C^2; after eigen-shift the eigenvalues are (-3, 0).
+        kappa = np.array([-3.0, 0.0])
+        B = np.diag(kappa.astype(complex))
+        return ComplexBinghamDistribution(B)
+
+    def _make_dist_3d(self):
+        """Return a 3D complex Bingham distribution."""
+        kappa = np.array([-5.0, -2.0, 0.0])
+        B = np.diag(kappa.astype(complex))
+        return ComplexBinghamDistribution(B)
+
+    # ------------------------------------------------------------------
+    # Constructor
+    # ------------------------------------------------------------------
+
+    def test_constructor_diagonal(self):
+        """Constructor succeeds for a valid diagonal Hermitian matrix."""
+        dist = self._make_dist_2d()
+        self.assertEqual(dist.complex_dim, 2)
+        self.assertEqual(dist.input_dim, 2)
+
+    def test_constructor_non_hermitian_raises(self):
+        """Constructor raises ValueError for a non-Hermitian matrix."""
+        B_bad = np.array([[1.0, 1.0], [0.0, 1.0]], dtype=complex)
+        with self.assertRaises(ValueError):
+            ComplexBinghamDistribution(B_bad)
+
+    def test_constructor_full_hermitian(self):
+        """Constructor works for a full non-diagonal Hermitian matrix."""
+        A = np.array([[1.0, 0.5 + 0.3j], [0.5 - 0.3j, 2.0]], dtype=complex)
+        evals = np.linalg.eigvalsh(A)
+        B = A - evals.max() * np.eye(2)
+        dist = ComplexBinghamDistribution(B)
+        self.assertEqual(dist.complex_dim, 2)
+
+    # ------------------------------------------------------------------
+    # Normalization constant
+    # ------------------------------------------------------------------
+
+    def test_log_norm_uniform_limit(self):
+        """When B = 0 (uniform), C(B) equals the surface area of S^{2d-1}."""
+        for d in (2, 3):
+            B = np.zeros((d, d), dtype=complex)
+            expected = np.log(2.0 * np.pi**d / math.factorial(d - 1))
+            log_c = ComplexBinghamDistribution.log_norm(B)
+            npt.assert_almost_equal(-log_c, expected, decimal=6)
+
+    def test_log_norm_2d_analytical_formula(self):
+        """For D=2 with kappa=(kappa1, 0), C = 2pi^2 (exp(kappa1)-1)/kappa1."""
+        kappa1 = -3.0
+        B = np.diag([kappa1, 0.0]).astype(complex)
+        expected_C = 2.0 * np.pi**2 * (np.exp(kappa1) - 1.0) / kappa1
+        log_c = ComplexBinghamDistribution.log_norm(B)
+        npt.assert_almost_equal(-log_c, np.log(expected_C), decimal=6)
+
+    def test_log_norm_analytical_vs_monte_carlo(self):
+        """Analytical and Monte Carlo log_norm agree to within ~1 decimal for D=2."""
+        kappa1 = -3.0
+        B = np.diag([kappa1, 0.0]).astype(complex)
+        log_c_analytical = ComplexBinghamDistribution.log_norm(B, variant="analytical")
+        log_c_mc = ComplexBinghamDistribution.log_norm(
+            B, variant="monte_carlo", n_mc=50_000
+        )
+        npt.assert_almost_equal(-log_c_analytical, -log_c_mc, decimal=1)
+
+    # ------------------------------------------------------------------
+    # PDF
+    # ------------------------------------------------------------------
+
+    def test_pdf_unit_norm_positive(self):
+        """PDF values are positive at unit vectors."""
+        dist = self._make_dist_2d()
+        z = np.array([1.0 + 0j, 0.0 + 0j])
+        p = dist.pdf(z)
+        self.assertGreater(float(p), 0.0)
+
+    def test_pdf_normalizes_to_one(self):
+        """Numerical integration of the pdf over the sphere gives approx 1."""
+        dist = self._make_dist_2d()
+        # Parameterise S^3: z1 = cos(t)e^{ip1}, z2 = sin(t)e^{ip2}
+        n_phi = 50
+        n_theta = 50
+        phi1 = np.linspace(0, 2 * np.pi, n_phi, endpoint=False)
+        phi2 = np.linspace(0, 2 * np.pi, n_phi, endpoint=False)
+        theta = np.linspace(0, np.pi / 2, n_theta, endpoint=False)
+        dtheta = theta[1] - theta[0]
+        dphi = phi1[1] - phi1[0]
+
+        integral = 0.0
+        for t in theta:
+            for p1 in phi1:
+                for p2 in phi2:
+                    z = np.array(
+                        [
+                            np.cos(t) * np.exp(1j * p1),
+                            np.sin(t) * np.exp(1j * p2),
+                        ]
+                    )
+                    jacobian = np.sin(t) * np.cos(t)
+                    integral += float(dist.pdf(z)) * jacobian * dtheta * dphi * dphi
+
+        npt.assert_almost_equal(integral, 1.0, decimal=2)
+
+    def test_pdf_mode_is_maximum(self):
+        """The mode (eigenvector for max eigenvalue) gives the largest pdf."""
+        kappa = np.array([-5.0, 0.0])
+        B = np.diag(kappa.astype(complex))
+        dist = ComplexBinghamDistribution(B)
+
+        mode = np.array([0.0 + 0j, 1.0 + 0j])
+        p_mode = dist.pdf(mode)
+        other = np.array([1.0 + 0j, 0.0 + 0j])
+        p_other = dist.pdf(other)
+        self.assertGreaterEqual(float(p_mode), float(p_other))
+
+    def test_pdf_batch(self):
+        """pdf works for batches of columns."""
+        dist = self._make_dist_2d()
+        z1 = np.array([1.0 + 0j, 0.0 + 0j])
+        z2 = np.array([0.0 + 0j, 1.0 + 0j])
+        Z = np.stack([z1, z2], axis=1)
+        p = dist.pdf(Z)
+        npt.assert_array_almost_equal(
+            p, [dist.pdf(z1), dist.pdf(z2)], decimal=10
+        )
+
+    # ------------------------------------------------------------------
+    # Sampling
+    # ------------------------------------------------------------------
+
+    def test_sample_unit_norm(self):
+        """Samples lie on the complex unit sphere."""
+        dist = self._make_dist_2d()
+        Z = dist.sample(100)
+        norms = np.linalg.norm(Z, axis=0)
+        npt.assert_array_almost_equal(norms, np.ones(100), decimal=12)
+
+    def test_sample_shape(self):
+        """sample() returns array of shape (d, n)."""
+        dist = self._make_dist_3d()
+        Z = dist.sample(50)
+        self.assertEqual(Z.shape, (3, 50))
+
+    def test_sample_scatter_consistent(self):
+        """Empirical scatter aligns with B's eigenvectors."""
+        n = 10_000
+        kappa = np.array([-5.0, 0.0])
+        B = np.diag(kappa.astype(complex))
+        dist = ComplexBinghamDistribution(B)
+        Z = dist.sample(n)
+        S_empirical = Z @ Z.conj().T / n
+        evals = np.linalg.eigvalsh(S_empirical)
+        self.assertGreater(evals[-1], 0.5)
+
+    # ------------------------------------------------------------------
+    # Cauchy-Schwarz Divergence
+    # ------------------------------------------------------------------
+
+    def test_cauchy_schwarz_divergence_zero_for_same(self):
+        """D_CS(p, p) = 0 for identical distributions."""
+        dist = self._make_dist_2d()
+        d = ComplexBinghamDistribution.cauchy_schwarz_divergence(dist, dist)
+        npt.assert_almost_equal(d, 0.0, decimal=6)
+
+    def test_cauchy_schwarz_divergence_nonnegative(self):
+        """D_CS >= 0 for different distributions."""
+        d1 = self._make_dist_2d()
+        B2 = np.diag(np.array([-1.0, 0.0]).astype(complex))
+        d2 = ComplexBinghamDistribution(B2)
+        divergence = ComplexBinghamDistribution.cauchy_schwarz_divergence(d1, d2)
+        self.assertGreaterEqual(divergence, -1e-6)
+
+    # ------------------------------------------------------------------
+    # fit / estimate_parameter_matrix
+    # ------------------------------------------------------------------
+
+    def test_fit_recovers_eigenvalues(self):
+        """fit() from many samples recovers eigenvalues to 1 decimal place."""
+        kappa = np.array([-3.0, 0.0])
+        B_true = np.diag(kappa.astype(complex))
+        dist_true = ComplexBinghamDistribution(B_true)
+
+        Z = dist_true.sample(5_000)
+        dist_fit = ComplexBinghamDistribution.fit(Z)
+
+        evals_true = np.sort(np.linalg.eigvalsh(B_true))
+        evals_fit = np.sort(np.linalg.eigvalsh(dist_fit.B))
+        npt.assert_array_almost_equal(evals_true, evals_fit, decimal=1)
+
+    # ------------------------------------------------------------------
+    # log_norm with non-diagonal matrix
+    # ------------------------------------------------------------------
+
+    def test_log_norm_non_diagonal(self):
+        """log_norm works for full Hermitian matrices."""
+        A = np.array([[1.0, 0.5 + 0.3j], [0.5 - 0.3j, 2.0]], dtype=complex)
+        evals = np.linalg.eigvalsh(A)
+        B = A - evals.max() * np.eye(2)
+        log_c = ComplexBinghamDistribution.log_norm(B)
+        self.assertTrue(np.isfinite(log_c))
+
+
+if __name__ == "__main__":
+    unittest.main()