From d6a9c325c3d9b3b4147df88b64932f81b2769d44 Mon Sep 17 00:00:00 2001
From: Filippo Miatto
Date: Wed, 29 Nov 2023 15:32:19 -0800
Subject: [PATCH] fix_types (#310)

Fixes typing in the numpy backend (`np.array` is the array constructor, the type is `np.ndarray`).
---
 mrmustard/math/backend_numpy.py | 181 +++++++++++++++++---------
 mrmustard/utils/settings.py     |   3 +-
 polyval2d-high-precision        |   1 +
 3 files changed, 98 insertions(+), 87 deletions(-)
 create mode 160000 polyval2d-high-precision

diff --git a/mrmustard/math/backend_numpy.py b/mrmustard/math/backend_numpy.py
index 610097054..94d7267f2 100644
--- a/mrmustard/math/backend_numpy.py
+++ b/mrmustard/math/backend_numpy.py
@@ -55,39 +55,41 @@ def __init__(self):
     def __repr__(self) -> str:
         return "BackendNumpy()"
 
-    def abs(self, array: np.array) -> np.array:
+    def abs(self, array: np.ndarray) -> np.ndarray:
         return np.abs(array)
 
-    def any(self, array: np.array) -> np.array:
+    def any(self, array: np.ndarray) -> np.ndarray:
         return np.any(array)
 
-    def arange(self, start: int, limit: int = None, delta: int = 1, dtype=np.float64) -> np.array:
+    def arange(
+        self, start: int, limit: Optional[int] = None, delta: int = 1, dtype=np.float64
+    ) -> np.ndarray:
         return np.arange(start, limit, delta, dtype=dtype)
 
-    def asnumpy(self, tensor: np.array) -> np.array:
+    def asnumpy(self, tensor: np.ndarray) -> np.ndarray:
         if isinstance(tensor, np.ndarray):
             return tensor
         return np.array(tensor)
 
-    def assign(self, tensor: np.array, value: np.array) -> np.array:
+    def assign(self, tensor: np.ndarray, value: np.ndarray) -> np.ndarray:
         tensor = value
         return tensor
 
-    def astensor(self, array: Union[np.ndarray, np.array], dtype=None) -> np.array:
+    def astensor(self, array: np.ndarray, dtype=None) -> np.ndarray:
         dtype = dtype or np.float64
         return self.cast(np.array(array), dtype=dtype)
 
-    def atleast_1d(self, array: np.array, dtype=None) -> np.array:
+    def atleast_1d(self, array: np.ndarray, dtype=None) -> np.ndarray:
         return self.cast(np.reshape(array, [-1]), dtype)
 
-    def block(self, blocks: List[List[np.array]], axes=(-2, -1)) -> np.array:
+    def block(self, blocks: List[List[np.ndarray]], axes=(-2, -1)) -> np.ndarray:
         rows = [self.concat(row, axis=axes[1]) for row in blocks]
         return self.concat(rows, axis=axes[0])
 
-    def boolean_mask(self, tensor: np.array, mask: np.array) -> np.ndarray:
+    def boolean_mask(self, tensor: np.ndarray, mask: np.ndarray) -> np.ndarray:
         return np.array([t for i, t in enumerate(tensor) if mask[i]])
 
-    def cast(self, array: np.array, dtype=None) -> np.array:
+    def cast(self, array: np.ndarray, dtype=None) -> np.ndarray:
         if dtype is None:
             return array
 
@@ -95,29 +97,29 @@ def cast(self, array: np.array, dtype=None) -> np.array:
         array = self.real(array)
         return np.array(array, dtype=dtype)
 
-    def clip(self, array, a_min, a_max) -> np.array:
+    def clip(self, array, a_min, a_max) -> np.ndarray:
         return np.clip(array, a_min, a_max)
 
-    def concat(self, values: List[np.array], axis: int) -> np.array:
+    def concat(self, values: List[np.ndarray], axis: int) -> np.ndarray:
         # tf.concat can concatenate lists of scalars, while np.concatenate errors
         try:
             return np.concatenate(values, axis)
         except ValueError:
             return np.array(values)
 
-    def conj(self, array: np.array) -> np.array:
+    def conj(self, array: np.ndarray) -> np.ndarray:
         return np.conj(array)
 
-    def cos(self, array: np.array) -> np.array:
+    def cos(self, array: np.ndarray) -> np.ndarray:
         return np.cos(array)
 
-    def cosh(self, array: np.array) -> np.array:
+    def cosh(self, array: np.ndarray) -> np.ndarray:
         return np.cosh(array)
 
-    def det(self, matrix: np.array) -> np.array:
+    def det(self, matrix: np.ndarray) -> np.ndarray:
         return np.linalg.det(matrix)
 
-    def diag(self, array: np.array, k: int = 0) -> np.array:
+    def diag(self, array: np.ndarray, k: int = 0) -> np.ndarray:
         if len(array.shape) == 1:
             return np.diag(array, k=k)
         elif len(array.shape) == 2:
@@ -140,12 +142,12 @@ def diag(self, array: np.array, k: int = 0) -> np.array:
         )
         return ret.reshape(original_sh[:-1] + inner_shape)
 
-    def diag_part(self, array: np.array, k: int) -> np.array:
+    def diag_part(self, array: np.ndarray, k: int) -> np.ndarray:
         ret = np.diagonal(array, offset=k, axis1=-2, axis2=-1)
         ret.flags.writeable = True
         return ret
 
-    def set_diag(self, array: np.array, diag: np.array, k: int) -> np.array:
+    def set_diag(self, array: np.ndarray, diag: np.ndarray, k: int) -> np.ndarray:
         i = np.arange(0, array.shape[-2] - abs(k))
         if k < 0:
             i -= array.shape[-2] - abs(k)
@@ -158,76 +160,78 @@ def set_diag(self, array: np.array, diag: np.array, k: int) -> np.array:
         return array
 
-    def einsum(self, string: str, *tensors) -> np.array:
+    def einsum(self, string: str, *tensors) -> Optional[np.ndarray]:
         if type(string) is str:
             return np.einsum(string, *tensors)
         return None
         # provide same functionality as numpy.einsum or upgrade to opt_einsum
 
-    def exp(self, array: np.array) -> np.array:
+    def exp(self, array: np.ndarray) -> np.ndarray:
         return np.exp(array)
 
-    def expand_dims(self, array: np.array, axis: int) -> np.array:
+    def expand_dims(self, array: np.ndarray, axis: int) -> np.ndarray:
         return np.expand_dims(array, axis)
 
-    def expm(self, matrix: np.array) -> np.array:
+    def expm(self, matrix: np.ndarray) -> np.ndarray:
         return scipy_expm(matrix)
 
-    def eye(self, size: int, dtype=np.float64) -> np.array:
+    def eye(self, size: int, dtype=np.float64) -> np.ndarray:
         return np.eye(size, dtype=dtype)
 
-    def eye_like(self, array: np.array) -> np.ndarray:
+    def eye_like(self, array: np.ndarray) -> np.ndarray:
         return np.eye(array.shape[-1], dtype=array.dtype)
 
     def from_backend(self, value) -> bool:
         return isinstance(value, np.ndarray)
 
-    def gather(self, array: np.array, indices: np.array, axis: int = 0) -> np.array:
+    def gather(self, array: np.ndarray, indices: np.ndarray, axis: int = 0) -> np.ndarray:
         return np.take(array, indices, axis=axis)
 
-    def imag(self, array: np.array) -> np.array:
+    def imag(self, array: np.ndarray) -> np.ndarray:
         return np.imag(array)
 
-    def inv(self, tensor: np.array) -> np.array:
+    def inv(self, tensor: np.ndarray) -> np.ndarray:
         return np.linalg.inv(tensor)
 
-    def is_trainable(self, tensor: np.array) -> bool:  # pylint: disable=unused-argument
+    def is_trainable(self, tensor: np.ndarray) -> bool:  # pylint: disable=unused-argument
         return False
 
-    def lgamma(self, x: np.array) -> np.array:
+    def lgamma(self, x: np.ndarray) -> np.ndarray:
         return np.array([mlgamma(v) for v in x])
 
-    def log(self, x: np.array) -> np.array:
+    def log(self, x: np.ndarray) -> np.ndarray:
         return np.log(x)
 
     @Autocast()
     def matmul(
         self,
-        a: np.array,
-        b: np.array,
+        a: np.ndarray,
+        b: np.ndarray,
         transpose_a=False,
         transpose_b=False,
         adjoint_a=False,
         adjoint_b=False,
-    ) -> np.array:
+    ) -> np.ndarray:
         a = a.T if transpose_a else a
         b = b.T if transpose_b else b
         a = np.conj(a) if adjoint_a else a
         b = np.conj(b) if adjoint_b else b
         return np.matmul(a, b)
 
-    def make_complex(self, real: np.array, imag: np.array) -> np.array:
+    def make_complex(self, real: np.ndarray, imag: np.ndarray) -> np.ndarray:
         return real + 1j * imag
 
     @Autocast()
-    def matvec(self, a: np.array, b: np.array, transpose_a=False, adjoint_a=False) -> np.array:
+    def matvec(
+        self, a: np.ndarray, b: np.ndarray, transpose_a=False, adjoint_a=False
+    ) -> np.ndarray:
         return self.matmul(a, b, transpose_a, adjoint_a)
 
     @Autocast()
-    def maximum(self, a: np.array, b: np.array) -> np.array:
+    def maximum(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
         return np.maximum(a, b)
 
     @Autocast()
-    def minimum(self, a: np.array, b: np.array) -> np.array:
+    def minimum(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
         return np.minimum(a, b)
 
     def new_variable(
@@ -242,60 +246,60 @@ def new_constant(self, value, name: str, dtype=np.float64): # pylint: disable=unused-argument
         return np.array(value, dtype=dtype)
 
-    def norm(self, array: np.array) -> np.array:
+    def norm(self, array: np.ndarray) -> np.ndarray:
         return np.linalg.norm(array)
 
-    def ones(self, shape: Sequence[int], dtype=np.float64) -> np.array:
+    def ones(self, shape: Sequence[int], dtype=np.float64) -> np.ndarray:
         return np.ones(shape, dtype=dtype)
 
-    def ones_like(self, array: np.array) -> np.array:
+    def ones_like(self, array: np.ndarray) -> np.ndarray:
         return np.ones(array.shape, dtype=array.dtype)
 
     @Autocast()
-    def outer(self, array1: np.array, array2: np.array) -> np.array:
+    def outer(self, array1: np.ndarray, array2: np.ndarray) -> np.ndarray:
         return np.tensordot(array1, array2, [[], []])
 
     def pad(
         self,
-        array: np.array,
+        array: np.ndarray,
         paddings: Sequence[Tuple[int, int]],
         mode="CONSTANT",
         constant_values=0,
-    ) -> np.array:
+    ) -> np.ndarray:
         if mode == "CONSTANT":
             mode = "constant"
         return np.pad(array, paddings, mode, constant_values=constant_values)
 
     @staticmethod
-    def pinv(matrix: np.array) -> np.array:
+    def pinv(matrix: np.ndarray) -> np.ndarray:
         return np.linalg.pinv(matrix)
 
     @Autocast()
-    def pow(self, x: np.array, y: float) -> np.array:
+    def pow(self, x: np.ndarray, y: float) -> np.ndarray:
         return np.power(x, y)
 
-    def real(self, array: np.array) -> np.array:
+    def real(self, array: np.ndarray) -> np.ndarray:
         return np.real(array)
 
-    def reshape(self, array: np.array, shape: Sequence[int]) -> np.array:
+    def reshape(self, array: np.ndarray, shape: Sequence[int]) -> np.ndarray:
         return np.reshape(array, shape)
 
-    def sin(self, array: np.array) -> np.array:
+    def sin(self, array: np.ndarray) -> np.ndarray:
         return np.sin(array)
 
-    def sinh(self, array: np.array) -> np.array:
+    def sinh(self, array: np.ndarray) -> np.ndarray:
         return np.sinh(array)
 
-    def solve(self, matrix: np.array, rhs: np.array) -> np.array:
+    def solve(self, matrix: np.ndarray, rhs: np.ndarray) -> np.ndarray:
         if len(rhs.shape) == len(matrix.shape) - 1:
             rhs = np.expand_dims(rhs, -1)
             return np.linalg.solve(matrix, rhs)[..., 0]
         return np.linalg.solve(matrix, rhs)
 
-    def sqrt(self, x: np.array, dtype=None) -> np.array:
+    def sqrt(self, x: np.ndarray, dtype=None) -> np.ndarray:
         return np.sqrt(self.cast(x, dtype))
 
-    def sum(self, array: np.array, axes: Sequence[int] = None):
+    def sum(self, array: np.ndarray, axes: Sequence[int] = None):
         if axes is None:
             return np.sum(array)
 
         ret = array
@@ -305,29 +309,29 @@ def sum(self, array: np.array, axes: Sequence[int] = None):
         return ret
 
     @Autocast()
-    def tensordot(self, a: np.array, b: np.array, axes: List[int]) -> np.array:
+    def tensordot(self, a: np.ndarray, b: np.ndarray, axes: List[int]) -> np.ndarray:
         return np.tensordot(a, b, axes)
 
-    def tile(self, array: np.array, repeats: Sequence[int]) -> np.array:
+    def tile(self, array: np.ndarray, repeats: Sequence[int]) -> np.ndarray:
         return np.tile(array, repeats)
 
-    def trace(self, array: np.array, dtype=None) -> np.array:
+    def trace(self, array: np.ndarray, dtype=None) -> np.ndarray:
         return self.cast(np.trace(array), dtype)
 
-    def transpose(self, a: np.array, perm: Sequence[int] = None) -> np.array:
+    def transpose(self, a: np.ndarray, perm: Sequence[int] = None) -> Optional[np.ndarray]:
         if a is None:
             return None  # TODO: remove and address None inputs where tranpose is used
         return np.transpose(a, axes=perm)
 
     @Autocast()
-    def update_tensor(self, tensor: np.array, indices: np.array, values: np.array):
+    def update_tensor(self, tensor: np.ndarray, indices: np.ndarray, values: np.ndarray):
         ret = deepcopy(tensor)
         for n_index, index in enumerate(indices):
             ret[tuple(index)] = values[n_index]
         return ret
 
     @Autocast()
-    def update_add_tensor(self, tensor: np.array, indices: np.array, values: np.array):
+    def update_add_tensor(self, tensor: np.ndarray, indices: np.ndarray, values: np.ndarray):
         # https://stackoverflow.com/questions/65734836/numpy-equivalent-to-tf-tensor-scatter-nd-add-method
         indices = np.array(indices)  # figure out why we need this
         indices = tuple(indices.reshape(-1, indices.shape[-1]).T)
@@ -335,10 +339,10 @@ def update_add_tensor(self, tensor: np.array, indices: np.array, values: np.arra
         np.add.at(ret, indices, values)
         return ret
 
-    def zeros(self, shape: Sequence[int], dtype=np.float64) -> np.array:
+    def zeros(self, shape: Sequence[int], dtype=np.float64) -> np.ndarray:
         return np.zeros(shape, dtype=dtype)
 
-    def zeros_like(self, array: np.array) -> np.array:
+    def zeros_like(self, array: np.ndarray) -> np.ndarray:
         return np.zeros(np.array(array).shape, dtype=array.dtype)
 
     def map_fn(self, func, elements):
@@ -380,18 +384,18 @@ def prob(self, x):
         return Generator(loc, scale_tril)
 
     @staticmethod
-    def eigvals(tensor: np.array) -> np.ndarray:
+    def eigvals(tensor: np.ndarray) -> np.ndarray:
         return np.linalg.eigvals(tensor)
 
     @staticmethod
-    def xlogy(x: np.array, y: np.array) -> np.ndarray:
+    def xlogy(x: np.ndarray, y: np.ndarray) -> np.ndarray:
         return scipy_xlogy(x, y)
 
     @staticmethod
-    def eigh(tensor: np.array) -> np.ndarray:
+    def eigh(tensor: np.ndarray) -> tuple:
         return np.linalg.eigh(tensor)
 
-    def sqrtm(self, tensor: np.array, rtol=1e-05, atol=1e-08) -> np.ndarray:
+    def sqrtm(self, tensor: np.ndarray, rtol=1e-05, atol=1e-08) -> np.ndarray:
         if np.allclose(tensor, 0, rtol=rtol, atol=atol):
             return self.zeros_like(tensor)
         return scipy_sqrtm(tensor)
@@ -405,8 +409,8 @@ def DefaultEuclideanOptimizer() -> None:
         return None
 
     def hermite_renormalized(
-        self, A: np.array, B: np.array, C: np.array, shape: Tuple[int]
-    ) -> np.array:
+        self, A: np.ndarray, B: np.ndarray, C: np.ndarray, shape: Tuple[int]
+    ) -> np.ndarray:
         r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor
         series of :math:`exp(C + Bx + 1/2*Ax^2)` at zero, where the series has :math:`sqrt(n!)`
         at the denominator rather than :math:`n!`. It computes all the amplitudes within the
@@ -444,20 +448,20 @@ def hermite_renormalized(
         return G
 
     def hermite_renormalized_batch(
-        self, A: np.array, B: np.array, C: np.array, shape: Tuple[int]
-    ) -> np.array:
+        self, A: np.ndarray, B: np.ndarray, C: np.ndarray, shape: Tuple[int]
+    ) -> np.ndarray:
         G = vanilla_batch(tuple(shape), A, B, C)
         return G
 
     def hermite_renormalized_binomial(
         self,
-        A: np.array,
-        B: np.array,
-        C: np.array,
+        A: np.ndarray,
+        B: np.ndarray,
+        C: np.ndarray,
         shape: Tuple[int],
         max_l2: Optional[float],
         global_cutoff: Optional[int],
-    ) -> np.array:
+    ) -> np.ndarray:
         r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor
         series of :math:`exp(C + Bx + 1/2*Ax^2)` at zero, where the series has :math:`sqrt(n!)`
         at the denominator rather than :math:`n!`. The computation fills a tensor of given shape
@@ -486,7 +490,7 @@ def hermite_renormalized_binomial(
         return G
 
-    def reorder_AB_bargmann(self, A: np.array, B: np.array) -> Tuple[np.array, np.array]:
+    def reorder_AB_bargmann(self, A: np.ndarray, B: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
         r"""In mrmustard.math.numba.compactFock~ dimensions of the Fock representation are ordered like
         [mode0,mode0,mode1,mode1,...] while in mrmustard.physics.bargmann the ordering is
         [mode0,mode1,...,mode0,mode1,...]. Here we reorder A and B.
         """
@@ -497,14 +501,17 @@ def reorder_AB_bargmann(self, A: np.array, B: np.ar
         return A, B
 
     def hermite_renormalized_diagonal(
-        self, A: np.array, B: np.array, C: np.array, cutoffs: Tuple[int]
-    ) -> np.array:
+        self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int]
+    ) -> np.ndarray:
+        r"""First, reorder A and B parameters of Bargmann representation to match conventions in mrmustard.math.numba.compactFock~
+        Then, calculate the required renormalized multidimensional Hermite polynomial.
+        """
         A, B = self.reorder_AB_bargmann(A, B)
         return self.hermite_renormalized_diagonal_reorderedAB(A, B, C, cutoffs=cutoffs)
 
     def hermite_renormalized_diagonal_reorderedAB(
-        self, A: np.array, B: np.array, C: np.array, cutoffs: Tuple[int]
-    ) -> np.array:
+        self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int]
+    ) -> np.ndarray:
         r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor
         series of :math:`exp(C + Bx - Ax^2)` at zero, where the series has :math:`sqrt(n!)`
         at the denominator rather than :math:`n!`. Note the minus sign in front of ``A``.
@@ -526,15 +533,15 @@ def hermite_renormalized_diagonal_reorderedAB(
         return poly0
 
     def hermite_renormalized_diagonal_batch(
-        self, A: np.array, B: np.array, C: np.array, cutoffs: Tuple[int]
-    ) -> np.array:
+        self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int]
+    ) -> np.ndarray:
         r"""Same as hermite_renormalized_diagonal but works for a batch of different B's."""
         A, B = self.reorder_AB_bargmann(A, B)
         return self.hermite_renormalized_diagonal_reorderedAB_batch(A, B, C, cutoffs=cutoffs)
 
     def hermite_renormalized_diagonal_reorderedAB_batch(
-        self, A: np.array, B: np.array, C: np.array, cutoffs: Tuple[int]
-    ) -> np.array:
+        self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int]
+    ) -> np.ndarray:
         r"""Same as hermite_renormalized_diagonal_reorderedAB but works for a batch of different B's.
 
         Args:
@@ -551,8 +558,8 @@ def hermite_renormalized_diagonal_reorderedAB_batch(
         return poly0
 
     def hermite_renormalized_1leftoverMode(
-        self, A: np.array, B: np.array, C: np.array, cutoffs: Tuple[int]
-    ) -> np.array:
+        self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int]
+    ) -> np.ndarray:
         r"""First, reorder A and B parameters of Bargmann representation to match conventions in mrmustard.math.numba.compactFock~
         Then, calculate the required renormalized multidimensional Hermite polynomial.
         """
@@ -560,8 +567,8 @@ def hermite_renormalized_1leftoverMode(
         return self.hermite_renormalized_1leftoverMode_reorderedAB(A, B, C, cutoffs=cutoffs)
 
     def hermite_renormalized_1leftoverMode_reorderedAB(
-        self, A: np.array, B: np.array, C: np.array, cutoffs: Tuple[int]
-    ) -> np.array:
+        self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int]
+    ) -> np.ndarray:
         r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor
         series of :math:`exp(C + Bx - Ax^2)` at zero, where the series has :math:`sqrt(n!)`
         at the denominator rather than :math:`n!`. Note the minus sign in front of ``A``.
@@ -582,10 +589,12 @@ def hermite_renormalized_1leftoverMode_reorderedAB(
         poly0, _, _, _, _ = hermite_multidimensional_1leftoverMode(A, B, C, cutoffs)
         return poly0
 
+    @staticmethod
     def getitem(tensor, *, key):
         value = np.array(tensor)[key]
         return value
 
+    @staticmethod
     def setitem(tensor, value, *, key):
         _tensor = np.array(tensor)
         value = np.array(value)
diff --git a/mrmustard/utils/settings.py b/mrmustard/utils/settings.py
index b91677305..f79377753 100644
--- a/mrmustard/utils/settings.py
+++ b/mrmustard/utils/settings.py
@@ -15,6 +15,7 @@
 """A module containing the settings.
 """
 
+from typing import Any
 import os
 from rich import print
 import rich.table
@@ -31,7 +32,7 @@ class ImmutableSetting:
         name (str): the name of this setting
     """
 
-    def __init__(self, value: any, name: str) -> None:
+    def __init__(self, value: Any, name: str) -> None:
         self._value = value
         self._name = name
         self._is_immutable = False
diff --git a/polyval2d-high-precision b/polyval2d-high-precision
new file mode 160000
index 000000000..7c4a4afcd
--- /dev/null
+++ b/polyval2d-high-precision
@@ -0,0 +1 @@
+Subproject commit 7c4a4afcd28331c92a2e42c3ab0bd1fe01d076c9
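
---
Notes (reviewer sketches; not part of the commit)

Why the annotation change matters: `np.array` is the array constructor (a
function), while `np.ndarray` is the array class, so only the latter is
meaningful as a type annotation. A minimal sketch; the `scale` function is
hypothetical, not part of MrMustard:

    import numpy as np

    def scale(x: np.ndarray, k: float) -> np.ndarray:
        # np.ndarray is the class of NumPy arrays, so it is valid here.
        return k * x

    a = np.array([1.0, 2.0])          # np.array builds an array...
    assert isinstance(a, np.ndarray)  # ...whose type is np.ndarray.
    # Annotating with np.array, as the old signatures did, is rejected by
    # static checkers: a function is not valid as a type.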
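`update_add_tensor` reproduces `tf.tensor_scatter_nd_add` with `np.add.at`,
per the Stack Overflow link quoted in the diff. A small sketch of the same
index-flattening trick, with hypothetical data:

    import numpy as np

    tensor = np.zeros((3, 3))
    indices = np.array([[0, 1], [2, 2], [0, 1]])  # note the duplicate [0, 1]
    values = np.array([1.0, 5.0, 2.0])

    # Turn the (batch, rank) index array into a tuple of per-axis index arrays.
    idx = tuple(indices.reshape(-1, indices.shape[-1]).T)
    ret = tensor.copy()
    np.add.at(ret, idx, values)  # unbuffered add: duplicate indices accumulate
    assert ret[0, 1] == 3.0 and ret[2, 2] == 5.0

`np.add.at` is the point of the trick: the buffered form `ret[idx] += values`
would apply the duplicated index [0, 1] only once and silently drop an update.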
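The `hermite_renormalized` family fills tensors with the series coefficients
of exp(C + Bx + Ax^2/2), rescaled by sqrt(n!). A one-dimensional sketch of the
underlying recurrence, following the convention stated in the docstring
(G[0] = exp(C)); in the library itself A is a matrix, B a vector, and the work
is dispatched to compiled helpers such as `vanilla_batch`:

    import numpy as np
    from math import factorial

    def hermite_renormalized_1d(A: float, B: float, C: float, cutoff: int) -> np.ndarray:
        # G[n] such that exp(C + B*x + A*x**2/2) == sum_n G[n] * x**n / sqrt(n!).
        # From f'(x) = (B + A*x) f(x): (n + 1) c_{n+1} = B c_n + A c_{n-1},
        # rescaled with c_n = G[n] / sqrt(n!).
        G = np.zeros(cutoff, dtype=np.complex128)
        G[0] = np.exp(C)
        if cutoff > 1:
            G[1] = B * G[0]
        for n in range(1, cutoff - 1):
            G[n + 1] = (B * G[n] + A * np.sqrt(n) * G[n - 1]) / np.sqrt(n + 1)
        return G

    A, B, C, x = 0.3, 0.2, -0.5, 0.4
    G = hermite_renormalized_1d(A, B, C, cutoff=40)
    total = sum(G[n] * x**n / np.sqrt(factorial(n)) for n in range(40))
    assert np.isclose(total, np.exp(C + B * x + A * x**2 / 2))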
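`reorder_AB_bargmann` converts between the two index orderings its docstring
names. A sketch of one way to build such a permutation; the direction shown
(Bargmann to compactFock) is an assumption, and `num_modes` plus the toy A
and B are hypothetical:

    import numpy as np

    num_modes = 2
    # Bargmann orders indices as [mode0, mode1, mode0, mode1]; compactFock
    # wants [mode0, mode0, mode1, mode1]. Transposing a 2 x num_modes grid
    # of positions interleaves them.
    perm = np.arange(2 * num_modes).reshape(2, num_modes).T.flatten()  # [0 2 1 3]
    A = np.arange(16).reshape(4, 4)
    B = np.arange(4)
    A_reordered = A[np.ix_(perm, perm)]  # permute rows and columns together
    B_reordered = B[perm]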