From 5619ac31ffe11421ee0cb09ff3f461ba018d96cb Mon Sep 17 00:00:00 2001
From: joanglaunes <34514333+joanglaunes@users.noreply.github.com>
Date: Wed, 30 Oct 2024 16:31:10 +0100
Subject: [PATCH 1/5] [ci skip] Update code_gen_utils.py

changed error message regarding infinity and float16; for issue #393.
---
 keopscore/keopscore/utils/code_gen_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/keopscore/keopscore/utils/code_gen_utils.py b/keopscore/keopscore/utils/code_gen_utils.py
index 81d5fa387..3650d7a53 100644
--- a/keopscore/keopscore/utils/code_gen_utils.py
+++ b/keopscore/keopscore/utils/code_gen_utils.py
@@ -270,7 +270,7 @@ def infinity(dtype):
         code = "( 1.0/0.0 )"
     else:
         KeOps_Error(
-            "only float and double dtypes are implemented in new python engine for now"
+            "Using infinity in a formula currently only works with float and double dtypes."
         )
     return c_variable(dtype, code)

From 17439b9aa1a4fc2b04391be92ad6dfb601db8de2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joan=20Glaune=CC=80s?=
Date: Wed, 30 Oct 2024 18:23:37 +0100
Subject: [PATCH 2/5] fixed ndim attribute of LazyTensor (issue #376)

---
 pykeops/pykeops/common/lazy_tensor.py | 120 +++++++++---
 .../test_soft_dtw_kernel_dissmatrix.py | 2 +-
 2 files changed, 63 insertions(+), 59 deletions(-)

diff --git a/pykeops/pykeops/common/lazy_tensor.py b/pykeops/pykeops/common/lazy_tensor.py
index 791c5a2da..82ece5225 100644
--- a/pykeops/pykeops/common/lazy_tensor.py
+++ b/pykeops/pykeops/common/lazy_tensor.py
@@ -49,7 +49,7 @@ class GenericLazyTensor:
     symbolic_variables = ()
     formula = None
     formula2 = None
-    ndim = None
+    inner_dim = None
     tools = None
     Genred = None
     KernelSolve = None
@@ -130,15 +130,15 @@ def __init__(self, x=None, axis=None):
             self.symbolic_variables = (x,)
             self.ind = x[0]
-            self.ndim = x[1]
+            self.inner_dim = x[1]
             self.axis = x[2]
-            self.formula = "VarSymb({},{},{})".format(x[0], self.ndim, self.axis)
+            self.formula = "VarSymb({},{},{})".format(x[0], self.inner_dim, self.axis)

             return  # That's it!

         # Integer constants are best handled directly by the compiler
         elif typex == int:
             self.formula = "IntCst(" + str(x) + ")"
-            self.ndim = 1
+            self.inner_dim = 1
             self.axis = 2
             return  # That's it!

@@ -158,9 +158,9 @@ def __init__(self, x=None, axis=None):
             )

             self.variables = (x,)
-            self.ndim = len(x)
+            self.inner_dim = len(x)
             self.axis = 2
-            self.formula = "Var({},{},2)".format(id(x), self.ndim)
+            self.formula = "Var({},{},2)".format(id(x), self.inner_dim)
             return  # That's it!
         else:
             self._dtype = self.tools.dtypename(self.tools.dtype(x))
@@ -228,9 +228,9 @@ def __init__(self, x=None, axis=None):
                 x = self.tools.view(x, x.shape)

             self.variables = (x,)
-            self.ndim = x.shape[-1]
+            self.inner_dim = x.shape[-1]
             self.axis = axis
-            self.formula = "Var({},{},{})".format(id(x), self.ndim, self.axis)
+            self.formula = "Var({},{},{})".format(id(x), self.inner_dim, self.axis)

             if axis == 0:
                 self.ni = x.shape[-2]
@@ -247,9 +247,9 @@ def __init__(self, x=None, axis=None):
                     "When 'x' is encoded as a 1D or 0D array, 'axis' must be None or 2 (= Parameter variable)."
) self.variables = (x,) - self.ndim = x.shape[-1] + self.inner_dim = x.shape[-1] self.axis = 2 - self.formula = "Var({},{},2)".format(id(x), self.ndim) + self.formula = "Var({},{},2)".format(id(x), self.inner_dim) else: raise ValueError( @@ -449,7 +449,7 @@ def unary( ) if not dimres: - dimres = self.ndim + dimres = self.inner_dim res = self.init(is_complex) # Copy of self, without a formula if opt_arg2 is not None: @@ -460,7 +460,7 @@ def unary( res.formula = "{}({},{})".format(operation, self.formula, opt_arg) else: res.formula = "{}({})".format(operation, self.formula) - res.ndim = dimres + res.inner_dim = dimres return res def binary( @@ -502,33 +502,33 @@ def binary( # By default, the dimension of the output variable is the max of the two operands: if not dimres: - dimres = max(self.ndim, other.ndim) + dimres = max(self.inner_dim, other.inner_dim) if dimcheck == "same": - if self.ndim != other.ndim: + if self.inner_dim != other.inner_dim: raise ValueError( "Operation {} expects inputs of the same dimension. ".format( operation ) - + "Received {} and {}.".format(self.ndim, other.ndim) + + "Received {} and {}.".format(self.inner_dim, other.inner_dim) ) elif dimcheck == "sameor1": - if self.ndim != other.ndim and self.ndim != 1 and other.ndim != 1: + if self.inner_dim != other.inner_dim and self.inner_dim != 1 and other.inner_dim != 1: raise ValueError( "Operation {} expects inputs of the same dimension or dimension 1. ".format( operation ) - + "Received {} and {}.".format(self.ndim, other.ndim) + + "Received {} and {}.".format(self.inner_dim, other.inner_dim) ) elif dimcheck == "vecand1": - if other.ndim != 1: + if other.inner_dim != 1: raise ValueError( "Operation {} expects a vector and a scalar input (of dimension 1). ".format( operation ) - + "Received {} and {}.".format(self.ndim, other.ndim) + + "Received {} and {}.".format(self.inner_dim, other.inner_dim) ) elif dimcheck != None: @@ -538,7 +538,7 @@ def binary( other, is_complex=is_complex ) # Merge the attributes and variables of both operands - res.ndim = dimres + res.inner_dim = dimres if not rversion: lformula, rformula = self.formula, other.formula @@ -561,7 +561,7 @@ def binary( else: res.formula = "{}({}, {})".format(operation, lformula, rformula) - if operation == "*" and other.formula[:3] == "Var" and other.ndim > 100: + if operation == "*" and other.formula[:3] == "Var" and other.inner_dim > 100: res.rec_multVar_highdim = (self, other) return res @@ -598,27 +598,27 @@ def ternary( # By default, the dimension of the output variable is the max of the three operands: if not dimres: - dimres = max(self.ndim, other1.ndim, other2.ndim) + dimres = max(self.inner_dim, other1.inner_dim, other2.inner_dim) if dimcheck == "same": - if (self.ndim != other1.ndim) or (self.ndim != other2.ndim): + if (self.inner_dim != other1.inner_dim) or (self.inner_dim != other2.inner_dim): raise ValueError( "Operation {} expects inputs of the same dimension. ".format( operation ) + "Received {}, {} and {}.".format( - self.ndim, other1.ndim, other2.ndim + self.inner_dim, other1.inner_dim, other2.inner_dim ) ) elif dimcheck == "sameor1": - if not same_or_one_test(self.ndim, other1.ndim, other2.ndim): + if not same_or_one_test(self.inner_dim, other1.inner_dim, other2.inner_dim): raise ValueError( "Operation {} expects inputs of the same dimension or dimension 1. 
".format( operation ) + "Received {}, {} and {}.".format( - self.ndim, other1.ndim, other2.ndim + self.inner_dim, other1.inner_dim, other2.inner_dim ) ) @@ -628,7 +628,7 @@ def ternary( res = self.join( other1.join(other2) ) # Merge the attributes and variables of operands - res.ndim = dimres + res.inner_dim = dimres if opt_arg is not None: if hasattr(opt_arg, "__GenericLazyTensor__"): @@ -741,7 +741,7 @@ def reduction( kwargs_init, kwargs_call = self.separate_kwargs(kwargs) res.kwargs = kwargs_call - res.ndim = self.ndim + res.inner_dim = self.inner_dim if reduction_op == "Sum" and hasattr(self, "rec_multVar_highdim"): # this means we have detected that the reduction is of the form Sum(F*V) with V a high dimension variable. if res.axis != self.rec_multVar_highdim[1].axis: @@ -856,7 +856,7 @@ def solve(self, other, var=None, call=True, **kwargs): # we define var as a new symbolic variable with same dimension as other # and we assume axis of var is same as axis of reduction varindex = self.new_variable_index() - var = self.lt_constructor((varindex, other.ndim, axis)) + var = self.lt_constructor((varindex, other.inner_dim, axis)) res = self * var else: # var is given and must be a symbolic variable which is already inside self @@ -873,9 +873,9 @@ def solve(self, other, var=None, call=True, **kwargs): kwargs_init, res.kwargs = self.separate_kwargs(kwargs) - res.ndim = self.ndim + res.inner_dim = self.inner_dim - if other.ndim > 100: + if other.inner_dim > 100: res.rec_multVar_highdim = varindex else: res.rec_multVar_highdim = None @@ -996,8 +996,8 @@ def _shape(self): btch = () if self.batchdims is None else self.batchdims ni = 1 if self.ni is None else self.ni nj = 1 if self.nj is None else self.nj - ndim = 1 if self.ndim is None else self.ndim - return btch + (ni, nj, ndim) + inner_dim = 1 if self.inner_dim is None else self.inner_dim + return btch + (ni, nj, inner_dim) @property def shape(self): @@ -1013,6 +1013,10 @@ def dim(self): Just as in PyTorch, returns the number of dimensions of a :class:`LazyTensor`. 
""" return len(self._shape) + + @property + def ndim(self): + return self.dim() @property def nbatchdims(self): @@ -1353,12 +1357,12 @@ def __pow__(self, other): other = self.lt_constructor(other) if hasattr(other, "__GenericLazyTensor__"): - if other.ndim == 1 or other.ndim == self.ndim: + if other.inner_dim == 1 or other.inner_dim == self.inner_dim: return self.binary(other, "Powf", dimcheck=None) else: raise ValueError( "Incompatible dimensions for the LazyTensor and its exponent: " - + "{} and {}.".format(self.ndim, other.ndim) + + "{} and {}.".format(self.inner_dim, other.inner_dim) ) else: raise ValueError( @@ -1564,11 +1568,11 @@ def weightedsqnorm(self, other): if not hasattr(other, "__GenericLazyTensor__"): other = self.lt_constructor(other) - if other.ndim not in (1, self.ndim, self.ndim**2): + if other.inner_dim not in (1, self.inner_dim, self.inner_dim**2): raise ValueError( "Squared norm weights should be of size 1 (scalar), " + "D (diagonal) or D^2 (full symmetric tensor), but received " - + "{} with D={}.".format(other.ndim, self.ndim) + + "{} with D={}.".format(other.inner_dim, self.inner_dim) ) return self.binary( @@ -1592,7 +1596,7 @@ def difference_matrix(self, other): return self.binary( other, "DifferenceMatrix", - dimres=(other.ndim * self.ndim), + dimres=(other.inner_dim * self.inner_dim), dimcheck=None, ) @@ -1620,9 +1624,9 @@ def elem(self, i): """ if type(i) is not int: raise ValueError("Elem indexing is only supported for integer indices.") - if i < 0 or i >= self.ndim: + if i < 0 or i >= self.inner_dim: raise ValueError( - "Index i={} is out of bounds [0,D) = [0,{}).".format(i, self.ndim) + "Index i={} is out of bounds [0,D) = [0,{}).".format(i, self.inner_dim) ) return self.unary("Elem", dimres=1, opt_arg=i) @@ -1635,9 +1639,9 @@ def extract(self, i, d): """ if (type(i) is not int) or (type(d) is not int): raise ValueError("Indexing is only supported for integer indices.") - if i < 0 or i >= self.ndim: + if i < 0 or i >= self.inner_dim: raise ValueError("Starting index is out of bounds.") - if d < 1 or i + d > self.ndim: + if d < 1 or i + d > self.inner_dim: raise ValueError("Slice dimension is out of bounds.") return self.unary("Extract", dimres=d, opt_arg=i, opt_arg2=d) @@ -1678,7 +1682,7 @@ def __getitem__(self, key): if key.start is None: key = slice(0, key.stop) if key.stop is None: - key = slice(key.start, self.ndim) + key = slice(key.start, self.inner_dim) return self.extract(key.start, key.stop - key.start) elif isinstance(key, int): return self.elem(key) @@ -1698,7 +1702,7 @@ def one_hot(self, D): raise ValueError( "One-hot encoding expects an integer dimension of the output vector." ) - if self.ndim != 1: + if self.inner_dim != 1: raise ValueError("One-hot encoding is only supported for scalar formulas.") return self.unary("OneHot", dimres=D, opt_arg=D) @@ -1710,7 +1714,7 @@ def bspline(self, x, k=0): :param k: a non-negative integer. """ return self.binary( - x, "BSpline", dimres=(self.ndim - k - 1), dimcheck="vecand1", opt_arg=f"{k}" + x, "BSpline", dimres=(self.inner_dim - k - 1), dimcheck="vecand1", opt_arg=f"{k}" ) def concat(self, other): @@ -1721,7 +1725,7 @@ def concat(self, other): the concatenation of ``x`` and ``y`` along their last dimension. """ return self.binary( - other, "Concat", dimres=(self.ndim + other.ndim), dimcheck=None + other, "Concat", dimres=(self.inner_dim + other.inner_dim), dimcheck=None ) @staticmethod @@ -1780,7 +1784,7 @@ def matvecmult(self, other): the :doc:`main reference page <../../../api/math-operations>`. 
""" return self.binary( - other, "MatVecMult", dimres=(self.ndim // other.ndim), dimcheck=None + other, "MatVecMult", dimres=(self.inner_dim // other.inner_dim), dimcheck=None ) def vecmatmult(self, other): @@ -1795,7 +1799,7 @@ def vecmatmult(self, other): the :doc:`main reference page <../../../api/math-operations>`. """ return self.binary( - other, "VecMatMult", dimres=(other.ndim // self.ndim), dimcheck=None + other, "VecMatMult", dimres=(other.inner_dim // self.inner_dim), dimcheck=None ) def tensorprod(self, other): @@ -1810,7 +1814,7 @@ def tensorprod(self, other): the :doc:`main reference page <../../../api/math-operations>`. """ return self.binary( - other, "TensorProd", dimres=(other.ndim * self.ndim), dimcheck=None + other, "TensorProd", dimres=(other.inner_dim * self.inner_dim), dimcheck=None ) def keops_tensordot(self, other, dimfa, dimfb, contfa, contfb, *args): @@ -1910,7 +1914,7 @@ def keops_kron(self, other, dimfself, dimfother): return self.binary( other, "Kron", - dimres=(other.ndim * self.ndim), + dimres=(other.inner_dim * self.inner_dim), dimcheck=None, opt_arg=opt_arg, ) @@ -1929,7 +1933,7 @@ def grad(self, other, gradin): return self.binary( gradin, "Grad", - dimres=other.ndim, + dimres=other.inner_dim, dimcheck="same", opt_arg=other, opt_pos="middle", @@ -1949,7 +1953,7 @@ def diff(self, other, diffin): return self.binary( diffin, "Diff", - dimres=self.ndim, + dimres=self.inner_dim, dimcheck=None, opt_arg=other, opt_pos="middle", @@ -1968,7 +1972,7 @@ def factorize(self, other): return self.binary( other, "Factorize", - dimres=self.ndim, + dimres=self.inner_dim, dimcheck=None, ) @@ -1984,7 +1988,7 @@ def auto_factorize(self): """ return self.unary( "AutoFactorize", - dimres=self.ndim, + dimres=self.inner_dim, ) def grad_matrix(self, other): @@ -2000,7 +2004,7 @@ def grad_matrix(self, other): """ return self.unary( "GradMatrix", - dimres=self.ndim * other.ndim, + dimres=self.inner_dim * other.inner_dim, opt_arg=other, ) @@ -2031,7 +2035,7 @@ def trace_operator(self, var): ) if var.ind >= 0: res.formula = res.formula.replace( - var.formula, f"VarSymb(-{var.ind}-1,{var.ndim},{var.axis})" + var.formula, f"VarSymb(-{var.ind}-1,{var.inner_dim},{var.axis})" ) return res @@ -2042,7 +2046,7 @@ def divergence(self, var): ``z = x.divergence(v)`` returns a :class:`LazyTensor` which encodes, symbolically, the divergence of ``x``, with respect to variable ``v``. - Inner dimensions of ``x`` (``x.ndim``) and ``v`` (``v.ndim``) must match. + Inner dimensions of ``x`` (``x.inner_dim``) and ``v`` (``v.inner_dim``) must match. """ return self.binary( var, @@ -2058,7 +2062,7 @@ def laplacian(self, var): ``z = x.laplacian(v)`` returns a :class:`LazyTensor` which encodes, symbolically, the laplacian of ``x``, with respect to variable ``v``. - Inner dimension of ``x`` (``x.ndim``) must equal 1. + Inner dimension of ``x`` (``x.inner_dim``) must equal 1. 
""" return self.binary( var, diff --git a/pykeops/pykeops/sandbox/test_soft_dtw_kernel_dissmatrix.py b/pykeops/pykeops/sandbox/test_soft_dtw_kernel_dissmatrix.py index 46cb12097..41eceb0f4 100644 --- a/pykeops/pykeops/sandbox/test_soft_dtw_kernel_dissmatrix.py +++ b/pykeops/pykeops/sandbox/test_soft_dtw_kernel_dissmatrix.py @@ -435,7 +435,7 @@ def fun_lazytensor_diffmatrix(x, y, gamma): x = LazyTensor(x[:, None, :]) y = LazyTensor(y[None, :, :]) dist_l2 = x.difference_matrix(y) ** 2 - sdtw = dist_l2.softdtw(gamma, input_shape=(x.ndim, y.ndim)) + sdtw = dist_l2.softdtw(gamma, input_shape=(x.inner_dim, y.inner_dim)) K = (-sdtw).exp() return K.sum(axis=1) From 8a7787e40bb6e45c66219f850e5d490a3db6ade4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20Glaune=CC=80s?= Date: Wed, 30 Oct 2024 18:25:17 +0100 Subject: [PATCH 3/5] [ci skip] linting --- pykeops/pykeops/common/lazy_tensor.py | 37 +++++++++++++++++++++------ 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/pykeops/pykeops/common/lazy_tensor.py b/pykeops/pykeops/common/lazy_tensor.py index 82ece5225..d6448fdca 100644 --- a/pykeops/pykeops/common/lazy_tensor.py +++ b/pykeops/pykeops/common/lazy_tensor.py @@ -132,7 +132,9 @@ def __init__(self, x=None, axis=None): self.ind = x[0] self.inner_dim = x[1] self.axis = x[2] - self.formula = "VarSymb({},{},{})".format(x[0], self.inner_dim, self.axis) + self.formula = "VarSymb({},{},{})".format( + x[0], self.inner_dim, self.axis + ) return # That's it! # Integer constants are best handled directly by the compiler @@ -514,7 +516,11 @@ def binary( ) elif dimcheck == "sameor1": - if self.inner_dim != other.inner_dim and self.inner_dim != 1 and other.inner_dim != 1: + if ( + self.inner_dim != other.inner_dim + and self.inner_dim != 1 + and other.inner_dim != 1 + ): raise ValueError( "Operation {} expects inputs of the same dimension or dimension 1. ".format( operation @@ -601,7 +607,9 @@ def ternary( dimres = max(self.inner_dim, other1.inner_dim, other2.inner_dim) if dimcheck == "same": - if (self.inner_dim != other1.inner_dim) or (self.inner_dim != other2.inner_dim): + if (self.inner_dim != other1.inner_dim) or ( + self.inner_dim != other2.inner_dim + ): raise ValueError( "Operation {} expects inputs of the same dimension. ".format( operation @@ -1013,7 +1021,7 @@ def dim(self): Just as in PyTorch, returns the number of dimensions of a :class:`LazyTensor`. """ return len(self._shape) - + @property def ndim(self): return self.dim() @@ -1714,7 +1722,11 @@ def bspline(self, x, k=0): :param k: a non-negative integer. """ return self.binary( - x, "BSpline", dimres=(self.inner_dim - k - 1), dimcheck="vecand1", opt_arg=f"{k}" + x, + "BSpline", + dimres=(self.inner_dim - k - 1), + dimcheck="vecand1", + opt_arg=f"{k}", ) def concat(self, other): @@ -1784,7 +1796,10 @@ def matvecmult(self, other): the :doc:`main reference page <../../../api/math-operations>`. """ return self.binary( - other, "MatVecMult", dimres=(self.inner_dim // other.inner_dim), dimcheck=None + other, + "MatVecMult", + dimres=(self.inner_dim // other.inner_dim), + dimcheck=None, ) def vecmatmult(self, other): @@ -1799,7 +1814,10 @@ def vecmatmult(self, other): the :doc:`main reference page <../../../api/math-operations>`. 
""" return self.binary( - other, "VecMatMult", dimres=(other.inner_dim // self.inner_dim), dimcheck=None + other, + "VecMatMult", + dimres=(other.inner_dim // self.inner_dim), + dimcheck=None, ) def tensorprod(self, other): @@ -1814,7 +1832,10 @@ def tensorprod(self, other): the :doc:`main reference page <../../../api/math-operations>`. """ return self.binary( - other, "TensorProd", dimres=(other.inner_dim * self.inner_dim), dimcheck=None + other, + "TensorProd", + dimres=(other.inner_dim * self.inner_dim), + dimcheck=None, ) def keops_tensordot(self, other, dimfa, dimfb, contfa, contfb, *args): From 0fc008adc1a37bd958f1e851fd53c572bfdb0d57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20Alexis=20Glaun=C3=A8s?= Date: Thu, 17 Apr 2025 18:12:59 +0200 Subject: [PATCH 4/5] revert back to original ndim meaning to avoid breaking compatibility, but added a warning. --- pykeops/pykeops/common/lazy_tensor.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pykeops/pykeops/common/lazy_tensor.py b/pykeops/pykeops/common/lazy_tensor.py index d6448fdca..6572a375d 100644 --- a/pykeops/pykeops/common/lazy_tensor.py +++ b/pykeops/pykeops/common/lazy_tensor.py @@ -1024,7 +1024,8 @@ def dim(self): @property def ndim(self): - return self.dim() + KeOps_Warning("Note that x.ndim does not return the number of dimensions of the tensor x as in other frameworks (e.g. PyTorch), but instead returns the trailing dimension x.shape[-1], i.e. the inner dimension of the KeOps formula. If you want to return the number of dimensions of the tensor, use len(x.shape). Otherwise, to suppress this warning, use x.inner_dim instead of x.ndim, or deactivate verbosity with pykeops.set_verbose(False).") + return self.inner_dim() @property def nbatchdims(self): From 35b75a76e7b2a56f3cb5a8edeb856e48dc96d624 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joan=20Alexis=20Glaun=C3=A8s?= Date: Thu, 17 Apr 2025 18:13:55 +0200 Subject: [PATCH 5/5] applied linting --- pykeops/pykeops/benchmarks/benchmark_KNN.py | 8 ++++---- pykeops/pykeops/benchmarks/plot_accuracy.py | 4 ++-- .../benchmarks/plot_benchmark_grad1convolutions.py | 4 ++-- .../pykeops/benchmarks/plot_benchmark_high_dimension.py | 6 +++--- pykeops/pykeops/benchmarks/plot_benchmark_invkernel.py | 6 +++--- .../pykeops/benchmarks/plot_benchmarks_convolutions_3D.py | 4 ++-- pykeops/pykeops/common/lazy_tensor.py | 4 +++- .../pykeops/examples/numpy/plot_generic_syntax_numpy.py | 2 +- pykeops/pykeops/examples/numpy/plot_gpu_select_numpy.py | 2 +- pykeops/pykeops/examples/numpy/plot_grid_cluster_numpy.py | 2 +- pykeops/pykeops/examples/numpy/plot_test_ArgKMin.py | 2 +- .../pykeops/examples/numpy/plot_test_invkernel_numpy.py | 2 +- pykeops/pykeops/examples/pytorch/plot_advanced_formula.py | 2 +- .../pykeops/examples/pytorch/plot_anisotropic_kernels.py | 2 +- .../pykeops/examples/pytorch/plot_gpu_select_example.py | 4 ++-- .../pykeops/examples/pytorch/plot_grid_cluster_pytorch.py | 2 +- .../pykeops/examples/pytorch/plot_test_invkernel_torch.py | 2 +- pykeops/pykeops/sandbox/test_gpu_cpu2.py | 2 +- .../pykeops/tutorials/a_LazyTensors/plot_lazytensors_a.py | 2 +- .../pykeops/tutorials/a_LazyTensors/plot_lazytensors_c.py | 2 +- pykeops/pykeops/tutorials/backends/plot_gpytorch.py | 4 ++-- pykeops/pykeops/tutorials/backends/plot_scipy.py | 6 +++--- .../interpolation/plot_RBF_interpolation_torch.py | 6 +++--- pykeops/pykeops/tutorials/kmeans/plot_kmeans_numpy.py | 6 +++--- pykeops/pykeops/tutorials/kmeans/plot_kmeans_torch.py | 6 +++--- pykeops/pykeops/tutorials/knn/plot_knn_mnist.py 
| 4 ++-- pykeops/pykeops/tutorials/knn/plot_knn_numpy.py | 2 +- pykeops/pykeops/tutorials/knn/plot_knn_torch.py | 2 +- 28 files changed, 51 insertions(+), 49 deletions(-) diff --git a/pykeops/pykeops/benchmarks/benchmark_KNN.py b/pykeops/pykeops/benchmarks/benchmark_KNN.py index 3ac22c6fe..e4147ac4e 100644 --- a/pykeops/pykeops/benchmarks/benchmark_KNN.py +++ b/pykeops/pykeops/benchmarks/benchmark_KNN.py @@ -2,7 +2,7 @@ K-Nearest Neighbors search ========================================= -We compare the performances of PyTorch, JAX, KeOps, Scikit-Learn and FAISS (when applicable) +We compare the performances of PyTorch, JAX, KeOps, Scikit-Learn and FAISS (when applicable) for K-NN queries on random samples and standard datasets. A detailed discussion of these results can be found in Section 5.2 of our `NeurIPS 2020 paper `_. @@ -17,7 +17,7 @@ it provides the only competitive run times in the many settings that are not supported by existing C++ libraries. -In this demo, we often use exact **bruteforce** computations +In this demo, we often use exact **bruteforce** computations (tensorized for PyTorch/JAX, on-the-fly for KeOps) and do not leverage any quantization scheme or multiscale decomposition of the distance matrix. @@ -33,9 +33,9 @@ .. note:: Note that timings are always subject to change: libraries and hardware get better with time. - If you find a way of improving these benchmarks, please + If you find a way of improving these benchmarks, please `let us know `_! - + """ ############################################## diff --git a/pykeops/pykeops/benchmarks/plot_accuracy.py b/pykeops/pykeops/benchmarks/plot_accuracy.py index 06e3a1dce..c5c014a4e 100644 --- a/pykeops/pykeops/benchmarks/plot_accuracy.py +++ b/pykeops/pykeops/benchmarks/plot_accuracy.py @@ -3,8 +3,8 @@ =========================================================== We test various options of KeOps regarding accuracy of computations. - - + + """ ############################################## diff --git a/pykeops/pykeops/benchmarks/plot_benchmark_grad1convolutions.py b/pykeops/pykeops/benchmarks/plot_benchmark_grad1convolutions.py index 6859d3784..9b8ebb297 100644 --- a/pykeops/pykeops/benchmarks/plot_benchmark_grad1convolutions.py +++ b/pykeops/pykeops/benchmarks/plot_benchmark_grad1convolutions.py @@ -11,8 +11,8 @@ where :math:`f` is a Gauss or Cauchy or Laplace or inverse multiquadric kernel. See e.g. `wikipedia `_ - - + + """ ##################################################################### diff --git a/pykeops/pykeops/benchmarks/plot_benchmark_high_dimension.py b/pykeops/pykeops/benchmarks/plot_benchmark_high_dimension.py index 9a167fbb6..67f07a515 100644 --- a/pykeops/pykeops/benchmarks/plot_benchmark_high_dimension.py +++ b/pykeops/pykeops/benchmarks/plot_benchmark_high_dimension.py @@ -2,11 +2,11 @@ Benchmarking Gaussian convolutions in high dimensions =========================================================== -Let's compare the performances of PyTorch and KeOps on +Let's compare the performances of PyTorch and KeOps on simple Gaussian RBF kernel products, as the dimension grows. 
- - + + """ ############################################## diff --git a/pykeops/pykeops/benchmarks/plot_benchmark_invkernel.py b/pykeops/pykeops/benchmarks/plot_benchmark_invkernel.py index 50f908b5c..15ca13638 100644 --- a/pykeops/pykeops/benchmarks/plot_benchmark_invkernel.py +++ b/pykeops/pykeops/benchmarks/plot_benchmark_invkernel.py @@ -3,13 +3,13 @@ ========================================= This benchmark compares the performances of KeOps versus Numpy and Pytorch on a inverse matrix operation. It uses the functions :class:`torch.KernelSolve ` (see also :doc:`here <../_auto_examples/pytorch/plot_test_invkernel_torch>`) and :class:`numpy.KernelSolve ` (see also :doc:`here <../_auto_examples/numpy/plot_test_invkernel_numpy>`). - + In a nutshell, given :math:`x \in\mathbb R^{N\\times D}` and :math:`b \in \mathbb R^{N\\times D_v}`, we compute :math:`a \in \mathbb R^{N\\times D_v}` so that .. math:: b = (\\alpha\operatorname{Id} + K_{x,x}) a \quad \Leftrightarrow \quad a = (\\alpha\operatorname{Id}+ K_{x,x})^{-1} b - + where :math:`K_{x,x} = \Big[\exp(-\|x_i -x_j\|^2 / \sigma^2)\Big]_{i,j=1}^N`. The method is based on a conjugate gradient scheme. The benchmark tests various values of :math:`N \in [10, \cdots,10^6]`. @@ -18,7 +18,7 @@ using a **bruteforce** implementation and do not leverage any multiscale or low-rank (Nystroem/multipole) decomposition of the Kernel matrix. Going further, advanced strategies and solvers - are now available through the + are now available through the `GPyTorch `_ and `Falkon `_ libraries, which rely on a KeOps backend whenever relevant. diff --git a/pykeops/pykeops/benchmarks/plot_benchmarks_convolutions_3D.py b/pykeops/pykeops/benchmarks/plot_benchmarks_convolutions_3D.py index 528ea7966..241eb5ea8 100644 --- a/pykeops/pykeops/benchmarks/plot_benchmarks_convolutions_3D.py +++ b/pykeops/pykeops/benchmarks/plot_benchmarks_convolutions_3D.py @@ -2,12 +2,12 @@ Scaling up Gaussian convolutions on 3D point clouds =========================================================== -Let's compare the performance of PyTorch and KeOps on +Let's compare the performance of PyTorch and KeOps on simple Gaussian RBF kernel products, as the number of samples grows from 100 to 1,000,000. .. note:: - In this demo, we use exact **bruteforce** computations + In this demo, we use exact **bruteforce** computations (tensorized for PyTorch and online for KeOps), without leveraging any multiscale or low-rank (Nystroem/multipole) decomposition of the Kernel matrix. We are working on providing transparent support for these approximations in KeOps. diff --git a/pykeops/pykeops/common/lazy_tensor.py b/pykeops/pykeops/common/lazy_tensor.py index 6572a375d..3b763c67c 100644 --- a/pykeops/pykeops/common/lazy_tensor.py +++ b/pykeops/pykeops/common/lazy_tensor.py @@ -1024,7 +1024,9 @@ def dim(self): @property def ndim(self): - KeOps_Warning("Note that x.ndim does not return the number of dimensions of the tensor x as in other frameworks (e.g. PyTorch), but instead returns the trailing dimension x.shape[-1], i.e. the inner dimension of the KeOps formula. If you want to return the number of dimensions of the tensor, use len(x.shape). Otherwise, to suppress this warning, use x.inner_dim instead of x.ndim, or deactivate verbosity with pykeops.set_verbose(False).") + KeOps_Warning( + "Note that x.ndim does not return the number of dimensions of the tensor x as in other frameworks (e.g. PyTorch), but instead returns the trailing dimension x.shape[-1], i.e. the inner dimension of the KeOps formula. 
If you want to return the number of dimensions of the tensor, use len(x.shape). Otherwise, to suppress this warning, use x.inner_dim instead of x.ndim, or deactivate verbosity with pykeops.set_verbose(False)."
+        )
         return self.inner_dim

     @property
diff --git a/pykeops/pykeops/examples/numpy/plot_generic_syntax_numpy.py b/pykeops/pykeops/examples/numpy/plot_generic_syntax_numpy.py
index ee55a9153..ca6e647cf 100644
--- a/pykeops/pykeops/examples/numpy/plot_generic_syntax_numpy.py
+++ b/pykeops/pykeops/examples/numpy/plot_generic_syntax_numpy.py
@@ -1,5 +1,5 @@
 """
-Sum reduction 
+Sum reduction
 =====================
 
 """
diff --git a/pykeops/pykeops/examples/numpy/plot_gpu_select_numpy.py b/pykeops/pykeops/examples/numpy/plot_gpu_select_numpy.py
index cb39afbe8..ed6b8c279 100644
--- a/pykeops/pykeops/examples/numpy/plot_gpu_select_numpy.py
+++ b/pykeops/pykeops/examples/numpy/plot_gpu_select_numpy.py
@@ -7,7 +7,7 @@
 let's see how to select the card
 on which a KeOps operation will be performed.
 
- 
+
 """
diff --git a/pykeops/pykeops/examples/numpy/plot_grid_cluster_numpy.py b/pykeops/pykeops/examples/numpy/plot_grid_cluster_numpy.py
index ce674e355..048808156 100755
--- a/pykeops/pykeops/examples/numpy/plot_grid_cluster_numpy.py
+++ b/pykeops/pykeops/examples/numpy/plot_grid_cluster_numpy.py
@@ -5,7 +5,7 @@
 This script showcases the use of the optional **ranges** argument
 to compute block-sparse reductions with **sub-quadratic time complexity**.
 
- 
+
 """
diff --git a/pykeops/pykeops/examples/numpy/plot_test_ArgKMin.py b/pykeops/pykeops/examples/numpy/plot_test_ArgKMin.py
index 4c471e7cd..2f3dbf6ac 100644
--- a/pykeops/pykeops/examples/numpy/plot_test_ArgKMin.py
+++ b/pykeops/pykeops/examples/numpy/plot_test_ArgKMin.py
@@ -5,7 +5,7 @@
 Using the :mod:`pykeops.numpy` API, we define a dataset of N points in :math:`\mathbb R^D` and compute for each
 point the indices of its K nearest neighbours (including itself).
 
- 
+
 """
diff --git a/pykeops/pykeops/examples/numpy/plot_test_invkernel_numpy.py b/pykeops/pykeops/examples/numpy/plot_test_invkernel_numpy.py
index 6187ffaca..4b460a452 100644
--- a/pykeops/pykeops/examples/numpy/plot_test_invkernel_numpy.py
+++ b/pykeops/pykeops/examples/numpy/plot_test_invkernel_numpy.py
@@ -6,7 +6,7 @@
 using the **conjugate gradient solver** provided by
 :class:`numpy.KernelSolve `.
 
- 
+
 """
diff --git a/pykeops/pykeops/examples/pytorch/plot_advanced_formula.py b/pykeops/pykeops/examples/pytorch/plot_advanced_formula.py
index 89333f8ca..21327f745 100644
--- a/pykeops/pykeops/examples/pytorch/plot_advanced_formula.py
+++ b/pykeops/pykeops/examples/pytorch/plot_advanced_formula.py
@@ -4,7 +4,7 @@
 
 Let's write generic formulas using the KeOps syntax.
 
- 
+
 """
diff --git a/pykeops/pykeops/examples/pytorch/plot_anisotropic_kernels.py b/pykeops/pykeops/examples/pytorch/plot_anisotropic_kernels.py
index 871e42731..837cf964e 100644
--- a/pykeops/pykeops/examples/pytorch/plot_anisotropic_kernels.py
+++ b/pykeops/pykeops/examples/pytorch/plot_anisotropic_kernels.py
@@ -5,7 +5,7 @@
 Let's see how to encode anisotropic kernels
 with a minimal amount of effort.
 
- + """ ############################################## diff --git a/pykeops/pykeops/examples/pytorch/plot_gpu_select_example.py b/pykeops/pykeops/examples/pytorch/plot_gpu_select_example.py index 82e570a43..73d30f80c 100644 --- a/pykeops/pykeops/examples/pytorch/plot_gpu_select_example.py +++ b/pykeops/pykeops/examples/pytorch/plot_gpu_select_example.py @@ -1,13 +1,13 @@ """ ========= -Multi GPU +Multi GPU ========= On multi-device clusters, let's see how to select the card on which a KeOps operation will be performed. - + """ ############################################################### diff --git a/pykeops/pykeops/examples/pytorch/plot_grid_cluster_pytorch.py b/pykeops/pykeops/examples/pytorch/plot_grid_cluster_pytorch.py index c0d8251da..4db8dfe3a 100755 --- a/pykeops/pykeops/examples/pytorch/plot_grid_cluster_pytorch.py +++ b/pykeops/pykeops/examples/pytorch/plot_grid_cluster_pytorch.py @@ -5,7 +5,7 @@ This script showcases the use of the optional **ranges** argument to compute block-sparse reductions with **sub-quadratic time complexity**. - + """ ######################################################################## diff --git a/pykeops/pykeops/examples/pytorch/plot_test_invkernel_torch.py b/pykeops/pykeops/examples/pytorch/plot_test_invkernel_torch.py index 1bda412b9..ac1be1424 100644 --- a/pykeops/pykeops/examples/pytorch/plot_test_invkernel_torch.py +++ b/pykeops/pykeops/examples/pytorch/plot_test_invkernel_torch.py @@ -6,7 +6,7 @@ using the **conjugate gradient solver** provided by :class:`pykeops.torch.KernelSolve`. - + """ ############################################################################### diff --git a/pykeops/pykeops/sandbox/test_gpu_cpu2.py b/pykeops/pykeops/sandbox/test_gpu_cpu2.py index ce674e355..048808156 100644 --- a/pykeops/pykeops/sandbox/test_gpu_cpu2.py +++ b/pykeops/pykeops/sandbox/test_gpu_cpu2.py @@ -5,7 +5,7 @@ This script showcases the use of the optional **ranges** argument to compute block-sparse reductions with **sub-quadratic time complexity**. - + """ ######################################################################## diff --git a/pykeops/pykeops/tutorials/a_LazyTensors/plot_lazytensors_a.py b/pykeops/pykeops/tutorials/a_LazyTensors/plot_lazytensors_a.py index a736fe812..c0d5a1d7f 100644 --- a/pykeops/pykeops/tutorials/a_LazyTensors/plot_lazytensors_a.py +++ b/pykeops/pykeops/tutorials/a_LazyTensors/plot_lazytensors_a.py @@ -8,7 +8,7 @@ it alleviates the need for **huge intermediate variables** such as *kernel* or *distance* matrices in machine learning and computational geometry. - + """ ######################################################################### diff --git a/pykeops/pykeops/tutorials/a_LazyTensors/plot_lazytensors_c.py b/pykeops/pykeops/tutorials/a_LazyTensors/plot_lazytensors_c.py index 181073cc3..4d52a2a0b 100644 --- a/pykeops/pykeops/tutorials/a_LazyTensors/plot_lazytensors_c.py +++ b/pykeops/pykeops/tutorials/a_LazyTensors/plot_lazytensors_c.py @@ -4,7 +4,7 @@ ========================================================= This tutorial shows some advanced features of the LazyTensor class. 
- + """ import time diff --git a/pykeops/pykeops/tutorials/backends/plot_gpytorch.py b/pykeops/pykeops/tutorials/backends/plot_gpytorch.py index 20076f31c..e7e191c94 100644 --- a/pykeops/pykeops/tutorials/backends/plot_gpytorch.py +++ b/pykeops/pykeops/tutorials/backends/plot_gpytorch.py @@ -4,7 +4,7 @@ ================================= Out-of-the-box, KeOps only provides :ref:`limited support ` for -`Kriging `_ +`Kriging `_ or `Gaussian process regression `_: the :class:`KernelSolve ` operator implements a conjugate gradient solver for kernel linear systems... @@ -24,7 +24,7 @@ .. note:: - The GPytorch team has now integrated + The GPytorch team has now integrated `explicit KeOps kernels `_ within their repository: they are documented `in this tutorial `_ and make the handcrafted example below diff --git a/pykeops/pykeops/tutorials/backends/plot_scipy.py b/pykeops/pykeops/tutorials/backends/plot_scipy.py index f99192431..1825d780b 100644 --- a/pykeops/pykeops/tutorials/backends/plot_scipy.py +++ b/pykeops/pykeops/tutorials/backends/plot_scipy.py @@ -8,13 +8,13 @@ the `LinearOperator `_ class, which represents generic "Matrix-Vector" products -and can be plugged seamlessly in a `large collection `_ +and can be plugged seamlessly in a `large collection `_ of linear algebra routines. Crucially, KeOps :class:`pykeops.torch.LazyTensor` are now **fully compatible** with this interface. -As an example, let's see how to combine KeOps with a -`fast eigenproblem solver `_ +As an example, let's see how to combine KeOps with a +`fast eigenproblem solver `_ to compute **spectral coordinates** on a large 2D or 3D point cloud. .. note:: diff --git a/pykeops/pykeops/tutorials/interpolation/plot_RBF_interpolation_torch.py b/pykeops/pykeops/tutorials/interpolation/plot_RBF_interpolation_torch.py index 0d86b37b8..ea0a91b59 100644 --- a/pykeops/pykeops/tutorials/interpolation/plot_RBF_interpolation_torch.py +++ b/pykeops/pykeops/tutorials/interpolation/plot_RBF_interpolation_torch.py @@ -12,13 +12,13 @@ where :math:`K_{xx}` is a symmetric, positive definite linear operator defined through the :ref:`KeOps generic syntax ` and :math:`\alpha` is a nonnegative regularization parameter. -In the following script, we use it to solve large-scale `Kriging `_ +In the following script, we use it to solve large-scale `Kriging `_ (aka. `Gaussian process regression `_ or `generalized spline interpolation `_) problems with a **linear memory footprint**. - - + + """ ############################################################################################### diff --git a/pykeops/pykeops/tutorials/kmeans/plot_kmeans_numpy.py b/pykeops/pykeops/tutorials/kmeans/plot_kmeans_numpy.py index daab5c17c..16a22b2b4 100644 --- a/pykeops/pykeops/tutorials/kmeans/plot_kmeans_numpy.py +++ b/pykeops/pykeops/tutorials/kmeans/plot_kmeans_numpy.py @@ -5,17 +5,17 @@ The :meth:`pykeops.numpy.LazyTensor.argmin` reduction supported by KeOps :class:`pykeops.numpy.LazyTensor` allows us to perform **bruteforce nearest neighbor search** with four lines of code. -It can thus be used to implement a **large-scale** +It can thus be used to implement a **large-scale** `K-means clustering `_, **without memory overflows**. .. note:: - For large and high dimensional datasets, this script + For large and high dimensional datasets, this script **is outperformed by its PyTorch counterpart** which avoids transfers between CPU (host) and GPU (device) memories. 
- + """ ######################################################################### diff --git a/pykeops/pykeops/tutorials/kmeans/plot_kmeans_torch.py b/pykeops/pykeops/tutorials/kmeans/plot_kmeans_torch.py index 77a1909bb..c09f87c7c 100644 --- a/pykeops/pykeops/tutorials/kmeans/plot_kmeans_torch.py +++ b/pykeops/pykeops/tutorials/kmeans/plot_kmeans_torch.py @@ -5,16 +5,16 @@ The :meth:`pykeops.torch.LazyTensor.argmin` reduction supported by KeOps :class:`pykeops.torch.LazyTensor` allows us to perform **bruteforce nearest neighbor search** with four lines of code. -It can thus be used to implement a **large-scale** +It can thus be used to implement a **large-scale** `K-means clustering `_, **without memory overflows**. .. note:: - For large and high dimensional datasets, this script + For large and high dimensional datasets, this script **outperforms its NumPy counterpart** as it avoids transfers between CPU (host) and GPU (device) memories. - + """ ######################################################################## diff --git a/pykeops/pykeops/tutorials/knn/plot_knn_mnist.py b/pykeops/pykeops/tutorials/knn/plot_knn_mnist.py index dff7df933..92caaa137 100644 --- a/pykeops/pykeops/tutorials/knn/plot_knn_mnist.py +++ b/pykeops/pykeops/tutorials/knn/plot_knn_mnist.py @@ -5,9 +5,9 @@ The :mod:`.argKmin(K)` reduction supported by KeOps :class:`pykeops.torch.LazyTensor` allows us to perform **bruteforce k-nearest neighbors search** with four lines of code. -It can thus be used to implement a **large-scale** +It can thus be used to implement a **large-scale** `K-NN classifier `_, -**without memory overflows** on the +**without memory overflows** on the full `MNIST `_ dataset. diff --git a/pykeops/pykeops/tutorials/knn/plot_knn_numpy.py b/pykeops/pykeops/tutorials/knn/plot_knn_numpy.py index b9f6f3b96..a68fdcca2 100644 --- a/pykeops/pykeops/tutorials/knn/plot_knn_numpy.py +++ b/pykeops/pykeops/tutorials/knn/plot_knn_numpy.py @@ -5,7 +5,7 @@ The :meth:`pykeops.numpy.LazyTensor.argKmin` reduction supported by KeOps :class:`pykeops.numpy.LazyTensor` allows us to perform **bruteforce k-nearest neighbors search** with four lines of code. -It can thus be used to implement a **large-scale** +It can thus be used to implement a **large-scale** `K-NN classifier `_, **without memory overflows**. diff --git a/pykeops/pykeops/tutorials/knn/plot_knn_torch.py b/pykeops/pykeops/tutorials/knn/plot_knn_torch.py index 4e2303c70..610cb48a5 100644 --- a/pykeops/pykeops/tutorials/knn/plot_knn_torch.py +++ b/pykeops/pykeops/tutorials/knn/plot_knn_torch.py @@ -5,7 +5,7 @@ The :mod:`.argKmin(K)` reduction supported by KeOps :class:`pykeops.torch.LazyTensor` allows us to perform **bruteforce k-nearest neighbors search** with four lines of code. -It can thus be used to implement a **large-scale** +It can thus be used to implement a **large-scale** `K-NN classifier `_, **without memory overflows**.
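
Note on the API change in patches 2 and 4 above: the internal attribute formerly
named `ndim` is renamed to `inner_dim`, while the public `x.ndim` property keeps
returning the inner (trailing) dimension for backward compatibility, now with a
warning. A minimal sketch of the resulting behavior, assuming a working pykeops
install with the PyTorch bindings (the variable names below are ours, for
illustration only; expected values follow from the patches above):

    import torch
    from pykeops.torch import LazyTensor

    # An "i"-indexed variable of inner dimension 5, encoded as usual with a
    # dummy "j" axis: virtual shape (1000, 1, 5).
    x_i = LazyTensor(torch.randn(1000, 1, 5))

    print(x_i.shape)       # (1000, 1, 5) -- virtual shape of the LazyTensor
    print(x_i.dim())       # 3 -- number of axes, as in PyTorch
    print(len(x_i.shape))  # 3 -- number of axes, without triggering any warning
    print(x_i.inner_dim)   # 5 -- new explicit name for the trailing dimension x.shape[-1]
    print(x_i.ndim)        # 5 -- still the inner dimension, not the number of axes;
                           #      emits the KeOps_Warning introduced in patch 4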