From 1ae02226f17fd9517a8f2f612995b359255b70dc Mon Sep 17 00:00:00 2001 From: "Keaton J. Burns" Date: Mon, 11 Dec 2023 17:23:53 -0500 Subject: [PATCH 01/19] Try remove dist from cartesian bases --- dedalus/core/basis.py | 95 +++++++++++++++---------------------- dedalus/core/coords.py | 77 ++++++++++-------------------- dedalus/core/distributor.py | 22 ++++++--- dedalus/core/domain.py | 25 ++++++---- dedalus/core/evaluator.py | 2 +- dedalus/core/operators.py | 25 +++++----- dedalus/core/solvers.py | 3 +- 7 files changed, 111 insertions(+), 138 deletions(-) diff --git a/dedalus/core/basis.py b/dedalus/core/basis.py index efb25e86..7d613d76 100644 --- a/dedalus/core/basis.py +++ b/dedalus/core/basis.py @@ -102,9 +102,10 @@ class Basis: def __init__(self, coords): self.coords = coords - self.dist = coords.dist - self.axis = coords.axis - self.domain = Domain(self.dist, bases=(self,)) + + @CachedMethod + def domain(self, dist): + return Domain(dist, (self,)) def clone_with(self, **new_kw): (_, *argnames), _, _, _, _, _, _ = inspect.getfullargspec(type(self).__init__) @@ -128,14 +129,6 @@ def __radd__(self, other): def __rmul__(self, other): return self.__mul__(other) - @property - def first_axis(self): - return self.axis - - @property - def last_axis(self): - return self.axis + self.dim - 1 - def grid_shape(self, scales): shape = np.array([int(np.ceil(s*n)) for s, n in zip(scales, self.shape)]) shape[np.array(self.shape) == 1] = 1 @@ -153,14 +146,6 @@ def elements_to_groups(self, grid_space, elements): # Subclasses must implement raise NotImplementedError - def global_grid_spacing(self, *args, **kwargs): - """Global grids spacings.""" - raise NotImplementedError - - def local_grid_spacing(self, *args, **kwargs): - """Local grids spacings.""" - raise NotImplementedError - def global_grids(self, scales): """Global grids.""" # Subclasses must implement @@ -362,55 +347,53 @@ def __init__(self, coord, size, bounds, dealias): def matrix_dependence(self, matrix_coupling): 
return matrix_coupling - @CachedMethod - def global_grid_spacing(self, axis, scale=None): - """Global grids spacings.""" - grid = self.global_grid(scale=scale) - return np.gradient(grid, axis=axis, edge_order=2) - - @CachedMethod - def local_grid_spacing(self, axis, scale=None): - """Local grids spacings.""" - global_spacing = self.global_grid_spacing(axis, scale=scale) - if scale is None: scale = 1 - local_elements = self.dist.grid_layout.local_elements(self.domain, scales=scale)[axis] - return reshape_vector(np.ravel(global_spacing)[local_elements], dim=self.dist.dim, axis=axis) - - # Why do we need this? def global_grids(self, scales=None): """Global grids.""" - if scales == None: scales = (1,) + if scales == None: + scales = (1,) return (self.global_grid(scales[0]),) def global_grid(self, scale=None): """Global grid.""" - if scale == None: scale = 1 + if scale == None: + scale = 1 native_grid = self._native_grid(scale) problem_grid = self.COV.problem_coord(native_grid) - return reshape_vector(problem_grid, dim=self.dist.dim, axis=self.axis) + return problem_grid - # Why do we need this? 
- def local_grids(self, scales=None): + def local_grids(self, dist, scales=None): """Local grids.""" - if scales == None: scales = (1,) - return (self.local_grid(scales[0]),) + if scales == None: + scales = (1,) + return (self.local_grid(dist, scales[0]),) - def local_grid(self, scale=None): + def local_grid(self, dist, scale=None): """Local grid.""" if scale == None: scale = 1 - local_elements = self.dist.grid_layout.local_elements(self.domain, scales=scale)[self.axis] - native_grid = self._native_grid(scale)[local_elements] + local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale) + native_grid = self._native_grid(scale)[local_elements[dist.get_basis_axis(self)]] problem_grid = self.COV.problem_coord(native_grid) - return reshape_vector(problem_grid, dim=self.dist.dim, axis=self.axis) + return reshape_vector(problem_grid, dim=dist.dim, axis=dist.get_basis_axis(self)) - def local_modes(self): - """Local grid.""" - local_modes = self.local_elements()[0] - return reshape_vector(local_modes, dim=self.dist.dim, axis=self.axis) + def global_grid_spacing(self, scale=None): + """Global grid spacings.""" + grid = self.global_grid(scale=scale) + return np.gradient(grid, edge_order=2) - def local_elements(self): - local_elements = self.dist.coeff_layout.local_elements(self.domain, scales=1)[self.axis] - return (local_elements,) + def local_grid_spacing(self, dist, scale=None): + """Local grids spacings.""" + if scale is None: + scale = 1 + global_spacing = self.global_grid_spacing(scale=scale) + local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale) + local_spacing_flat = global_spacing[local_elements[dist.get_basis_axis(self)]] + out = reshape_vector(local_spacing_flat, dim=dist.dim, axis=dist.get_basis_axis(self)) + return out + + def local_modes(self, dist): + """Local grid.""" + local_elements = dist.coeff_layout.local_elements(self.domain(dist), scales=1) + return 
reshape_vector(local_elements[dist.get_basis_axis(self)], dim=dist.dim, axis=dist.get_basis_axis(self)) def _native_grid(self, scale): """Native flat global grid.""" @@ -830,7 +813,7 @@ def build_polynomial(dist, basis, n): if n < 0: n += basis.size P = dist.Field(bases=basis) - axis = basis.first_axis + axis = dist.get_basis_axis(basis) P['c'][axslice(axis, n, n+1)] = 1 return P @@ -2450,7 +2433,7 @@ def global_grid_radius(self, scale): def local_grid_radius(self, scale): r = self.radial_COV.problem_coord(self._native_radius_grid(scale)) - local_elements = self.dist.grid_layout.local_elements(self.domain, scales=scale)[self.axis+1] + local_elements = self.dist.grid_layout.local_elements(self.domain(dist), scales=scale)[self.axis+1] return reshape_vector(r[local_elements], dim=self.dist.dim, axis=self.axis+1) def _native_radius_grid(self, scale): @@ -6124,11 +6107,11 @@ def cfl_spacing(self, velocity): basis = velocity.domain.get_basis(c) if basis: dealias = basis.dealias[0] - axis_spacing = basis.local_grid_spacing(i, dealias) * dealias + axis_spacing = basis.local_grid_spacing(self.dist, dealias) * dealias N = basis.grid_shape((dealias,))[0] if isinstance(basis, Jacobi) and basis.a == -1/2 and basis.b == -1/2: #Special case for ChebyshevT (a=b=-1/2) - local_elements = basis.dist.grid_layout.local_elements(basis.domain, scales=dealias)[i] + local_elements = self.dist.grid_layout.local_elements(basis.domain(self.dist), scales=dealias)[i] i = np.arange(N)[local_elements].reshape(axis_spacing.shape) theta = np.pi * (i + 1/2) / N axis_spacing[:] = dealias * basis.COV.stretch * np.sin(theta) * np.pi / N diff --git a/dedalus/core/coords.py b/dedalus/core/coords.py index a0fdb5ff..84558e4f 100644 --- a/dedalus/core/coords.py +++ b/dedalus/core/coords.py @@ -9,6 +9,7 @@ # Public interface __all__ = ['Coordinate', + 'DirectProduct', 'CartesianCoordinates', 'S2Coordinates', 'PolarCoordinates', @@ -35,22 +36,9 @@ def __getitem__(self, key): else: return self.coords[key] - 
def set_distributor(self, distributor): - self.dist = distributor - for coord in self.coords: - coord.dist = distributor - def check_bounds(self, coord, bounds): pass - @property - def first_axis(self): - return self.dist.coords.index(self.coords[0]) - - @property - def axis(self): - return self.dist.coords.index(self.coords[0]) - class Coordinate: dim = 1 @@ -72,18 +60,20 @@ def __eq__(self, other): def __hash__(self): return id(self) - @property - def axis(self): - return self.dist.coords.index(self) - def check_bounds(self, bounds): if self.cs == None: return else: self.cs.check_bounds(self, bounds) - def set_distributor(self, distributor): - self.dist = distributor - if self.cs: - self.cs.dist = distributor + +class DirectProduct(CoordinateSystem): + + def __init__(self, *coords): + print(coords) + self.coords = coords + self.dim = sum(coord.dim for coord in coords) + + + class CartesianCoordinates(CoordinateSystem): @@ -100,10 +90,10 @@ def __init__(self, *names, right_handed=True): def __str__(self): return '{' + ','.join([c.name for c in self.coords]) + '}' - def forward_intertwiner(self, axis, order, group): + def forward_intertwiner(self, first_axis, axis, order, group): return np.identity(self.dim**order) - def backward_intertwiner(self, axis, order, group): + def backward_intertwiner(self, first_axis, axis, order, group): return np.identity(self.dim**order) @CachedMethod @@ -156,12 +146,8 @@ def _U_backward(cls, order): """Unitary transform from spin to coord components.""" return cls._U_forward(order).T.conj() - @property - def axis(self): - return self.azimuth.axis - - def forward_intertwiner(self, axis, order, group): - subaxis = axis - self.axis + def forward_intertwiner(self, first_axis, axis, order, group): + subaxis = axis - first_axis if subaxis == 0: # Azimuth intertwiner is identity, independent of group return np.identity(self.dim**order) @@ -171,8 +157,8 @@ def forward_intertwiner(self, axis, order, group): else: raise ValueError("Invalid 
axis") - def backward_intertwiner(self, axis, order, group): - subaxis = axis - self.axis + def backward_intertwiner(self, first_axis, axis, order, group): + subaxis = axis - first_axis if subaxis == 0: # Azimuth intertwiner is identity, independent of group return np.identity(self.dim**order) @@ -214,12 +200,8 @@ def _U_backward(cls, order): """Unitary transform from spin to coord components.""" return cls._U_forward(order).T.conj() - @property - def axis(self): - return self.azimuth.axis - - def forward_intertwiner(self, axis, order, group): - subaxis = axis - self.axis + def forward_intertwiner(self, first_axis, axis, order, group): + subaxis = axis - first_axis if subaxis == 0: # Azimuth intertwiner is identity, independent of group return np.identity(self.dim**order) @@ -229,8 +211,8 @@ def forward_intertwiner(self, axis, order, group): else: raise ValueError("Invalid axis") - def backward_intertwiner(self, axis, order, group): - subaxis = axis - self.axis + def backward_intertwiner(self, first_axis, axis, order, group): + subaxis = axis - first_axis if subaxis == 0: # Azimuth intertwiner is identity, independent of group return np.identity(self.dim**order) @@ -296,10 +278,6 @@ def _Q_backward(cls, ell, order): # This may not rebust to having spin and reg orderings be different? 
return dedalus_sphere.spin_operators.Intertwiner(ell, indexing=cls.reg_ordering)(order) - @property - def axis(self): - return self.azimuth.axis - def check_bounds(self, coord, bounds): if coord == self.radius: if min(bounds) < 0: @@ -316,11 +294,6 @@ def sub_cs(self, other): else: return False return False - def set_distributor(self, distributor): - self.dist = distributor - super().set_distributor(distributor) - self.S2coordsys.set_distributor(distributor) - @staticmethod def cartesian(phi, theta, r): x = r * np.sin(theta) * np.cos(phi) @@ -328,8 +301,8 @@ def cartesian(phi, theta, r): z = r * np.cos(theta) return x, y, z - def forward_intertwiner(self, axis, order, group): - subaxis = axis - self.axis + def forward_intertwiner(self, first_axis, axis, order, group): + subaxis = axis - first_axis if subaxis == 0: # Azimuth intertwiner is identity, independent of group return np.identity(self.dim**order) @@ -343,8 +316,8 @@ def forward_intertwiner(self, axis, order, group): else: raise ValueError("Invalid axis") - def backward_intertwiner(self, axis, order, group): - subaxis = axis - self.axis + def backward_intertwiner(self, first_axis, axis, order, group): + subaxis = axis - first_axis if subaxis == 0: # Azimuth intertwiner is identity, independent of group return np.identity(self.dim**order) diff --git a/dedalus/core/distributor.py b/dedalus/core/distributor.py index bc094b14..1c4ab7aa 100644 --- a/dedalus/core/distributor.py +++ b/dedalus/core/distributor.py @@ -9,6 +9,7 @@ from collections import OrderedDict from math import prod +from .coords import CoordinateSystem from ..tools.cache import CachedMethod, CachedAttribute from ..tools.config import config from ..tools.general import OrderedSet @@ -81,8 +82,6 @@ def __init__(self, coordsystems, comm=None, mesh=None, dtype=None): self.single_coordsys = False # Get coords self.coords = tuple([coord for coordsystem in coordsystems for coord in coordsystem.coords]) - for coordsystem in coordsystems: - 
coordsystem.set_distributor(self) self.coordsystems = coordsystems # Defaults if comm is None: @@ -194,8 +193,19 @@ def get_transform_object(self, axis): return self.transforms[axis] def get_axis(self, coord): + if isinstance(coord, CoordinateSystem): + coord = coord.coords[0] return self.coords.index(coord) + def get_basis_axis(self, basis): + return self.get_axis(basis.coordsystem.coords[0]) + + def first_axis(self, basis): + return self.get_basis_axis(basis) + + def last_axis(self, basis): + return self.first_axis(basis) + basis.dim - 1 + def Field(self, *args, **kw): """Alternate constructor for fields.""" from .field import Field @@ -227,13 +237,13 @@ def IdentityTensor(self, coordsys): def local_grid(self, basis, scale=None): # TODO: remove from bases and do it all here? if basis.dim == 1: - return basis.local_grid(scale=scale) + return basis.local_grid(self, scale=scale) else: raise ValueError("Use `local_grids` for multidimensional bases.") def local_grids(self, *bases, scales=None): # TODO: remove from bases and do it all here? - return sum((basis.local_grids(scales=scales) for basis in bases), ()) + return sum((basis.local_grids(self, scales=scales) for basis in bases), ()) def local_modes(self, basis): # TODO: remove from bases and do it all here? 
@@ -347,7 +357,7 @@ def valid_elements(self, tensorsig, domain, scales, rank=None, broadcast=False): vshape = tuple(cs.dim for cs in tensorsig) + elements[0].shape valid = np.ones(shape=vshape, dtype=bool) for basis in domain.bases: - basis_axes = slice(basis.first_axis, basis.last_axis+1) + basis_axes = slice(self.dist.first_axis(basis), self.dist.last_axis(basis)+1) valid &= basis.valid_elements(tensorsig, grid_space[basis_axes], elements[basis_axes]) return valid @@ -357,7 +367,7 @@ def _group_arrays(self, elements, domain): groups = np.zeros_like(elements) groups = np.ma.masked_array(groups) for basis in domain.bases: - basis_axes = slice(basis.first_axis, basis.last_axis+1) + basis_axes = slice(self.dist.first_axis(basis), self.dist.last_axis(basis)+1) groups[basis_axes] = basis.elements_to_groups(grid_space[basis_axes], elements[basis_axes]) return groups diff --git a/dedalus/core/domain.py b/dedalus/core/domain.py index 05a8f160..219ba747 100644 --- a/dedalus/core/domain.py +++ b/dedalus/core/domain.py @@ -37,7 +37,7 @@ def _preprocess_args(cls, dist, bases): if len(set(cs)) < len(cs): raise ValueError("Overlapping bases specified.") # Sort by first axis - key = lambda basis: basis.first_axis + key = lambda basis: dist.get_basis_axis(basis) bases = tuple(sorted(bases, key=key)) return (dist, bases), {} @@ -54,7 +54,7 @@ def volume(self): def bases_by_axis(self): bases_by_axis = OrderedDict() for basis in self.bases: - for axis in range(basis.first_axis, basis.first_axis+basis.dim): + for axis in range(self.dist.get_basis_axis(basis), self.dist.get_basis_axis(basis)+basis.dim): bases_by_axis[axis] = basis return bases_by_axis @@ -62,7 +62,7 @@ def bases_by_axis(self): def full_bases(self): full_bases = [None for i in range(self.dist.dim)] for basis in self.bases: - for axis in range(basis.first_axis, basis.first_axis+basis.dim): + for axis in range(self.dist.get_basis_axis(basis), self.dist.get_basis_axis(basis)+basis.dim): full_bases[axis] = basis return 
tuple(full_bases) @@ -84,7 +84,7 @@ def dealias(self): dealias = [1] * self.dist.dim for basis in self.bases: for subaxis in range(basis.dim): - dealias[basis.first_axis+subaxis] = basis.dealias[subaxis] + dealias[self.dist.get_basis_axis(basis)+subaxis] = basis.dealias[subaxis] return tuple(dealias) def substitute_basis(self, old_basis, new_basis): @@ -98,7 +98,7 @@ def get_basis(self, coords): if isinstance(coords, int): axis = coords else: - axis = coords.axis + axis = self.dist.get_axis(coords) return self.full_bases[axis] def get_basis_subaxis(self, coord): @@ -133,8 +133,9 @@ def constant(self): """Tuple of constant flags.""" const = np.ones(self.dist.dim, dtype=bool) for basis in self.bases: + first_axis = self.dist.get_basis_axis(basis) for subaxis in range(basis.dim): - const[basis.axis+subaxis] = basis.constant[subaxis] + const[first_axis+subaxis] = basis.constant[subaxis] return tuple(const) @CachedAttribute @@ -146,8 +147,9 @@ def mode_dependence(self): """Tuple of dependence flags.""" dep = np.zeros(self.dist.dim, dtype=bool) for basis in self.bases: + first_axis = self.dist.get_basis_axis(basis) for subaxis in range(basis.dim): - dep[basis.axis+subaxis] = basis.subaxis_dependence[subaxis] + dep[first_axis+subaxis] = basis.subaxis_dependence[subaxis] return tuple(dep) @CachedAttribute @@ -169,7 +171,8 @@ def grid_shape(self, scales): def global_shape(self, layout, scales): shape = np.ones(self.dist.dim, dtype=int) for basis in self.bases: - basis_axes = slice(basis.first_axis, basis.last_axis+1) + first_axis = self.dist.get_basis_axis(basis) + basis_axes = slice(first_axis, first_axis+basis.dim) shape[basis_axes] = basis.global_shape(layout.grid_space[basis_axes], scales[basis_axes]) return tuple(shape) @@ -178,7 +181,8 @@ def chunk_shape(self, layout): """Compute chunk shape.""" shape = np.ones(self.dist.dim, dtype=int) for basis in self.bases: - basis_axes = slice(basis.first_axis, basis.last_axis+1) + first_axis = self.dist.get_basis_axis(basis) + 
basis_axes = slice(first_axis, first_axis+basis.dim) shape[basis_axes] = basis.chunk_shape(layout.grid_space[basis_axes]) return tuple(shape) @@ -186,7 +190,8 @@ def group_shape(self, layout): """Compute group shape.""" group_shape = np.ones(self.dist.dim, dtype=int) for basis in self.bases: - basis_axes = slice(basis.first_axis, basis.last_axis+1) + first_axis = self.dist.get_basis_axis(basis) + basis_axes = slice(first_axis, first_axis+basis.dim) group_shape[basis_axes] = basis.group_shape group_shape[layout.grid_space] = 1 return group_shape diff --git a/dedalus/core/evaluator.py b/dedalus/core/evaluator.py index fc1adbec..4a37ba2d 100644 --- a/dedalus/core/evaluator.py +++ b/dedalus/core/evaluator.py @@ -550,7 +550,7 @@ def setup_file(self, file): if basis is None: sn = lookup = 'constant' else: - subaxis = axis - basis.axis + subaxis = axis - self.dist.get_basis_axis(basis) if layout.grid_space[axis]: sn = basis.coordsystem.coords[subaxis].name data = basis.global_grids(scales)[subaxis].ravel() diff --git a/dedalus/core/operators.py b/dedalus/core/operators.py index e42e7e2d..2e6ccc33 100644 --- a/dedalus/core/operators.py +++ b/dedalus/core/operators.py @@ -1088,8 +1088,8 @@ def __init__(self, operand, coord, position, out=None): self.coord = coord self.input_basis = operand.domain.get_basis(coord) self.output_basis = self._output_basis(self.input_basis, position) - self.first_axis = self.input_basis.first_axis - self.last_axis = self.input_basis.last_axis + self.first_axis = self.dist.get_basis_axis(self.input_basis) + self.last_axis = self.first_axis + self.input_basis.dim - 1 # LinearOperator requirements self.operand = operand # FutureField requirements @@ -1173,8 +1173,8 @@ def __init__(self, operand, coord): self.coord = coord self.input_basis = operand.domain.get_basis(coord) self.output_basis = self._output_basis(self.input_basis) - self.first_axis = self.input_basis.first_axis - self.last_axis = self.input_basis.last_axis + self.first_axis = 
self.dist.get_basis_axis(self.input_basis) + self.last_axis = self.first_axis + self.input_basis.dim - 1 # LinearOperator requirements self.operand = operand # FutureField requirements @@ -1243,8 +1243,8 @@ def __init__(self, operand, coord): self.coord = coord self.input_basis = operand.domain.get_basis(coord) self.output_basis = self._output_basis(self.input_basis) - self.first_axis = self.input_basis.first_axis - self.last_axis = self.input_basis.last_axis + self.first_axis = self.dist.get_basis_axis(self.input_basis) + self.last_axis = self.first_axis + self.input_basis.dim - 1 # LinearOperator requirements self.operand = operand # FutureField requirements @@ -1329,9 +1329,9 @@ def __init__(self, operand, coord, out=None): self.coord = coord self.input_basis = operand.domain.get_basis(coord) self.output_basis = self._output_basis(self.input_basis) - self.first_axis = coord.axis - self.last_axis = coord.axis - self.axis = coord.axis + self.first_axis = self.dist.get_axis(coord) + self.last_axis = self.first_axis + self.axis = self.first_axis # LinearOperator requirements self.operand = operand # FutureField requirements @@ -1517,8 +1517,8 @@ def __init__(self, operand, output_basis, out=None): self.coords = output_basis.coords self.input_basis = operand.domain.get_basis(self.coords) self.output_basis = output_basis - self.first_axis = self.output_basis.first_axis - self.last_axis = self.output_basis.last_axis + self.first_axis = self.dist.get_basis_axis(self.output_basis) + self.last_axis = self.first_axis + self.output_basis.dim - 1 # LinearOperator requirements self.operand = operand # FutureField requirements @@ -1686,6 +1686,7 @@ def enforce_conditions(self): @alias("trace") class Trace(LinearOperator, metaclass=MultiClass): # TODO: contract arbitrary indices instead of the first two? 
+ # TODO: check that the two indices have same coordsys name = "Trace" @@ -3172,7 +3173,7 @@ def __init__(self, operand, index, coord, out=None): self.index = index self.coord = coord self.coordsys = operand.tensorsig[index] - self.coord_subaxis = self.coord.axis - self.coordsys.first_axis + self.coord_subaxis = self.dist.get_axis(coord) - self.dist.get_axis(self.coordsys) # LinearOperator requirements self.operand = operand # FutureField requirements diff --git a/dedalus/core/solvers.py b/dedalus/core/solvers.py index 4e4a4463..73362184 100644 --- a/dedalus/core/solvers.py +++ b/dedalus/core/solvers.py @@ -79,7 +79,8 @@ def __init__(self, problem, ncc_cutoff=1e-6, max_ncc_terms=None, entry_cutoff=1e self.matrix_dependence = np.array(problem.matrix_dependence) for eq in problem.eqs: for basis in eq['domain'].bases: - slices = slice(basis.first_axis, basis.last_axis+1) + first_axis = self.dist.get_basis_axis(basis) + slices = slice(first_axis, first_axis+basis.dim) self.matrix_dependence[slices] = self.matrix_dependence[slices] | basis.matrix_dependence(matrix_coupling[slices]) # Process config options if matsolver is None: From fc2160790de5ceae77f059093e6b2dbfdb71746e Mon Sep 17 00:00:00 2001 From: "Keaton J. 
Burns" Date: Tue, 12 Dec 2023 11:39:37 -0500 Subject: [PATCH 02/19] Further down rabbithole of removing dist from bases --- dedalus/core/basis.py | 527 +++++++++++----------- dedalus/core/distributor.py | 31 +- dedalus/core/domain.py | 7 +- dedalus/core/evaluator.py | 2 +- dedalus/core/operators.py | 49 +- dedalus/tests/test_sphere_calculus.py | 4 +- dedalus/tests/test_spherical_operators.py | 4 +- 7 files changed, 328 insertions(+), 296 deletions(-) diff --git a/dedalus/core/basis.py b/dedalus/core/basis.py index 7d613d76..4614dfc3 100644 --- a/dedalus/core/basis.py +++ b/dedalus/core/basis.py @@ -146,19 +146,19 @@ def elements_to_groups(self, grid_space, elements): # Subclasses must implement raise NotImplementedError - def global_grids(self, scales): + def global_grids(self, dist, scales): """Global grids.""" # Subclasses must implement # Returns tuple of global grids along each subaxis raise NotImplementedError(f"{type(self)} has not implement global_grids.") - def local_grids(self, scales): + def local_grids(self, dist, scales): """Local grids.""" # Subclasses must implement # Returns tuple of local grids along each subaxis raise NotImplementedError(f"{type(self)} has not implement local_grids.") - def local_modes(self, scales): + def local_modes(self, dist, scales): """Local modes.""" # Subclasses must implement # Returns tuple of local modes along each subaxis @@ -261,9 +261,9 @@ def product_matrix(self, arg_basis, out_basis, i): def build_ncc_matrix(self, product, subproblem, ncc_cutoff, max_ncc_terms): # Default to last axis only - if any(product.ncc.domain.nonconstant[self.first_axis:self.last_axis]): + if any(product.ncc.domain.nonconstant[dist.first_axis(self):dist.last_axis(self)]): raise NotImplementedError("Only last-axis NCCs implemented for this basis.") - axis = self.last_axis + axis = dist.last_axis(self) ncc_basis = product.ncc.domain.get_basis(axis) arg_basis = product.operand.domain.get_basis(axis) out_basis = product.domain.get_basis(axis) @@ 
-347,19 +347,19 @@ def __init__(self, coord, size, bounds, dealias): def matrix_dependence(self, matrix_coupling): return matrix_coupling - def global_grids(self, scales=None): + def global_grids(self, dist, scales=None): """Global grids.""" if scales == None: scales = (1,) - return (self.global_grid(scales[0]),) + return (self.global_grid(dist, scales[0]),) - def global_grid(self, scale=None): + def global_grid(self, dist, scale=None): """Global grid.""" if scale == None: scale = 1 native_grid = self._native_grid(scale) problem_grid = self.COV.problem_coord(native_grid) - return problem_grid + return reshape_vector(problem_grid, dim=dist.dim, axis=dist.get_basis_axis(self)) def local_grids(self, dist, scales=None): """Local grids.""" @@ -375,18 +375,18 @@ def local_grid(self, dist, scale=None): problem_grid = self.COV.problem_coord(native_grid) return reshape_vector(problem_grid, dim=dist.dim, axis=dist.get_basis_axis(self)) - def global_grid_spacing(self, scale=None): + def global_grid_spacing(self, dist, scale=None): """Global grid spacings.""" - grid = self.global_grid(scale=scale) - return np.gradient(grid, edge_order=2) + grid = self.global_grid(dist, scale=scale) + return np.gradient(grid, axis=dist.get_basis_axis(self), edge_order=2) def local_grid_spacing(self, dist, scale=None): """Local grids spacings.""" if scale is None: scale = 1 - global_spacing = self.global_grid_spacing(scale=scale) + global_spacing = self.global_grid_spacing(dist, scale=scale) local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale) - local_spacing_flat = global_spacing[local_elements[dist.get_basis_axis(self)]] + local_spacing_flat = np.ravel(global_spacing)[local_elements[dist.get_basis_axis(self)]] out = reshape_vector(local_spacing_flat, dim=dist.dim, axis=dist.get_basis_axis(self)) return out @@ -416,17 +416,17 @@ def forward_transform(self, field, axis, gdata, cdata): """Forward transform field data.""" data_axis = len(field.tensorsig) + axis 
grid_size = gdata.shape[data_axis] - plan = self.transform_plan(grid_size) + plan = self.transform_plan(field.dist, grid_size) plan.forward(gdata, cdata, data_axis) def backward_transform(self, field, axis, cdata, gdata): """Backward transform field data.""" data_axis = len(field.tensorsig) + axis grid_size = gdata.shape[data_axis] - plan = self.transform_plan(grid_size) + plan = self.transform_plan(field.dist, grid_size) plan.backward(cdata, gdata, data_axis) - def transform_plan(self, grid_size): + def transform_plan(self, dist, grid_size): # Subclasses must implement raise NotImplementedError @@ -503,7 +503,7 @@ def _native_grid(self, scale): return jacobi.build_grid(N, a=self.a0, b=self.b0) @CachedMethod - def transform_plan(self, grid_size): + def transform_plan(self, dist, grid_size): """Build transform plan.""" return self.transforms[self.library](grid_size, self.size, self.a, self.b, self.a0, self.b0) @@ -923,7 +923,7 @@ def _native_grid(self, scale): return (2 * np.pi / N) * np.arange(N) @CachedMethod - def transform_plan(self, grid_size): + def transform_plan(self, dist, grid_size): """Build transform plan.""" # Shortcut trivial transforms if grid_size == 1 or self.size == 1: @@ -1550,11 +1550,11 @@ def _group_matrix(group, input_basis, output_basis): class MultidimensionalBasis(Basis): def forward_transform(self, field, axis, gdata, cdata): - subaxis = axis - self.axis + subaxis = axis - field.dist.get_basis_axis(self) return self.forward_transforms[subaxis](field, axis, gdata, cdata) def backward_transform(self, field, axis, cdata, gdata): - subaxis = axis - self.axis + subaxis = axis - field.dist.get_basis_axis(self) return self.backward_transforms[subaxis](field, axis, cdata, gdata) @@ -1606,13 +1606,14 @@ def spin_recombination_matrix(self, tensorsig): + np.kron(matrix.imag,np.array([[0,-1],[1,0]]))) return matrix - def forward_spin_recombination(self, tensorsig, gdata, out): + def forward_spin_recombination(self, tensorsig, colat_axis, gdata, out): 
"""Apply component-to-spin recombination.""" # We assume gdata and out are different data buffers # and that we can safely overwrite gdata if not tensorsig: np.copyto(out, gdata) else: + azimuth_axis = colat_axis - 1 U = self.spin_recombination_matrices(tensorsig) if gdata.dtype == np.complex128: # HACK: just copying the data so we can apply_matrix repeatedly @@ -1629,11 +1630,11 @@ def forward_spin_recombination(self, tensorsig, gdata, out): if Ui is not None: dim = Ui.shape[0] if num_recombinations % 2 == 0: - input_view = reduced_view_5(gdata, i, self.axis+len(tensorsig)) - output_view = reduced_view_5(out, i, self.axis+len(tensorsig)) + input_view = reduced_view_5(gdata, i, azimuth_axis+len(tensorsig)) + output_view = reduced_view_5(out, i, azimuth_axis+len(tensorsig)) else: - input_view = reduced_view_5(out, i, self.axis+len(tensorsig)) - output_view = reduced_view_5(gdata, i, self.axis+len(tensorsig)) + input_view = reduced_view_5(out, i, azimuth_axis+len(tensorsig)) + output_view = reduced_view_5(gdata, i, azimuth_axis+len(tensorsig)) if dim == 3: spin_recombination.recombine_forward_dim3(input_view, output_view) elif dim == 2: @@ -1642,13 +1643,14 @@ def forward_spin_recombination(self, tensorsig, gdata, out): if num_recombinations % 2 == 0: np.copyto(out, gdata) - def backward_spin_recombination(self, tensorsig, gdata, out): + def backward_spin_recombination(self, tensorsig, colat_axis, gdata, out): """Apply spin-to-component recombination.""" # We assume gdata and out are different data buffers # and that we can safely overwrite gdata if not tensorsig: np.copyto(out, gdata) else: + azimuth_axis = colat_axis - 1 U = self.spin_recombination_matrices(tensorsig) if gdata.dtype == np.complex128: # HACK: just copying the data so we can apply_matrix repeatedly @@ -1665,11 +1667,11 @@ def backward_spin_recombination(self, tensorsig, gdata, out): if Ui is not None: dim = Ui.shape[0] if num_recombinations % 2 == 0: - input_view = reduced_view_5(gdata, i, 
self.axis+len(tensorsig)) - output_view = reduced_view_5(out, i, self.axis+len(tensorsig)) + input_view = reduced_view_5(gdata, i, azimuth_axis+len(tensorsig)) + output_view = reduced_view_5(out, i, azimuth_axis+len(tensorsig)) else: - input_view = reduced_view_5(out, i, self.axis+len(tensorsig)) - output_view = reduced_view_5(gdata, i, self.axis+len(tensorsig)) + input_view = reduced_view_5(out, i, azimuth_axis+len(tensorsig)) + output_view = reduced_view_5(gdata, i, azimuth_axis+len(tensorsig)) if dim == 3: spin_recombination.recombine_backward_dim3(input_view, output_view) elif dim == 2: @@ -1926,74 +1928,74 @@ def __eq__(self, other): def __hash__(self): return id(self) - @CachedAttribute - def m_maps(self): + @CachedMethod + def m_maps(self, dist): """ Tuple of (m, mg_slice, mc_slice, n_slice) for all local m's. """ # Get colatitude transform object - colat_transform = self.dist.get_transform_object(self.first_axis+1) + colat_transform = dist.get_transform_object(dist.first_axis(self)+1) colat_coeff_layout = colat_transform.layout0 colat_grid_layout = colat_transform.layout1 # Get groupsets - domain = self.domain + domain = self.domain(dist) group_coupling = [True] * domain.dist.dim - group_coupling[self.first_axis] = False + group_coupling[dist.first_axis(self)] = False group_coupling = tuple(group_coupling) groupsets = colat_grid_layout.local_groupsets(group_coupling, domain, scales=domain.dealias, broadcast=True) # Build m_maps from groupset slices m_maps = [] for groupset in groupsets: - m = groupset[self.first_axis] + m = groupset[dist.first_axis(self)] coeff_slices = colat_coeff_layout.local_groupset_slices(groupset, domain, scales=domain.dealias, broadcast=True) grid_slices = colat_grid_layout.local_groupset_slices(groupset, domain, scales=domain.dealias, broadcast=True) if len(coeff_slices) != 1 or len(grid_slices) != 1: raise ValueError("This should never happpen. 
Ask for help.") - mg_slice = grid_slices[0][self.first_axis] - mc_slice = coeff_slices[0][self.first_axis] - n_slice = coeff_slices[0][self.first_axis+1] + mg_slice = grid_slices[0][dist.first_axis(self)] + mc_slice = coeff_slices[0][dist.first_axis(self)] + n_slice = coeff_slices[0][dist.first_axis(self)+1] m_maps.append((m, mg_slice, mc_slice, n_slice)) return tuple(m_maps) - @CachedAttribute - def ell_reversed(self): + @CachedMethod + def ell_reversed(self, dist): ell_reversed = {} - for m, mg_slice, mc_slice, ell_slice in self.m_maps: + for m, mg_slice, mc_slice, ell_slice in self.m_maps(dist): ell_reversed[m] = False return ell_reversed - def global_grids(self, scales=None): + def global_grids(self, dist, scales=None): if scales == None: scales = (1, 1) - return (self.global_grid_azimuth(scales[0]), - self.global_grid_radius(scales[1])) + return (self.global_grid_azimuth(dist, scales[0]), + self.global_grid_radius(dist, scales[1])) - def global_grid_radius(self, scale): + def global_grid_radius(self, dist, scale): r = self.radial_COV.problem_coord(self._native_radius_grid(scale)) - return reshape_vector(r, dim=self.dist.dim, axis=self.axis+1) + return reshape_vector(r, dim=dist.dim, axis=dist.get_basis_axis(self)+1) @CachedMethod - def global_grid_spacing(self, axis, scales=None): + def global_grid_spacing(self, dist, axis, scales=None): """Global grids spacings.""" if scales is None: scales = (1,1) - return np.gradient(self.global_grids(scales=scales)[axis], axis=axis, edge_order=2) + return np.gradient(self.global_grids(dist, scales=scales)[axis], axis=axis, edge_order=2) @CachedMethod - def local_grid_spacing(self, axis, scales=None): + def local_grid_spacing(self, dist, axis, scales=None): """Local grids spacings.""" - global_spacing = self.global_grid_spacing(axis, scales=scales) + global_spacing = self.global_grid_spacing(dist, axis, scales=scales) if scales is None: scales = (1,1) - local_elements = self.dist.grid_layout.local_elements(self.domain, 
scales=scales[axis])[axis] - return reshape_vector(np.ravel(global_spacing)[local_elements], dim=self.dist.dim, axis=axis) + local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scales[axis])[axis] + return reshape_vector(np.ravel(global_spacing)[local_elements], dim=dist.dim, axis=axis) - def local_grids(self, scales=None): + def local_grids(self, dist, scales=None): if scales == None: scales = (1, 1) - return (self.local_grid_azimuth(scales[0]), - self.local_grid_radius(scales[1])) + return (self.local_grid_azimuth(dist, scales[0]), + self.local_grid_radius(dist, scales[1])) def forward_transform_azimuth_Mmax0(self, field, axis, gdata, cdata): # slice_axis = axis + len(field.tensorsig) # np.copyto(cdata[axslice(slice_axis, 0, 1)], gdata) - np.copyto(cdata[axslice(self.axis+len(field.tensorsig), 0, 1)], gdata) + np.copyto(cdata[axslice(axis+len(field.tensorsig), 0, 1)], gdata) def forward_transform_azimuth(self, field, axis, gdata, cdata): # Call Fourier transform @@ -2004,7 +2006,7 @@ def forward_transform_azimuth(self, field, axis, gdata, cdata): def backward_transform_azimuth_Mmax0(self, field, axis, cdata, gdata): # slice_axis = axis + len(field.tensorsig) # np.copyto(gdata, cdata[axslice(slice_axis, 0, 1)]) - np.copyto(gdata, cdata[axslice(self.axis+len(field.tensorsig), 0, 1)]) + np.copyto(gdata, cdata[axslice(axis+len(field.tensorsig), 0, 1)]) def backward_transform_azimuth(self, field, axis, cdata, gdata): # Permute m back from triangular truncation @@ -2139,14 +2141,14 @@ def __matmul__(self, other): # Same as __mul__ since conversion only needs to be upwards in k return self.__mul__(other) - def global_grid_radius(self, scale): + def global_grid_radius(self, dist, scale): r = self._radius_grid(scale) - return reshape_vector(r, dim=self.dist.dim, axis=self.axis+1) + return reshape_vector(r, dim=dist.dim, axis=dist.get_basis_axis(self)+1) - def local_grid_radius(self, scale): + def local_grid_radius(self, dist, scale): r = 
self._radius_grid(scale) - local_elements = self.dist.grid_layout.local_elements(self.domain, scales=scale)[self.axis+1] - return reshape_vector(r[local_elements], dim=self.dist.dim, axis=self.axis+1) + local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale)[dist.get_basis_axis(self)+1] + return reshape_vector(r[local_elements], dim=dist.dim, axis=dist.get_basis_axis(self)+1) @CachedMethod def _radius_grid(self, scale): @@ -2165,18 +2167,18 @@ def _radius_weights(self, scale): normalization = self.dR/2 return normalization * ( (Q0 @ weights0).T ) @ (weights_proj*Q_proj) - def global_radius_weights(self, scale=None): + def global_radius_weights(self, dist, scale=None): if scale == None: scale = 1 N = int(np.ceil(scale * self.shape[1])) z, weights = dedalus_sphere.sphere.quadrature(2,N,k=self.alpha) - return reshape_vector(weights.astype(np.float64), dim=self.dist.dim, axis=self.axis+1) + return reshape_vector(weights.astype(np.float64), dim=dist.dim, axis=dist.get_basis_axis(self)+1) - def local_radius_weights(self, scale=None): + def local_radius_weights(self, dist, scale=None): if scale == None: scale = 1 - local_elements = self.dist.grid_layout.local_elements(self.domain, scales=scale)[self.axis+1] + local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale)[dist.get_basis_axis(self)+1] N = int(np.ceil(scale * self.shape[1])) z, weights = dedalus_sphere.sphere.quadrature(2,N,k=self.alpha) - return reshape_vector(weights.astype(np.float64)[local_elements], dim=self.dist.dim, axis=self.axis+1) + return reshape_vector(weights.astype(np.float64)[local_elements], dim=dist.dim, axis=dist.get_basis_axis(self)+1) @CachedAttribute def constant_mode_value(self): @@ -2189,7 +2191,7 @@ def _new_k(self, k): return self.clone_with(k=k) @CachedMethod - def transform_plan(self, grid_size, k): + def transform_plan(self, dist, grid_size, k): """Build transform plan.""" a = self.alpha[0] + k b = self.alpha[1] + k @@ -2210,16 +2212,16 
@@ def forward_transform_radius(self, field, axis, gdata, cdata): gdata *= self.radial_transform_factor(field.scales[axis], data_axis, -self.k) # Expand gdata if mmax=0 and dtype=float for spin recombination if self.mmax == 0 and self.dtype == np.float64: - m_axis = len(field.tensorsig) + self.axis + m_axis = len(field.tensorsig) + axis - 1 gdata = np.concatenate((gdata, np.zeros_like(gdata)), axis=m_axis) # Apply spin recombination from gdata to temp temp = np.zeros_like(gdata) - self.forward_spin_recombination(field.tensorsig, gdata, temp) + self.forward_spin_recombination(field.tensorsig, axis, gdata, temp) cdata.fill(0) # OPTIMIZE: shouldn't be necessary # Transform component-by-component from temp to cdata S = self.spin_weights(field.tensorsig) for i, s in np.ndenumerate(S): - plan = self.transform_plan(grid_size, self.k) + plan = self.transform_plan(field.dist, grid_size, self.k) plan.forward(temp[i], cdata[i], axis) def backward_transform_radius(self, field, axis, cdata, gdata): @@ -2227,7 +2229,7 @@ def backward_transform_radius(self, field, axis, cdata, gdata): grid_size = gdata.shape[data_axis] # Create temporary if self.mmax == 0 and self.dtype == np.float64: - m_axis = len(field.tensorsig) + self.axis + m_axis = len(field.tensorsig) + axis - 1 shape = list(gdata.shape) shape[m_axis] = 2 temp = np.zeros(shape, dtype=gdata.dtype) @@ -2239,11 +2241,11 @@ def backward_transform_radius(self, field, axis, cdata, gdata): # Transform component-by-component from cdata to temp S = self.spin_weights(field.tensorsig) for i, s in np.ndenumerate(S): - plan = self.transform_plan(grid_size, self.k) + plan = self.transform_plan(field.dist, grid_size, self.k) plan.backward(cdata[i], temp[i], axis) # Apply spin recombination from temp to gdata gdata.fill(0) # OPTIMIZE: shouldn't be necessary - self.backward_spin_recombination(field.tensorsig, temp, gdata) + self.backward_spin_recombination(field.tensorsig, axis, temp, gdata) # Multiply by radial factor if self.k > 0: 
gdata *= self.radial_transform_factor(field.scales[axis], data_axis, self.k) @@ -2427,14 +2429,14 @@ def __matmul__(self, other): return self.clone_with(shape=shape, k=k) return NotImplemented - def global_grid_radius(self, scale): + def global_grid_radius(self, dist, scale): r = self.radial_COV.problem_coord(self._native_radius_grid(scale)) - return reshape_vector(r, dim=self.dist.dim, axis=self.axis+1) + return reshape_vector(r, dim=dist.dim, axis=dist.get_basis_axis(self)+1) - def local_grid_radius(self, scale): + def local_grid_radius(self, dist, scale): r = self.radial_COV.problem_coord(self._native_radius_grid(scale)) - local_elements = self.dist.grid_layout.local_elements(self.domain(dist), scales=scale)[self.axis+1] - return reshape_vector(r[local_elements], dim=self.dist.dim, axis=self.axis+1) + local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale)[dist.get_basis_axis(self)+1] + return reshape_vector(r[local_elements], dim=dist.dim, axis=dist.get_basis_axis(self)+1) def _native_radius_grid(self, scale): N = int(np.ceil(scale * self.shape[1])) @@ -2442,18 +2444,18 @@ def _native_radius_grid(self, scale): r = np.sqrt((z+1)/2).astype(np.float64) return r - def global_radius_weights(self, scale=None): + def global_radius_weights(self, dist, scale=None): if scale == None: scale = 1 N = int(np.ceil(scale * self.shape[1])) z, weights = dedalus_sphere.sphere.quadrature(2,N,k=self.alpha) - return reshape_vector(weights.astype(np.float64), dim=self.dist.dim, axis=self.axis+1) + return reshape_vector(weights.astype(np.float64), dim=dist.dim, axis=dist.get_basis_axis(self)+1) - def local_radius_weights(self, scale=None): + def local_radius_weights(self, dist, scale=None): if scale == None: scale = 1 - local_elements = self.dist.grid_layout.local_elements(self.domain, scales=scale)[self.axis+1] + local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale)[dist.get_basis_axis(self)+1] N = int(np.ceil(scale * 
self.shape[1])) z, weights = dedalus_sphere.sphere.quadrature(2,N,k=self.alpha) - return reshape_vector(weights.astype(np.float64)[local_elements], dim=self.dist.dim, axis=self.axis+1) + return reshape_vector(weights.astype(np.float64)[local_elements], dim=dist.dim, axis=dist.get_basis_axis(self)+1) @CachedAttribute def constant_mode_value(self): @@ -2465,9 +2467,9 @@ def _new_k(self, k): return self.clone_with(k=k) @CachedMethod - def transform_plan(self, grid_shape, axis, s): + def transform_plan(self, dist, grid_shape, axis, s): """Build transform plan.""" - return self.transforms[self.radius_library](grid_shape, self.shape, axis, self.m_maps, s, self.k, self.alpha) + return self.transforms[self.radius_library](grid_shape, self.shape, axis, self.m_maps(dist), s, self.k, self.alpha) def forward_transform_radius_Nmax0(self, field, axis, gdata, cdata): raise NotImplementedError("Not yet.") @@ -2480,17 +2482,17 @@ def forward_transform_radius_Nmax0(self, field, axis, gdata, cdata): def forward_transform_radius(self, field, axis, gdata, cdata): # Expand gdata if mmax=0 and dtype=float for spin recombination if self.mmax == 0 and self.dtype == np.float64: - m_axis = len(field.tensorsig) + self.axis + m_axis = len(field.tensorsig) + axis - 1 gdata = np.concatenate((gdata, np.zeros_like(gdata)), axis=m_axis) # Apply spin recombination from gdata to temp temp = np.zeros_like(gdata) - self.forward_spin_recombination(field.tensorsig, gdata, temp) + self.forward_spin_recombination(field.tensorsig, axis, gdata, temp) cdata.fill(0) # OPTIMIZE: shouldn't be necessary # Transform component-by-component from temp to cdata S = self.spin_weights(field.tensorsig) for i, s in np.ndenumerate(S): grid_shape = gdata[i].shape - plan = self.transform_plan(grid_shape, axis, s) + plan = self.transform_plan(field.dist, grid_shape, axis, s) plan.forward(temp[i], cdata[i], axis) def backward_transform_radius_Nmax0(self, field, axis, cdata, gdata): @@ -2504,7 +2506,7 @@ def 
backward_transform_radius_Nmax0(self, field, axis, cdata, gdata): def backward_transform_radius(self, field, axis, cdata, gdata): # Create temporary if self.mmax == 0 and self.dtype == np.float64: - m_axis = len(field.tensorsig) + self.axis + m_axis = len(field.tensorsig) + axis - 1 shape = list(gdata.shape) shape[m_axis] = 2 temp = np.zeros(shape, dtype=gdata.dtype) @@ -2517,11 +2519,11 @@ def backward_transform_radius(self, field, axis, cdata, gdata): S = self.spin_weights(field.tensorsig) for i, s in np.ndenumerate(S): grid_shape = gdata[i].shape - plan = self.transform_plan(grid_shape, axis, s) + plan = self.transform_plan(field.dist, grid_shape, axis, s) plan.backward(cdata[i], temp[i], axis) # Apply spin recombination from temp to gdata gdata.fill(0) # OPTIMIZE: shouldn't be necessary - self.backward_spin_recombination(field.tensorsig, temp, gdata) + self.backward_spin_recombination(field.tensorsig, axis, temp, gdata) if self.mmax == 0 and self.dtype == np.float64: gdata_orig[:] = gdata[axslice(m_axis, 0, 1)] @@ -2620,7 +2622,7 @@ class ConvertPolar(operators.Convert, operators.PolarMOperator): def __init__(self, operand, output_basis, out=None): operators.Convert.__init__(self, operand, output_basis, out=out) - self.radius_axis = self.last_axis + self.radius_axis = dist.last_axis(self) def spinindex_out(self, spinindex_in): return (spinindex_in,) @@ -2966,51 +2968,51 @@ def __matmul__(self, other): # # TODO: check this is right for regtotal != 0? # return 1 / np.sqrt(2) - @CachedAttribute - def m_maps(self): + @CachedMethod + def m_maps(self, dist): """ Tuple of (m, mg_slice, mc_slice, ell_slice) for all local m's when the colatitude axis is local. 
""" # Get colatitude transform object - colat_transform = self.dist.get_transform_object(self.first_axis+1) + colat_transform = dist.get_transform_object(dist.first_axis(self)+1) colat_coeff_layout = colat_transform.layout0 colat_grid_layout = colat_transform.layout1 # Get groupsets - domain = self.domain + domain = self.domain(dist) group_coupling = [True] * domain.dist.dim - group_coupling[self.first_axis] = False + group_coupling[dist.first_axis(self)] = False group_coupling = tuple(group_coupling) groupsets = colat_coeff_layout.local_groupsets(group_coupling, domain, scales=domain.dealias, broadcast=True) # Build m_maps from groupset slices m_maps = [] for groupset in groupsets: - m = groupset[self.first_axis] + m = groupset[dist.first_axis(self)] coeff_slices = colat_coeff_layout.local_groupset_slices(groupset, domain, scales=domain.dealias, broadcast=True) grid_slices = colat_grid_layout.local_groupset_slices(groupset, domain, scales=domain.dealias, broadcast=True) if len(coeff_slices) != 1 or len(grid_slices) != 1: raise ValueError("This should never happpen. 
Ask for help.") - mg_slice = grid_slices[0][self.first_axis] - mc_slice = coeff_slices[0][self.first_axis] - ell_slice = coeff_slices[0][self.first_axis+1] + mg_slice = grid_slices[0][dist.first_axis(self)] + mc_slice = coeff_slices[0][dist.first_axis(self)] + ell_slice = coeff_slices[0][dist.first_axis(self)+1] # Reverse n_slice for folded modes so that ells are well-ordered if ell_slice.start == 0 and m != 0: ell_slice = slice(ell_slice.stop-1, None, -1) m_maps.append((m, mg_slice, mc_slice, ell_slice)) return tuple(m_maps) - @CachedAttribute - def ell_reversed(self): + @CachedMethod + def ell_reversed(self, dist): ell_reversed = {} - for m, mg_slice, mc_slice, ell_slice in self.m_maps: + for m, mg_slice, mc_slice, ell_slice in self.m_maps(dist): ell_reversed[m] = False if ell_slice.step is not None: if ell_slice.step < 0: ell_reversed[m] = True return ell_reversed - @CachedAttribute - def ell_maps(self): + @CachedMethod + def ell_maps(self, dist): """ Tuple of (ell, m_slice, ell_slice) for all local ells in coeff space. m_slice and ell_slice are local slices along the phi and theta axes. 
@@ -3020,11 +3022,11 @@ def ell_maps(self): for ell, m_slice, ell_slice in ell_maps: ell_data = data[m_slice, ell_slice] """ - coeff_layout = self.dist.coeff_layout - azimuth_axis = self.first_axis - colatitude_axis = self.first_axis + 1 + coeff_layout = dist.coeff_layout + azimuth_axis = dist.first_axis(self) + colatitude_axis = dist.first_axis(self) + 1 # Get groupsets - domain = self.domain + domain = self.domain(dist) group_coupling = [True] * domain.dist.dim group_coupling[colatitude_axis] = False group_coupling = tuple(group_coupling) @@ -3045,33 +3047,33 @@ def global_grids(self, scales=None): return (self.global_grid_azimuth(scales[0]), self.global_grid_colatitude(scales[1])) - def global_grid_colatitude(self, scale): + def global_grid_colatitude(self, dist, scale): theta = self._native_colatitude_grid(scale) - return reshape_vector(theta, dim=self.dist.dim, axis=self.axis+1) + return reshape_vector(theta, dim=dist.dim, axis=dist.get_basis_axis(self)+1) @CachedMethod - def global_grid_spacing(self, axis, scales=None): + def global_grid_spacing(self, dist, axis, scales=None): """Global grids spacings.""" if scales is None: scales = (1,1) - return np.gradient(self.global_grids(scales=scales)[axis], axis=axis, edge_order=2) + return np.gradient(self.global_grids(dist, scales=scales)[axis], axis=axis, edge_order=2) @CachedMethod - def local_grid_spacing(self, axis, scales=None): + def local_grid_spacing(self, dist, axis, scales=None): """Local grids spacings.""" - global_spacing = self.global_grid_spacing(axis, scales=scales) + global_spacing = self.global_grid_spacing(dist, axis, scales=scales) if scales is None: scales = (1,1) - local_elements = self.dist.grid_layout.local_elements(self.domain, scales=scales[axis])[axis] - return reshape_vector(np.ravel(global_spacing)[local_elements], dim=self.dist.dim, axis=axis) + local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scales[axis])[axis] + return 
reshape_vector(np.ravel(global_spacing)[local_elements], dim=dist.dim, axis=axis) - def local_grids(self, scales=None): + def local_grids(self, dist, scales=None): if scales == None: scales = (1, 1) - return (self.local_grid_azimuth(scales[0]), - self.local_grid_colatitude(scales[1])) + return (self.local_grid_azimuth(dist, scales[0]), + self.local_grid_colatitude(dist, scales[1])) - def local_grid_colatitude(self, scale): - local_elements = self.dist.grid_layout.local_elements(self.domain, scales=scale)[self.axis+1] + def local_grid_colatitude(self, dist, scale): + local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale)[dist.get_basis_axis(self)+1] theta = self._native_colatitude_grid(scale)[local_elements] - return reshape_vector(theta, dim=self.dist.dim, axis=self.axis+1) + return reshape_vector(theta, dim=dist.dim, axis=dist.get_basis_axis(self)+1) def _native_colatitude_grid(self, scale): N = int(np.ceil(scale * self.shape[1])) @@ -3079,23 +3081,23 @@ def _native_colatitude_grid(self, scale): theta = np.arccos(cos_theta).astype(np.float64) return theta - def global_colatitude_weights(self, scale=None): + def global_colatitude_weights(self, dist, scale=None): if scale == None: scale = 1 N = int(np.ceil(scale * self.shape[1])) cos_theta, weights = dedalus_sphere.sphere.quadrature(Lmax=N-1) - return reshape_vector(weights.astype(np.float64), dim=self.dist.dim, axis=self.axis+1) + return reshape_vector(weights.astype(np.float64), dim=dist.dim, axis=dist.get_basis_axis(self)+1) - def local_colatitude_weights(self, scale=None): + def local_colatitude_weights(self, dist, scale=None): if scale == None: scale = 1 - local_elements = self.dist.grid_layout.local_elements(self.domain, scales=scale)[self.axis+1] + local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale)[dist.get_basis_axis(self)+1] N = int(np.ceil(scale * self.shape[1])) cos_theta, weights = dedalus_sphere.sphere.quadrature(Lmax=N-1) - return 
reshape_vector(weights.astype(np.float64)[local_elements], dim=self.dist.dim, axis=self.axis+1) + return reshape_vector(weights.astype(np.float64)[local_elements], dim=dist.dim, axis=dist.get_basis_axis(self)+1) @CachedMethod - def transform_plan(self, Ntheta, s): + def transform_plan(self, dist, Ntheta, s): """Build transform plan.""" - return self.transforms[self.colatitude_library](Ntheta, self.Lmax, self.m_maps, s) + return self.transforms[self.colatitude_library](Ntheta, self.Lmax, self.m_maps(dist), s) def forward_transform_azimuth_Mmax0(self, field, axis, gdata, cdata): slice_axis = axis + len(field.tensorsig) @@ -3131,7 +3133,7 @@ def backward_transform_radius(self, field, axis, cdata, gdata): def forward_transform_colatitude_Lmax0(self, field, axis, gdata, cdata): # Apply spin recombination from gdata to temp temp = np.zeros_like(gdata) - self.forward_spin_recombination(field.tensorsig, gdata, temp) + self.forward_spin_recombination(field.tensorsig, axis, gdata, temp) # Copy from temp to cdata np.copyto(cdata, temp) # Scale to account for SWSH normalization? Is this right for all spins? 
@@ -3140,31 +3142,31 @@ def forward_transform_colatitude_Lmax0(self, field, axis, gdata, cdata): def forward_transform_colatitude(self, field, axis, gdata, cdata): # Expand gdata if mmax=0 and dtype=float for spin recombination if self.mmax == 0 and self.dtype == np.float64: - m_axis = len(field.tensorsig) + self.axis + m_axis = len(field.tensorsig) + axis - 1 gdata = np.concatenate((gdata, np.zeros_like(gdata)), axis=m_axis) # Apply spin recombination from gdata to temp temp = np.zeros_like(gdata) - self.forward_spin_recombination(field.tensorsig, gdata, temp) + self.forward_spin_recombination(field.tensorsig, axis, gdata, temp) cdata.fill(0) # OPTIMIZE: shouldn't be necessary # Transform component-by-component from temp to cdata S = self.spin_weights(field.tensorsig) for i, s in np.ndenumerate(S): Ntheta = gdata[i].shape[axis] - plan = self.transform_plan(Ntheta, s) + plan = self.transform_plan(field.dist, Ntheta, s) plan.forward(temp[i], cdata[i], axis) def backward_transform_colatitude_Lmax0(self, field, axis, cdata, gdata): # Copy from cdata to temp temp = np.copy(cdata) # Apply spin recombination from temp to gdata - self.backward_spin_recombination(field.tensorsig, temp, gdata) + self.backward_spin_recombination(field.tensorsig, axis, temp, gdata) # Scale to account for SWSH normalization? Is this right for all spins? 
gdata /= np.sqrt(2) def backward_transform_colatitude(self, field, axis, cdata, gdata): # Create temporary if self.mmax == 0 and self.dtype == np.float64: - m_axis = len(field.tensorsig) + self.axis + m_axis = len(field.tensorsig) + axis - 1 shape = list(gdata.shape) shape[m_axis] = 2 temp = np.zeros(shape, dtype=gdata.dtype) @@ -3176,11 +3178,11 @@ def backward_transform_colatitude(self, field, axis, cdata, gdata): S = self.spin_weights(field.tensorsig) for i, s in np.ndenumerate(S): Ntheta = gdata[i].shape[axis] - plan = self.transform_plan(Ntheta, s) + plan = self.transform_plan(field.dist, Ntheta, s) plan.backward(cdata[i], temp[i], axis) # Apply spin recombination from temp to gdata gdata.fill(0) # OPTIMIZE: shouldn't be necessary - self.backward_spin_recombination(field.tensorsig, temp, gdata) + self.backward_spin_recombination(field.tensorsig, axis, temp, gdata) if self.mmax == 0 and self.dtype == np.float64: gdata_orig[:] = gdata[axslice(m_axis, 0, 1)] @@ -3346,8 +3348,8 @@ def __init__(self, operand, index=0, out=None): self.operand = operand self.input_basis = operand.domain.get_basis(coordsys) self.output_basis = self.input_basis - self.first_axis = self.input_basis.first_axis - self.last_axis = self.input_basis.last_axis + self.first_axis = self.dist.first_axis(self.input_basis) + self.last_axis = self.dist.last_axis(self.input_basis) # FutureField requirements self.domain = operand.domain#.substitute_basis(self.input_basis, self.output_basis) self.tensorsig = operand.tensorsig[:index] + operand.tensorsig[index+1:] @@ -3384,8 +3386,8 @@ def __init__(self, operand, coordsys, out=None): self.operand = operand self.input_basis = operand.domain.get_basis(coordsys) self.output_basis = self.input_basis - self.first_axis = self.input_basis.first_axis - self.last_axis = self.input_basis.last_axis + self.first_axis = self.dist.first_axis(self.input_basis) + self.last_axis = self.dist.last_axis(self.input_basis) # FutureField requirements self.domain = 
operand.domain#.substitute_basis(self.input_basis, self.output_basis) self.tensorsig = (coordsys,) + operand.tensorsig @@ -3425,8 +3427,8 @@ def __init__(self, operand, coordsys, out=None): self.operand = operand self.input_basis = operand.domain.get_basis(coordsys) self.output_basis = self.input_basis - self.first_axis = self.input_basis.first_axis - self.last_axis = self.input_basis.last_axis + self.first_axis = self.dist.first_axis(self.input_basis) + self.last_axis = self.dist.last_axis(self.input_basis) # FutureField requirements self.domain = operand.domain#.substitute_basis(self.input_basis, self.output_basis) self.tensorsig = operand.tensorsig @@ -3474,7 +3476,6 @@ def __init__(self, coordsystem, radial_size, k, dealias, dtype): self.dtype = dtype # Call at end because dealias is needed to build self.domain Basis.__init__(self, coordsystem) - self.radial_axis = self.first_axis + 2 if dtype == np.float64: self.group_shape = (2, 1, 1) elif dtype == np.complex128: @@ -3533,32 +3534,36 @@ def elements_to_groups(self, grid_space, elements): groups[:, n < nmin] = np.ma.masked return groups - @CachedAttribute - def ell_maps(self): - return SphereBasis.ell_maps(self) + @CachedMethod + def ell_maps(self, dist): + return SphereBasis.ell_maps(self, dist) def get_radial_basis(self): return self - def global_grid(self, scale): + def global_grid(self, dist, scale): problem_grid = self._radius_grid(scale) - return reshape_vector(problem_grid, dim=self.dist.dim, axis=self.radial_axis) + radial_axis = dist.get_basis_axis(self) + 2 + return reshape_vector(problem_grid, dim=dist.dim, axis=radial_axis) - def local_grid(self, scale): - local_elements = self.dist.grid_layout.local_elements(self.domain, scales=scale)[self.radial_axis] + def local_grid(self, dist, scale): + radial_axis = dist.get_basis_axis(self) + 2 + local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale)[radial_axis] problem_grid = self._radius_grid(scale)[local_elements] - return 
reshape_vector(problem_grid, dim=self.dist.dim, axis=self.radial_axis) + return reshape_vector(problem_grid, dim=dist.dim, axis=radial_axis) - def global_weights(self, scale=None): + def global_weights(self, dist, scale=None): if scale == None: scale = 1 + radial_axis = dist.get_basis_axis(self) + 2 weights = self._radius_weights(scale) - return reshape_vector(weights.astype(np.float64), dim=self.dist.dim, axis=self.radial_axis) + return reshape_vector(weights.astype(np.float64), dim=dist.dim, axis=radial_axis) - def local_weights(self, scale=None): + def local_weights(self, dist, scale=None): if scale == None: scale = 1 - local_elements = self.dist.grid_layout.local_elements(self.domain, scales=scale)[self.radial_axis] + radial_axis = dist.get_basis_axis(self) + 2 + local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale)[radial_axis] weights = self._radius_weights(scale) - return reshape_vector(weights.astype(np.float64)[local_elements], dim=self.dist.dim, axis=self.radial_axis) + return reshape_vector(weights.astype(np.float64)[local_elements], dim=dist.dim, axis=radial_axis) @CachedMethod def regularity_allowed(self,l,regularity): @@ -3625,10 +3630,8 @@ def regularity_allowed_vectorized(self, l, regindex): valid[walk == 0] = False return valid - def forward_regularity_recombination(self, tensorsig, axis, gdata, ell_maps=None): + def forward_regularity_recombination(self, tensorsig, axis, gdata, ell_maps): rank = len(tensorsig) - if ell_maps is None: - ell_maps = self.ell_maps ell_list = tuple(map[0] for map in ell_maps) # Apply radial recombinations if rank > 0: @@ -3636,7 +3639,7 @@ def forward_regularity_recombination(self, tensorsig, axis, gdata, ell_maps=None # Flatten tensor axes shape = gdata.shape temp = gdata.reshape((-1,)+shape[rank:]) - slices = [slice(None) for i in range(1+self.dist.dim)] + slices = [slice(None) for i in range(temp.ndim)] # Apply Q transformations for each ell to flattened tensor data for ell, m_ind, ell_ind 
in ell_maps: slices[axis-2+1] = m_ind # Add 1 for tensor axis @@ -3644,10 +3647,8 @@ def forward_regularity_recombination(self, tensorsig, axis, gdata, ell_maps=None temp_ell = temp[tuple(slices)] apply_matrix(Q[ell].T, temp_ell, axis=0, out=temp_ell) - def backward_regularity_recombination(self, tensorsig, axis, gdata, ell_maps=None): + def backward_regularity_recombination(self, tensorsig, axis, gdata, ell_maps): rank = len(tensorsig) - if ell_maps is None: - ell_maps = self.ell_maps ell_list = tuple(map[0] for map in ell_maps) # Apply radial recombinations if rank > 0: @@ -3655,7 +3656,7 @@ def backward_regularity_recombination(self, tensorsig, axis, gdata, ell_maps=Non # Flatten tensor axes shape = gdata.shape temp = gdata.reshape((-1,)+shape[rank:]) - slices = [slice(None) for i in range(1+self.dist.dim)] + slices = [slice(None) for i in range(temp.ndim)] # Apply Q transformations for each ell to flattened tensor data for ell, m_ind, ell_ind in ell_maps: slices[axis-2+1] = m_ind # Add 1 for tensor axis @@ -3691,13 +3692,13 @@ def forward_transform_azimuth(self, field, axis, gdata, cdata): def forward_transform_colatitude(self, field, axis, gdata, cdata): # Spin recombination temp = np.zeros_like(gdata) - self.forward_spin_recombination(field.tensorsig, gdata, temp) + self.forward_spin_recombination(field.tensorsig, axis, gdata, temp) np.copyto(cdata, temp) def backward_transform_colatitude(self, field, axis, cdata, gdata): # Spin recombination temp = np.copy(cdata) - self.backward_spin_recombination(field.tensorsig, temp, gdata) + self.backward_spin_recombination(field.tensorsig, axis, temp, gdata) def backward_transform_azimuth(self, field, axis, cdata, gdata): # Copy over real part of m = 0 @@ -3842,7 +3843,7 @@ def interpolation(self, position): return radial_factor*dedalus_sphere.jacobi.polynomials(self.n_size(0), a, b, native_position) @CachedMethod - def transform_plan(self, grid_size, k): + def transform_plan(self, dist, grid_size, k): """Build 
transform plan.""" a = self.alpha[0] + k b = self.alpha[1] + k @@ -3857,12 +3858,12 @@ def forward_transform_radius(self, field, axis, gdata, cdata): if self.k > 0: gdata *= self.radial_transform_factor(field.scales[axis], data_axis, -self.k) # Apply recombinations - self.forward_regularity_recombination(field.tensorsig, axis, gdata) + self.forward_regularity_recombination(field.tensorsig, axis, gdata, ell_maps=self.ell_maps(field.dist)) temp = np.copy(gdata) # Perform radial transforms component-by-component R = self.regularity_classes(field.tensorsig) for regindex, regtotal in np.ndenumerate(R): - plan = self.transform_plan(grid_size, self.k) + plan = self.transform_plan(field.dist, grid_size, self.k) plan.forward(temp[regindex], cdata[regindex], axis) def backward_transform_radius(self, field, axis, cdata, gdata): @@ -3873,7 +3874,7 @@ def backward_transform_radius(self, field, axis, cdata, gdata): # HACK -- don't want to make a new array every transform temp = np.zeros_like(gdata) for i, r in np.ndenumerate(R): - plan = self.transform_plan(grid_size, self.k) + plan = self.transform_plan(field.dist, grid_size, self.k) plan.backward(cdata[i], temp[i], axis) np.copyto(gdata, temp) # Regularity recombination @@ -4056,19 +4057,19 @@ def interpolation(self, ell, regtotal, position): return dedalus_sphere.zernike.polynomials(3, self.n_size(ell), self.alpha + self.k, ell + regtotal, native_z) @CachedMethod - def transform_plan(self, grid_shape, regindex, axis, regtotal, k, alpha): + def transform_plan(self, dist, grid_shape, regindex, axis, regtotal, k, alpha): """Build transform plan.""" - return self.transforms[self.radius_library](grid_shape, self.Nmax+1, axis, self.ell_maps, regindex, regtotal, k, alpha) + return self.transforms[self.radius_library](grid_shape, self.Nmax+1, axis, self.ell_maps(dist), regindex, regtotal, k, alpha) def forward_transform_radius(self, field, axis, gdata, cdata): # Apply recombination - 
self.forward_regularity_recombination(field.tensorsig, axis, gdata) + self.forward_regularity_recombination(field.tensorsig, axis, gdata, ell_maps=self.ell_maps(field.dist)) # Perform radial transforms component-by-component R = self.regularity_classes(field.tensorsig) temp = np.copy(gdata) for regindex, regtotal in np.ndenumerate(R): grid_shape = gdata[regindex].shape - plan = self.transform_plan(grid_shape, regindex, axis, regtotal, self.k, self.alpha) + plan = self.transform_plan(field.dist, grid_shape, regindex, axis, regtotal, self.k, self.alpha) plan.forward(temp[regindex], cdata[regindex], axis) def backward_transform_radius(self, field, axis, cdata, gdata): @@ -4078,7 +4079,7 @@ def backward_transform_radius(self, field, axis, cdata, gdata): temp = np.zeros_like(gdata) for regindex, regtotal in np.ndenumerate(R): grid_shape = gdata[regindex].shape - plan = self.transform_plan(grid_shape, regindex, axis, regtotal, self.k, self.alpha) + plan = self.transform_plan(field.dist, grid_shape, regindex, axis, regtotal, self.k, self.alpha) plan.backward(cdata[regindex], temp[regindex], axis) np.copyto(gdata, temp) # Apply recombinations @@ -4254,29 +4255,29 @@ def global_grids(self, scales=None): self.global_grid_colatitude(scales[1]), self.global_grid_radius(scales[2])) - def local_grids(self, scales=None): + def local_grids(self, dist, scales=None): if scales == None: scales = (1,1,1) - return (self.local_grid_azimuth(scales[0]), - self.local_grid_colatitude(scales[1]), - self.local_grid_radius(scales[2])) + return (self.local_grid_azimuth(dist, scales[0]), + self.local_grid_colatitude(dist, scales[1]), + self.local_grid_radius(dist, scales[2])) @CachedMethod - def global_grid_spacing(self, axis, scales=None): + def global_grid_spacing(self, dist, axis, scales=None): """Global grids spacings.""" if scales is None: scales = (1,1,1) - grid = self.global_grids(scales=scales)[axis] + grid = self.global_grids(dist, scales=scales)[axis] if grid.size == 1: return 
np.array([np.inf,], dtype=grid.dtype) else: return np.gradient(grid, axis=axis, edge_order=2) @CachedMethod - def local_grid_spacing(self, axis, scales=None): + def local_grid_spacing(self, dist, axis, scales=None): """Local grids spacings.""" - global_spacing = self.global_grid_spacing(axis, scales=scales) if scales is None: scales = (1,1,1) - local_elements = self.dist.grid_layout.local_elements(self.domain, scales=scales[axis])[axis] - return reshape_vector(np.ravel(global_spacing)[local_elements], dim=self.dist.dim, axis=axis) + global_spacing = self.global_grid_spacing(dist, axis, scales=scales) + local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scales[axis])[axis] + return reshape_vector(np.ravel(global_spacing)[local_elements], dim=dist.dim, axis=axis) def local_elements(self): raise NotImplementedError() @@ -4532,13 +4533,13 @@ def forward_transform_radius(self, field, axis, gdata, cdata): if self.k > 0: gdata *= radial_basis.radial_transform_factor(field.scales[axis], data_axis, -self.k) # Apply regularity recombination using 3D ell map - radial_basis.forward_regularity_recombination(field.tensorsig, axis, gdata, ell_maps=self.ell_maps) + radial_basis.forward_regularity_recombination(field.tensorsig, axis, gdata, ell_maps=self.ell_maps(field.dist)) # Perform radial transforms component-by-component R = radial_basis.regularity_classes(field.tensorsig) # HACK -- don't want to make a new array every transform temp = np.copy(cdata) for regindex, regtotal in np.ndenumerate(R): - plan = radial_basis.transform_plan(grid_size, self.k) + plan = radial_basis.transform_plan(field.dist, grid_size, self.k) plan.forward(gdata[regindex], temp[regindex], axis) np.copyto(cdata, temp) @@ -4551,17 +4552,17 @@ def backward_transform_radius(self, field, axis, cdata, gdata): # HACK -- don't want to make a new array every transform temp = np.copy(gdata) for i, r in np.ndenumerate(R): - plan = radial_basis.transform_plan(grid_size, self.k) + plan = 
radial_basis.transform_plan(field.dist, grid_size, self.k) plan.backward(cdata[i], temp[i], axis) np.copyto(gdata, temp) # Apply regularity recombinations using 3D ell map - radial_basis.backward_regularity_recombination(field.tensorsig, axis, gdata, ell_maps=self.ell_maps) + radial_basis.backward_regularity_recombination(field.tensorsig, axis, gdata, ell_maps=self.ell_maps(field.dist)) # Multiply by radial factor if self.k > 0: gdata *= radial_basis.radial_transform_factor(field.scales[axis], data_axis, self.k) def build_ncc_matrix(self, product, subproblem, ncc_cutoff, max_ncc_terms): - axis = self.last_axis + axis = product.dist.last_axis(self) ncc_basis = product.ncc.domain.get_basis(axis) if ncc_basis is None: # NCC is constant @@ -4577,7 +4578,7 @@ def build_ncc_matrix(self, product, subproblem, ncc_cutoff, max_ncc_terms): raise NotImplementedError("Azimuthal NCCs not yet supported.") def build_meridional_ncc_matrix(self, product, subproblem, ncc_cutoff, max_ncc_terms): - axis = self.last_axis + axis = product.dist.last_axis(self) ncc_basis = product.ncc.domain.get_basis(axis) arg_basis = product.operand.domain.get_basis(axis) out_basis = product.domain.get_basis(axis) @@ -4592,7 +4593,7 @@ def build_meridional_ncc_matrix(self, product, subproblem, ncc_cutoff, max_ncc_t subcoeff_norms = np.max(subcoeff_norms, axis=0) # Convert NCC coeffs to spin components spin_coeffs = coeffs.copy() - self.radial_basis.backward_regularity_recombination(product.ncc.tensorsig, axis, spin_coeffs, ell_maps=ncc_basis.ell_maps) + self.radial_basis.backward_regularity_recombination(product.ncc.tensorsig, axis, spin_coeffs, ell_maps=ncc_basis.ell_maps(product.dist)) # Build deferred regcomp S2 NCC S2_basis = self.S2_basis() def reg_NCC_matrix(radial_index): @@ -4742,17 +4743,17 @@ def _new_k(self, k): return self.clone_with(k=k) @CachedMethod - def transform_plan(self, grid_shape, regindex, axis, regtotal, k, alpha): + def transform_plan(self, dist, grid_shape, regindex, axis, regtotal, k,
alpha): """Build transform plan.""" radius_library = self.radial_basis.radius_library Nmax = self.radial_basis.Nmax - return self.transforms[radius_library](grid_shape, Nmax+1, axis, self.ell_maps, regindex, regtotal, k, alpha) + return self.transforms[radius_library](grid_shape, Nmax+1, axis, self.ell_maps(dist), regindex, regtotal, k, alpha) def forward_transform_radius(self, field, axis, gdata, cdata): # apply transforms based off the 3D basis' local_l radial_basis = self.radial_basis # Apply regularity recombination - radial_basis.forward_regularity_recombination(field.tensorsig, axis, gdata, ell_maps=self.ell_maps) + radial_basis.forward_regularity_recombination(field.tensorsig, axis, gdata, ell_maps=self.ell_maps(field.dist)) # Perform radial transforms component-by-component R = radial_basis.regularity_classes(field.tensorsig) # HACK -- don't want to make a new array every transform @@ -4760,7 +4761,7 @@ def forward_transform_radius(self, field, axis, gdata, cdata): temp = np.zeros_like(cdata) for regindex, regtotal in np.ndenumerate(R): grid_shape = gdata[regindex].shape - plan = self.transform_plan(grid_shape, regindex, axis, regtotal, radial_basis.k, radial_basis.alpha) + plan = self.transform_plan(field.dist, grid_shape, regindex, axis, regtotal, radial_basis.k, radial_basis.alpha) plan.forward(gdata[regindex], temp[regindex], axis) np.copyto(cdata, temp) @@ -4774,11 +4775,11 @@ def backward_transform_radius(self, field, axis, cdata, gdata): temp = np.zeros_like(gdata) for regindex, regtotal in np.ndenumerate(R): grid_shape = gdata[regindex].shape - plan = self.transform_plan(grid_shape, regindex, axis, regtotal, radial_basis.k, radial_basis.alpha) + plan = self.transform_plan(field.dist, grid_shape, regindex, axis, regtotal, radial_basis.k, radial_basis.alpha) plan.backward(cdata[regindex], temp[regindex], axis) np.copyto(gdata, temp) # Apply regularity recombinations - radial_basis.backward_regularity_recombination(field.tensorsig, axis, gdata, 
ell_maps=self.ell_maps) + radial_basis.backward_regularity_recombination(field.tensorsig, axis, gdata, ell_maps=self.ell_maps(field.dist)) def reduced_view(data, axis, dim): @@ -4915,19 +4916,19 @@ def radial_matrix(self, regindex_in, regindex_out, ell): # def subproblem_matrix(self, subproblem): # operand = self.args[0] # radial_basis = self.radial_basis -# ell = subproblem.group[self.last_axis - 1] +# ell = subproblem.group[dist.last_axis(self) - 1] # # Build identity matrices for each axis # subshape_in = subproblem.coeff_shape(self.operand.domain) # subshape_out = subproblem.coeff_shape(self.domain) # # Substitute factor for radial axis # factors = [sparse.eye(m, n, format='csr') for m, n in zip(subshape_out, subshape_in)] -# factors[self.last_axis][:] = 0 +# factors[dist.last_axis(self)][:] = 0 # if ell == 0: -# factors[self.last_axis][0, 0] = 1 +# factors[dist.last_axis(self)][0, 0] = 1 # return reduce(sparse.kron, factors, 1).tocsr() -class PolarInterpolate(operators.Interpolate, operators.PolarMOperator): +class PolarRadialInterpolate(operators.Interpolate, operators.PolarMOperator): basis_type = PolarBasis basis_subaxis = 1 @@ -4948,7 +4949,7 @@ def __init__(self, operand, coord, position, out=None): operators.Interpolate.__init__(self, operand, coord, position, out=None) def subproblem_matrix(self, subproblem): - m = subproblem.group[self.last_axis - 1] + m = subproblem.group[dist.last_axis(self) - 1] matrix = super().subproblem_matrix(subproblem) radial_basis = self.input_basis if self.tensorsig != (): @@ -4962,17 +4963,17 @@ def operate(self, out): input_basis = self.input_basis output_basis = self.output_basis radial_basis = self.input_basis - axis = self.last_axis + axis = dist.last_axis(self) # Set output layout out.preset_layout(operand.layout) # Apply operator S = radial_basis.spin_weights(operand.tensorsig) - slices_in = [slice(None) for i in range(input_basis.dist.dim)] - slices_out = [slice(None) for i in range(input_basis.dist.dim)] + slices_in 
= [slice(None) for i in range(self.dist.dim)] + slices_out = [slice(None) for i in range(self.dist.dim)] for spinindex, spintotal in np.ndenumerate(S): comp_in = operand.data[spinindex] comp_out = out.data[spinindex] - for m, mg_slice, mc_slice, n_slice in input_basis.m_maps: + for m, mg_slice, mc_slice, n_slice in input_basis.m_maps(self.dist): slices_in[axis-1] = slices_out[axis-1] = mc_slice slices_in[axis] = n_slice vec_in = comp_in[tuple(slices_in)] @@ -4980,7 +4981,7 @@ def operate(self, out): A = self.radial_matrix(spinindex, spinindex, m) apply_matrix(A, vec_in, axis=axis, out=vec_out) temp = np.copy(out.data) - radial_basis.backward_spin_recombination(operand.tensorsig, temp, out.data) + radial_basis.backward_spin_recombination(operand.tensorsig, axis, temp, out.data) def radial_matrix(self, spinindex_in, spinindex_out, m): position = self.position @@ -5012,7 +5013,7 @@ def subproblem_matrix(self, subproblem): radial_basis = self.output_basis ## CHANGED RELATIVE TO POLARMOPERATOR S_in = radial_basis.spin_weights(operand.tensorsig) S_out = radial_basis.spin_weights(self.tensorsig) # Should this use output_basis? 
- m = subproblem.group[self.last_axis - 1] + m = subproblem.group[dist.last_axis(self) - 1] # Loop over components submatrices = [] for spinindex_out, spintotal_out in np.ndenumerate(S_out): @@ -5024,7 +5025,7 @@ def subproblem_matrix(self, subproblem): if spinindex_out in self.spinindex_out(spinindex_in): # Substitute factor for radial axis factors = [sparse.eye(i, j, format='csr') for i, j in zip(subshape_out, subshape_in)] - factors[self.last_axis] = self.radial_matrix(spinindex_in, spinindex_out, m) + factors[dist.last_axis(self)] = self.radial_matrix(spinindex_in, spinindex_out, m) comp_matrix = reduce(sparse.kron, factors, 1).tocsr() else: # Build zero matrix @@ -5062,7 +5063,7 @@ def subproblem_matrix(self, subproblem): radial_basis = self.output_basis ## CHANGED RELATIVE TO POLARMOPERATOR S_in = radial_basis.spin_weights(operand.tensorsig) S_out = radial_basis.spin_weights(self.tensorsig) # Should this use output_basis? - m = subproblem.group[self.last_axis - 1] + m = subproblem.group[dist.last_axis(self) - 1] # Loop over components submatrices = [] for spinindex_out, spintotal_out in np.ndenumerate(S_out): @@ -5074,7 +5075,7 @@ def subproblem_matrix(self, subproblem): if spinindex_out in self.spinindex_out(spinindex_in): # Substitute factor for radial axis factors = [sparse.eye(i, j, format='csr') for i, j in zip(subshape_out, subshape_in)] - factors[self.last_axis] = self.radial_matrix(spinindex_in, spinindex_out, m) + factors[dist.last_axis(self)] = self.radial_matrix(spinindex_in, spinindex_out, m) comp_matrix = reduce(sparse.kron, factors, 1).tocsr() else: # Build zero matrix @@ -5116,7 +5117,7 @@ def subproblem_matrix(self, subproblem): radial_basis = self.output_basis.radial_basis ## CHANGED RELATIVE TO POLARMOPERATOR R_in = radial_basis.regularity_classes(operand.tensorsig) R_out = radial_basis.regularity_classes(self.tensorsig) # Should this use output_basis? 
- ell = subproblem.group[self.last_axis - 1] + ell = subproblem.group[self.dist.last_axis(self) - 1] # Loop over components submatrices = [] for regindex_out, regtotal_out in np.ndenumerate(R_out): @@ -5129,7 +5130,7 @@ def subproblem_matrix(self, subproblem): if (regindex_out in self.regindex_out(regindex_in)) and radial_basis.regularity_allowed(ell, regindex_in) and radial_basis.regularity_allowed(ell, regindex_out): # Substitute factor for radial axis factors = [sparse.eye(m, n, format='csr') for m, n in zip(subshape_out, subshape_in)] - factors[self.last_axis] = self.radial_matrix(regindex_in, regindex_out, ell) + factors[dist.last_axis(self)] = self.radial_matrix(regindex_in, regindex_out, ell) comp_matrix = reduce(sparse.kron, factors, 1).tocsr() else: # Build zero matrix @@ -5170,7 +5171,7 @@ def subproblem_matrix(self, subproblem): radial_basis = self.output_basis R_in = radial_basis.regularity_classes(operand.tensorsig) R_out = radial_basis.regularity_classes(self.tensorsig) # Should this use output_basis? 
- ell = subproblem.group[self.last_axis - 1] + ell = subproblem.group[dist.last_axis(self) - 1] # Loop over components submatrices = [] for regindex_out, regtotal_out in np.ndenumerate(R_out): @@ -5183,7 +5184,7 @@ def subproblem_matrix(self, subproblem): if (regindex_out in self.regindex_out(regindex_in)) and radial_basis.regularity_allowed(ell, regindex_in) and radial_basis.regularity_allowed(ell, regindex_out): # Substitute factor for radial axis factors = [sparse.eye(m, n, format='csr') for m, n in zip(subshape_out, subshape_in)] - factors[self.last_axis] = self.radial_matrix(regindex_in, regindex_out, ell) + factors[dist.last_axis(self)] = self.radial_matrix(regindex_in, regindex_out, ell) comp_matrix = reduce(sparse.kron, factors, 1).tocsr() else: # Build zero matrix @@ -5226,8 +5227,8 @@ def regindex_out(self, regindex_in): def subproblem_matrix(self, subproblem): matrix = super().subproblem_matrix(subproblem) # Get relevant Qs - m = subproblem.group[self.last_axis - 2] - ell = subproblem.group[self.last_axis - 1] + m = subproblem.group[dist.last_axis(self) - 2] + ell = subproblem.group[dist.last_axis(self) - 1] if ell is None: ell_list = np.arange(abs(m), self.input_basis.Lmax + 1) if self.input_basis.ell_reversed[m]: @@ -5304,7 +5305,7 @@ def operate(self, out): out.preset_layout(layout) out.data[:] = 0 # Apply operator - azimuth_axis = self.input_basis.first_axis + azimuth_axis = self.dist.first_axis(self.input_basis) domain_in = self.operand.domain domain_out = self.domain groups_in = layout.local_group_arrays(domain_in, scales=domain_in.dealias) @@ -5333,7 +5334,7 @@ def operate(self, out): out.preset_layout(layout) out.data[:] = 0 # Apply operator - azimuth_axis = self.input_basis.first_axis + azimuth_axis = self.dist.first_axis(self.input_basis) domain_in = self.operand.domain domain_out = self.domain groups_in = layout.local_group_arrays(domain_in, scales=domain_in.dealias) @@ -5435,15 +5436,15 @@ def operate(self, out): """Perform operation.""" 
operand = self.args[0] basis = self.input_basis - axis = self.radial_basis.last_axis + axis = self.dist.last_axis(self.radial_basis) # Set output layout out.preset_layout(operand.layout) out.data[:] = 0 # Apply operator - for m, mg_slice, mc_slice, n_slice in basis.m_maps: + for m, mg_slice, mc_slice, n_slice in basis.m_maps(self.dist): if m == 0: # Modify mc_slice to ignore sin component - slices = [slice(None) for i in range(basis.dist.dim)] + slices = [slice(None) for i in range(self.dist.dim)] slices[axis-1] = slice(mc_slice.start, mc_slice.start+1) slices[axis] = n_slice slices = tuple(slices) @@ -5523,14 +5524,14 @@ def operate(self, out): """Perform operation.""" operand = self.args[0] basis = self.input_basis - axis = basis.radial_basis.radial_axis + axis = self.dist.last_axis(basis.radial_basis) # Set output layout out.preset_layout(operand.layout) out.data[:] = 0 # Apply operator - for ell, m_ind, ell_ind in basis.ell_maps: + for ell, m_ind, ell_ind in basis.ell_maps(self.dist): if ell == 0: - slices = [slice(None) for i in range(basis.dist.dim)] + slices = [slice(None) for i in range(self.dist.dim)] # Modify m slice to ignore sin component slices[axis-2] = slice(m_ind.start, m_ind.start+1) slices[axis-1] = ell_ind @@ -5601,7 +5602,7 @@ def _output_basis(input_basis, position): def check_conditions(self): """Check that arguments are in a proper layout.""" arg0 = self.args[0] - azimuth_axis = self.first_axis + azimuth_axis = self.dist.first_axis(self.input_basis) # Require grid space and locality along azimuthal axis is_grid = arg0.layout.grid_space[azimuth_axis] is_local = arg0.layout.local[azimuth_axis] @@ -5610,7 +5611,7 @@ def check_conditions(self): def enforce_conditions(self): """Require arguments to be in a proper layout.""" arg0 = self.args[0] - azimuth_axis = self.first_axis + azimuth_axis = self.dist.first_axis(self.input_basis) # Require grid space and locality along azimuthal axis arg0.require_grid_space(azimuth_axis) arg0.require_local(azimuth_axis) @@ -5639,9 +5640,9 @@ def
operate(self, out): # Set output layout out.preset_layout(layout) # Set output lock - out.lock_axis_to_grid(self.first_axis) + out.lock_axis_to_grid(self.dist.first_axis(self.input_basis)) # Apply matrix - data_axis = self.first_axis + len(arg.tensorsig) + data_axis = self.dist.first_axis(self.input_basis) + len(arg.tensorsig) apply_matrix(self.interpolation_vector(), arg.data, data_axis, out=out.data) @@ -5669,7 +5670,7 @@ def _output_basis(input_basis, position): def check_conditions(self): """Check that arguments are in a proper layout.""" arg0 = self.args[0] - azimuth_axis = self.first_axis + azimuth_axis = self.dist.first_axis(self.input_basis) colat_axis = azimuth_axis + 1 # Require azimuth coeff, colat grid, colat, local az_coeff = not arg0.layout.grid_space[azimuth_axis] @@ -5680,7 +5681,7 @@ def check_conditions(self): def enforce_conditions(self): """Require arguments to be in a proper layout.""" arg0 = self.args[0] - azimuth_axis = self.first_axis + azimuth_axis = self.dist.first_axis(self.input_basis) colat_axis = azimuth_axis + 1 # Require azimuth coeff, colat grid, colat, local arg0.require_coeff_space(azimuth_axis) @@ -5689,23 +5690,23 @@ def enforce_conditions(self): def interpolation_vectors(self, Ntheta, s): # Wrap class-based caching - return self._interpolation_vectors(self.sphere_basis, Ntheta, s, self.position) + return self._interpolation_vectors(self.dist, self.sphere_basis, Ntheta, s, self.position) @staticmethod @CachedMethod - def _interpolation_vectors(sphere_basis, Ntheta, s, theta): + def _interpolation_vectors(dist, sphere_basis, Ntheta, s, theta): interp_vectors = {} z = np.cos(theta) - colat_transform = sphere_basis.dist.get_transform_object(sphere_basis.first_axis+1) + colat_transform = dist.get_transform_object(dist.first_axis(sphere_basis)+1) layout = colat_transform.layout1 - coupling = [True] * sphere_basis.dist.dim - coupling[sphere_basis.first_axis] = False + coupling = [True] * dist.dim + coupling[dist.first_axis(sphere_basis)] = False coupling = tuple(coupling) - domain =
sphere_basis.domain + domain = sphere_basis.domain(dist) m_groupsets = layout.local_groupsets(coupling, domain, scales=domain.dealias, broadcast=True) - forward = sphere_basis.transform_plan(Ntheta, s) + forward = sphere_basis.transform_plan(dist, Ntheta, s) for group in m_groupsets: - m = group[sphere_basis.first_axis] + m = group[dist.first_axis(sphere_basis)] if m <= sphere_basis.Lmax: Lmin = max(abs(m), abs(s)) interp_m = dedalus_sphere.sphere.harmonics(sphere_basis.Lmax, m, s, z)[None, :] @@ -5720,7 +5721,7 @@ def operate(self, out): arg = self.args[0] basis = self.sphere_basis layout = arg.layout - azimuth_axis = self.first_axis + azimuth_axis = dist.first_axis(self) colat_axis = azimuth_axis + 1 Ntheta = arg.data.shape[len(arg.tensorsig) + colat_axis] # Set output layout @@ -5730,7 +5731,7 @@ def operate(self, out): # Forward spin recombination arg_temp = np.zeros_like(arg.data) out_temp = np.zeros_like(out.data) - basis.forward_spin_recombination(arg.tensorsig, arg.data, arg_temp) + basis.forward_spin_recombination(arg.tensorsig, colat_axis, arg.data, arg_temp) # Loop over spin components S = basis.spin_weights(arg.tensorsig) for i, s in np.ndenumerate(S): @@ -5738,13 +5739,13 @@ def operate(self, out): out_s = out_temp[i] interp_vectors = self.interpolation_vectors(Ntheta, s) # Loop over m - for m, mg_slice, _, _ in basis.m_maps: + for m, mg_slice, _, _ in basis.m_maps(self.dist): mg_slice = axindex(azimuth_axis, mg_slice) arg_sm = arg_s[mg_slice] out_sm = out_s[mg_slice] apply_matrix(interp_vectors[m], arg_sm, axis=colat_axis, out=out_sm) # Backward spin recombination - basis.backward_spin_recombination(out.tensorsig, out_temp, out.data) + basis.backward_spin_recombination(out.tensorsig, colat_axis, out_temp, out.data) class BallRadialInterpolate(operators.Interpolate, operators.SphericalEllOperator): @@ -5775,7 +5776,7 @@ def __init__(self, operand, coord, position, out=None): self.radial_basis = self.input_basis def subproblem_matrix(self, subproblem): 
- ell = subproblem.group[self.last_axis - 1] + ell = subproblem.group[dist.last_axis(self) - 1] matrix = super().subproblem_matrix(subproblem) radial_basis = self.radial_basis if self.tensorsig != (): @@ -5792,17 +5793,17 @@ def operate(self, out): input_basis = self.input_basis output_basis = self.output_basis radial_basis = self.radial_basis - axis = radial_basis.radial_axis + axis = self.dist.last_axis(radial_basis) # Set output layout out.preset_layout(operand.layout) # Apply operator R = radial_basis.regularity_classes(operand.tensorsig) - slices_in = [slice(None) for i in range(input_basis.dist.dim)] - slices_out = [slice(None) for i in range(input_basis.dist.dim)] + slices_in = [slice(None) for i in range(self.dist.dim)] + slices_out = [slice(None) for i in range(self.dist.dim)] for regindex, regtotal in np.ndenumerate(R): comp_in = operand.data[regindex] comp_out = out.data[regindex] - for ell, m_ind, ell_ind in input_basis.ell_maps: + for ell, m_ind, ell_ind in input_basis.ell_maps(self.dist): allowed = radial_basis.regularity_allowed(ell, regindex) if allowed: slices_in[axis-2] = slices_out[axis-2] = m_ind @@ -5812,7 +5813,7 @@ def operate(self, out): vec_out = comp_out[tuple(slices_out)] A = self.radial_matrix(regindex, regindex, ell) apply_matrix(A, vec_in, axis=axis, out=vec_out) - radial_basis.backward_regularity_recombination(operand.tensorsig, self.basis_subaxis, out.data, ell_maps=input_basis.ell_maps) + radial_basis.backward_regularity_recombination(operand.tensorsig, self.basis_subaxis, out.data, ell_maps=input_basis.ell_maps(self.dist)) def radial_matrix(self, regindex_in, regindex_out, ell): position = self.position @@ -5856,8 +5857,8 @@ def _output_basis(input_basis, position): def subproblem_matrix(self, subproblem): matrix = super().subproblem_matrix(subproblem) # Get relevant Qs - m = subproblem.group[self.last_axis - 2] - ell = subproblem.group[self.last_axis - 1] + m = subproblem.group[dist.last_axis(self) - 2] + ell = 
subproblem.group[dist.last_axis(self) - 1] if ell is None: ell_list = np.arange(abs(m), self.input_basis.Lmax + 1) if self.input_basis.ell_reversed[m]: @@ -5881,7 +5882,7 @@ def operate(self, out): radial_basis = self.radial_basis input_basis = self.input_basis # Q matrix - radial_basis.backward_regularity_recombination(operand.tensorsig, self.basis_subaxis, out.data, ell_maps=input_basis.ell_maps) + radial_basis.backward_regularity_recombination(operand.tensorsig, self.basis_subaxis, out.data, ell_maps=input_basis.ell_maps(self.dist)) def radial_matrix(self, regindex_in, regindex_out, ell): position = self.position @@ -5925,8 +5926,8 @@ def subproblem_matrix(self, subproblem): if self.dtype == np.float64: matrix = sparse.kron(matrix, sparse.eye(2)) # Block over ell - m = subproblem.group[self.input_basis.last_axis - 1] - ell = subproblem.group[self.input_basis.last_axis] + m = subproblem.group[self.dist.last_axis(self.input_basis) - 1] + ell = subproblem.group[self.dist.last_axis(self.input_basis)] if ell is None: n_ell = self.input_basis.Lmax + 1 - np.abs(m) matrix = sparse.kron(matrix, sparse.eye(n_ell)) @@ -5964,8 +5965,8 @@ def subproblem_matrix(self, subproblem): if self.dtype == np.float64: matrix = sparse.kron(matrix, sparse.eye(2)) # Block over ell - m = subproblem.group[self.input_basis.last_axis - 1] - ell = subproblem.group[self.input_basis.last_axis] + m = subproblem.group[self.dist.last_axis(self.input_basis) - 1] + ell = subproblem.group[self.dist.last_axis(self.input_basis)] if ell is None: n_ell = self.input_basis.Lmax + 1 - np.abs(m) matrix = sparse.kron(matrix, sparse.eye(n_ell)) diff --git a/dedalus/core/distributor.py b/dedalus/core/distributor.py index 1c4ab7aa..b3902171 100644 --- a/dedalus/core/distributor.py +++ b/dedalus/core/distributor.py @@ -10,6 +10,7 @@ from math import prod from .coords import CoordinateSystem +from ..tools.array import reshape_vector from ..tools.cache import CachedMethod, CachedAttribute from ..tools.config import 
config from ..tools.general import OrderedSet @@ -198,7 +199,7 @@ def get_axis(self, coord): return self.coords.index(coord) def get_basis_axis(self, basis): - return self.get_axis(basis.coordsystem.coords[0]) + return self.get_axis(basis.coordsys.coords[0]) def first_axis(self, basis): return self.get_basis_axis(basis) @@ -241,6 +242,34 @@ def local_grid(self, basis, scale=None): else: raise ValueError("Use `local_grids` for multidimensional bases.") + # def global_grids(self, *bases, scales=None): + # """Global grids.""" + # grids = [] + # scales = self.remedy_scales(scales) + # for basis in bases: + # basis_axis = self.get_basis_axis(basis) + # basis_scales = scales[basis_axis:basis_axis+basis.dim] + # global_grids = basis.global_grids(scales=basis_scales) + # for subaxis in range(basis.dim): + # axis = basis_axis + subaxis + # grids.append(reshape_vector(global_grids[subaxis], dim=self.dim, axis=axis)) + # return tuple(grids) + + # def local_grids(self, *bases, scales=None): + # """Local grid.""" + # grids = [] + # scales = self.remedy_scales(scales) + # for basis in bases: + # basis_axis = self.get_basis_axis(basis) + # basis_scales = scales[basis_axis:basis_axis+basis.dim] + # local_elements = self.grid_layout.local_elements(basis.domain(self), scales=scales) + # global_grids = basis.global_grids(scales=basis_scales) + # for subaxis in range(basis.dim): + # axis = basis_axis + subaxis + # local_grid = global_grids[subaxis][local_elements[axis]] + # grids.append(reshape_vector(local_grid, dim=self.dim, axis=axis)) + # return tuple(grids) + def local_grids(self, *bases, scales=None): # TODO: remove from bases and do it all here? 
return sum((basis.local_grids(self, scales=scales) for basis in bases), ()) diff --git a/dedalus/core/domain.py b/dedalus/core/domain.py index 219ba747..56bb6d13 100644 --- a/dedalus/core/domain.py +++ b/dedalus/core/domain.py @@ -102,10 +102,11 @@ def get_basis(self, coords): return self.full_bases[axis] def get_basis_subaxis(self, coord): - axis = coord.axis + axis = self.dist.get_axis(coord) for basis in self.bases: - if (axis >= basis.axis) and (axis <= basis.axis + basis.dim): - return axis - basis.axis + basis_axis = self.dist.get_basis_axis(basis) + if (axis >= basis_axis) and (axis <= basis_axis + basis.dim): + return axis - basis_axis def get_coord(self, name): for basis in self.bases: diff --git a/dedalus/core/evaluator.py b/dedalus/core/evaluator.py index 4a37ba2d..2628f5a7 100644 --- a/dedalus/core/evaluator.py +++ b/dedalus/core/evaluator.py @@ -553,7 +553,7 @@ def setup_file(self, file): subaxis = axis - self.dist.get_basis_axis(basis) if layout.grid_space[axis]: sn = basis.coordsystem.coords[subaxis].name - data = basis.global_grids(scales)[subaxis].ravel() + data = basis.global_grids(self.dist, scales)[subaxis].ravel() else: sn = 'k' + basis.coordsystem.coords[subaxis].name data = layout.global_group_arrays(op.domain, scales)[subaxis] diff --git a/dedalus/core/operators.py b/dedalus/core/operators.py index 2e6ccc33..6a4b78de 100644 --- a/dedalus/core/operators.py +++ b/dedalus/core/operators.py @@ -1989,7 +1989,7 @@ def operate(self, out): out.data[:] = np.transpose(operand.data, self.new_axis_order) else: radial_basis = self.radial_basis - ell_maps = self.input_basis.ell_maps + ell_maps = self.input_basis.ell_maps(self.dist) # Copy to output for in-place regularity recombination copyto(out.data, operand.data) out.data[:] = operand.data @@ -2492,6 +2492,7 @@ def subproblem_matrix(self, subproblem): m_dep = self.subaxis_dependence[0] l_dep = self.subaxis_dependence[1] # Loop over components + m_axis = self.dist.first_axis(basis) submatrices = [] for 
spinindex_out, spintotal_out in np.ndenumerate(S_out): submatrix_row = [] @@ -2507,8 +2508,8 @@ def subproblem_matrix(self, subproblem): elif l_coupled or (not m_dep): if l_coupled: local_groups = self.dist.coeff_layout.local_group_arrays(domain, scales=1) - local_m = local_groups[basis.first_axis] - local_ell = local_groups[basis.first_axis+1] + local_m = local_groups[m_axis] + local_ell = local_groups[m_axis+1] ell_list = local_ell[local_m == m].ravel() elif not m_dep: ell_list = [l] @@ -2570,7 +2571,7 @@ def operate(self, out): out.data[:] = 0 # Apply operator S_in = input_basis.spin_weights(operand.tensorsig) - slices = [slice(None) for i in range(input_basis.dist.dim)] + slices = [slice(None) for i in range(self.dist.dim)] for spinindex_in, spintotal_in in np.ndenumerate(S_in): comp_in = operand.data[spinindex_in] reduced_in = reduced_view_3_ravel(comp_in, axis, dim) @@ -2761,8 +2762,8 @@ def __init__(self, operand, coordsys, ell_r_func, out=None): self.operand = operand self.input_basis = operand.domain.get_basis(coordsys) self.output_basis = self.input_basis - self.first_axis = self.input_basis.first_axis - self.last_axis = self.input_basis.last_axis + self.first_axis = self.dist.first_axis(self.input_basis) + self.last_axis = self.dist.last_axis(self.input_basis) # FutureField requirements self.domain = operand.domain self.tensorsig = operand.tensorsig @@ -2792,8 +2793,8 @@ def __init__(self, operand, coordsys): # SpectralOperator requirements self.input_basis = input_basis self.output_basis = self._output_basis(self.input_basis) - self.first_axis = self.input_basis.first_axis - self.last_axis = self.input_basis.last_axis + self.first_axis = self.dist.first_axis(self.input_basis) + self.last_axis = self.dist.last_axis(self.input_basis) # LinearOperator requirements self.operand = operand @@ -2804,18 +2805,18 @@ def operate(self, out): basis = self.output_basis else: basis = self.input_basis - axis = basis.first_axis + 1 + axis = self.last_axis # Set output 
layout out.preset_layout(operand.layout) out.data[:] = 0 # Apply operator S_in = basis.spin_weights(operand.tensorsig) - slices = [slice(None) for i in range(basis.dist.dim)] + slices = [slice(None) for i in range(self.dist.dim)] for spinindex_in, spintotal_in in np.ndenumerate(S_in): for spinindex_out in self.spinindex_out(spinindex_in): comp_in = operand.data[spinindex_in] comp_out = out.data[spinindex_out] - for m, mg_slice, mc_slice, n_slice in basis.m_maps: + for m, mg_slice, mc_slice, n_slice in basis.m_maps(self.dist): slices[axis-1] = mc_slice slices[axis] = n_slice vec_in = comp_in[tuple(slices)] @@ -2985,7 +2986,7 @@ class SphericalEllOperator(SpectralOperator): def __init__(self, operand, coordsys): self.coordsys = coordsys - self.radius_axis = coordsys.coords[2].axis + self.radius_axis = operand.dist.get_axis(coordsys) + 2 input_basis = operand.domain.get_basis(coordsys) if input_basis is None: input_basis = operand.domain.get_basis(coordsys.radius) @@ -2993,8 +2994,8 @@ def __init__(self, operand, coordsys): # SpectralOperator requirements self.input_basis = input_basis self.output_basis = self._output_basis(self.input_basis) - self.first_axis = self.input_basis.first_axis - self.last_axis = self.input_basis.last_axis + self.first_axis = self.radius_axis - 2 + self.last_axis = self.radius_axis # LinearOperator requirements self.operand = operand @@ -3014,19 +3015,19 @@ def operate(self, out): else: basis = self.input_basis radial_basis = self.radial_basis - axis = radial_basis.radial_axis + axis = self.dist.last_axis(radial_basis) # Set output layout out.preset_layout(operand.layout) out.data[:] = 0 # Apply operator R_in = radial_basis.regularity_classes(operand.tensorsig) - slices = [slice(None) for i in range(radial_basis.dist.dim)] + slices = [slice(None) for i in range(self.dist.dim)] for regindex_in, regtotal_in in np.ndenumerate(R_in): for regindex_out in self.regindex_out(regindex_in): comp_in = operand.data[regindex_in] comp_out = 
out.data[regindex_out] # Should reorder to make ell loop first, check forbidden reg, remove reg from radial_vector_3 - for ell, m_ind, ell_ind in basis.ell_maps: + for ell, m_ind, ell_ind in basis.ell_maps(self.dist): allowed_in = radial_basis.regularity_allowed(ell, regindex_in) allowed_out = radial_basis.regularity_allowed(ell, regindex_out) if allowed_in and allowed_out: @@ -3625,19 +3626,19 @@ def operate(self, out): operand = self.args[0] input_basis = self.input_basis radial_basis = self.radial_basis - axis = radial_basis.radial_axis + axis = self.dist.last_axis(self.radial_basis) # Set output layout out.preset_layout(operand.layout) out.data.fill(0) # Apply operator R_in = radial_basis.regularity_classes(operand.tensorsig) - slices = [slice(None) for i in range(input_basis.dist.dim)] + slices = [slice(None) for i in range(self.dist.dim)] for regindex_in, regtotal_in in np.ndenumerate(R_in): for regindex_out in self.regindex_out(regindex_in): comp_in = operand.data[regindex_in] comp_out = out.data[regindex_out] # Should reorder to make ell loop first, check forbidden reg, remove reg from radial_vector_3 - for ell, m_ind, ell_ind in input_basis.ell_maps: + for ell, m_ind, ell_ind in input_basis.ell_maps(self.dist): allowed_in = radial_basis.regularity_allowed(ell, regindex_in) allowed_out = radial_basis.regularity_allowed(ell, regindex_out) if allowed_in and allowed_out: @@ -3769,13 +3770,13 @@ def operate(self, out): out.data[:] = 0 # Apply operator S_in = input_basis.spin_weights(operand.tensorsig) - slices = [slice(None) for i in range(input_basis.dist.dim)] + slices = [slice(None) for i in range(self.dist.dim)] for spinindex_in, spintotal_in in np.ndenumerate(S_in): for spinindex_out in self.spinindex_out(spinindex_in): comp_in = operand.data[spinindex_in] comp_out = out.data[spinindex_out] - for m, mg_slice, mc_slice, n_slice in input_basis.m_maps: + for m, mg_slice, mc_slice, n_slice in input_basis.m_maps(self.dist): slices[axis-1] = mc_slice 
slices[axis] = n_slice cos_slice = axslice(axis-1, 0, None, 2) @@ -4046,8 +4047,8 @@ def __init__(self, operand, output_basis, n, out=None): self.output_basis = output_basis #self.first_axis = min(self.input_basis.first_axis, self.output_basis.first_axis) #self.last_axis = max(self.input_basis.last_axis, self.output_basis.last_axis) - self.first_axis = self.output_basis.first_axis - self.last_axis = self.output_basis.last_axis + self.first_axis = operand.dist.get_basis_axis(self.output_basis) + self.last_axis = self.first_axis + output_basis.dim - 1 # LinearOperator requirements self.operand = operand # FutureField requirements diff --git a/dedalus/tests/test_sphere_calculus.py b/dedalus/tests/test_sphere_calculus.py index e7f1ff71..277318c3 100644 --- a/dedalus/tests/test_sphere_calculus.py +++ b/dedalus/tests/test_sphere_calculus.py @@ -481,7 +481,7 @@ def test_sphere_ell_product_scalar(Nphi, Ntheta, dealias, dtype): g = field.Field(dist=d, bases=(b,), dtype=dtype) f.fill_random('g') func = lambda ell, r: ell + 3 - for ell, m_ind, ell_ind in b.ell_maps: + for ell, m_ind, ell_ind in b.ell_maps(d): g['c'][m_ind, ell_ind] = func(ell, b.radius) * f['c'][m_ind, ell_ind] h = operators.SphereEllProduct(f, c, func).evaluate() assert np.allclose(g['c'], h['c']) @@ -497,7 +497,7 @@ def test_sphere_ell_product_vector(Nphi, Ntheta, dealias, dtype): g = field.Field(dist=d, bases=(b,), dtype=dtype, tensorsig=(c,)) f.fill_random('g') func = lambda ell, r: ell + 3 - for ell, m_ind, ell_ind in b.ell_maps: + for ell, m_ind, ell_ind in b.ell_maps(d): for i in range(c.dim): g['c'][i, m_ind, ell_ind] = func(ell, b.radius) * f['c'][i, m_ind, ell_ind] h = operators.SphereEllProduct(f, c, func).evaluate() diff --git a/dedalus/tests/test_spherical_operators.py b/dedalus/tests/test_spherical_operators.py index 2f6c299d..bd9e46de 100644 --- a/dedalus/tests/test_spherical_operators.py +++ b/dedalus/tests/test_spherical_operators.py @@ -49,7 +49,7 @@ def 
test_spherical_ell_product_scalar(Nphi, Ntheta, Nr, k, dealias, basis, dtype g = field.Field(dist=d, bases=(b,), dtype=dtype) f.preset_scales(b.domain.dealias) f['g'] = 3*x**2 + 2*y*z - for ell, m_ind, ell_ind in b.ell_maps: + for ell, m_ind, ell_ind in b.ell_maps(d): g['c'][m_ind, ell_ind, :] = (ell+3)*f['c'][m_ind, ell_ind, :] func = lambda ell: ell+3 h = operators.SphericalEllProduct(f, c, func).evaluate() @@ -75,7 +75,7 @@ def test_spherical_ell_product_vector(Nphi, Ntheta, Nr, k, dealias, basis, dtype uk0['g'] = u['g'] v = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) v.preset_scales(b.domain.dealias) - for ell, m_ind, ell_ind in b.ell_maps: + for ell, m_ind, ell_ind in b.ell_maps(d): v['c'][0, m_ind, ell_ind, :] = (ell+2)*uk0['c'][0, m_ind, ell_ind, :] v['c'][1, m_ind, ell_ind, :] = (ell+4)*uk0['c'][1, m_ind, ell_ind, :] v['c'][2, m_ind, ell_ind, :] = (ell+3)*uk0['c'][2, m_ind, ell_ind, :] From 774e835a3ae4bfd62f3a852bc3feb3fac20f08b0 Mon Sep 17 00:00:00 2001 From: "Keaton J. 
Burns" Date: Tue, 12 Dec 2023 12:18:24 -0500 Subject: [PATCH 03/19] More dist removal --- dedalus/core/arithmetic.py | 8 +- dedalus/core/basis.py | 263 +++++++++--------- dedalus/core/coords.py | 26 +- dedalus/core/distributor.py | 14 +- dedalus/core/domain.py | 4 +- dedalus/core/evaluator.py | 4 +- dedalus/core/operators.py | 8 +- dedalus/tests/test_cartesian_operators.py | 17 +- dedalus/tests/test_cfl.py | 4 +- dedalus/tests/test_evp.py | 4 +- dedalus/tests/test_fourier_operators.py | 2 +- dedalus/tests/test_grid_operators.py | 10 +- dedalus/tests/test_ivp.py | 2 +- dedalus/tests/test_jacobi_operators.py | 2 +- dedalus/tests/test_lbvp.py | 8 +- dedalus/tests/test_nlbvp.py | 10 +- dedalus/tests/test_polar_calculus.py | 4 +- dedalus/tests/test_polar_ncc.py | 4 +- dedalus/tests/test_polar_operators.py | 4 +- dedalus/tests/test_sphere_calculus.py | 2 +- dedalus/tests/test_sphere_ncc.py | 6 +- dedalus/tests/test_spherical_arithmetic.py | 16 +- dedalus/tests/test_spherical_calculus.py | 4 +- dedalus/tests/test_spherical_operators.py | 4 +- dedalus/tests/test_transforms.py | 38 +-- .../tests_parallel/test_output_parallel.py | 20 +- .../test_spherical3d_arithmetic_parallel.py | 4 +- 27 files changed, 237 insertions(+), 255 deletions(-) diff --git a/dedalus/core/arithmetic.py b/dedalus/core/arithmetic.py index 65e9e02b..8a13366a 100644 --- a/dedalus/core/arithmetic.py +++ b/dedalus/core/arithmetic.py @@ -569,9 +569,11 @@ def Gamma(self, A_tensorsig, B_tensorsig, C_tensorsig, A_group, B_group, C_group G = self.Gamma(A_tensorsig, B_tensorsig, C_tensorsig, A_group, B_group, C_group, axis-1) # Apply Q cs = self.dist.get_coordsystem(axis) - QA = cs.backward_intertwiner(axis, len(A_tensorsig), A_group).T - QB = cs.backward_intertwiner(axis, len(B_tensorsig), B_group).T - QC = cs.forward_intertwiner(axis, len(C_tensorsig), C_group) + cs_axis = self.dist.get_axis(cs) + subaxis = axis - cs_axis + QA = cs.backward_intertwiner(subaxis, len(A_tensorsig), A_group[cs_axis:]).T + QB = 
cs.backward_intertwiner(subaxis, len(B_tensorsig), B_group[cs_axis:]).T + QC = cs.forward_intertwiner(subaxis, len(C_tensorsig), C_group[cs_axis:]) Q = kron(QA, QB, QC) G = (Q @ G.ravel()).reshape(G.shape) return G diff --git a/dedalus/core/basis.py b/dedalus/core/basis.py index 4614dfc3..2b746be2 100644 --- a/dedalus/core/basis.py +++ b/dedalus/core/basis.py @@ -261,6 +261,7 @@ def product_matrix(self, arg_basis, out_basis, i): def build_ncc_matrix(self, product, subproblem, ncc_cutoff, max_ncc_terms): # Default to last axis only + dist = product.dist if any(product.ncc.domain.nonconstant[dist.first_axis(self):dist.last_axis(self)]): raise NotImplementedError("Only last-axis NCCs implemented for this basis.") axis = dist.last_axis(self) @@ -332,7 +333,7 @@ class IntervalBasis(Basis): def __init__(self, coord, size, bounds, dealias): self.coord = coord coord.check_bounds(bounds) - self.coordsystem = coord + self.coordsys = coord self.size = size self.shape = (size,) self.bounds = bounds @@ -347,29 +348,24 @@ def __init__(self, coord, size, bounds, dealias): def matrix_dependence(self, matrix_coupling): return matrix_coupling - def global_grids(self, dist, scales=None): + def global_grids(self, dist, scales): """Global grids.""" - if scales == None: - scales = (1,) - return (self.global_grid(dist, scales[0]),) + scale = scales[dist.first_axis(self)] + return (self.global_grid(dist, scale),) - def global_grid(self, dist, scale=None): + def global_grid(self, dist, scale): """Global grid.""" - if scale == None: - scale = 1 native_grid = self._native_grid(scale) problem_grid = self.COV.problem_coord(native_grid) return reshape_vector(problem_grid, dim=dist.dim, axis=dist.get_basis_axis(self)) - def local_grids(self, dist, scales=None): + def local_grids(self, dist, scales): """Local grids.""" - if scales == None: - scales = (1,) - return (self.local_grid(dist, scales[0]),) + scale = scales[dist.first_axis(self)] + return (self.local_grid(dist, scale),) - def 
local_grid(self, dist, scale=None): + def local_grid(self, dist, scale): """Local grid.""" - if scale == None: scale = 1 local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale) native_grid = self._native_grid(scale)[local_elements[dist.get_basis_axis(self)]] problem_grid = self.COV.problem_coord(native_grid) @@ -1580,15 +1576,15 @@ def spin_recombination_matrices(self, tensorsig): # Perform unitary spin recombination along relevant tensor indeces U = [] for i, cs in enumerate(tensorsig): - if (cs is self.coordsystem or - (type(cs) is SphericalCoordinates and self.coordsystem is cs.S2coordsys) or - (type(self.coordsystem) is SphericalCoordinates and self.coordsystem.S2coordsys is cs)): + if (cs is self.coordsys or + (type(cs) is SphericalCoordinates and self.coordsys is cs.S2coordsys) or + (type(self.coordsys) is SphericalCoordinates and self.coordsys.S2coordsys is cs)): U.append(Us[cs.dim]) - #if self.coordsystem is vs: # kludge before we decide how compound coordinate systems work + #if self.coordsys is vs: # kludge before we decide how compound coordinate systems work # Ui = np.identity(vs.dim, dtype=np.complex128) # Ui[:self.dim, :self.dim] = Us # U.append(Ui) - #elif self.coordsystem in vs.spaces: + #elif self.coordsys in vs.spaces: # n = vector_space.get_index(self.space) # Ui = np.identity(vector_space.dim, dtype=np.complex128) # Ui[n:n+self.dim, n:n+self.dim] = Us @@ -1684,8 +1680,8 @@ def backward_spin_recombination(self, tensorsig, colat_axis, gdata, out): # These are common for S2 and D2 class SpinBasis(MultidimensionalBasis, SpinRecombinationBasis): - def __init__(self, coordsystem, shape, dtype, dealias, azimuth_library=None): - self.coordsystem = coordsystem + def __init__(self, coordsys, shape, dtype, dealias, azimuth_library=None): + self.coordsys = coordsys self.shape = shape self.dtype = dtype if np.isscalar(dealias): @@ -1697,14 +1693,14 @@ def __init__(self, coordsystem, shape, dtype, dealias, azimuth_library=None): 
self.azimuth_library = azimuth_library self.mmax = (shape[0] - 1) // 2 if dtype == np.complex128: - self.azimuth_basis = ComplexFourier(coordsystem.coords[0], shape[0], bounds=(0, 2*np.pi), library=azimuth_library, dealias=self.dealias[0]) + self.azimuth_basis = ComplexFourier(coordsys.coords[0], shape[0], bounds=(0, 2*np.pi), library=azimuth_library, dealias=self.dealias[0]) elif dtype == np.float64: - self.azimuth_basis = RealFourier(coordsystem.coords[0], shape[0], bounds=(0, 2*np.pi), library=azimuth_library, dealias=self.dealias[0]) + self.azimuth_basis = RealFourier(coordsys.coords[0], shape[0], bounds=(0, 2*np.pi), library=azimuth_library, dealias=self.dealias[0]) else: raise NotImplementedError() self.global_grid_azimuth = self.azimuth_basis.global_grid self.local_grid_azimuth = self.azimuth_basis.local_grid - super().__init__(coordsystem) + super().__init__(coordsys) @CachedAttribute def constant(self): @@ -1723,14 +1719,14 @@ def spin_weights(self, tensorsig): Ss = {2:np.array([-1, 1], dtype=int), 3:np.array([-1, 1, 0], dtype=int)} S = np.zeros([cs.dim for cs in tensorsig], dtype=int) for i, cs in enumerate(tensorsig): - if (self.coordsystem == cs or - (type(cs) is SphericalCoordinates and self.coordsystem == cs.S2coordsys) or - (type(self.coordsystem) is SphericalCoordinates and self.coordsystem.S2coordsys == cs)): + if (self.coordsys == cs or + (type(cs) is SphericalCoordinates and self.coordsys == cs.S2coordsys) or + (type(self.coordsys) is SphericalCoordinates and self.coordsys.S2coordsys == cs)): S[axslice(i, 0, cs.dim)] += reshape_vector(Ss[cs.dim], dim=len(tensorsig), axis=i) - #if self.coordsystem is vs: # kludge before we decide how compound coordinate systems work + #if self.coordsys is vs: # kludge before we decide how compound coordinate systems work # S[axslice(i, 0, self.dim)] += reshape_vector(Ss, dim=len(tensorsig), axis=i) - #elif self.coordsystem in vs: - # n = vs.get_index(self.coordsystem) + #elif self.coordsys in vs: + # n = 
vs.get_index(self.coordsys) # S[axslice(i, n, n+self.dim)] += reshape_vector(Ss, dim=len(tensorsig), axis=i) return S @@ -1747,8 +1743,8 @@ class PolarBasis(SpinBasis): dim = 2 dims = ['azimuth', 'radius'] - def __init__(self, coordsystem, shape, dtype, k=0, dealias=(1,1), azimuth_library=None): - super().__init__(coordsystem, shape, dtype, dealias, azimuth_library=azimuth_library) + def __init__(self, coordsys, shape, dtype, k=0, dealias=(1,1), azimuth_library=None): + super().__init__(coordsys, shape, dtype, dealias, azimuth_library=azimuth_library) self.k = k self.Nmax = shape[1] - 1 if self.mmax == 0: @@ -1823,9 +1819,9 @@ def valid_elements(self, tensorsig, grid_space, elements): @CachedMethod def S1_basis(self, radius=1): if self.dtype == np.complex128: - S1_basis = ComplexFourier(self.coordsystem.coords[0], self.shape[0], bounds=(0, 2*np.pi), dealias=self.dealias[0], library=self.azimuth_library) + S1_basis = ComplexFourier(self.coordsys.coords[0], self.shape[0], bounds=(0, 2*np.pi), dealias=self.dealias[0], library=self.azimuth_library) elif self.dtype == np.float64: - S1_basis = RealFourier(self.coordsystem.coords[0], self.shape[0], bounds=(0, 2*np.pi), dealias=self.dealias[0], library=self.azimuth_library) + S1_basis = RealFourier(self.coordsys.coords[0], self.shape[0], bounds=(0, 2*np.pi), dealias=self.dealias[0], library=self.azimuth_library) else: raise NotImplementedError() S1_basis.radius = radius @@ -1919,7 +1915,7 @@ def n_slice(self, m): def __eq__(self, other): if isinstance(other, type(self)): if self.dtype == other.dtype: - if self.coordsystem == other.coordsystem: + if self.coordsys == other.coordsys: if self.grid_params == other.grid_params: if self.k == other.k: return True @@ -1964,10 +1960,10 @@ def ell_reversed(self, dist): ell_reversed[m] = False return ell_reversed - def global_grids(self, dist, scales=None): - if scales == None: scales = (1, 1) - return (self.global_grid_azimuth(dist, scales[0]), - self.global_grid_radius(dist, 
scales[1])) + def global_grids(self, dist, scales): + first_axis = dist.first_axis(self) + return (self.global_grid_azimuth(dist, scales[first_axis]), + self.global_grid_radius(dist, scales[first_axis+1])) def global_grid_radius(self, dist, scale): r = self.radial_COV.problem_coord(self._native_radius_grid(scale)) @@ -1987,10 +1983,10 @@ def local_grid_spacing(self, dist, axis, scales=None): local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scales[axis])[axis] return reshape_vector(np.ravel(global_spacing)[local_elements], dim=dist.dim, axis=axis) - def local_grids(self, dist, scales=None): - if scales == None: scales = (1, 1) - return (self.local_grid_azimuth(dist, scales[0]), - self.local_grid_radius(dist, scales[1])) + def local_grids(self, dist, scales): + first_axis = dist.first_axis(self) + return (self.local_grid_azimuth(dist, scales[first_axis]), + self.local_grid_radius(dist, scales[first_axis+1])) def forward_transform_azimuth_Mmax0(self, field, axis, gdata, cdata): # slice_axis = axis + len(field.tensorsig) @@ -2041,11 +2037,11 @@ class AnnulusBasis(PolarBasis, metaclass=CachedClass): subaxis_dependence = (False, True) @classmethod - def _preprocess_cache_args(cls, coordsystem, shape, dtype, radii, k, alpha, dealias, azimuth_library, radius_library): + def _preprocess_cache_args(cls, coordsys, shape, dtype, radii, k, alpha, dealias, azimuth_library, radius_library): """Preprocess arguments into canonical form for caching. 
Must accept and return __init__ arguments.""" - # coordsystem: PolarCoordinates - if not isinstance(coordsystem, PolarCoordinates): - raise ValueError("Annulus coordsystem must be PolarCoordinates.") + # coordsys: PolarCoordinates + if not isinstance(coordsys, PolarCoordinates): + raise ValueError("Annulus coordsys must be PolarCoordinates.") # shape: length-2 tuple shape = tuple(shape) if len(shape) != 2: @@ -2083,12 +2079,12 @@ def _preprocess_cache_args(cls, coordsystem, shape, dtype, radii, k, alpha, deal radius_library = Jacobi.default_dct else: radius_library = Jacobi.default_library - return (coordsystem, shape, dtype, radii, k, alpha, dealias, azimuth_library, radius_library) + return (coordsys, shape, dtype, radii, k, alpha, dealias, azimuth_library, radius_library) - def __init__(self, coordsystem, shape, dtype, radii=(1,2), k=0, alpha=(-0.5,-0.5), dealias=(1,1), azimuth_library=None, radius_library=None): - super().__init__(coordsystem, shape, dtype, k=k, dealias=dealias, azimuth_library=azimuth_library) + def __init__(self, coordsys, shape, dtype, radii=(1,2), k=0, alpha=(-0.5,-0.5), dealias=(1,1), azimuth_library=None, radius_library=None): + super().__init__(coordsys, shape, dtype, k=k, dealias=dealias, azimuth_library=azimuth_library) # Save arguments without modification for caching - self.coordsystem = coordsystem + self.coordsys = coordsys self.shape = shape self.dtype = dtype self.radii = radii @@ -2101,7 +2097,7 @@ def __init__(self, coordsystem, shape, dtype, radii=(1,2), k=0, alpha=(-0.5,-0.5 self.volume = np.pi * (radii[1]**2 - radii[0]**2) self.dR = radii[1] - radii[0] self.rho = (radii[1] + radii[0])/self.dR - self.grid_params = (coordsystem, dtype, radii, alpha, dealias, azimuth_library, radius_library) + self.grid_params = (coordsys, dtype, radii, alpha, dealias, azimuth_library, radius_library) self.inner_edge = self.S1_basis(radii[0]) self.outer_edge = self.S1_basis(radii[1]) @@ -2335,11 +2331,11 @@ class DiskBasis(PolarBasis, 
metaclass=CachedClass): default_library = "matrix" @classmethod - def _preprocess_cache_args(cls, coordsystem, shape, dtype, radius, k, alpha, dealias, azimuth_library, radius_library): + def _preprocess_cache_args(cls, coordsys, shape, dtype, radius, k, alpha, dealias, azimuth_library, radius_library): """Preprocess arguments into canonical form for caching. Must accept and return __init__ arguments.""" - # coordsystem: PolarCoordinates - if not isinstance(coordsystem, PolarCoordinates): - raise ValueError("Disk coordsystem must be PolarCoordinates.") + # coordsys: PolarCoordinates + if not isinstance(coordsys, PolarCoordinates): + raise ValueError("Disk coordsys must be PolarCoordinates.") # shape: length-2 tuple shape = tuple(shape) if len(shape) != 2: @@ -2363,12 +2359,12 @@ def _preprocess_cache_args(cls, coordsystem, shape, dtype, radius, k, alpha, dea # radius_library: pick default if radius_library is None: radius_library = cls.default_library - return (coordsystem, shape, dtype, radius, k, alpha, dealias, azimuth_library, radius_library) + return (coordsys, shape, dtype, radius, k, alpha, dealias, azimuth_library, radius_library) - def __init__(self, coordsystem, shape, dtype, radius=1, k=0, alpha=0, dealias=(1,1), azimuth_library=None, radius_library=None): - super().__init__(coordsystem, shape, dtype, k=k, dealias=dealias, azimuth_library=azimuth_library) + def __init__(self, coordsys, shape, dtype, radius=1, k=0, alpha=0, dealias=(1,1), azimuth_library=None, radius_library=None): + super().__init__(coordsys, shape, dtype, k=k, dealias=dealias, azimuth_library=azimuth_library) # Save arguments without modification for caching - self.coordsystem = coordsystem + self.coordsys = coordsys self.shape = shape self.dtype = dtype self.radius = radius @@ -2383,7 +2379,7 @@ def __init__(self, coordsystem, shape, dtype, radius=1, k=0, alpha=0, dealias=(1 if self.mmax > 2*self.Nmax: logger.warning("You are using more azimuthal modes than can be resolved with your 
current radial resolution") #raise ValueError("shape[0] cannot be more than twice shape[1].") - self.grid_params = (coordsystem, dtype, radius, alpha, dealias, azimuth_library, radius_library) + self.grid_params = (coordsys, dtype, radius, alpha, dealias, azimuth_library, radius_library) self.edge = self.S1_basis(radius) @CachedAttribute @@ -2704,11 +2700,11 @@ class SphereBasis(SpinBasis, metaclass=CachedClass): default_library = "matrix" @classmethod - def _preprocess_cache_args(cls, coordsystem, shape, dtype, radius, dealias, azimuth_library, colatitude_library): + def _preprocess_cache_args(cls, coordsys, shape, dtype, radius, dealias, azimuth_library, colatitude_library): """Preprocess arguments into canonical form for caching. Must accept and return __init__ arguments.""" - # coordsystem: S2Coordinates or SphericalCoordinates - if not isinstance(coordsystem, (S2Coordinates, SphericalCoordinates)): - raise ValueError("Sphere coordsystem must be S2Coordinates.") + # coordsys: S2Coordinates or SphericalCoordinates + if not isinstance(coordsys, (S2Coordinates, SphericalCoordinates)): + raise ValueError("Sphere coordsys must be S2Coordinates.") # shape: length-2 tuple shape = tuple(shape) if len(shape) != 2: @@ -2730,12 +2726,12 @@ def _preprocess_cache_args(cls, coordsystem, shape, dtype, radius, dealias, azim # colatitude_library: pick default if colatitude_library is None: colatitude_library = cls.default_library - return (coordsystem, shape, dtype, radius, dealias, azimuth_library, colatitude_library) + return (coordsys, shape, dtype, radius, dealias, azimuth_library, colatitude_library) - def __init__(self, coordsystem, shape, dtype, radius=1, dealias=(1,1), azimuth_library=None, colatitude_library=None): - super().__init__(coordsystem, shape, dtype, dealias, azimuth_library=azimuth_library) + def __init__(self, coordsys, shape, dtype, radius=1, dealias=(1,1), azimuth_library=None, colatitude_library=None): + super().__init__(coordsys, shape, dtype, dealias, 
azimuth_library=azimuth_library) # Save arguments without modification for caching - self.coordsystem = coordsystem + self.coordsys = coordsys self.shape = shape self.dtype = dtype self.radius = radius @@ -2764,7 +2760,7 @@ def __init__(self, coordsystem, shape, dtype, radius=1, dealias=(1,1), azimuth_l self.backward_transforms = [self.backward_transform_azimuth, self.backward_transform_colatitude, self.backward_transform_radius] - self.grid_params = (coordsystem, dtype, radius, dealias, azimuth_library, colatitude_library) + self.grid_params = (coordsys, dtype, radius, dealias, azimuth_library, colatitude_library) if self.shape[0] > 1 and shape[0] % 2 != 0: raise ValueError("Don't use an odd phi resolution please") if self.shape[0] > 1 and self.dtype == np.float64 and shape[0] % 4 != 0: @@ -3042,10 +3038,10 @@ def ell_maps(self, dist): ell_maps.append((ell, m_slice, ell_slice)) return tuple(ell_maps) - def global_grids(self, scales=None): - if scales == None: scales = (1, 1) - return (self.global_grid_azimuth(scales[0]), - self.global_grid_colatitude(scales[1])) + def global_grids(self, dist, scales): + first_axis = dist.first_axis(self) + return (self.global_grid_azimuth(scales[first_axis]), + self.global_grid_colatitude(scales[first_axis+1])) def global_grid_colatitude(self, dist, scale): theta = self._native_colatitude_grid(scale) @@ -3065,10 +3061,10 @@ def local_grid_spacing(self, dist, axis, scales=None): local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scales[axis])[axis] return reshape_vector(np.ravel(global_spacing)[local_elements], dim=dist.dim, axis=axis) - def local_grids(self, dist, scales=None): - if scales == None: scales = (1, 1) - return (self.local_grid_azimuth(dist, scales[0]), - self.local_grid_colatitude(dist, scales[1])) + def local_grids(self, dist, scales): + first_axis = dist.first_axis(self) + return (self.local_grid_azimuth(dist, scales[first_axis]), + self.local_grid_colatitude(dist, scales[first_axis+1])) def 
local_grid_colatitude(self, dist, scale): local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale)[dist.get_basis_axis(self)+1] @@ -3463,8 +3459,8 @@ class RegularityBasis(SpinRecombinationBasis, MultidimensionalBasis): dims = ['azimuth', 'colatitude', 'radius'] subaxis_dependence = [False, False, True] - def __init__(self, coordsystem, radial_size, k, dealias, dtype): - self.coordsystem = coordsystem + def __init__(self, coordsys, radial_size, k, dealias, dtype): + self.coordsys = coordsys self.radial_size = radial_size self.shape = (1, 1, radial_size) self.k = k @@ -3475,7 +3471,7 @@ def __init__(self, coordsystem, radial_size, k, dealias, dtype): self.Nmax = radial_size - 1 self.dtype = dtype # Call at end because dealias is needed to build self.domain - Basis.__init__(self, coordsystem) + Basis.__init__(self, coordsys) if dtype == np.float64: self.group_shape = (2, 1, 1) elif dtype == np.complex128: @@ -3587,7 +3583,7 @@ def xi(self,mu,l): def radial_recombinations(self, tensorsig, ell_list): # For now only implement recombinations for sphere-only tensors for cs in tensorsig: - if self.coordsystem is not cs: + if self.coordsys is not cs: raise ValueError("Only supports tensors over spherical coords.") order = len(tensorsig) Q_matrices = {} @@ -3603,7 +3599,7 @@ def regularity_classes(self, tensorsig): Rb = np.array([-1, 1, 0], dtype=int) R = np.zeros([cs.dim for cs in tensorsig], dtype=int) for i, cs in enumerate(tensorsig): - if self.coordsystem is cs: # kludge before we decide how compound coordinate systems work + if self.coordsys is cs: # kludge before we decide how compound coordinate systems work R[axslice(i, 0, cs.dim)] += reshape_vector(Rb, dim=len(tensorsig), axis=i) #elif self.space in vs.spaces: # n = vs.get_index(self.space) @@ -3615,7 +3611,7 @@ def regularity_indices(self, tensorsig): indices = [] tshape = [cs.dim for cs in tensorsig] for i, cs in enumerate(tensorsig): - if self.coordsystem is cs: + if self.coordsys is cs: 
index = np.zeros(tshape) + reshape_vector(np.array([-1, 1, 0]), dim=len(tshape), axis=i) indices.append(index) return np.stack(indices, axis=-1) @@ -3719,8 +3715,8 @@ def n_slice(self, ell): class ShellRadialBasis(RegularityBasis, metaclass=CachedClass): - def __init__(self, coordsystem, radial_size, dtype, radii=(1,2), alpha=(-0.5,-0.5), dealias=(1,), k=0, radius_library=None): - super().__init__(coordsystem, radial_size, k=k, dealias=dealias, dtype=dtype) + def __init__(self, coordsys, radial_size, dtype, radii=(1,2), alpha=(-0.5,-0.5), dealias=(1,), k=0, radius_library=None): + super().__init__(coordsys, radial_size, k=k, dealias=dealias, dtype=dtype) if radii[0] <= 0: raise ValueError("Inner radius must be positive.") if radius_library is None: @@ -3734,7 +3730,7 @@ def __init__(self, coordsystem, radial_size, dtype, radii=(1,2), alpha=(-0.5,-0. self.rho = (self.radii[1] + self.radii[0])/self.dR self.alpha = alpha self.radius_library = radius_library - self.grid_params = (coordsystem, radii, alpha, self.dealias) + self.grid_params = (coordsys, radii, alpha, self.dealias) self.forward_transforms = [self.forward_transform_azimuth, self.forward_transform_colatitude, self.forward_transform_radius] @@ -3744,7 +3740,7 @@ def __init__(self, coordsystem, radial_size, dtype, radii=(1,2), alpha=(-0.5,-0. 
def __eq__(self, other): if isinstance(other, ShellRadialBasis): - if self.coordsystem == other.coordsystem: + if self.coordsys == other.coordsys: if self.grid_params == other.grid_params: if self.k == other.k: return True @@ -3774,9 +3770,9 @@ def __mul__(self, other): k = self.k + other.k return self.clone_with(radial_size=radial_size, k=k) if isinstance(other, SphereBasis): - unify((self.coordsystem, other.coordsystem)) + unify((self.coordsys, other.coordsys)) args = {} - args['coordsystem'] = self.coordsystem + args['coordsys'] = self.coordsys args['shape'] = other.shape + self.shape[-1:] # Because ShellRadialBasis shape is padded up to 3d args['radii'] = self.radii args['alpha'] = self.alpha @@ -3958,8 +3954,8 @@ class BallRadialBasis(RegularityBasis, metaclass=CachedClass): transforms = {} - def __init__(self, coordsystem, radial_size, dtype, radius=1, k=0, alpha=0, dealias=(1,), radius_library=None): - super().__init__(coordsystem, radial_size, k=k, dealias=dealias, dtype=dtype) + def __init__(self, coordsys, radial_size, dtype, radius=1, k=0, alpha=0, dealias=(1,), radius_library=None): + super().__init__(coordsys, radial_size, k=k, dealias=dealias, dtype=dtype) if radius <= 0: raise ValueError("Radius must be positive.") if radius_library is None: @@ -3969,7 +3965,7 @@ def __init__(self, coordsystem, radial_size, dtype, radius=1, k=0, alpha=0, deal self.alpha = alpha self.radial_COV = AffineCOV((0, 1), (0, radius)) self.radius_library = radius_library - self.grid_params = (coordsystem, radius, alpha, self.dealias) + self.grid_params = (coordsys, radius, alpha, self.dealias) self.forward_transforms = [self.forward_transform_azimuth, self.forward_transform_colatitude, self.forward_transform_radius] @@ -3979,7 +3975,7 @@ def __init__(self, coordsystem, radial_size, dtype, radius=1, k=0, alpha=0, deal def __eq__(self, other): if isinstance(other, BallRadialBasis): - if self.coordsystem == other.coordsystem: + if self.coordsys == other.coordsys: if 
self.grid_params == other.grid_params: if self.k == other.k: return True @@ -4186,8 +4182,8 @@ class Spherical3DBasis(MultidimensionalBasis): dims = ['azimuth', 'colatitude', 'radius'] subaxis_dependence = [False, True, True] - def __init__(self, coordsystem, shape_angular, dealias_angular, radial_basis, dtype, azimuth_library=None, colatitude_library=None): - self.coordsystem = coordsystem + def __init__(self, coordsys, shape_angular, dealias_angular, radial_basis, dtype, azimuth_library=None, colatitude_library=None): + self.coordsys = coordsys self.shape = tuple( (*shape_angular, radial_basis.shape[2] ) ) self.dtype = dtype if np.isscalar(dealias_angular): @@ -4223,7 +4219,7 @@ def __init__(self, coordsystem, shape_angular, dealias_angular, radial_basis, dt self.group_shape = (2, 1, 1) elif dtype == np.complex128: self.group_shape = (1, 1, 1) - Basis.__init__(self, coordsystem) + Basis.__init__(self, coordsys) @CachedAttribute def ell_reversed(self): @@ -4249,17 +4245,17 @@ def constant_mode_value(self): # TODO: check this is right for regtotal != 0? 
return self.radial_basis.constant_mode_value / np.sqrt(2) - def global_grids(self, scales=None): - if scales == None: scales = (1,1,1) - return (self.global_grid_azimuth(scales[0]), - self.global_grid_colatitude(scales[1]), - self.global_grid_radius(scales[2])) + def global_grids(self, dist, scales): + first_axis = dist.first_axis(self) + return (self.global_grid_azimuth(scales[first_axis]), + self.global_grid_colatitude(scales[first_axis+1]), + self.global_grid_radius(scales[first_axis+2])) - def local_grids(self, dist, scales=None): - if scales == None: scales = (1,1,1) - return (self.local_grid_azimuth(dist, scales[0]), - self.local_grid_colatitude(dist, scales[1]), - self.local_grid_radius(dist, scales[2])) + def local_grids(self, dist, scales): + first_axis = dist.first_axis(self) + return (self.local_grid_azimuth(dist, scales[first_axis]), + self.local_grid_colatitude(dist, scales[first_axis+1]), + self.local_grid_radius(dist, scales[first_axis+2])) @CachedMethod def global_grid_spacing(self, dist, axis, scales=None): @@ -4295,7 +4291,7 @@ def S2_basis(self, radius=None): radius = self.radius else: radius = max(self.radii) - return SphereBasis(self.coordsystem, self.shape[:2], self.dtype, radius=radius, dealias=self.dealias[:2], + return SphereBasis(self.coordsys, self.shape[:2], self.dtype, radius=radius, dealias=self.dealias[:2], azimuth_library=self.azimuth_library, colatitude_library=self.colatitude_library) @CachedMethod @@ -4383,11 +4379,11 @@ def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_b class ShellBasis(Spherical3DBasis, metaclass=CachedClass): @classmethod - def _preprocess_cache_args(cls, coordsystem, shape, dtype, radii, k, alpha, dealias, azimuth_library, colatitude_library, radius_library): + def _preprocess_cache_args(cls, coordsys, shape, dtype, radii, k, alpha, dealias, azimuth_library, colatitude_library, radius_library): """Preprocess arguments into canonical form for caching. 
Must accept and return __init__ arguments.""" - # coordsystem: SphericalCoordinates - if not isinstance(coordsystem, SphericalCoordinates): - raise ValueError("Shell coordsystem must be SphericalCoordinates.") + # coordsys: SphericalCoordinates + if not isinstance(coordsys, SphericalCoordinates): + raise ValueError("Shell coordsys must be SphericalCoordinates.") # shape: length-3 tuple shape = tuple(shape) if len(shape) != 3: @@ -4428,11 +4424,11 @@ def _preprocess_cache_args(cls, coordsystem, shape, dtype, radii, k, alpha, deal radius_library = Jacobi.default_dct else: radius_library = Jacobi.default_library - return (coordsystem, shape, dtype, radii, k, alpha, dealias, azimuth_library, colatitude_library, radius_library) + return (coordsys, shape, dtype, radii, k, alpha, dealias, azimuth_library, colatitude_library, radius_library) - def __init__(self, coordsystem, shape, dtype, radii=(1,2), k=0, alpha=(-0.5,-0.5), dealias=(1,1,1), azimuth_library=None, colatitude_library=None, radius_library=None): + def __init__(self, coordsys, shape, dtype, radii=(1,2), k=0, alpha=(-0.5,-0.5), dealias=(1,1,1), azimuth_library=None, colatitude_library=None, radius_library=None): # Save arguments without modification for caching - self.coordsystem = coordsystem + self.coordsys = coordsys self.shape = shape self.dtype = dtype self.radii = radii @@ -4445,9 +4441,9 @@ def __init__(self, coordsystem, shape, dtype, radii=(1,2), k=0, alpha=(-0.5,-0.5 # Other attributes self.volume = 4 / 3 * np.pi * (radii[1]**3 - radii[0]**3) self.radius_library = radius_library - self.radial_basis = ShellRadialBasis(coordsystem, shape[2], radii=radii, alpha=alpha, dealias=(dealias[2],), k=k, dtype=dtype, radius_library=radius_library) - Spherical3DBasis.__init__(self, coordsystem, shape[:2], dealias[:2], self.radial_basis, dtype=dtype, azimuth_library=azimuth_library, colatitude_library=colatitude_library) - self.grid_params = (coordsystem, dtype, radii, alpha, dealias, azimuth_library, 
colatitude_library, radius_library) + self.radial_basis = ShellRadialBasis(coordsys, shape[2], radii=radii, alpha=alpha, dealias=(dealias[2],), k=k, dtype=dtype, radius_library=radius_library) + Spherical3DBasis.__init__(self, coordsys, shape[:2], dealias[:2], self.radial_basis, dtype=dtype, azimuth_library=azimuth_library, colatitude_library=colatitude_library) + self.grid_params = (coordsys, dtype, radii, alpha, dealias, azimuth_library, colatitude_library, radius_library) # self.forward_transform_radius = self.radial_basis.forward_transform # self.backward_transform_radius = self.radial_basis.backward_transform self.forward_transforms = [self.forward_transform_azimuth, @@ -4461,7 +4457,7 @@ def __init__(self, coordsystem, shape, dtype, radii=(1,2), k=0, alpha=(-0.5,-0.5 def __eq__(self, other): if isinstance(other, ShellBasis): - if self.coordsystem == other.coordsystem: + if self.coordsys == other.coordsys: if self.grid_params == other.grid_params: if self.k == other.k: return True @@ -4625,11 +4621,11 @@ class BallBasis(Spherical3DBasis, metaclass=CachedClass): default_library = "matrix" @classmethod - def _preprocess_cache_args(cls, coordsystem, shape, dtype, radius, k, alpha, dealias, azimuth_library, colatitude_library, radius_library): + def _preprocess_cache_args(cls, coordsys, shape, dtype, radius, k, alpha, dealias, azimuth_library, colatitude_library, radius_library): """Preprocess arguments into canonical form for caching. 
Must accept and return __init__ arguments.""" - # coordsystem: SphericalCoordinates - if not isinstance(coordsystem, SphericalCoordinates): - raise ValueError("Ball coordsystem must be SphericalCoordinates.") + # coordsys: SphericalCoordinates + if not isinstance(coordsys, SphericalCoordinates): + raise ValueError("Ball coordsys must be SphericalCoordinates.") # shape: length-3 tuple shape = tuple(shape) if len(shape) != 3: @@ -4656,11 +4652,11 @@ def _preprocess_cache_args(cls, coordsystem, shape, dtype, radius, k, alpha, dea # radius_library: pick default if radius_library is None: radius_library = cls.default_library - return (coordsystem, shape, dtype, radius, k, alpha, dealias, azimuth_library, colatitude_library, radius_library) + return (coordsys, shape, dtype, radius, k, alpha, dealias, azimuth_library, colatitude_library, radius_library) - def __init__(self, coordsystem, shape, dtype, radius=1, k=0, alpha=0, dealias=(1,1,1), azimuth_library=None, colatitude_library=None, radius_library=None): + def __init__(self, coordsys, shape, dtype, radius=1, k=0, alpha=0, dealias=(1,1,1), azimuth_library=None, colatitude_library=None, radius_library=None): # Save arguments without modification for caching - self.coordsystem = coordsystem + self.coordsys = coordsys self.shape = shape self.dtype = dtype self.radius = radius @@ -4672,9 +4668,9 @@ def __init__(self, coordsystem, shape, dtype, radius=1, k=0, alpha=0, dealias=(1 self.radius_library = radius_library # Other attributes self.volume = 4 / 3 * np.pi * radius**3 - self.radial_basis = BallRadialBasis(coordsystem, shape[2], radius=radius, k=k, alpha=alpha, dealias=(dealias[2],), dtype=dtype, radius_library=radius_library) - Spherical3DBasis.__init__(self, coordsystem, shape[:2], dealias[:2], self.radial_basis, dtype=dtype, azimuth_library=azimuth_library, colatitude_library=colatitude_library) - self.grid_params = (coordsystem, dtype, radius, alpha, dealias, azimuth_library, colatitude_library, radius_library) + 
self.radial_basis = BallRadialBasis(coordsys, shape[2], radius=radius, k=k, alpha=alpha, dealias=(dealias[2],), dtype=dtype, radius_library=radius_library) + Spherical3DBasis.__init__(self, coordsys, shape[:2], dealias[:2], self.radial_basis, dtype=dtype, azimuth_library=azimuth_library, colatitude_library=colatitude_library) + self.grid_params = (coordsys, dtype, radius, alpha, dealias, azimuth_library, colatitude_library, radius_library) self.forward_transforms = [self.forward_transform_azimuth, self.forward_transform_colatitude, self.forward_transform_radius] @@ -4685,7 +4681,7 @@ def __init__(self, coordsystem, shape, dtype, radius=1, k=0, alpha=0, dealias=(1 def __eq__(self, other): if isinstance(other, BallBasis): - if self.coordsystem == other.coordsystem: + if self.coordsys == other.coordsys: if self.grid_params == other.grid_params: if self.k == other.k: return True @@ -5115,9 +5111,10 @@ def regindex_out(self, regindex_in): def subproblem_matrix(self, subproblem): operand = self.args[0] radial_basis = self.output_basis.radial_basis ## CHANGED RELATIVE TO POLARMOPERATOR + radial_axis = self.dist.last_axis(radial_basis) R_in = radial_basis.regularity_classes(operand.tensorsig) R_out = radial_basis.regularity_classes(self.tensorsig) # Should this use output_basis? 
- ell = subproblem.group[self.dist.last_axis(self) - 1] + ell = subproblem.group[radial_axis - 1] # Loop over components submatrices = [] for regindex_out, regtotal_out in np.ndenumerate(R_out): @@ -5130,7 +5127,7 @@ def subproblem_matrix(self, subproblem): if (regindex_out in self.regindex_out(regindex_in)) and radial_basis.regularity_allowed(ell, regindex_in) and radial_basis.regularity_allowed(ell, regindex_out): # Substitute factor for radial axis factors = [sparse.eye(m, n, format='csr') for m, n in zip(subshape_out, subshape_in)] - factors[dist.last_axis(self)] = self.radial_matrix(regindex_in, regindex_out, ell) + factors[radial_axis] = self.radial_matrix(regindex_in, regindex_out, ell) comp_matrix = reduce(sparse.kron, factors, 1).tocsr() else: # Build zero matrix @@ -5776,7 +5773,7 @@ def __init__(self, operand, coord, position, out=None): self.radial_basis = self.input_basis def subproblem_matrix(self, subproblem): - ell = subproblem.group[dist.last_axis(self) - 1] + ell = subproblem.group[self.dist.last_axis(self.radial_basis) - 1] matrix = super().subproblem_matrix(subproblem) radial_basis = self.radial_basis if self.tensorsig != (): diff --git a/dedalus/core/coords.py b/dedalus/core/coords.py index 84558e4f..9e7eb77b 100644 --- a/dedalus/core/coords.py +++ b/dedalus/core/coords.py @@ -90,10 +90,10 @@ def __init__(self, *names, right_handed=True): def __str__(self): return '{' + ','.join([c.name for c in self.coords]) + '}' - def forward_intertwiner(self, first_axis, axis, order, group): + def forward_intertwiner(self, subaxis, order, group): return np.identity(self.dim**order) - def backward_intertwiner(self, first_axis, axis, order, group): + def backward_intertwiner(self, subaxis, order, group): return np.identity(self.dim**order) @CachedMethod @@ -146,8 +146,7 @@ def _U_backward(cls, order): """Unitary transform from spin to coord components.""" return cls._U_forward(order).T.conj() - def forward_intertwiner(self, first_axis, axis, order, group): - 
subaxis = axis - first_axis + def forward_intertwiner(self, subaxis, order, group): if subaxis == 0: # Azimuth intertwiner is identity, independent of group return np.identity(self.dim**order) @@ -157,8 +156,7 @@ def forward_intertwiner(self, first_axis, axis, order, group): else: raise ValueError("Invalid axis") - def backward_intertwiner(self, first_axis, axis, order, group): - subaxis = axis - first_axis + def backward_intertwiner(self, subaxis, order, group): if subaxis == 0: # Azimuth intertwiner is identity, independent of group return np.identity(self.dim**order) @@ -200,8 +198,7 @@ def _U_backward(cls, order): """Unitary transform from spin to coord components.""" return cls._U_forward(order).T.conj() - def forward_intertwiner(self, first_axis, axis, order, group): - subaxis = axis - first_axis + def forward_intertwiner(self, subaxis, order, group): if subaxis == 0: # Azimuth intertwiner is identity, independent of group return np.identity(self.dim**order) @@ -211,8 +208,7 @@ def forward_intertwiner(self, first_axis, axis, order, group): else: raise ValueError("Invalid axis") - def backward_intertwiner(self, first_axis, axis, order, group): - subaxis = axis - first_axis + def backward_intertwiner(self, subaxis, order, group): if subaxis == 0: # Azimuth intertwiner is identity, independent of group return np.identity(self.dim**order) @@ -301,8 +297,7 @@ def cartesian(phi, theta, r): z = r * np.cos(theta) return x, y, z - def forward_intertwiner(self, first_axis, axis, order, group): - subaxis = axis - first_axis + def forward_intertwiner(self, subaxis, order, group): if subaxis == 0: # Azimuth intertwiner is identity, independent of group return np.identity(self.dim**order) @@ -311,13 +306,12 @@ def forward_intertwiner(self, first_axis, axis, order, group): return self._U_forward(order) elif subaxis == 2: # Radius intertwiner is reg-Q, dependent on ell - ell = group[axis-1] + ell = group[subaxis-1] return self._Q_forward(ell, order) else: raise 
ValueError("Invalid axis") - def backward_intertwiner(self, first_axis, axis, order, group): - subaxis = axis - first_axis + def backward_intertwiner(self, subaxis, order, group): if subaxis == 0: # Azimuth intertwiner is identity, independent of group return np.identity(self.dim**order) @@ -326,7 +320,7 @@ def backward_intertwiner(self, first_axis, axis, order, group): return self._U_backward(order) elif subaxis == 2: # Radius intertwiner is reg-Q, dependent on ell - ell = group[axis-1] + ell = group[subaxis-1] return self._Q_backward(ell, order) else: raise ValueError("Invalid axis") diff --git a/dedalus/core/distributor.py b/dedalus/core/distributor.py index b3902171..655b6890 100644 --- a/dedalus/core/distributor.py +++ b/dedalus/core/distributor.py @@ -73,16 +73,16 @@ class Distributor: """ def __init__(self, coordsystems, comm=None, mesh=None, dtype=None): - # Accept single coordsystem in place of tuple/list + # Accept single coordsys in place of tuple/list if not isinstance(coordsystems, (tuple, list)): coordsystems = (coordsystems,) - # Note if only a single coordsystem for simplicity + # Note if only a single coordsys for simplicity if len(coordsystems) == 1: self.single_coordsys = coordsystems[0] else: self.single_coordsys = False # Get coords - self.coords = tuple([coord for coordsystem in coordsystems for coord in coordsystem.coords]) + self.coords = tuple([coord for coordsys in coordsystems for coord in coordsys.coords]) self.coordsystems = coordsystems # Defaults if comm is None: @@ -95,7 +95,7 @@ def __init__(self, coordsystems, comm=None, mesh=None, dtype=None): # Trim trailing ones mesh = 1 + np.trim_zeros(mesh - 1, trim='b') self.dim = dim = len(self.coords) -# self.dim = dim = sum(coordsystem.dim for coordsystem in coordsystems) +# self.dim = dim = sum(coordsys.dim for coordsys in coordsystems) self.comm = comm self.mesh = mesh = np.array(mesh) # Check mesh compatibility @@ -117,7 +117,8 @@ def cs_by_axis(self): cs_dict = {} for cs in 
self.coordsystems: for subaxis in range(cs.dim): - cs_dict[cs.axis+subaxis] = cs + axis = self.get_axis(cs) + cs_dict[axis+subaxis] = cs return cs_dict def get_coordsystem(self, axis): @@ -237,6 +238,8 @@ def IdentityTensor(self, coordsys): def local_grid(self, basis, scale=None): # TODO: remove from bases and do it all here? + if scale is None: + scale = 1 if basis.dim == 1: return basis.local_grid(self, scale=scale) else: @@ -271,6 +274,7 @@ def local_grid(self, basis, scale=None): # return tuple(grids) def local_grids(self, *bases, scales=None): + scales = self.remedy_scales(scales) # TODO: remove from bases and do it all here? return sum((basis.local_grids(self, scales=scales) for basis in bases), ()) diff --git a/dedalus/core/domain.py b/dedalus/core/domain.py index 56bb6d13..97764768 100644 --- a/dedalus/core/domain.py +++ b/dedalus/core/domain.py @@ -33,7 +33,7 @@ def _preprocess_args(cls, dist, bases): # Drop duplicate bases bases = tuple(OrderedSet(bases)) # Make sure coordsystems don't overlap - cs = [b.coordsystem for b in bases] + cs = [b.coordsys for b in bases] if len(set(cs)) < len(cs): raise ValueError("Overlapping bases specified.") # Sort by first axis @@ -76,7 +76,7 @@ def bases_by_coord(self): bases_by_coord[coord.cs] = None for basis in self.bases: bases_by_coord[basis.coords] = basis - #bases_by_coord[basis.coordsystem] = basis + #bases_by_coord[basis.coordsys] = basis return bases_by_coord @CachedAttribute diff --git a/dedalus/core/evaluator.py b/dedalus/core/evaluator.py index 2628f5a7..9202142b 100644 --- a/dedalus/core/evaluator.py +++ b/dedalus/core/evaluator.py @@ -552,10 +552,10 @@ def setup_file(self, file): else: subaxis = axis - self.dist.get_basis_axis(basis) if layout.grid_space[axis]: - sn = basis.coordsystem.coords[subaxis].name + sn = basis.coordsys.coords[subaxis].name data = basis.global_grids(self.dist, scales)[subaxis].ravel() else: - sn = 'k' + basis.coordsystem.coords[subaxis].name + sn = 'k' + 
basis.coordsys.coords[subaxis].name data = layout.global_group_arrays(op.domain, scales)[subaxis] scale_hash = hashlib.sha1(data).hexdigest() lookup = f"{sn}_hash_{scale_hash}" diff --git a/dedalus/core/operators.py b/dedalus/core/operators.py index 6a4b78de..748c552f 100644 --- a/dedalus/core/operators.py +++ b/dedalus/core/operators.py @@ -1076,7 +1076,7 @@ def _check_args(cls, operand, coord, position, out=None): # Dispatch by operand basis and subaxis if isinstance(operand, Operand): basis = operand.domain.get_basis(coord) - subaxis = basis.coordsystem.coords.index(coord) + subaxis = basis.coordsys.coords.index(coord) if isinstance(basis, cls.input_basis_type) and cls.basis_subaxis == subaxis: return True return False @@ -1137,7 +1137,7 @@ def _preprocess_args(cls, operand, coord=None): raise SkipDispatchException(output=0) # Integrate over all operand bases by default if coord is None: - coord = [basis.coordsystem for basis in operand.domain.bases] + coord = [basis.coordsys for basis in operand.domain.bases] # Split Cartesian coordinates if isinstance(coord, coords.CartesianCoordinates): coord = coord.coords @@ -1207,7 +1207,7 @@ def _preprocess_args(cls, operand, coord=None): raise SkipDispatchException(output=operand) # Average over all operand bases by default if coord is None: - coord = [basis.coordsystem for basis in operand.domain.bases] + coord = [basis.coordsys for basis in operand.domain.bases] # Split Cartesian coordinates if isinstance(coord, coords.CartesianCoordinates): coord = coord.coords @@ -4031,7 +4031,7 @@ def _preprocess_args(cls, operand, output_basis, n, out=None): def _check_args(cls, operand, output_basis, n, out=None): # Dispatch by output basis if isinstance(operand, Operand): - input_basis = operand.domain.get_basis(output_basis.coordsystem) + input_basis = operand.domain.get_basis(output_basis.coordsys) if (isinstance(input_basis, cls.input_basis_type) and isinstance(output_basis, cls.output_basis_type)): return True diff --git 
a/dedalus/tests/test_cartesian_operators.py b/dedalus/tests/test_cartesian_operators.py index a85ed7ef..719b12e6 100644 --- a/dedalus/tests/test_cartesian_operators.py +++ b/dedalus/tests/test_cartesian_operators.py @@ -26,8 +26,7 @@ def build_FF(N, dealias, dtype): xb = d3.RealFourier(c.coords[0], size=N, bounds=(0, Lx), dealias=dealias) yb = d3.RealFourier(c.coords[1], size=N, bounds=(0, Ly), dealias=dealias) b = (xb, yb) - x = xb.local_grid(dealias) - y = yb.local_grid(dealias) + x, y = d.local_grids(xb, yb, scales=dealias) r = (x, y) return c, d, b, r @@ -42,8 +41,7 @@ def build_FC(N, dealias, dtype): xb = d3.RealFourier(c.coords[0], size=N, bounds=(0, Lx), dealias=dealias) yb = d3.Chebyshev(c.coords[1], size=N, bounds=(0, Ly), dealias=dealias) b = (xb, yb) - x = xb.local_grid(dealias) - y = yb.local_grid(dealias) + x, y = d.local_grids(xb, yb, scales=dealias) r = (x, y) return c, d, b, r @@ -55,8 +53,7 @@ def build_CC(N, dealias, dtype): xb = d3.Chebyshev(c.coords[0], size=N, bounds=(0, Lx), dealias=dealias) yb = d3.Chebyshev(c.coords[1], size=N, bounds=(0, Ly), dealias=dealias) b = (xb, yb) - x = xb.local_grid(dealias) - y = yb.local_grid(dealias) + x, y = d.local_grids(xb, yb, scales=dealias) r = (x, y) return c, d, b, r @@ -74,9 +71,7 @@ def build_FFF(N, dealias, dtype): yb = d3.RealFourier(c.coords[1], size=N, bounds=(0, Ly), dealias=dealias) zb = d3.RealFourier(c.coords[2], size=N, bounds=(0, Lz), dealias=dealias) b = (xb, yb, zb) - x = xb.local_grid(dealias) - y = yb.local_grid(dealias) - z = zb.local_grid(dealias) + x, y, z = d.local_grids(xb, yb, zb, scales=dealias) r = (x, y, z) return c, d, b, r @@ -93,9 +88,7 @@ def build_FFC(N, dealias, dtype): yb = d3.RealFourier(c.coords[1], size=N, bounds=(0, Ly), dealias=dealias) zb = d3.ChebyshevT(c.coords[2], size=N, bounds=(0, Lz), dealias=dealias) b = (xb, yb, zb) - x = xb.local_grid(dealias) - y = yb.local_grid(dealias) - z = zb.local_grid(dealias) + x, y, z = d.local_grids(xb, yb, zb, scales=dealias) r = 
(x, y, z) return c, d, b, r diff --git a/dedalus/tests/test_cfl.py b/dedalus/tests/test_cfl.py index 34c2236e..09742144 100644 --- a/dedalus/tests/test_cfl.py +++ b/dedalus/tests/test_cfl.py @@ -103,7 +103,7 @@ def test_cfl_fourier(N, L, dealias, dtype): c = d3.CartesianCoordinates('x') d = d3.Distributor(c, dtype=dtype) b = d3.Fourier(c.coords[0], size=N, bounds=(0, L), dealias=dealias, dtype=dtype) - x = b.local_grid(1) + x = d.local_grid(b, scale=1) u = d.VectorField(c, bases=b) u.fill_random(layout='g') cfl = d3.AdvectiveCFL(u, c) @@ -121,7 +121,7 @@ def test_cfl_chebyshev(N, L, dealias, dtype): c = d3.CartesianCoordinates('x') d = d3.Distributor(c, dtype=dtype) b = d3.Chebyshev(c.coords[0], size=N, bounds=(0, L), dealias=dealias) - x = b.local_grid(1) + x = d.local_grid(b, scale=1) u = d.VectorField(c, bases=b) u.fill_random(layout='g') cfl = d3.AdvectiveCFL(u, c) diff --git a/dedalus/tests/test_evp.py b/dedalus/tests/test_evp.py index 7dfe82ba..7a0f6cf3 100644 --- a/dedalus/tests/test_evp.py +++ b/dedalus/tests/test_evp.py @@ -78,7 +78,7 @@ def test_laplace_jacobi_first_order(N, a, b, dtype): c = d3.Coordinate('x') d = d3.Distributor(c, dtype=dtype) b = d3.Jacobi(c, size=N, a=a, b=b, bounds=(0, np.pi)) - x = b.local_grid(1) + x = d.local_grid(b, scale=1) # Fields u = d.Field(bases=b) s = d.Field() @@ -156,7 +156,7 @@ def test_ball_bessel_eigenfunction(Nphi, Ntheta, Nr, radius, alpha, dtype, ell): c = d3.SphericalCoordinates('phi', 'theta', 'r') d = d3.Distributor(c, dtype=dtype) b = d3.BallBasis(c, (Nphi, Ntheta, Nr), radius=radius, alpha=alpha, dtype=dtype) - phi, theta, r = b.local_grids((1, 1, 1)) + phi, theta, r = d.local_grids(b, scales=(1, 1, 1)) # Fields u = d.Field(bases=b) tau = d.Field(bases=b.surface) diff --git a/dedalus/tests/test_fourier_operators.py b/dedalus/tests/test_fourier_operators.py index 40347866..018861e3 100644 --- a/dedalus/tests/test_fourier_operators.py +++ b/dedalus/tests/test_fourier_operators.py @@ -17,7 +17,7 @@ def 
build_fourier(N, bounds, dealias, dtype): c = d3.Coordinate('x') d = d3.Distributor(c, dtype=dtype) b = d3.Fourier(c, size=N, bounds=bounds, dealias=dealias, dtype=dtype) - x = b.local_grid(1) + x = d.local_grid(b, scale=1) return c, d, b, x diff --git a/dedalus/tests/test_grid_operators.py b/dedalus/tests/test_grid_operators.py index c74daf98..96cef706 100644 --- a/dedalus/tests/test_grid_operators.py +++ b/dedalus/tests/test_grid_operators.py @@ -17,7 +17,7 @@ def build_ball(N, dealias, dtype, radius=1): c = d3.SphericalCoordinates('phi', 'theta', 'r') d = d3.Distributor(c, dtype=dtype) b = d3.BallBasis(c, (2*N, N, N), radius=radius, dtype=dtype, dealias=dealias) - phi, theta, r = b.local_grids() + phi, theta, r = d.local_grids(b) x, y, z = c.cartesian(phi, theta, r) return c, d, b, phi, theta, r, x, y, z @@ -27,7 +27,7 @@ def build_shell(N, dealias, dtype, radii=(0.5,1)): c = d3.SphericalCoordinates('phi', 'theta', 'r') d = d3.Distributor(c, dtype=dtype) b = d3.ShellBasis(c, (2*N, N, N), radii=radii, dtype=dtype, dealias=dealias) - phi, theta, r = b.local_grids() + phi, theta, r = d.local_grids(b) x, y, z = c.cartesian(phi, theta, r) return c, d, b, phi, theta, r, x, y, z @@ -42,7 +42,7 @@ def test_jacobi_ufunc_field(N, a, b, dealias, dtype, func): c = d3.Coordinate('x') d = d3.Distributor(c, dtype=dtype) b = d3.Jacobi(c, size=N, a=a, b=b, bounds=(0, 1), dealias=dealias) - x = b.local_grid(1) + x = d.local_grid(b, scale=1) f = d.Field(bases=b) if func is np.arccosh: f['g'] = 1 + x**2 @@ -94,7 +94,7 @@ def test_jacobi_GeneralFunction_coord(N, a, b, dealias, dtype): c = d3.Coordinate('x') d = d3.Distributor(c, dtype=dtype) b = d3.Jacobi(c, size=N, a=a, b=b, bounds=(0, 1), dealias=dealias) - x = b.local_grid(dealias) + x = d.local_grid(b, dealias) f = d.Field(bases=b) def F(x): return np.sin(x) @@ -112,7 +112,7 @@ def test_jacobi_GeneralFunction_field(N, a, b, dealias, dtype): c = d3.Coordinate('x') d = d3.Distributor(c, dtype=dtype) b = d3.Jacobi(c, size=N, a=a, 
b=b, bounds=(0, 1), dealias=dealias) - x = b.local_grid(dealias) + x = d.local_grid(b, dealias) f = d.Field(bases=b) f.preset_scales(dealias) f['g'] = np.cos(x) diff --git a/dedalus/tests/test_ivp.py b/dedalus/tests/test_ivp.py index a9e4c2b4..1c225629 100644 --- a/dedalus/tests/test_ivp.py +++ b/dedalus/tests/test_ivp.py @@ -28,7 +28,7 @@ def test_heat_periodic(basis, N, dtype, dealias, timestepper): c = d3.Coordinate('x') d = d3.Distributor(c, dtype=dtype) b = basis(c, size=N, bounds=(0, 2*np.pi), dealias=dealias) - x = b.local_grid(1) + x = d.local_grid(b, scale=1) # Fields u = d.Field(bases=b) F = d.Field(bases=b) diff --git a/dedalus/tests/test_jacobi_operators.py b/dedalus/tests/test_jacobi_operators.py index ebaf6863..1f852a60 100644 --- a/dedalus/tests/test_jacobi_operators.py +++ b/dedalus/tests/test_jacobi_operators.py @@ -18,7 +18,7 @@ def build_jacobi(N, a, b, k, bounds, dealias, dtype): c = d3.Coordinate('x') d = d3.Distributor(c, dtype=dtype) b = d3.Jacobi(c, size=N, a0=a, b0=b, a=a+k, b=b+k, bounds=bounds, dealias=dealias) - x = b.local_grid(1) + x = d.local_grid(b, scale=1) return c, d, b, x diff --git a/dedalus/tests/test_lbvp.py b/dedalus/tests/test_lbvp.py index 0a194fff..2cd9459d 100644 --- a/dedalus/tests/test_lbvp.py +++ b/dedalus/tests/test_lbvp.py @@ -20,7 +20,7 @@ def build_disk(Nphi, Nr, radius, alpha, dealias, dtype): c = d3.PolarCoordinates('phi', 'r') d = d3.Distributor(c, dtype=dtype) b = d3.DiskBasis(c, (Nphi, Nr), radius=radius, alpha=alpha, dealias=dealias, dtype=dtype) - phi, r = b.local_grids() + phi, r = d.local_grids(b) x, y = c.cartesian(phi, r) return c, d, b, phi, r, x, y @@ -30,7 +30,7 @@ def build_annulus(Nphi, Nr, radii, alpha, dealias, dtype): c = d3.PolarCoordinates('phi', 'r') d = d3.Distributor(c, dtype=dtype) b = d3.AnnulusBasis(c, (Nphi, Nr), radii=radii, alpha=alpha, dealias=dealias, dtype=dtype) - phi, r = b.local_grids() + phi, r = d.local_grids(b) x, y = c.cartesian(phi, r) return c, d, b, phi, r, x, y @@ -306,7 
+306,7 @@ def build_ball(Nphi, Ntheta, Nr, dealias, dtype): c = d3.SphericalCoordinates('phi', 'theta', 'r') d = d3.Distributor((c,), dtype=dtype) b = d3.BallBasis(c, (Nphi, Ntheta, Nr), radius=radius_ball, dealias=(dealias, dealias, dealias), dtype=dtype) - phi, theta, r = b.local_grids() + phi, theta, r = d.local_grids(b) x, y, z = c.cartesian(phi, theta, r) return c, d, b, phi, theta, r, x, y, z @@ -376,7 +376,7 @@ def build_shell(Nphi, Ntheta, Nr, dealias, dtype): c = d3.SphericalCoordinates('phi', 'theta', 'r') d = d3.Distributor((c,), dtype=dtype) b = d3.ShellBasis(c, (Nphi, Ntheta, Nr), radii=radii_shell, dealias=(dealias, dealias, dealias), dtype=dtype) - phi, theta, r = b.local_grids() + phi, theta, r = d.local_grids(b) x, y, z = c.cartesian(phi, theta, r) return c, d, b, phi, theta, r, x, y, z diff --git a/dedalus/tests/test_nlbvp.py b/dedalus/tests/test_nlbvp.py index 25a8c458..c7d31a6a 100644 --- a/dedalus/tests/test_nlbvp.py +++ b/dedalus/tests/test_nlbvp.py @@ -19,7 +19,7 @@ def test_sin_jacobi(N, a, b, dealias, dtype): c = d3.Coordinate('x') d = d3.Distributor(c, dtype=dtype) b = d3.Jacobi(c, size=N, bounds=(0, 1), a=a, b=b, dealias=dealias) - x = b.local_grid(1) + x = d.local_grid(b, scale=1) # Fields u = d.Field(bases=b) tau = d.Field() @@ -59,7 +59,7 @@ def test_heat_ball_nlbvp(Nr, dtype, dealias): d = d3.Distributor((c,)) b = d3.BallBasis(c, (1, 1, Nr), radius=radius, dtype=dtype, dealias=dealias) bs = b.S2_basis(radius=radius) - phi, theta, r = b.local_grids((1, 1, 1)) + phi, theta, r = d.local_grids(b, scales=(1, 1, 1)) # Fields u = d3.Field(name='u', dist=d, bases=(b,), dtype=dtype) τ = d3.Field(name='τ', dist=d, bases=(bs,), dtype=dtype) @@ -98,7 +98,7 @@ def test_lane_emden_floating_amp(Nr, dtype, dealias): d = d3.Distributor((c,)) b = d3.BallBasis(c, (1, 1, Nr), radius=1, dtype=dtype, dealias=dealias) bs = b.S2_basis(radius=1) - phi, theta, r = b.local_grids((1, 1, 1)) + phi, theta, r = d.local_grids(b, scales=(1, 1, 1)) # Fields f = 
d3.Field(dist=d, bases=(b,), dtype=dtype, name='f') τ = d3.Field(dist=d, bases=(bs,), dtype=dtype, name='τ') @@ -149,7 +149,7 @@ def test_lane_emden_floating_R(Nr, dtype, dealias): b = d3.BallBasis(c, (1, 1, Nr), radius=1, dtype=dtype, dealias=dealias) bs = b.S2_basis(radius=1) bs0 = b.S2_basis(radius=0) - phi, theta, r = b.local_grids((1, 1, 1)) + phi, theta, r = d.local_grids(b, scales=(1, 1, 1)) # Fields f = d3.Field(dist=d, bases=(b,), dtype=dtype, name='f') R = d3.Field(dist=d, dtype=dtype, name='R') @@ -205,7 +205,7 @@ def test_lane_emden_first_order(Nr, dtype, dealias): d = d3.Distributor((c,)) b = d3.BallBasis(c, (1, 1, Nr), radius=1, dtype=dtype, dealias=dealias) br = b.radial_basis - phi, theta, r = b.local_grids((1, 1, 1)) + phi, theta, r = d.local_grids(b, scales=(1, 1, 1)) # Fields p = d3.Field(dist=d, bases=(br,), dtype=dtype, name='p') ρ = d3.Field(dist=d, bases=(br,), dtype=dtype, name='ρ') diff --git a/dedalus/tests/test_polar_calculus.py b/dedalus/tests/test_polar_calculus.py index cde0ac72..417a94bc 100644 --- a/dedalus/tests/test_polar_calculus.py +++ b/dedalus/tests/test_polar_calculus.py @@ -18,7 +18,7 @@ def build_disk(Nphi, Nr, dealias, dtype): c = coords.PolarCoordinates('phi', 'r') d = distributor.Distributor((c,)) b = basis.DiskBasis(c, (Nphi, Nr), radius=radius_disk, dealias=(dealias, dealias), dtype=dtype) - phi, r = b.local_grids(b.domain.dealias) + phi, r = d.local_grids(b, scales=b.domain.dealias) x, y = c.cartesian(phi, r) return c, d, b, phi, r, x, y @@ -28,7 +28,7 @@ def build_annulus(Nphi, Nr, dealias, dtype): c = coords.PolarCoordinates('phi', 'r') d = distributor.Distributor((c,)) b = basis.AnnulusBasis(c, (Nphi, Nr), radii=radii_annulus, dealias=(dealias, dealias), dtype=dtype) - phi, r = b.local_grids(b.domain.dealias) + phi, r = d.local_grids(b, scales=b.domain.dealias) x, y = c.cartesian(phi, r) return c, d, b, phi, r, x, y diff --git a/dedalus/tests/test_polar_ncc.py b/dedalus/tests/test_polar_ncc.py index 0a4406d4..eaa7e526 100644 
--- a/dedalus/tests/test_polar_ncc.py +++ b/dedalus/tests/test_polar_ncc.py @@ -20,7 +20,7 @@ def build_disk(Nphi, Nr, dealias, dtype=np.float64): c = coords.PolarCoordinates('phi', 'r') d = distributor.Distributor((c,)) b = basis.DiskBasis(c, (Nphi, Nr), radius=radius_disk[0], dealias=(dealias, dealias), dtype=dtype) - phi, r = b.local_grids() + phi, r = d.local_grids(b) x, y = c.cartesian(phi, r) return c, d, b, phi, r, x, y @@ -29,7 +29,7 @@ def build_annulus(Nphi, Nr, dealias, dtype): c = coords.PolarCoordinates('phi', 'r') d = distributor.Distributor((c,)) b = basis.AnnulusBasis(c, (Nphi, Nr), radii=radii_annulus[0], dealias=(dealias, dealias), dtype=dtype) - phi, r = b.local_grids() + phi, r = d.local_grids(b) x, y = c.cartesian(phi, r) return c, d, b, phi, r, x, y diff --git a/dedalus/tests/test_polar_operators.py b/dedalus/tests/test_polar_operators.py index bba4ab64..c34bb79a 100644 --- a/dedalus/tests/test_polar_operators.py +++ b/dedalus/tests/test_polar_operators.py @@ -21,7 +21,7 @@ def build_disk(Nphi, Nr, k, dealias, dtype): c = coords.PolarCoordinates('phi', 'r') d = distributor.Distributor((c,), dtype=dtype) b = basis.DiskBasis(c, (Nphi, Nr), radius=radius_disk, k=k, dealias=(dealias, dealias), dtype=dtype) - phi, r = b.local_grids(b.domain.dealias) + phi, r = d.local_grids(b, scales=b.domain.dealias) x, y = c.cartesian(phi, r) return c, d, b, phi, r, x, y @@ -31,7 +31,7 @@ def build_annulus(Nphi, Nr, k, dealias, dtype): c = coords.PolarCoordinates('phi', 'r') d = distributor.Distributor((c,), dtype=dtype) b = basis.AnnulusBasis(c, (Nphi, Nr), radii=radii_annulus, k=k, dealias=(dealias, dealias), dtype=dtype) - phi, r = b.local_grids(b.domain.dealias) + phi, r = d.local_grids(b, scales=b.domain.dealias) x, y = c.cartesian(phi, r) return c, d, b, phi, r, x, y diff --git a/dedalus/tests/test_sphere_calculus.py b/dedalus/tests/test_sphere_calculus.py index 277318c3..39b6d076 100644 --- a/dedalus/tests/test_sphere_calculus.py +++ 
b/dedalus/tests/test_sphere_calculus.py @@ -18,7 +18,7 @@ def build_sphere(Nphi, Ntheta, dealias, dtype): c = coords.S2Coordinates('phi', 'theta') d = distributor.Distributor(c, dtype=dtype) b = basis.SphereBasis(c, (Nphi, Ntheta), radius=radius, dealias=(dealias, dealias), dtype=dtype) - phi, theta = b.local_grids(b.domain.dealias) + phi, theta = d.local_grids(b, scales=b.domain.dealias) return c, d, b, phi, theta diff --git a/dedalus/tests/test_sphere_ncc.py b/dedalus/tests/test_sphere_ncc.py index 5fab8349..a8317e03 100644 --- a/dedalus/tests/test_sphere_ncc.py +++ b/dedalus/tests/test_sphere_ncc.py @@ -12,7 +12,7 @@ def build_sphere(Nphi, Ntheta, dealias, dtype): c = coords.S2Coordinates('phi', 'theta') d = distributor.Distributor(c, dtype=dtype) b = basis.SphereBasis(c, (Nphi, Ntheta), radius=1, dealias=(dealias, dealias), dtype=dtype) - phi, theta = b.local_grids() + phi, theta = d.local_grids(b) return c, d, b, phi, theta Nphi_range = [32] @@ -187,11 +187,11 @@ def test_vector_dot_vector(Nphi, Ntheta, ncc_first, dealias, dtype): else: w0 = dot(v, u) w1 = w0.reinitialize(ncc=True, ncc_vars=vars) - problem = problems.LBVP(vars) + problem = problems.LBVP(vars) problem.add_equation((dot(u,u)*v , 0)) solver = solvers.LinearBoundaryValueSolver(problem) w1.store_ncc_matrices(vars, solver.subproblems) - w0 = w0.evaluate() + w0 = w0.evaluate() w0.change_scales(1) w1 = w1.evaluate_as_ncc() assert np.allclose(w0['g'], w1['g']) diff --git a/dedalus/tests/test_spherical_arithmetic.py b/dedalus/tests/test_spherical_arithmetic.py index 21158b98..d5e67508 100644 --- a/dedalus/tests/test_spherical_arithmetic.py +++ b/dedalus/tests/test_spherical_arithmetic.py @@ -19,7 +19,7 @@ def build_ball(Nphi, Ntheta, Nr, dealias, dtype): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,)) b = basis.BallBasis(c, (Nphi, Ntheta, Nr), radius=radius_ball, dealias=(dealias, dealias, dealias), dtype=dtype) - phi, theta, r = b.local_grids() + phi, theta, r = 
d.local_grids(b) x, y, z = c.cartesian(phi, theta, r) return c, d, b, phi, theta, r, x, y, z @@ -29,7 +29,7 @@ def build_shell(Nphi, Ntheta, Nr, dealias, dtype): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,)) b = basis.ShellBasis(c, (Nphi, Ntheta, Nr), radii=radii_shell, dealias=(dealias, dealias, dealias), dtype=dtype) - phi, theta, r = b.local_grids() + phi, theta, r = d.local_grids(b) x, y, z = c.cartesian(phi, theta, r) return c, d, b, phi, theta, r, x, y, z @@ -41,7 +41,7 @@ def test_S2_radial_scalar_scalar_multiplication(Nphi, Ntheta, Nr, dealias): c, d, b, phi, theta, r, x, y, z = build_shell(Nphi, Ntheta, Nr, dealias, np.complex128) f0 = field.Field(dist=d, bases=(b,), dtype=np.complex128) f0.preset_scales(b.domain.dealias) - phi, theta, r = b.local_grids(b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=b.domain.dealias) f0['g'] = (r**2 - 0.5*r**3)*(5*np.cos(theta)**2-1)*np.sin(theta)*np.exp(1j*phi) b_S2 = b.S2_basis() @@ -64,7 +64,7 @@ def test_S2_radial_vector_scalar_multiplication(Nphi, Ntheta, Nr, dealias): c_S2 = c.S2coordsys v0 = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=np.complex128) v0.preset_scales(b.domain.dealias) - phi, theta, r = b.local_grids(b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=b.domain.dealias) v0['g'][0] = (r**2 - 0.5*r**3)*(-1j * np.sin(theta)*np.exp(-2j*phi)) v0['g'][1] = (r**2 - 0.5*r**3)*(np.cos(theta)*np.sin(theta)*np.exp(-2j*phi)) v0['g'][2] = (r**2 - 0.5*r**3)*(5*np.cos(theta)**2-1)*np.sin(theta)*np.exp(1j*phi) @@ -153,7 +153,7 @@ def test_multiply_number_scalar(Nphi, Ntheta, Nr, dealias, basis): f = field.Field(dist=d, bases=(b,), dtype=np.complex128) f['g'] = x**3 + 2*y**3 + 3*z**3 h = (2 * f).evaluate() - phi, theta, r = b.local_grids(b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=b.domain.dealias) x, y, z = c.cartesian(phi, theta, r) hg = 2*(x**3 + 2*y**3 + 3*z**3) assert np.allclose(h['g'], hg) @@ -168,7 +168,7 @@ def 
test_multiply_scalar_number(Nphi, Ntheta, Nr, dealias, basis): f = field.Field(dist=d, bases=(b,), dtype=np.complex128) f['g'] = x**3 + 2*y**3 + 3*z**3 h = (f * 2).evaluate() - phi, theta, r = b.local_grids(b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=b.domain.dealias) x, y, z = c.cartesian(phi, theta, r) hg = 2*(x**3 + 2*y**3 + 3*z**3) assert np.allclose(h['g'], hg) @@ -183,7 +183,7 @@ def test_multiply_scalar_scalar(Nphi, Ntheta, Nr, dealias, basis): f = field.Field(dist=d, bases=(b,), dtype=np.complex128) f['g'] = x**3 + 2*y**3 + 3*z**3 h = (f * f).evaluate() - phi, theta, r = b.local_grids(b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=b.domain.dealias) x, y, z = c.cartesian(phi, theta, r) hg = (x**3 + 2*y**3 + 3*z**3)**2 assert np.allclose(h['g'], hg) @@ -195,7 +195,7 @@ def test_multiply_scalar_scalar(Nphi, Ntheta, Nr, dealias, basis): @pytest.mark.parametrize('basis', [build_ball, build_shell]) def test_multiply_scalar_vector(Nphi, Ntheta, Nr, dealias, basis): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, dealias, np.complex128) - phi, theta, r = b.local_grids(b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=b.domain.dealias) x, y, z = c.cartesian(phi, theta, r) f = field.Field(dist=d, bases=(b,), dtype=np.complex128) f.preset_scales(b.domain.dealias) diff --git a/dedalus/tests/test_spherical_calculus.py b/dedalus/tests/test_spherical_calculus.py index 2bf12873..7c795cca 100644 --- a/dedalus/tests/test_spherical_calculus.py +++ b/dedalus/tests/test_spherical_calculus.py @@ -19,7 +19,7 @@ def build_ball(Nphi, Ntheta, Nr, dealias, dtype): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,)) b = basis.BallBasis(c, (Nphi, Ntheta, Nr), radius=radius_ball, dealias=(dealias, dealias, dealias), dtype=dtype) - phi, theta, r = b.local_grids(b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=b.domain.dealias) x, y, z = c.cartesian(phi, theta, r) return c, d, b, phi, theta, r, 
x, y, z @@ -29,7 +29,7 @@ def build_shell(Nphi, Ntheta, Nr, dealias, dtype): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,)) b = basis.ShellBasis(c, (Nphi, Ntheta, Nr), radii=radii_shell, dealias=(dealias, dealias, dealias), dtype=dtype) - phi, theta, r = b.local_grids(b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=b.domain.dealias) x, y, z = c.cartesian(phi, theta, r) return c, d, b, phi, theta, r, x, y, z diff --git a/dedalus/tests/test_spherical_operators.py b/dedalus/tests/test_spherical_operators.py index bd9e46de..bda59d99 100644 --- a/dedalus/tests/test_spherical_operators.py +++ b/dedalus/tests/test_spherical_operators.py @@ -21,7 +21,7 @@ def build_ball(Nphi, Ntheta, Nr, k, dealias, dtype): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,)) b = basis.BallBasis(c, (Nphi, Ntheta, Nr), radius=radius_ball, k=k, dealias=(dealias, dealias, dealias), dtype=dtype) - phi, theta, r = b.local_grids(b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=b.domain.dealias) x, y, z = c.cartesian(phi, theta, r) return c, d, b, phi, theta, r, x, y, z @@ -31,7 +31,7 @@ def build_shell(Nphi, Ntheta, Nr, k, dealias, dtype): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,)) b = basis.ShellBasis(c, (Nphi, Ntheta, Nr), radii=radii_shell, k=k, dealias=(dealias, dealias, dealias), dtype=dtype) - phi, theta, r = b.local_grids(b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=b.domain.dealias) x, y, z = c.cartesian(phi, theta, r) return c, d, b, phi, theta, r, x, y, z diff --git a/dedalus/tests/test_transforms.py b/dedalus/tests/test_transforms.py index 01930bb7..2e6e799b 100644 --- a/dedalus/tests/test_transforms.py +++ b/dedalus/tests/test_transforms.py @@ -64,7 +64,7 @@ def test_CF_scalar_roundtrip(N, dealias): c = coords.Coordinate('x') d = distributor.Distributor([c]) xb = basis.ComplexFourier(c, size=N, bounds=(0, 1), dealias=dealias) - x 
= xb.local_grid(dealias) + x = d.local_grid(xb, scales=dealias) # Scalar transforms u = field.Field(dist=d, bases=(xb,), dtype=np.complex128) u.preset_scales(dealias) @@ -82,7 +82,7 @@ def test_RF_scalar_roundtrip(N, dealias): c = coords.Coordinate('x') d = distributor.Distributor([c]) xb = basis.RealFourier(c, size=N, bounds=(0, 1), dealias=dealias) - x = xb.local_grid(dealias) + x = d.local_grid(xb, scales=dealias) # Scalar transforms u = field.Field(dist=d, bases=(xb,), dtype=np.float64) u.preset_scales(dealias) @@ -103,7 +103,7 @@ def test_J_scalar_roundtrip(a, b, N, dealias, dtype): c = coords.Coordinate('x') d = distributor.Distributor([c]) xb = basis.Jacobi(c, a=a, b=b, size=N, bounds=(0, 1), dealias=dealias) - x = xb.local_grid(dealias) + x = d.local_grid(xb, scales=dealias) # Scalar transforms u = field.Field(dist=d, bases=(xb,), dtype=dtype) u.preset_scales(dealias) @@ -164,8 +164,8 @@ def build_CF_CF(Nx, Ny, dealias_x, dealias_y): d = distributor.Distributor((c,)) xb = basis.ComplexFourier(c.coords[0], size=Nx, bounds=(0, np.pi), dealias=dealias_x) yb = basis.ComplexFourier(c.coords[1], size=Ny, bounds=(0, np.pi), dealias=dealias_y) - x = xb.local_grid(dealias_x) - y = yb.local_grid(dealias_y) + x = d.local_grid(xb, scales=dealias_x) + y = d.local_grid(yb, scales=dealias_y) return c, d, xb, yb, x, y @@ -188,8 +188,8 @@ def build_RF_RF(Nx, Ny, dealias_x, dealias_y): d = distributor.Distributor((c,)) xb = basis.RealFourier(c.coords[0], size=Nx, bounds=(0, np.pi), dealias=dealias_x) yb = basis.RealFourier(c.coords[1], size=Ny, bounds=(0, np.pi), dealias=dealias_y) - x = xb.local_grid(dealias_x) - y = yb.local_grid(dealias_y) + x = d.local_grid(xb, scales=dealias_x) + y = d.local_grid(yb, scales=dealias_y) return c, d, xb, yb, x, y @@ -212,8 +212,8 @@ def build_CF_J(a, b, Nx, Ny, dealias_x, dealias_y): d = distributor.Distributor((c,)) xb = basis.ComplexFourier(c.coords[0], size=Nx, bounds=(0, np.pi), dealias=dealias_x) yb = basis.Jacobi(c.coords[1], a=a, 
b=b, size=Ny, bounds=(0, 1), dealias=dealias_y) - x = xb.local_grid(dealias_x) - y = yb.local_grid(dealias_y) + x = d.local_grid(xb, scales=dealias_x) + y = d.local_grid(yb, scales=dealias_y) return c, d, xb, yb, x, y @@ -269,7 +269,7 @@ def build_sphere_2d(Nphi, Ntheta, radius, dealias, dtype): c = coords.S2Coordinates('phi', 'theta') d = distributor.Distributor((c,)) b = basis.SphereBasis(c, (Nphi, Ntheta), radius=radius, dealias=(dealias, dealias), dtype=dtype) - phi, theta = b.local_grids((dealias, dealias)) + phi, theta = d.local_grids(b, scales=(dealias, dealias)) return c, d, b, phi, theta @CachedMethod @@ -277,7 +277,7 @@ def build_sphere_3d(Nphi, Ntheta, radius, dealias, dtype): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,)) b = basis.SphereBasis(c, (Nphi, Ntheta), radius=radius, dealias=(dealias, dealias), dtype=dtype) - phi, theta = b.local_grids((dealias, dealias, dealias)) + phi, theta = d.local_grids(b, scales=(dealias, dealias, dealias)) return c, d, b, phi, theta Nphi_range = [16] @@ -404,7 +404,7 @@ def build_annulus(Nphi, Nr, radius, alpha, k, dealias, dtype): @pytest.mark.parametrize('build_basis', [build_annulus, build_disk]) def test_polar_scalar_roundtrip(Nphi, Nr, radius, alpha, k, dealias, dtype, build_basis): c, d, b = build_basis(Nphi, Nr, radius, alpha, k, dealias, dtype) - phi, r = b.local_grids((dealias, dealias)) + phi, r = d.local_grids(b, scales=(dealias, dealias)) f = field.Field(dist=d, bases=(b,), dtype=dtype) f.preset_scales((dealias, dealias)) f['g'] = (r*np.cos(phi))**3 @@ -444,7 +444,7 @@ def test_polar_roundtrip_noise(Nphi, Nr, radius, alpha, k, dealias, dtype, layou @pytest.mark.parametrize('build_basis', [build_annulus, build_disk]) def test_polar_scalar_roundtrip_mmax0(Nr, radius, alpha, k, dealias, dtype, build_basis): c, d, b = build_basis(1, Nr, radius, alpha, k, dealias, dtype) - phi, r = b.local_grids((dealias, dealias)) + phi, r = d.local_grids(b, scales=(dealias, dealias)) f = 
field.Field(dist=d, bases=(b,), dtype=dtype) f.preset_scales((dealias, dealias)) f['g'] = r**4 + 0*phi @@ -463,7 +463,7 @@ def test_polar_scalar_roundtrip_mmax0(Nr, radius, alpha, k, dealias, dtype, buil @pytest.mark.parametrize('build_basis', [build_annulus, build_disk]) def test_polar_vector_roundtrip(Nphi, Nr, radius, alpha, k, dealias, dtype, build_basis): c, d, b = build_basis(Nphi, Nr, radius, alpha, k, dealias, dtype=dtype) - phi, r = b.local_grids((dealias, dealias)) + phi, r = d.local_grids(b, scales=(dealias, dealias)) x = r*np.cos(phi) y = r*np.sin(phi) ex = np.array([-np.sin(phi)+0*r, np.cos(phi)+0*r]) @@ -484,7 +484,7 @@ def test_polar_vector_roundtrip(Nphi, Nr, radius, alpha, k, dealias, dtype, buil @pytest.mark.parametrize('build_basis', [build_annulus, build_disk]) def test_polar_vector_roundtrip_mmax0(Nr, radius, alpha, k, dealias, dtype, build_basis): c, d, b = build_basis(1, Nr, radius, alpha, k, dealias, dtype=dtype) - phi, r = b.local_grids((dealias, dealias)) + phi, r = d.local_grids(b, scales=(dealias, dealias)) f = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) f.preset_scales((dealias, dealias)) f['g'][1] = 6*r**5 + 0*phi @@ -503,7 +503,7 @@ def test_polar_vector_roundtrip_mmax0(Nr, radius, alpha, k, dealias, dtype, buil @pytest.mark.parametrize('build_basis', [build_annulus, build_disk]) def test_polar_tensor_roundtrip(Nphi, Nr, radius, alpha, k, dealias, dtype, build_basis): c, d, b = build_basis(Nphi, Nr, radius, alpha, k, dealias, dtype=dtype) - phi, r = b.local_grids((dealias, dealias)) + phi, r = d.local_grids(b, scales=(dealias, dealias)) x = r*np.cos(phi) ex = np.array([-np.sin(phi)+0*r, np.cos(phi)+0*r]) exex = ex[None, :, ...] * ex[:, None, ...] 
@@ -524,7 +524,7 @@ def test_polar_tensor_roundtrip(Nphi, Nr, radius, alpha, k, dealias, dtype, buil @pytest.mark.parametrize('build_basis', [build_annulus, build_disk]) def test_polar_tensor_roundtrip_mmax0(Nr, radius, alpha, k, dealias, dtype, build_basis): c, d, b = build_basis(1, Nr, radius, alpha, k, dealias, dtype=dtype) - phi, r = b.local_grids((dealias, dealias)) + phi, r = d.local_grids(b, scales=(dealias, dealias)) f = field.Field(dist=d, bases=(b,), tensorsig=(c,c), dtype=dtype) f.preset_scales((dealias, dealias)) f['g'][1,1] = r**2 + 0*phi @@ -540,7 +540,7 @@ def build_shell(Nphi, Ntheta, Nr, radii, alpha, k, dealias, dtype): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,)) b = basis.ShellBasis(c, (Nphi, Ntheta, Nr), radii=radii, alpha=alpha, k=k, dealias=(dealias, dealias, dealias), dtype=dtype) - phi, theta, r = b.local_grids((dealias, dealias, dealias)) + phi, theta, r = d.local_grids(b, scales=(dealias, dealias, dealias)) x = r * np.sin(theta) * np.cos(phi) y = r * np.sin(theta) * np.sin(phi) z = r * np.cos(theta) @@ -605,7 +605,7 @@ def build_ball(Nphi, Ntheta, Nr, radius, alpha, k, dealias, dtype): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,)) b = basis.BallBasis(c, (Nphi, Ntheta, Nr), radius=radius, alpha=alpha, k=k, dealias=(dealias, dealias, dealias), dtype=dtype) - phi, theta, r = b.local_grids((dealias, dealias, dealias)) + phi, theta, r = d.local_grids(b, scales=(dealias, dealias, dealias)) x = r * np.sin(theta) * np.cos(phi) y = r * np.sin(theta) * np.sin(phi) z = r * np.cos(theta) diff --git a/dedalus/tests_parallel/test_output_parallel.py b/dedalus/tests_parallel/test_output_parallel.py index 023b88d6..adfe7838 100644 --- a/dedalus/tests_parallel/test_output_parallel.py +++ b/dedalus/tests_parallel/test_output_parallel.py @@ -24,9 +24,7 @@ def test_cartesian_output(dtype, dealias, output_scales): xb = Fourier(c.coords[0], size=Nx, bounds=(0, Lx), 
dealias=dealias) yb = Fourier(c.coords[1], size=Ny, bounds=(0, Ly), dealias=dealias) zb = Fourier(c.coords[2], size=Nz, bounds=(0, Lz), dealias=dealias) - x = xb.local_grid(1) - y = yb.local_grid(1) - z = zb.local_grid(1) + x, y, z = d.local_grids(xb, yb, zb, scales=1) # Fields u = field.Field(name='u', dist=d, bases=(xb,yb,zb), dtype=dtype) v = field.Field(name='v', dist=d, bases=(xb,yb,zb), tensorsig=(c,), dtype=dtype) @@ -76,9 +74,7 @@ def test_cartesian_output_virtual(dtype, dealias, output_scales): xb = Fourier(c.coords[0], size=Nx, bounds=(0, Lx), dealias=dealias) yb = Fourier(c.coords[1], size=Ny, bounds=(0, Ly), dealias=dealias) zb = Fourier(c.coords[2], size=Nz, bounds=(0, Lz), dealias=dealias) - x = xb.local_grid(1) - y = yb.local_grid(1) - z = zb.local_grid(1) + x, y, z = d.local_grids(xb, yb, zb, scales=1) # Fields u = field.Field(name='u', dist=d, bases=(xb,yb,zb), dtype=dtype) v = field.Field(name='v', dist=d, bases=(xb,yb,zb), tensorsig=(c,), dtype=dtype) @@ -130,9 +126,7 @@ def test_cartesian_output_merged_virtual(dtype, dealias, output_scales): xb = Fourier(c.coords[0], size=Nx, bounds=(0, Lx), dealias=dealias) yb = Fourier(c.coords[1], size=Ny, bounds=(0, Ly), dealias=dealias) zb = Fourier(c.coords[2], size=Nz, bounds=(0, Lz), dealias=dealias) - x = xb.local_grid(1) - y = yb.local_grid(1) - z = zb.local_grid(1) + x, y, z = d.local_grids(xb, yb, zb, scales=1) # Fields u = field.Field(name='u', dist=d, bases=(xb,yb,zb), dtype=dtype) v = field.Field(name='v', dist=d, bases=(xb,yb,zb), tensorsig=(c,), dtype=dtype) @@ -153,7 +147,7 @@ def test_cartesian_output_merged_virtual(dtype, dealias, output_scales): solver.evaluator.evaluate_handlers([output]) # Check solution errors = [] - + post.merge_virtual_analysis('test_output', cleanup=True) d.comm.Barrier() @@ -187,9 +181,7 @@ def test_cartesian_output_merged(dtype, dealias, output_scales): xb = Fourier(c.coords[0], size=Nx, bounds=(0, Lx), dealias=dealias) yb = Fourier(c.coords[1], size=Ny, bounds=(0, 
Ly), dealias=dealias) zb = Fourier(c.coords[2], size=Nz, bounds=(0, Lz), dealias=dealias) - x = xb.local_grid(1) - y = yb.local_grid(1) - z = zb.local_grid(1) + x, y, z = d.local_grids(xb, yb, zb, scales=1) # Fields u = field.Field(name='u', dist=d, bases=(xb,yb,zb), dtype=dtype) v = field.Field(name='v', dist=d, bases=(xb,yb,zb), tensorsig=(c,), dtype=dtype) @@ -210,7 +202,7 @@ def test_cartesian_output_merged(dtype, dealias, output_scales): solver.evaluator.evaluate_handlers([output]) # Check solution errors = [] - + post.merge_analysis('test_output', cleanup=True) d.comm.Barrier() diff --git a/dedalus/tests_parallel/test_spherical3d_arithmetic_parallel.py b/dedalus/tests_parallel/test_spherical3d_arithmetic_parallel.py index 29d57c31..6460cbd6 100644 --- a/dedalus/tests_parallel/test_spherical3d_arithmetic_parallel.py +++ b/dedalus/tests_parallel/test_spherical3d_arithmetic_parallel.py @@ -22,7 +22,7 @@ def build_ball(Nphi, Ntheta, Nr, dtype, dealias, mesh=None): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,), mesh=mesh) b = basis.BallBasis(c, (Nphi, Ntheta, Nr), radius=radius_ball, dtype=dtype, dealias=(dealias, dealias, dealias)) - phi, theta, r = b.local_grids() + phi, theta, r = d.local_grids(b) x, y, z = c.cartesian(phi, theta, r) return c, d, b, phi, theta, r, x, y, z @@ -33,7 +33,7 @@ def build_shell(Nphi, Ntheta, Nr, dtype, dealias, mesh=None): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,), mesh=mesh) b = basis.ShellBasis(c, (Nphi, Ntheta, Nr), radii=radii_shell, dtype=dtype, dealias=(dealias, dealias, dealias)) - phi, theta, r = b.local_grids() + phi, theta, r = d.local_grids(b) x, y, z = c.cartesian(phi, theta, r) return c, d, b, phi, theta, r, x, y, z From c6c33695a90df244c790635e6906074d58577348 Mon Sep 17 00:00:00 2001 From: "Keaton J. 
Burns" Date: Tue, 12 Dec 2023 13:11:56 -0500 Subject: [PATCH 04/19] Distributor removed from bases, tests passing --- dedalus/core/basis.py | 81 +++++++++++----------- dedalus/core/coords.py | 11 ++- dedalus/core/distributor.py | 4 +- dedalus/core/operators.py | 16 ++--- dedalus/tests/test_clenshaw.py | 4 +- dedalus/tests/test_nlbvp.py | 4 +- dedalus/tests/test_polar_calculus.py | 28 ++++---- dedalus/tests/test_polar_operators.py | 50 ++++++------- dedalus/tests/test_sphere_calculus.py | 48 ++++++------- dedalus/tests/test_spherical_arithmetic.py | 26 +++---- dedalus/tests/test_spherical_calculus.py | 29 ++++---- dedalus/tests/test_spherical_operators.py | 56 +++++++-------- dedalus/tests/test_transforms.py | 26 +++---- 13 files changed, 191 insertions(+), 192 deletions(-) diff --git a/dedalus/core/basis.py b/dedalus/core/basis.py index 2b746be2..3d9743ec 100644 --- a/dedalus/core/basis.py +++ b/dedalus/core/basis.py @@ -2618,7 +2618,7 @@ class ConvertPolar(operators.Convert, operators.PolarMOperator): def __init__(self, operand, output_basis, out=None): operators.Convert.__init__(self, operand, output_basis, out=out) - self.radius_axis = dist.last_axis(self) + self.radius_axis = self.dist.last_axis(output_basis) def spinindex_out(self, spinindex_in): return (spinindex_in,) @@ -3040,8 +3040,8 @@ def ell_maps(self, dist): def global_grids(self, dist, scales): first_axis = dist.first_axis(self) - return (self.global_grid_azimuth(scales[first_axis]), - self.global_grid_colatitude(scales[first_axis+1])) + return (self.global_grid_azimuth(dist, scales[first_axis]), + self.global_grid_colatitude(dist, scales[first_axis+1])) def global_grid_colatitude(self, dist, scale): theta = self._native_colatitude_grid(scale) @@ -3874,7 +3874,7 @@ def backward_transform_radius(self, field, axis, cdata, gdata): plan.backward(cdata[i], temp[i], axis) np.copyto(gdata, temp) # Regularity recombination - self.backward_regularity_recombination(field.tensorsig, axis, gdata) + 
self.backward_regularity_recombination(field.tensorsig, axis, gdata, self.ell_maps(field.dist)) # Multiply by radial factor if self.k > 0: gdata *= self.radial_transform_factor(field.scales[axis], data_axis, self.k) @@ -4079,7 +4079,7 @@ def backward_transform_radius(self, field, axis, cdata, gdata): plan.backward(cdata[regindex], temp[regindex], axis) np.copyto(gdata, temp) # Apply recombinations - self.backward_regularity_recombination(field.tensorsig, axis, gdata) + self.backward_regularity_recombination(field.tensorsig, axis, gdata, self.ell_maps(field.dist)) @CachedMethod def operator_matrix(self, op, l, deg, size=None): @@ -4221,9 +4221,9 @@ def __init__(self, coordsys, shape_angular, dealias_angular, radial_basis, dtype self.group_shape = (1, 1, 1) Basis.__init__(self, coordsys) - @CachedAttribute - def ell_reversed(self): - return self.S2_basis().ell_reversed + @CachedMethod + def ell_reversed(self, dist): + return self.S2_basis().ell_reversed(dist) def matrix_dependence(self, matrix_coupling): matrix_dependence = matrix_coupling.copy() @@ -4247,9 +4247,9 @@ def constant_mode_value(self): def global_grids(self, dist, scales): first_axis = dist.first_axis(self) - return (self.global_grid_azimuth(scales[first_axis]), - self.global_grid_colatitude(scales[first_axis+1]), - self.global_grid_radius(scales[first_axis+2])) + return (self.global_grid_azimuth(dist, scales[first_axis]), + self.global_grid_colatitude(dist, scales[first_axis+1]), + self.global_grid_radius(dist, scales[first_axis+2])) def local_grids(self, dist, scales): first_axis = dist.first_axis(self) @@ -4558,7 +4558,7 @@ def backward_transform_radius(self, field, axis, cdata, gdata): gdata *= radial_basis.radial_transform_factor(field.scales[axis], data_axis, self.k) def build_ncc_matrix(self, product, subproblem, ncc_cutoff, max_ncc_terms): - axis = dist.last_axis(self) + axis = product.dist.last_axis(self) ncc_basis = product.ncc.domain.get_basis(axis) if ncc_basis is None: # NCC is constant @@ 
-4600,7 +4600,7 @@ def reg_NCC_matrix(radial_index): # Apply forward Q transformations m = subproblem.group[axis-2] ells = np.arange(abs(m), self.Lmax+1) - if S2_basis.ell_reversed[m]: + if S2_basis.ell_reversed(product.dist)[m]: ells = ells[::-1] ells = tuple(ells) Qout = self.radial_basis.radial_recombinations(product.tensorsig, ells) @@ -4945,7 +4945,7 @@ def __init__(self, operand, coord, position, out=None): operators.Interpolate.__init__(self, operand, coord, position, out=None) def subproblem_matrix(self, subproblem): - m = subproblem.group[dist.last_axis(self) - 1] + m = subproblem.group[self.last_axis - 1] matrix = super().subproblem_matrix(subproblem) radial_basis = self.input_basis if self.tensorsig != (): @@ -4959,7 +4959,7 @@ def operate(self, out): input_basis = self.input_basis output_basis = self.output_basis radial_basis = self.input_basis - axis = dist.last_axis(self) + axis = self.last_axis # Set output layout out.preset_layout(operand.layout) # Apply operator @@ -5009,7 +5009,8 @@ def subproblem_matrix(self, subproblem): radial_basis = self.output_basis ## CHANGED RELATIVE TO POLARMOPERATOR S_in = radial_basis.spin_weights(operand.tensorsig) S_out = radial_basis.spin_weights(self.tensorsig) # Should this use output_basis? 
- m = subproblem.group[dist.last_axis(self) - 1] + radial_axis = self.dist.last_axis(self.output_basis) + m = subproblem.group[radial_axis - 1] # Loop over components submatrices = [] for spinindex_out, spintotal_out in np.ndenumerate(S_out): @@ -5021,7 +5022,7 @@ def subproblem_matrix(self, subproblem): if spinindex_out in self.spinindex_out(spinindex_in): # Substitute factor for radial axis factors = [sparse.eye(i, j, format='csr') for i, j in zip(subshape_out, subshape_in)] - factors[dist.last_axis(self)] = self.radial_matrix(spinindex_in, spinindex_out, m) + factors[radial_axis] = self.radial_matrix(spinindex_in, spinindex_out, m) comp_matrix = reduce(sparse.kron, factors, 1).tocsr() else: # Build zero matrix @@ -5059,7 +5060,7 @@ def subproblem_matrix(self, subproblem): radial_basis = self.output_basis ## CHANGED RELATIVE TO POLARMOPERATOR S_in = radial_basis.spin_weights(operand.tensorsig) S_out = radial_basis.spin_weights(self.tensorsig) # Should this use output_basis? - m = subproblem.group[dist.last_axis(self) - 1] + m = subproblem.group[self.last_axis - 1] # Loop over components submatrices = [] for spinindex_out, spintotal_out in np.ndenumerate(S_out): @@ -5071,7 +5072,7 @@ def subproblem_matrix(self, subproblem): if spinindex_out in self.spinindex_out(spinindex_in): # Substitute factor for radial axis factors = [sparse.eye(i, j, format='csr') for i, j in zip(subshape_out, subshape_in)] - factors[dist.last_axis(self)] = self.radial_matrix(spinindex_in, spinindex_out, m) + factors[self.last_axis] = self.radial_matrix(spinindex_in, spinindex_out, m) comp_matrix = reduce(sparse.kron, factors, 1).tocsr() else: # Build zero matrix @@ -5168,7 +5169,7 @@ def subproblem_matrix(self, subproblem): radial_basis = self.output_basis R_in = radial_basis.regularity_classes(operand.tensorsig) R_out = radial_basis.regularity_classes(self.tensorsig) # Should this use output_basis? 
- ell = subproblem.group[dist.last_axis(self) - 1] + ell = subproblem.group[self.last_axis - 1] # Loop over components submatrices = [] for regindex_out, regtotal_out in np.ndenumerate(R_out): @@ -5181,7 +5182,7 @@ def subproblem_matrix(self, subproblem): if (regindex_out in self.regindex_out(regindex_in)) and radial_basis.regularity_allowed(ell, regindex_in) and radial_basis.regularity_allowed(ell, regindex_out): # Substitute factor for radial axis factors = [sparse.eye(m, n, format='csr') for m, n in zip(subshape_out, subshape_in)] - factors[dist.last_axis(self)] = self.radial_matrix(regindex_in, regindex_out, ell) + factors[self.last_axis] = self.radial_matrix(regindex_in, regindex_out, ell) comp_matrix = reduce(sparse.kron, factors, 1).tocsr() else: # Build zero matrix @@ -5224,11 +5225,11 @@ def regindex_out(self, regindex_in): def subproblem_matrix(self, subproblem): matrix = super().subproblem_matrix(subproblem) # Get relevant Qs - m = subproblem.group[dist.last_axis(self) - 2] - ell = subproblem.group[dist.last_axis(self) - 1] + m = subproblem.group[self.last_axis - 2] + ell = subproblem.group[self.last_axis - 1] if ell is None: ell_list = np.arange(abs(m), self.input_basis.Lmax + 1) - if self.input_basis.ell_reversed[m]: + if self.input_basis.ell_reversed(self.dist)[m]: ell_list = ell_list[::-1] else: ell_list = [ell] @@ -5599,7 +5600,7 @@ def _output_basis(input_basis, position): def check_conditions(self): """Check that arguments are in a proper layout.""" arg0 = self.args[0] - azimuth_axis = dist.first_axis(self) + azimuth_axis = self.first_axis # Require grid space and locality along azimuthal axis is_grid = arg0.layout.grid_space[azimuth_axis] is_local = arg0.layout.local[azimuth_axis] @@ -5608,7 +5609,7 @@ def check_conditions(self): def enforce_conditions(self): """Require arguments to be in a proper layout.""" arg0 = self.args[0] - azimuth_axis = dist.first_axis(self) + azimuth_axis = self.first_axis # Require grid space and locality along 
azimuthal axis arg0.require_grid_space(azimuth_axis) arg0.require_local(azimuth_axis) @@ -5637,9 +5638,9 @@ def operate(self, out): # Set output layout out.preset_layout(layout) # Set output lock - out.lock_axis_to_grid(dist.first_axis(self)) + out.lock_axis_to_grid(self.first_axis) # Apply matrix - data_axis = dist.first_axis(self) + len(arg.tensorsig) + data_axis = self.first_axis + len(arg.tensorsig) apply_matrix(self.interpolation_vector(), arg.data, data_axis, out=out.data) @@ -5667,7 +5668,7 @@ def _output_basis(input_basis, position): def check_conditions(self): """Check that arguments are in a proper layout.""" arg0 = self.args[0] - azimuth_axis = dist.first_axis(self) + azimuth_axis = self.first_axis colat_axis = azimuth_axis + 1 # Require azimuth coeff, colat grid, colat, local az_coeff = not arg0.layout.grid_space[azimuth_axis] @@ -5678,7 +5679,7 @@ def check_conditions(self): def enforce_conditions(self): """Require arguments to be in a proper layout.""" arg0 = self.args[0] - azimuth_axis = dist.first_axis(self) + azimuth_axis = self.first_axis colat_axis = azimuth_axis + 1 # Require azimuth coeff, colat grid, colat, local arg0.require_coeff_space(azimuth_axis) @@ -5718,7 +5719,7 @@ def operate(self, out): arg = self.args[0] basis = self.sphere_basis layout = arg.layout - azimuth_axis = dist.first_axis(self) + azimuth_axis = self.first_axis colat_axis = azimuth_axis + 1 Ntheta = arg.data.shape[len(arg.tensorsig) + colat_axis] # Set output layout @@ -5854,11 +5855,11 @@ def _output_basis(input_basis, position): def subproblem_matrix(self, subproblem): matrix = super().subproblem_matrix(subproblem) # Get relevant Qs - m = subproblem.group[dist.last_axis(self) - 2] - ell = subproblem.group[dist.last_axis(self) - 1] + m = subproblem.group[self.last_axis - 2] + ell = subproblem.group[self.last_axis - 1] if ell is None: ell_list = np.arange(abs(m), self.input_basis.Lmax + 1) - if self.input_basis.ell_reversed[m]: + if 
self.input_basis.ell_reversed(self.dist)[m]: ell_list = ell_list[::-1] else: ell_list = [ell] @@ -6139,14 +6140,14 @@ def cfl_spacing(self): #Assumes velocity is a 2-length vector over polar coordinates basis = self.input_basis dealias = basis.dealias - azimuth_spacing = basis.local_grid_spacing(0, scales=dealias) + azimuth_spacing = basis.local_grid_spacing(self.dist, 0, scales=dealias) if basis.mmax == 0: azimuth_spacing[:] = np.inf elif isinstance(basis, DiskBasis): azimuth_spacing[:] = basis.radius / basis.mmax elif isinstance(basis, AnnulusBasis): - azimuth_spacing = basis.local_grid_radius(dealias[1]) / basis.mmax - radial_spacing = dealias[1] * basis.local_grid_spacing(1, scales=dealias) + azimuth_spacing = basis.local_grid_radius(self.dist, dealias[1]) / basis.mmax + radial_spacing = dealias[1] * basis.local_grid_spacing(self.dist, 1, scales=dealias) return [azimuth_spacing, radial_spacing] def compute_cfl_frequency(self, velocity, out): @@ -6167,7 +6168,7 @@ def cfl_spacing(self, r=None): basis = self.input_basis dealias = basis.dealias if r is None: r = basis.radius - s2_spacing = basis.local_grid_spacing(0, scales=dealias) + s2_spacing = basis.local_grid_spacing(self.dist, 0, scales=dealias) if basis.Lmax == 0: s2_spacing[:] = np.inf else: @@ -6197,8 +6198,8 @@ def cfl_spacing(self): spacings = S2AdvectiveCFL.cfl_spacing(self, r=basis.radial_basis.radius) elif isinstance(basis, ShellBasis): spacings = S2AdvectiveCFL.cfl_spacing(self, r=1) - spacings[0] = spacings[0] * basis.local_grid_radius(dealias[2]) #get proper radial scaling for shell - spacings.append(basis.local_grid_spacing(2, scales=dealias) * dealias[2]) + spacings[0] = spacings[0] * basis.local_grid_radius(self.dist, dealias[2]) #get proper radial scaling for shell + spacings.append(basis.local_grid_spacing(self.dist, 2, scales=dealias) * dealias[2]) return spacings def compute_cfl_frequency(self, velocity, out): diff --git a/dedalus/core/coords.py b/dedalus/core/coords.py index 
9e7eb77b..d20ac115 100644 --- a/dedalus/core/coords.py +++ b/dedalus/core/coords.py @@ -67,13 +67,10 @@ def check_bounds(self, bounds): class DirectProduct(CoordinateSystem): - def __init__(self, *coords): - print(coords) - self.coords = coords - self.dim = sum(coord.dim for coord in coords) - - - + def __init__(self, *coordsystems): + self.coordsystems = coordsystems + self.coords = sum((cs.coords for cs in coordsystems), ()) + self.dim = sum(cs.dim for cs in coordsystems) class CartesianCoordinates(CoordinateSystem): diff --git a/dedalus/core/distributor.py b/dedalus/core/distributor.py index 655b6890..228cc62b 100644 --- a/dedalus/core/distributor.py +++ b/dedalus/core/distributor.py @@ -82,7 +82,7 @@ def __init__(self, coordsystems, comm=None, mesh=None, dtype=None): else: self.single_coordsys = False # Get coords - self.coords = tuple([coord for coordsys in coordsystems for coord in coordsys.coords]) + self.coords = sum((cs.coords for cs in coordsystems), ()) self.coordsystems = coordsystems # Defaults if comm is None: @@ -185,7 +185,7 @@ def remedy_scales(self, scales): """Remedy different scale inputs.""" if scales is None: scales = 1 - if not hasattr(scales, "__len__"): + if not isinstance(scales, (list, tuple)): scales = [scales] * self.dim if 0 in scales: raise ValueError("Scales must be nonzero.") diff --git a/dedalus/core/operators.py b/dedalus/core/operators.py index 748c552f..8d6957f3 100644 --- a/dedalus/core/operators.py +++ b/dedalus/core/operators.py @@ -1753,7 +1753,7 @@ class SphericalTrace(Trace): def __init__(self, *args, **kw): super().__init__(*args, **kw) - self.radius_axis = self.coordsys.coords[2].axis + self.radius_axis = self.dist.get_axis(self.coordsys.coords[2]) self.radial_basis = self.input_basis.get_radial_basis() def subproblem_matrix(self, subproblem): @@ -1770,7 +1770,7 @@ def subproblem_matrix(self, subproblem): # Stack ells if ell is None: ell_list = np.arange(np.abs(m), input_basis.Lmax+1) - if input_basis.ell_reversed[m]: + 
if input_basis.ell_reversed(self.dist)[m]: ell_list = ell_list[::-1] else: ell_list = [ell] @@ -1799,7 +1799,7 @@ class CylindricalTrace(Trace): def __init__(self, *args, **kw): super().__init__(*args, **kw) - self.radius_axis = self.coordsys.coords[1].axis + self.radius_axis = self.dist.get_axis(self.coordsys.coords[1]) def subproblem_matrix(self, subproblem): m = subproblem.group[self.radius_axis - 1] @@ -1941,7 +1941,7 @@ class SphericalTransposeComponents(TransposeComponents): def __init__(self, *args, **kw): super().__init__(*args, **kw) - self.radius_axis = self.coordsys.coords[2].axis + self.radius_axis = self.dist.get_axis(self.coordsys.coords[2]) self.radial_basis = self.input_basis.get_radial_basis() def subproblem_matrix(self, subproblem): @@ -1954,7 +1954,7 @@ def subproblem_matrix(self, subproblem): # Stack ells if ell is None: ell_list = np.arange(np.abs(m), input_basis.Lmax+1) - if input_basis.ell_reversed[m]: + if input_basis.ell_reversed(self.dist)[m]: ell_list = ell_list[::-1] else: ell_list = [ell] @@ -2786,7 +2786,7 @@ class PolarMOperator(SpectralOperator): def __init__(self, operand, coordsys): self.coordsys = coordsys - self.radius_axis = coordsys.coords[1].axis + self.radius_axis = self.dist.get_axis(coordsys.coords[1]) input_basis = operand.domain.get_basis(coordsys) if input_basis is None: input_basis = operand.domain.get_basis(coordsys.radius) @@ -2847,7 +2847,7 @@ def subproblem_matrix(self, subproblem): factors = [sparse.eye(i, j, format='csr') for i, j in zip(subshape_out, subshape_in)] radial_matrix = self.radial_matrix(spinindex_in, spinindex_out, m) # Reverse matrices to match memory order for flipped groups - if radial_basis.ell_reversed[m]: + if radial_basis.ell_reversed(self.dist)[m]: radial_matrix = radial_matrix[::-1, ::-1] factors[self.last_axis] = radial_matrix comp_matrix = reduce(sparse.kron, factors, 1).tocsr() @@ -3081,7 +3081,7 @@ def _coupled_ell_matrices(self, regindex_in, regindex_out, m): # Get ordered list of ells 
basis = self.S2_basis ell_list = np.arange(np.abs(m), basis.Lmax+1) - if basis.ell_reversed[m]: + if basis.ell_reversed(self.dist)[m]: ell_list = ell_list[::-1] # Assemble block-diagonal matrix over ells ell_matrices = [self._wrap_radial_matrix(regindex_in, regindex_out, ell, return_zeros=True) for ell in ell_list] diff --git a/dedalus/tests/test_clenshaw.py b/dedalus/tests/test_clenshaw.py index 90cdaa52..ce538831 100644 --- a/dedalus/tests/test_clenshaw.py +++ b/dedalus/tests/test_clenshaw.py @@ -29,7 +29,7 @@ def test_ball_clenshaw_scalar(N, regtotal_in, k_ncc, k_arg, ell, dtype): d = d3.Distributor(c, dtype=dtype) ncc_basis = d3.BallBasis(c, (1, 1, N), dtype=dtype, k=k_ncc, radius=1) arg_basis = d3.BallBasis(c, (N, N, N), dtype=dtype, k=k_arg, radius=1) - phi, theta, r = arg_basis.local_grids((1, 1, 1)) + phi, theta, r = d.local_grids(arg_basis, scales=(1, 1, 1)) # Setup NCC to match Z operator ncc = d.Field(bases=ncc_basis.radial_basis) # TODO: cleanup when radial bases are fixed ncc['g'] = 2*r**2-1 @@ -59,7 +59,7 @@ def test_ball_clenshaw_vector(N, regtotal_in, k_ncc, k_arg, ell, dtype): d = d3.Distributor(c, dtype=dtype) ncc_basis = d3.BallBasis(c, (1, 1, N), dtype=dtype, k=k_ncc, radius=1) arg_basis = d3.BallBasis(c, (N, N, N), dtype=dtype, k=k_arg, radius=1) - phi, theta, r = arg_basis.local_grids((1, 1, 1)) + phi, theta, r = d.local_grids(arg_basis, scales=(1, 1, 1)) # Setup NCC to match Z operator ncc = d.VectorField(c, bases=ncc_basis.radial_basis) # TODO: cleanup when radial bases are fixed ncc['g'][2] = r*(2*r**2-1) diff --git a/dedalus/tests/test_nlbvp.py b/dedalus/tests/test_nlbvp.py index c7d31a6a..6d0446f3 100644 --- a/dedalus/tests/test_nlbvp.py +++ b/dedalus/tests/test_nlbvp.py @@ -19,7 +19,7 @@ def test_sin_jacobi(N, a, b, dealias, dtype): c = d3.Coordinate('x') d = d3.Distributor(c, dtype=dtype) b = d3.Jacobi(c, size=N, bounds=(0, 1), a=a, b=b, dealias=dealias) - x = d.local_grid(b, scales=1) + x = d.local_grid(b, scale=1) # Fields u = 
d.Field(bases=b) tau = d.Field() @@ -149,7 +149,7 @@ def test_lane_emden_floating_R(Nr, dtype, dealias): b = d3.BallBasis(c, (1, 1, Nr), radius=1, dtype=dtype, dealias=dealias) bs = b.S2_basis(radius=1) bs0 = b.S2_basis(radius=0) - phi, theta, r = ds(b, scales=(1, 1, 1)) + phi, theta, r = d.local_grids(b, scales=(1, 1, 1)) # Fields f = d3.Field(dist=d, bases=(b,), dtype=dtype, name='f') R = d3.Field(dist=d, dtype=dtype, name='R') diff --git a/dedalus/tests/test_polar_calculus.py b/dedalus/tests/test_polar_calculus.py index 417a94bc..da7f430c 100644 --- a/dedalus/tests/test_polar_calculus.py +++ b/dedalus/tests/test_polar_calculus.py @@ -18,7 +18,7 @@ def build_disk(Nphi, Nr, dealias, dtype): c = coords.PolarCoordinates('phi', 'r') d = distributor.Distributor((c,)) b = basis.DiskBasis(c, (Nphi, Nr), radius=radius_disk, dealias=(dealias, dealias), dtype=dtype) - phi, r = d.local_grids(b, scales=b.domain.dealias) + phi, r = d.local_grids(b, scales=dealias) x, y = c.cartesian(phi, r) return c, d, b, phi, r, x, y @@ -28,7 +28,7 @@ def build_annulus(Nphi, Nr, dealias, dtype): c = coords.PolarCoordinates('phi', 'r') d = distributor.Distributor((c,)) b = basis.AnnulusBasis(c, (Nphi, Nr), radii=radii_annulus, dealias=(dealias, dealias), dtype=dtype) - phi, r = d.local_grids(b, scales=b.domain.dealias) + phi, r = d.local_grids(b, scales=dealias) x, y = c.cartesian(phi, r) return c, d, b, phi, r, x, y @@ -41,7 +41,7 @@ def build_annulus(Nphi, Nr, dealias, dtype): def test_gradient_scalar(Nphi, Nr, dealias, basis, dtype): c, d, b, phi, r, x, y = basis(Nphi, Nr, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = 3*x**2 + 2*y u = operators.Gradient(f, c).evaluate() ex = np.array([-np.sin(phi)+0.*r,np.cos(phi)+0.*r]) @@ -58,7 +58,7 @@ def test_gradient_radial_scalar(Nr, dealias, basis, dtype): Nphi = 1 c, d, b, phi, r, x, y = basis(Nphi, Nr, dealias, dtype) f = field.Field(dist=d, bases=(b,), 
dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = r**4 u = operators.Gradient(f, c).evaluate() ug = [0*r*phi, 4*r**3 + 0*phi] @@ -73,7 +73,7 @@ def test_gradient_radial_scalar(Nr, dealias, basis, dtype): def test_gradient_vector(Nphi, Nr, dealias, basis, dtype): c, d, b, phi, r, x, y = basis(Nphi, Nr, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = 3*x**4 + 2*y*x grad = lambda A: operators.Gradient(A, c) T = grad(grad(f)).evaluate() @@ -95,7 +95,7 @@ def test_gradient_radial_vector(Nr, dealias, basis, dtype): Nphi = 1 c, d, b, phi, r, x, y = basis(Nphi, Nr, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = r**4 grad = lambda A: operators.Gradient(A, c) T = grad(grad(f)).evaluate() @@ -115,7 +115,7 @@ def test_gradient_radial_vector(Nr, dealias, basis, dtype): def test_divergence_vector(Nphi, Nr, dealias, basis, dtype): c, d, b, phi, r, x, y = basis(Nphi, Nr, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = 3*x**4 + 2*y*x grad = lambda A: operators.Gradient(A, c) div = lambda A: operators.Divergence(A) @@ -132,7 +132,7 @@ def test_divergence_radial_vector(Nr, dealias, basis, dtype): Nphi = 1 c, d, b, phi, r, x, y = basis(Nphi, Nr, dealias, dtype=dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = r**2 grad = lambda A: operators.Gradient(A, c) div = lambda A: operators.Divergence(A) @@ -149,7 +149,7 @@ def test_divergence_radial_vector(Nr, dealias, basis, dtype): def test_divergence_tensor(Nphi, Nr, dealias, basis, dtype): c, d, b, phi, r, x, y = basis(Nphi, Nr, dealias, dtype) v = field.Field(dist=d, tensorsig=(c,), bases=(b,), dtype=dtype) - v.preset_scales(b.domain.dealias) + 
v.preset_scales(dealias) ex = np.array([-np.sin(phi)+0.*r,np.cos(phi)+0.*r]) ey = np.array([np.cos(phi)+0.*r,np.sin(phi)+0.*r]) v['g'] = 4*x**3*ey + 3*y**2*ey @@ -168,7 +168,7 @@ def test_divergence_tensor(Nphi, Nr, dealias, basis, dtype): def test_curl_vector(Nphi, Nr, dealias, basis, dtype): c, d, b, phi, r, x, y = basis(Nphi, Nr, dealias, dtype) v = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - v.preset_scales(b.domain.dealias) + v.preset_scales(dealias) ex = np.array([-np.sin(phi)+0.*r,np.cos(phi)+0.*r]) ey = np.array([np.cos(phi)+0.*r,np.sin(phi)+0.*r]) v['g'] = 4*x**3*ey + 3*y**2*ey @@ -185,7 +185,7 @@ def test_curl_vector(Nphi, Nr, dealias, basis, dtype): def test_laplacian_scalar(Nphi, Nr, dealias, basis, dtype): c, d, b, phi, r, x, y = basis(Nphi, Nr, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = x**4 + 2*y**4 h = operators.Laplacian(f, c).evaluate() hg = 12*x**2+24*y**2 @@ -200,7 +200,7 @@ def test_laplacian_radial_scalar(Nr, dealias, basis, dtype): Nphi = 1 c, d, b, phi, r, x, y = basis(Nphi, Nr, dealias, dtype=dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = r**2 h = operators.Laplacian(f, c).evaluate() hg = 4 @@ -215,7 +215,7 @@ def test_laplacian_radial_scalar(Nr, dealias, basis, dtype): def test_laplacian_vector(Nphi, Nr, dealias, basis, dtype): c, d, b, phi, r, x, y = basis(Nphi, Nr, dealias, dtype) v = field.Field(dist=d, tensorsig=(c,), bases=(b,), dtype=dtype) - v.preset_scales(b.domain.dealias) + v.preset_scales(dealias) ex = np.array([-np.sin(phi)+0.*r,np.cos(phi)+0.*r]) ey = np.array([np.cos(phi)+0.*r,np.sin(phi)+0.*r]) v['g'] = 4*x**3*ey + 3*y**2*ey @@ -232,7 +232,7 @@ def test_laplacian_radial_vector(Nr, dealias, basis, dtype): Nphi = 1 c, d, b, phi, r, x, y = basis(Nphi, Nr, dealias, dtype=dtype) u = field.Field(dist=d, bases=(b,), tensorsig=(c,), 
dtype=dtype) - u.preset_scales(b.domain.dealias) + u.preset_scales(dealias) u['g'][1] = 4 * r**3 v = operators.Laplacian(u, c).evaluate() vg = 0 * v['g'] diff --git a/dedalus/tests/test_polar_operators.py b/dedalus/tests/test_polar_operators.py index c34bb79a..d74243b6 100644 --- a/dedalus/tests/test_polar_operators.py +++ b/dedalus/tests/test_polar_operators.py @@ -21,7 +21,7 @@ def build_disk(Nphi, Nr, k, dealias, dtype): c = coords.PolarCoordinates('phi', 'r') d = distributor.Distributor((c,), dtype=dtype) b = basis.DiskBasis(c, (Nphi, Nr), radius=radius_disk, k=k, dealias=(dealias, dealias), dtype=dtype) - phi, r = d.local_grids(b, scales=b.domain.dealias) + phi, r = d.local_grids(b, scales=dealias) x, y = c.cartesian(phi, r) return c, d, b, phi, r, x, y @@ -31,7 +31,7 @@ def build_annulus(Nphi, Nr, k, dealias, dtype): c = coords.PolarCoordinates('phi', 'r') d = distributor.Distributor((c,), dtype=dtype) b = basis.AnnulusBasis(c, (Nphi, Nr), radii=radii_annulus, k=k, dealias=(dealias, dealias), dtype=dtype) - phi, r = d.local_grids(b, scales=b.domain.dealias) + phi, r = d.local_grids(b, scales=dealias) x, y = c.cartesian(phi, r) return c, d, b, phi, r, x, y @@ -47,13 +47,13 @@ def build_annulus(Nphi, Nr, k, dealias, dtype): # c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) # f = field.Field(dist=d, bases=(b,), dtype=dtype) # g = field.Field(dist=d, bases=(b,), dtype=dtype) -# f.preset_scales(b.domain.dealias) +# f.preset_scales(dealias) # f['g'] = 3*x**2 + 2*y*z # for ell, m_ind, ell_ind in b.ell_maps: # g['c'][m_ind, ell_ind, :] = (ell+3)*f['c'][m_ind, ell_ind, :] # func = lambda ell: ell+3 # h = operators.SphericalEllProduct(f, c, func).evaluate() -# g.preset_scales(b.domain.dealias) +# g.preset_scales(dealias) # assert np.allclose(h['g'], g['g']) @@ -67,14 +67,14 @@ def build_annulus(Nphi, Nr, k, dealias, dtype): # def test_spherical_ell_product_vector(Nphi, Ntheta, Nr, k, dealias, basis, dtype): # c, d, b, phi, theta, r, x, y, z 
= basis(Nphi, Ntheta, Nr, k, dealias, dtype) # f = field.Field(dist=d, bases=(b,), dtype=dtype) -# f.preset_scales(b.domain.dealias) +# f.preset_scales(dealias) # f['g'] = 3*x**2 + 2*y*z # u = operators.Gradient(f, c).evaluate() # uk0 = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) -# uk0.preset_scales(b.domain.dealias) +# uk0.preset_scales(dealias) # uk0['g'] = u['g'] # v = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) -# v.preset_scales(b.domain.dealias) +# v.preset_scales(dealias) # for ell, m_ind, ell_ind in b.ell_maps: # v['c'][0, m_ind, ell_ind, :] = (ell+2)*uk0['c'][0, m_ind, ell_ind, :] # v['c'][1, m_ind, ell_ind, :] = (ell+4)*uk0['c'][1, m_ind, ell_ind, :] @@ -123,7 +123,7 @@ def test_convert_constant_tensor(Nphi, Nr, k, dealias, basis, dtype): def test_convert_scalar(Nphi, Nr, k, dealias, basis, dtype, layout): c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = 3*x**2 + 2*y g = operators.Laplacian(f, c).evaluate() f.change_layout(layout) @@ -191,8 +191,8 @@ def test_skew_implicit(basis, Nphi, Nr, k, dealias, dtype): problem.add_equation("skew(u) = skew(f)") solver = problem.build_solver() solver.solve() - u.change_scales(b.domain.dealias) - f.change_scales(b.domain.dealias) + u.change_scales(dealias) + f.change_scales(dealias) assert np.allclose(u['g'], f['g']) @@ -206,7 +206,7 @@ def test_skew_implicit(basis, Nphi, Nr, k, dealias, dtype): def test_explicit_trace_tensor(Nphi, Nr, k, dealias, basis, dtype, layout): c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) u = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - u.preset_scales(b.domain.dealias) + u.preset_scales(dealias) ex = np.array([-np.sin(phi)+0.*r,np.cos(phi)+0.*r]) ey = np.array([np.cos(phi)+0.*r,np.sin(phi)+0.*r]) u['g'] = 4*x**3*ey + 3*y**2*ey @@ -276,8 +276,8 @@ def test_transpose_implicit(basis, Nphi, Nr, k, 
dealias, dtype): problem.add_equation("trans(u) = trans(f)") solver = problem.build_solver() solver.solve() - u.change_scales(b.domain.dealias) - f.change_scales(b.domain.dealias) + u.change_scales(dealias) + f.change_scales(dealias) assert np.allclose(u['g'], f['g']) @@ -290,7 +290,7 @@ def test_transpose_implicit(basis, Nphi, Nr, k, dealias, dtype): def test_azimuthal_average_scalar(Nphi, Nr, k, dealias, dtype, basis): c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = r**2 + x h = operators.Average(f, c.coords[0]).evaluate() hg = r**2 @@ -307,7 +307,7 @@ def test_azimuthal_average_scalar(Nphi, Nr, k, dealias, dtype, basis): def test_integrate_scalar(Nphi, Nr, k, dealias, dtype, basis, n): c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = r**(2*n) h = operators.Integrate(f, c).evaluate() if isinstance(b, DiskBasis): @@ -328,7 +328,7 @@ def test_integrate_scalar(Nphi, Nr, k, dealias, dtype, basis, n): def test_interpolate_azimuth_scalar(Nphi, Nr, k, dealias, basis, dtype, phi_interp): c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = x**4 + 2*y**4 h = operators.interpolate(f, phi=phi_interp).evaluate() x, y = c.cartesian(np.array([[phi_interp]]), r) @@ -346,7 +346,7 @@ def test_interpolate_azimuth_scalar(Nphi, Nr, k, dealias, basis, dtype, phi_inte def test_interpolate_radius_scalar(Nphi, Nr, k, dealias, basis, dtype, r_interp): c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = x**4 + 2*y**4 h = operators.interpolate(f, r=r_interp).evaluate() x, y = 
c.cartesian(phi, np.array([[r_interp]])) @@ -364,7 +364,7 @@ def test_interpolate_radius_scalar(Nphi, Nr, k, dealias, basis, dtype, r_interp) def test_interpolate_azimuth_vector(Nphi, Nr, k, dealias, basis, dtype, phi_interp): c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = x**4 + 2*y**4 u = operators.Gradient(f, c) v = u(phi=phi_interp).evaluate() @@ -386,7 +386,7 @@ def test_interpolate_azimuth_vector(Nphi, Nr, k, dealias, basis, dtype, phi_inte def test_interpolate_radius_vector(Nphi, Nr, k, dealias, basis, dtype, r_interp): c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = x**4 + 2*y**4 u = operators.Gradient(f, c) v = u(r=r_interp).evaluate() @@ -408,7 +408,7 @@ def test_interpolate_radius_vector(Nphi, Nr, k, dealias, basis, dtype, r_interp) def test_interpolate_azimuth_tensor(Nphi, Nr, k, dealias, basis, dtype, phi_interp): c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = x**4 + 2*y**4 u = operators.Gradient(f, c) T = operators.Gradient(u, c) @@ -433,7 +433,7 @@ def test_interpolate_azimuth_tensor(Nphi, Nr, k, dealias, basis, dtype, phi_inte def test_interpolate_radius_tensor(Nphi, Nr, k, dealias, basis, dtype, r_interp): c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = x**4 + 2*y**4 u = operators.Gradient(f, c) T = operators.Gradient(u, c) @@ -459,7 +459,7 @@ def test_radial_component_vector(Nphi, Nr, k, dealias, dtype, basis, radius): c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) cp, sp = np.cos(phi), np.sin(phi) u = 
field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - u.preset_scales(b.domain.dealias) + u.preset_scales(dealias) ex = np.array([-np.sin(phi), np.cos(phi)]) ey = np.array([np.cos(phi), np.sin(phi)]) u['g'] = (x**2*y - 2*x*y**5)*ex + (x**2*y + 7*x**3*y**2)*ey @@ -479,7 +479,7 @@ def test_radial_component_tensor(Nphi, Nr, k, dealias, dtype, basis, radius): c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) cp, sp = np.cos(phi), np.sin(phi) T = field.Field(dist=d, bases=(b,), tensorsig=(c,c), dtype=dtype) - T.preset_scales(b.domain.dealias) + T.preset_scales(dealias) ex = np.array([-np.sin(phi), np.cos(phi)]) ey = np.array([np.cos(phi), np.sin(phi)]) exex = ex[:,None, ...] * ex[None,...] @@ -503,7 +503,7 @@ def test_azimuthal_component_vector(Nphi, Nr, k, dealias, dtype, basis, radius): c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) cp, sp = np.cos(phi), np.sin(phi) u = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - u.preset_scales(b.domain.dealias) + u.preset_scales(dealias) ex = np.array([-np.sin(phi), np.cos(phi)]) ey = np.array([np.cos(phi), np.sin(phi)]) u['g'] = (x**2*y - 2*x*y**5)*ex + (x**2*y + 7*x**3*y**2)*ey @@ -523,7 +523,7 @@ def test_azimuthal_component_tensor(Nphi, Nr, k, dealias, dtype, basis, radius): c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) cp, sp = np.cos(phi), np.sin(phi) T = field.Field(dist=d, bases=(b,), tensorsig=(c,c), dtype=dtype) - T.preset_scales(b.domain.dealias) + T.preset_scales(dealias) ex = np.array([-np.sin(phi), np.cos(phi)]) ey = np.array([np.cos(phi), np.sin(phi)]) exex = ex[:,None, ...] * ex[None,...] 
diff --git a/dedalus/tests/test_sphere_calculus.py b/dedalus/tests/test_sphere_calculus.py index 39b6d076..6812b086 100644 --- a/dedalus/tests/test_sphere_calculus.py +++ b/dedalus/tests/test_sphere_calculus.py @@ -18,7 +18,7 @@ def build_sphere(Nphi, Ntheta, dealias, dtype): c = coords.S2Coordinates('phi', 'theta') d = distributor.Distributor(c, dtype=dtype) b = basis.SphereBasis(c, (Nphi, Ntheta), radius=radius, dealias=(dealias, dealias), dtype=dtype) - phi, theta = d.local_grids(b, scales=b.domain.dealias) + phi, theta = d.local_grids(b, scales=dealias) return c, d, b, phi, theta @@ -56,8 +56,8 @@ def test_skew_implicit(Nphi, Ntheta, dealias, dtype): problem.add_equation("skew(u) = skew(f)") solver = problem.build_solver() solver.solve() - u.change_scales(b.domain.dealias) - f.change_scales(b.domain.dealias) + u.change_scales(dealias) + f.change_scales(dealias) assert np.allclose(u['g'], f['g']) @@ -94,8 +94,8 @@ def test_transpose_implicit(Nphi, Ntheta, dealias, dtype): problem.add_equation("trans(u) = trans(f)") solver = problem.build_solver() solver.solve() - u.change_scales(b.domain.dealias) - f.change_scales(b.domain.dealias) + u.change_scales(dealias) + f.change_scales(dealias) assert np.allclose(u['g'], f['g']) @@ -122,7 +122,7 @@ def test_convert_constant_scalar_explicit(Nphi, Ntheta, dealias, dtype): def test_sphere_average_scalar_explicit(Nphi, Ntheta, dealias, dtype): c, d, b, phi, theta = build_sphere(Nphi, Ntheta, dealias, dtype) f = d.Field(bases=b) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) x = np.sin(theta)*np.cos(phi) y = np.sin(theta)*np.sin(phi) z = np.cos(theta) @@ -155,7 +155,7 @@ def test_gradient_scalar_explicit(Nphi, Ntheta, dealias, dtype): # Spherical harmonic input m, l = 2, 2 f = d.Field(bases=b) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) if np.iscomplexobj(dtype()): f['g'] = sph_harm(m, l, phi, theta) else: @@ -183,8 +183,8 @@ def test_cosine_explicit(Nphi, Ntheta, dealias, dtype, rank): 
f.low_pass_filter(scales=0.75) # Cosine operator g = operators.MulCosine(f).evaluate() - g.change_scales(b.domain.dealias) - f.change_scales(b.domain.dealias) + g.change_scales(dealias) + f.change_scales(dealias) assert np.allclose(g['g'], np.cos(theta) * f['g']) @@ -205,8 +205,8 @@ def test_cosine_implicit(Nphi, Ntheta, dealias, dtype, rank): problem.add_equation("u + MulCosine(u) = f + MulCosine(f)") solver = problem.build_solver() solver.solve() - u.change_scales(b.domain.dealias) - f.change_scales(b.domain.dealias) + u.change_scales(dealias) + f.change_scales(dealias) assert np.allclose(u['g'], f['g']) @@ -218,7 +218,7 @@ def test_cosine_implicit(Nphi, Ntheta, dealias, dtype, rank): # Nphi = 1 # c, d, b, phi, r, x, y = basis(Nphi, Ntheta, dealias, dtype) # f = field.Field(dist=d, bases=(b,), dtype=dtype) -# f.preset_scales(b.domain.dealias) +# f.preset_scales(dealias) # f['g'] = r**4 # u = operators.Gradient(f, c).evaluate() # ug = [0*r*phi, 4*r**3 + 0*phi] @@ -233,7 +233,7 @@ def test_cosine_implicit(Nphi, Ntheta, dealias, dtype, rank): # def test_gradient_vector(Nphi, Ntheta, dealias, basis, dtype): # c, d, b, phi, r, x, y = basis(Nphi, Ntheta, dealias, dtype) # f = field.Field(dist=d, bases=(b,), dtype=dtype) -# f.preset_scales(b.domain.dealias) +# f.preset_scales(dealias) # f['g'] = 3*x**4 + 2*y*x # grad = lambda A: operators.Gradient(A, c) # T = grad(grad(f)).evaluate() @@ -255,7 +255,7 @@ def test_cosine_implicit(Nphi, Ntheta, dealias, dtype, rank): # Nphi = 1 # c, d, b, phi, r, x, y = basis(Nphi, Ntheta, dealias, dtype) # f = field.Field(dist=d, bases=(b,), dtype=dtype) -# f.preset_scales(b.domain.dealias) +# f.preset_scales(dealias) # f['g'] = r**4 # grad = lambda A: operators.Gradient(A, c) # T = grad(grad(f)).evaluate() @@ -275,7 +275,7 @@ def test_cosine_implicit(Nphi, Ntheta, dealias, dtype, rank): # def test_divergence_vector(Nphi, Ntheta, dealias, basis, dtype): # c, d, b, phi, r, x, y = basis(Nphi, Ntheta, dealias, dtype) # f = 
field.Field(dist=d, bases=(b,), dtype=dtype) -# f.preset_scales(b.domain.dealias) +# f.preset_scales(dealias) # f['g'] = 3*x**4 + 2*y*x # grad = lambda A: operators.Gradient(A, c) # div = lambda A: operators.Divergence(A) @@ -292,7 +292,7 @@ def test_cosine_implicit(Nphi, Ntheta, dealias, dtype, rank): # Nphi = 1 # c, d, b, phi, r, x, y = basis(Nphi, Ntheta, dealias, dtype=dtype) # f = field.Field(dist=d, bases=(b,), dtype=dtype) -# f.preset_scales(b.domain.dealias) +# f.preset_scales(dealias) # f['g'] = r**2 # grad = lambda A: operators.Gradient(A, c) # div = lambda A: operators.Divergence(A) @@ -309,7 +309,7 @@ def test_cosine_implicit(Nphi, Ntheta, dealias, dtype, rank): # def test_divergence_tensor(Nphi, Ntheta, dealias, basis, dtype): # c, d, b, phi, r, x, y = basis(Nphi, Ntheta, dealias, dtype) # v = field.Field(dist=d, tensorsig=(c,), bases=(b,), dtype=dtype) -# v.preset_scales(b.domain.dealias) +# v.preset_scales(dealias) # ex = np.array([-np.sin(phi)+0.*r,np.cos(phi)+0.*r]) # ey = np.array([np.cos(phi)+0.*r,np.sin(phi)+0.*r]) # v['g'] = 4*x**3*ey + 3*y**2*ey @@ -328,7 +328,7 @@ def test_cosine_implicit(Nphi, Ntheta, dealias, dtype, rank): # def test_curl_vector(Nphi, Ntheta, dealias, basis, dtype): # c, d, b, phi, r, x, y = basis(Nphi, Ntheta, dealias, dtype) # v = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) -# v.preset_scales(b.domain.dealias) +# v.preset_scales(dealias) # ex = np.array([-np.sin(phi)+0.*r,np.cos(phi)+0.*r]) # ey = np.array([np.cos(phi)+0.*r,np.sin(phi)+0.*r]) # v['g'] = 4*x**3*ey + 3*y**2*ey @@ -346,7 +346,7 @@ def test_laplacian_scalar_explicit(Nphi, Ntheta, dealias, dtype): # Spherical harmonic input m, l = 6, 10 f = d.Field(bases=b) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) if np.iscomplexobj(dtype()): f['g'] = sph_harm(m, l, phi, theta) else: @@ -365,7 +365,7 @@ def test_laplacian_scalar_implicit(Nphi, Ntheta, dealias, dtype): # Spherical harmonic forcing m, l = 5, 10 f = d.Field(bases=b) - 
f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) if np.iscomplexobj(dtype()): f['g'] = sph_harm(m, l, phi, theta) else: @@ -391,8 +391,8 @@ def test_laplacian_scalar_implicit(Nphi, Ntheta, dealias, dtype): # c, d, b, phi, theta = build_sphere(Nphi, Ntheta, dealias, dtype) # u = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) # f = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) -# f.preset_scales(b.domain.dealias) -# u.preset_scales(b.domain.dealias) +# f.preset_scales(dealias) +# u.preset_scales(dealias) # m0 = 1 # l0 = 1 # m1 = 1 @@ -418,7 +418,7 @@ def test_laplacian_scalar_implicit(Nphi, Ntheta, dealias, dtype): # def test_laplacian_vector(Nphi, Ntheta, dealias, basis, dtype): # c, d, b, phi, r, x, y = basis(Nphi, Ntheta, dealias, dtype) # v = field.Field(dist=d, tensorsig=(c,), bases=(b,), dtype=dtype) -# v.preset_scales(b.domain.dealias) +# v.preset_scales(dealias) # ex = np.array([-np.sin(phi)+0.*r,np.cos(phi)+0.*r]) # ey = np.array([np.cos(phi)+0.*r,np.sin(phi)+0.*r]) # v['g'] = 4*x**3*ey + 3*y**2*ey @@ -435,7 +435,7 @@ def test_laplacian_scalar_implicit(Nphi, Ntheta, dealias, dtype): # Nphi = 1 # c, d, b, phi, r, x, y = basis(Nphi, Ntheta, dealias, dtype=dtype) # u = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) -# u.preset_scales(b.domain.dealias) +# u.preset_scales(dealias) # u['g'][1] = 4 * r**3 # v = operators.Laplacian(u, c).evaluate() # vg = 0 * v['g'] diff --git a/dedalus/tests/test_spherical_arithmetic.py b/dedalus/tests/test_spherical_arithmetic.py index d5e67508..31754554 100644 --- a/dedalus/tests/test_spherical_arithmetic.py +++ b/dedalus/tests/test_spherical_arithmetic.py @@ -40,17 +40,17 @@ def build_shell(Nphi, Ntheta, Nr, dealias, dtype): def test_S2_radial_scalar_scalar_multiplication(Nphi, Ntheta, Nr, dealias): c, d, b, phi, theta, r, x, y, z = build_shell(Nphi, Ntheta, Nr, dealias, np.complex128) f0 = field.Field(dist=d, bases=(b,), dtype=np.complex128) - 
f0.preset_scales(b.domain.dealias) - phi, theta, r = d.local_grids(b, scales=b.domain.dealias) + f0.preset_scales(dealias) + phi, theta, r = d.local_grids(b, scales=dealias) f0['g'] = (r**2 - 0.5*r**3)*(5*np.cos(theta)**2-1)*np.sin(theta)*np.exp(1j*phi) b_S2 = b.S2_basis() - phi, theta = b_S2.local_grids() + phi, theta = d.local_grids(b_S2) g = field.Field(dist=d, bases=(b_S2,), dtype=np.complex128) g['g'] = (5*np.cos(theta)**2-1)*np.sin(theta)*np.exp(1j*phi) h = field.Field(dist=d, bases=(b.radial_basis,), dtype=np.complex128) - h.preset_scales(b.domain.dealias) + h.preset_scales(dealias) h['g'] = (r**2 - 0.5*r**3) f = (g * h).evaluate() assert np.allclose(f['g'], f0['g']) @@ -63,21 +63,21 @@ def test_S2_radial_vector_scalar_multiplication(Nphi, Ntheta, Nr, dealias): c, d, b, phi, theta, r, x, y, z = build_shell(Nphi, Ntheta, Nr, dealias, np.complex128) c_S2 = c.S2coordsys v0 = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=np.complex128) - v0.preset_scales(b.domain.dealias) - phi, theta, r = d.local_grids(b, scales=b.domain.dealias) + v0.preset_scales(dealias) + phi, theta, r = d.local_grids(b, scales=dealias) v0['g'][0] = (r**2 - 0.5*r**3)*(-1j * np.sin(theta)*np.exp(-2j*phi)) v0['g'][1] = (r**2 - 0.5*r**3)*(np.cos(theta)*np.sin(theta)*np.exp(-2j*phi)) v0['g'][2] = (r**2 - 0.5*r**3)*(5*np.cos(theta)**2-1)*np.sin(theta)*np.exp(1j*phi) b_S2 = b.S2_basis() - phi, theta = b_S2.local_grids() + phi, theta = d.local_grids(b_S2) u = field.Field(dist=d, bases=(b_S2,), tensorsig=(c,), dtype=np.complex128) u['g'][0] = (-1j * np.sin(theta)*np.exp(-2j*phi)) u['g'][1] = (np.cos(theta)*np.sin(theta)*np.exp(-2j*phi)) u['g'][2] = (5*np.cos(theta)**2-1)*np.sin(theta)*np.exp(1j*phi) h = field.Field(dist=d, bases=(b.radial_basis,), dtype=np.complex128) - h.preset_scales(b.domain.dealias) + h.preset_scales(dealias) h['g'] = (r**2 - 0.5*r**3) v = (h * u).evaluate() assert np.allclose(v['g'], v0['g']) @@ -153,7 +153,7 @@ def test_multiply_number_scalar(Nphi, Ntheta, Nr, 
dealias, basis): f = field.Field(dist=d, bases=(b,), dtype=np.complex128) f['g'] = x**3 + 2*y**3 + 3*z**3 h = (2 * f).evaluate() - phi, theta, r = d.local_grids(b, scales=b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=dealias) x, y, z = c.cartesian(phi, theta, r) hg = 2*(x**3 + 2*y**3 + 3*z**3) assert np.allclose(h['g'], hg) @@ -168,7 +168,7 @@ def test_multiply_scalar_number(Nphi, Ntheta, Nr, dealias, basis): f = field.Field(dist=d, bases=(b,), dtype=np.complex128) f['g'] = x**3 + 2*y**3 + 3*z**3 h = (f * 2).evaluate() - phi, theta, r = d.local_grids(b, scales=b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=dealias) x, y, z = c.cartesian(phi, theta, r) hg = 2*(x**3 + 2*y**3 + 3*z**3) assert np.allclose(h['g'], hg) @@ -183,7 +183,7 @@ def test_multiply_scalar_scalar(Nphi, Ntheta, Nr, dealias, basis): f = field.Field(dist=d, bases=(b,), dtype=np.complex128) f['g'] = x**3 + 2*y**3 + 3*z**3 h = (f * f).evaluate() - phi, theta, r = d.local_grids(b, scales=b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=dealias) x, y, z = c.cartesian(phi, theta, r) hg = (x**3 + 2*y**3 + 3*z**3)**2 assert np.allclose(h['g'], hg) @@ -195,10 +195,10 @@ def test_multiply_scalar_scalar(Nphi, Ntheta, Nr, dealias, basis): @pytest.mark.parametrize('basis', [build_ball, build_shell]) def test_multiply_scalar_vector(Nphi, Ntheta, Nr, dealias, basis): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, dealias, np.complex128) - phi, theta, r = d.local_grids(b, scales=b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=dealias) x, y, z = c.cartesian(phi, theta, r) f = field.Field(dist=d, bases=(b,), dtype=np.complex128) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = x**3 + 2*y**3 + 3*z**3 u = operators.Gradient(f, c).evaluate() v = (f * u).evaluate() diff --git a/dedalus/tests/test_spherical_calculus.py b/dedalus/tests/test_spherical_calculus.py index 7c795cca..5b74beb6 100644 --- a/dedalus/tests/test_spherical_calculus.py 
+++ b/dedalus/tests/test_spherical_calculus.py @@ -19,7 +19,7 @@ def build_ball(Nphi, Ntheta, Nr, dealias, dtype): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,)) b = basis.BallBasis(c, (Nphi, Ntheta, Nr), radius=radius_ball, dealias=(dealias, dealias, dealias), dtype=dtype) - phi, theta, r = d.local_grids(b, scales=b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=dealias) x, y, z = c.cartesian(phi, theta, r) return c, d, b, phi, theta, r, x, y, z @@ -29,7 +29,7 @@ def build_shell(Nphi, Ntheta, Nr, dealias, dtype): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,)) b = basis.ShellBasis(c, (Nphi, Ntheta, Nr), radii=radii_shell, dealias=(dealias, dealias, dealias), dtype=dtype) - phi, theta, r = d.local_grids(b, scales=b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=dealias) x, y, z = c.cartesian(phi, theta, r) return c, d, b, phi, theta, r, x, y, z @@ -43,7 +43,7 @@ def build_shell(Nphi, Ntheta, Nr, dealias, dtype): def test_gradient_scalar(Nphi, Ntheta, Nr, dealias, basis, dtype): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = fg = 3*x**2 + 2*y*z u = operators.Gradient(f, c).evaluate() ug = 0 * u['g'] @@ -61,7 +61,7 @@ def test_gradient_radial_scalar(Nr, dealias, basis, dtype): Nphi = Ntheta = 1 c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = fg = r**4/3 u = operators.Gradient(f, c).evaluate() ug = 0 * u['g'] @@ -78,7 +78,7 @@ def test_gradient_radial_scalar(Nr, dealias, basis, dtype): def test_gradient_vector(Nphi, Ntheta, Nr, dealias, basis, dtype): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - 
f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = 3*x**2 + 2*y*z grad = lambda A: operators.Gradient(A, c) T = grad(grad(f)).evaluate() @@ -100,7 +100,7 @@ def test_gradient_radial_vector(Nr, dealias, basis, dtype): Nphi = Ntheta = 1 c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, dealias, dtype=dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = r**4 / 3 grad = lambda A: operators.Gradient(A, c) T = grad(grad(f)).evaluate() @@ -120,7 +120,7 @@ def test_gradient_radial_vector(Nr, dealias, basis, dtype): def test_divergence_vector(Nphi, Ntheta, Nr, dealias, basis, dtype): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = x**3 + 2*y**3 + 3*z**3 u = operators.Gradient(f, c) h = operators.Divergence(u).evaluate() @@ -133,9 +133,10 @@ def test_divergence_vector(Nphi, Ntheta, Nr, dealias, basis, dtype): @pytest.mark.parametrize('basis', [build_ball, build_shell]) @pytest.mark.parametrize('dtype', [np.float64, np.complex128]) def test_divergence_radial_vector(Nr, dealias, basis, dtype): - c, d, b, phi, theta, r, x, y, z = basis(1, 1, Nr, 1, dtype=dtype) + c, d, b, phi, theta, r, x, y, z = basis(1, 1, Nr, dealias, dtype=dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) + print(f['g'].shape, r.shape) f['g'] = r**4/3 u = operators.Gradient(f, c) h = operators.Divergence(u).evaluate() @@ -152,7 +153,7 @@ def test_divergence_radial_vector(Nr, dealias, basis, dtype): def test_curl_vector(Nphi, Ntheta, Nr, dealias, basis, dtype): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, dealias, dtype) u = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - u.preset_scales(b.domain.dealias) + u.preset_scales(dealias) ct, st, cp, sp = np.cos(theta), 
np.sin(theta), np.cos(phi), np.sin(phi) u['g'][2] = r**2*st*(2*ct**2*cp-r*ct**3*sp+r**3*cp**3*st**5*sp**3+r*ct*st**2*(cp**3+sp**3)) u['g'][1] = r**2*(2*ct**3*cp-r*cp**3*st**4+r**3*ct*cp**3*st**5*sp**3-1/16*r*np.sin(2*theta)**2*(-7*sp+np.sin(3*phi))) @@ -174,7 +175,7 @@ def test_curl_vector(Nphi, Ntheta, Nr, dealias, basis, dtype): def test_laplacian_scalar(Nphi, Ntheta, Nr, dealias, basis, dtype): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = x**4 + 2*y**4 + 3*z**4 h = operators.Laplacian(f, c).evaluate() hg = 12*x**2+24*y**2+36*z**2 @@ -189,7 +190,7 @@ def test_laplacian_radial_scalar(Nr, basis, dealias, dtype): Nphi = Ntheta = 1 c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, dealias, dtype=dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = r**4 / 3 h = operators.Laplacian(f, c).evaluate() hg = 20/3 * r**2 @@ -205,7 +206,7 @@ def test_laplacian_radial_scalar(Nr, basis, dealias, dtype): def test_laplacian_vector(Nphi, Ntheta, Nr, dealias, basis, dtype): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, dealias, dtype) u = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - u.preset_scales(b.domain.dealias) + u.preset_scales(dealias) ct, st, cp, sp = np.cos(theta), np.sin(theta), np.cos(phi), np.sin(phi) u['g'][2] = r**2*st*(2*ct**2*cp-r*ct**3*sp+r**3*cp**3*st**5*sp**3+r*ct*st**2*(cp**3+sp**3)) u['g'][1] = r**2*(2*ct**3*cp-r*cp**3*st**4+r**3*ct*cp**3*st**5*sp**3-1/16*r*np.sin(2*theta)**2*(-7*sp+np.sin(3*phi))) @@ -226,7 +227,7 @@ def test_laplacian_radial_vector(Nr, dealias, basis, dtype): Nphi = Ntheta = 1 c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, dealias, dtype=dtype) u = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - u.preset_scales(b.domain.dealias) + u.preset_scales(dealias) 
u['g'][2] = 4/3 * r**3 v = operators.Laplacian(u, c).evaluate() vg = 0 * v['g'] diff --git a/dedalus/tests/test_spherical_operators.py b/dedalus/tests/test_spherical_operators.py index bda59d99..5739c2a0 100644 --- a/dedalus/tests/test_spherical_operators.py +++ b/dedalus/tests/test_spherical_operators.py @@ -21,7 +21,7 @@ def build_ball(Nphi, Ntheta, Nr, k, dealias, dtype): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,)) b = basis.BallBasis(c, (Nphi, Ntheta, Nr), radius=radius_ball, k=k, dealias=(dealias, dealias, dealias), dtype=dtype) - phi, theta, r = d.local_grids(b, scales=b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=dealias) x, y, z = c.cartesian(phi, theta, r) return c, d, b, phi, theta, r, x, y, z @@ -31,7 +31,7 @@ def build_shell(Nphi, Ntheta, Nr, k, dealias, dtype): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,)) b = basis.ShellBasis(c, (Nphi, Ntheta, Nr), radii=radii_shell, k=k, dealias=(dealias, dealias, dealias), dtype=dtype) - phi, theta, r = d.local_grids(b, scales=b.domain.dealias) + phi, theta, r = d.local_grids(b, scales=dealias) x, y, z = c.cartesian(phi, theta, r) return c, d, b, phi, theta, r, x, y, z @@ -47,13 +47,13 @@ def test_spherical_ell_product_scalar(Nphi, Ntheta, Nr, k, dealias, basis, dtype c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) g = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = 3*x**2 + 2*y*z for ell, m_ind, ell_ind in b.ell_maps(d): g['c'][m_ind, ell_ind, :] = (ell+3)*f['c'][m_ind, ell_ind, :] func = lambda ell: ell+3 h = operators.SphericalEllProduct(f, c, func).evaluate() - g.preset_scales(b.domain.dealias) + g.preset_scales(dealias) assert np.allclose(h['g'], g['g']) @@ -67,14 +67,14 @@ def test_spherical_ell_product_scalar(Nphi, Ntheta, Nr, k, dealias, basis, dtype def 
test_spherical_ell_product_vector(Nphi, Ntheta, Nr, k, dealias, basis, dtype): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = 3*x**2 + 2*y*z u = operators.Gradient(f, c).evaluate() uk0 = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - uk0.preset_scales(b.domain.dealias) + uk0.preset_scales(dealias) uk0['g'] = u['g'] v = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - v.preset_scales(b.domain.dealias) + v.preset_scales(dealias) for ell, m_ind, ell_ind in b.ell_maps(d): v['c'][0, m_ind, ell_ind, :] = (ell+2)*uk0['c'][0, m_ind, ell_ind, :] v['c'][1, m_ind, ell_ind, :] = (ell+4)*uk0['c'][1, m_ind, ell_ind, :] @@ -126,7 +126,7 @@ def test_convert_constant_tensor(Nphi, Ntheta, Nr, k, dealias, basis, dtype): def test_convert_scalar(Nphi, Ntheta, Nr, k, dealias, basis, dtype, layout): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = 3*x**2 + 2*y*z g = operators.Laplacian(f, c).evaluate() f.change_layout(layout) @@ -147,7 +147,7 @@ def test_convert_vector(Nphi, Ntheta, Nr, k, dealias, basis, dtype, layout): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) ct, st, cp, sp = np.cos(theta), np.sin(theta), np.cos(phi), np.sin(phi) u = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - u.preset_scales(b.domain.dealias) + u.preset_scales(dealias) u['g'][2] = r**2*st*(2*ct**2*cp-r*ct**3*sp+r**3*cp**3*st**5*sp**3+r*ct*st**2*(cp**3+sp**3)) u['g'][1] = r**2*(2*ct**3*cp-r*cp**3*st**4+r**3*ct*cp**3*st**5*sp**3-1/16*r*np.sin(2*theta)**2*(-7*sp+np.sin(3*phi))) u['g'][0] = r**2*sp*(-2*ct**2+r*ct*cp*st**2*sp-r**3*cp**2*st**5*sp**3) @@ -170,7 +170,7 @@ def test_explicit_trace_tensor(Nphi, Ntheta, Nr, k, dealias, basis, dtype, 
layou c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) ct, st, cp, sp = np.cos(theta), np.sin(theta), np.cos(phi), np.sin(phi) u = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - u.preset_scales(b.domain.dealias) + u.preset_scales(dealias) u['g'][2] = r**2*st*(2*ct**2*cp-r*ct**3*sp+r**3*cp**3*st**5*sp**3+r*ct*st**2*(cp**3+sp**3)) u['g'][1] = r**2*(2*ct**3*cp-r*cp**3*st**4+r**3*ct*cp**3*st**5*sp**3-1/16*r*np.sin(2*theta)**2*(-7*sp+np.sin(3*phi))) u['g'][0] = r**2*sp*(-2*ct**2+r*ct*cp*st**2*sp-r**3*cp**2*st**5*sp**3) @@ -240,7 +240,7 @@ def test_explicit_transpose_tensor(Nphi, Ntheta, Nr, k, dealias, dtype, basis, l c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) ct, st, cp, sp = np.cos(theta), np.sin(theta), np.cos(phi), np.sin(phi) u = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - u.preset_scales(b.domain.dealias) + u.preset_scales(dealias) u['g'][2] = r**2*st*(2*ct**2*cp-r*ct**3*sp+r**3*cp**3*st**5*sp**3+r*ct*st**2*(cp**3+sp**3)) u['g'][1] = r**2*(2*ct**3*cp-r*cp**3*st**4+r**3*ct*cp**3*st**5*sp**3-1/16*r*np.sin(2*theta)**2*(-7*sp+np.sin(3*phi))) u['g'][0] = r**2*sp*(-2*ct**2+r*ct*cp*st**2*sp-r**3*cp**2*st**5*sp**3) @@ -263,7 +263,7 @@ def test_implicit_transpose_tensor(Nphi, Ntheta, Nr, k, dealias, dtype, basis): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) ct, st, cp, sp = np.cos(theta), np.sin(theta), np.cos(phi), np.sin(phi) u = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - u.preset_scales(b.domain.dealias) + u.preset_scales(dealias) u['g'][2] = r**2*st*(2*ct**2*cp-r*ct**3*sp+r**3*cp**3*st**5*sp**3+r*ct*st**2*(cp**3+sp**3)) u['g'][1] = r**2*(2*ct**3*cp-r*cp**3*st**4+r**3*ct*cp**3*st**5*sp**3-1/16*r*np.sin(2*theta)**2*(-7*sp+np.sin(3*phi))) u['g'][0] = r**2*sp*(-2*ct**2+r*ct*cp*st**2*sp-r**3*cp**2*st**5*sp**3) @@ -289,7 +289,7 @@ def test_implicit_transpose_tensor(Nphi, Ntheta, Nr, k, dealias, dtype, basis): def 
test_azimuthal_average_scalar(Nphi, Ntheta, Nr, k, dealias, dtype, basis): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = r**2 + x + z h = operators.Average(f, c.coords[0]).evaluate() hg = r**2 + z @@ -306,7 +306,7 @@ def test_azimuthal_average_scalar(Nphi, Ntheta, Nr, k, dealias, dtype, basis): def test_spherical_average_scalar(Nphi, Ntheta, Nr, k, dealias, dtype, basis): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = r**2 + x + z h = operators.Average(f, c.S2coordsys).evaluate() hg = r**2 @@ -324,7 +324,7 @@ def test_spherical_average_scalar(Nphi, Ntheta, Nr, k, dealias, dtype, basis): def test_integrate_scalar(Nphi, Ntheta, Nr, k, dealias, dtype, basis, n): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = r**(2*n) h = operators.Integrate(f, c).evaluate() if isinstance(b, BallBasis): @@ -346,7 +346,7 @@ def test_integrate_scalar(Nphi, Ntheta, Nr, k, dealias, dtype, basis, n): def test_interpolate_azimuth_scalar(Nphi, Ntheta, Nr, k, dealias, dtype, basis, phi_interp): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = x**4 + 2*y**4 + 3*z**4 h = operators.interpolate(f, phi=phi_interp).evaluate() x, y, z = c.cartesian(np.array([[[phi_interp]]]), theta, r) @@ -365,7 +365,7 @@ def test_interpolate_azimuth_scalar(Nphi, Ntheta, Nr, k, dealias, dtype, basis, def test_interpolate_colatitude_scalar(Nphi, Ntheta, Nr, k, dealias, dtype, basis, theta_interp): c, d, b, phi, theta, r, x, y, 
z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = x**4 + 2*y**4 + 3*z**4 h = operators.interpolate(f, theta=theta_interp).evaluate() x, y, z = c.cartesian(phi, np.array([[[theta_interp]]]), r) @@ -384,7 +384,7 @@ def test_interpolate_colatitude_scalar(Nphi, Ntheta, Nr, k, dealias, dtype, basi def test_interpolate_radius_scalar(Nphi, Ntheta, Nr, k, dealias, dtype, basis, r_interp): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) - f.preset_scales(b.domain.dealias) + f.preset_scales(dealias) f['g'] = x**4 + 2*y**4 + 3*z**4 h = operators.interpolate(f, r=r_interp).evaluate() x, y, z = c.cartesian(phi, theta, np.array([[[r_interp]]])) @@ -404,7 +404,7 @@ def test_interpolate_azimuth_vector(Nphi, Ntheta, Nr, k, dealias, dtype, basis, c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) ct, st, cp, sp = np.cos(theta), np.sin(theta), np.cos(phi), np.sin(phi) u = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - u.preset_scales(b.domain.dealias) + u.preset_scales(dealias) u['g'][0] = r**2*sp*(-2*ct**2+r*ct*cp*st**2*sp-r**3*cp**2*st**5*sp**3) u['g'][1] = r**2*(2*ct**3*cp-r*cp**3*st**4+r**3*ct*cp**3*st**5*sp**3-1/16*r*np.sin(2*theta)**2*(-7*sp+np.sin(3*phi))) u['g'][2] = r**2*st*(2*ct**2*cp-r*ct**3*sp+r**3*cp**3*st**5*sp**3+r*ct*st**2*(cp**3+sp**3)) @@ -430,7 +430,7 @@ def test_interpolate_colatitude_vector(Nphi, Ntheta, Nr, k, dealias, dtype, basi c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) ct, st, cp, sp = np.cos(theta), np.sin(theta), np.cos(phi), np.sin(phi) u = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - u.preset_scales(b.domain.dealias) + u.preset_scales(dealias) u['g'][0] = r**2*sp*(-2*ct**2+r*ct*cp*st**2*sp-r**3*cp**2*st**5*sp**3) u['g'][1] = 
r**2*(2*ct**3*cp-r*cp**3*st**4+r**3*ct*cp**3*st**5*sp**3-1/16*r*np.sin(2*theta)**2*(-7*sp+np.sin(3*phi))) u['g'][2] = r**2*st*(2*ct**2*cp-r*ct**3*sp+r**3*cp**3*st**5*sp**3+r*ct*st**2*(cp**3+sp**3)) @@ -456,7 +456,7 @@ def test_interpolate_radius_vector(Nphi, Ntheta, Nr, k, dealias, dtype, basis, r c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) ct, st, cp, sp = np.cos(theta), np.sin(theta), np.cos(phi), np.sin(phi) u = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - u.preset_scales(b.domain.dealias) + u.preset_scales(dealias) u['g'][0] = r**2*sp*(-2*ct**2+r*ct*cp*st**2*sp-r**3*cp**2*st**5*sp**3) u['g'][1] = r**2*(2*ct**3*cp-r*cp**3*st**4+r**3*ct*cp**3*st**5*sp**3-1/16*r*np.sin(2*theta)**2*(-7*sp+np.sin(3*phi))) u['g'][2] = r**2*st*(2*ct**2*cp-r*ct**3*sp+r**3*cp**3*st**5*sp**3+r*ct*st**2*(cp**3+sp**3)) @@ -480,7 +480,7 @@ def test_interpolate_radius_vector(Nphi, Ntheta, Nr, k, dealias, dtype, basis, r def test_interpolate_azimuth_tensor(Nphi, Ntheta, Nr, k, dealias, dtype, basis, phi_interp): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) T = field.Field(dist=d, bases=(b,), tensorsig=(c,c), dtype=dtype) - T.preset_scales(b.domain.dealias) + T.preset_scales(dealias) T['g'][2,2] = (6*x**2+4*y*z)/r**2 T['g'][2,1] = T['g'][1,2] = -2*(y**3+x**2*(y-3*z)-y*z**2)/(r**3*np.sin(theta)) T['g'][2,0] = T['g'][0,2] = 2*x*(z-3*y)/(r**2*np.sin(theta)) @@ -511,7 +511,7 @@ def test_interpolate_azimuth_tensor(Nphi, Ntheta, Nr, k, dealias, dtype, basis, def test_interpolate_colatitude_tensor(Nphi, Ntheta, Nr, k, dealias, dtype, basis, theta_interp): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) T = field.Field(dist=d, bases=(b,), tensorsig=(c,c), dtype=dtype) - T.preset_scales(b.domain.dealias) + T.preset_scales(dealias) T['g'][2,2] = (6*x**2+4*y*z)/r**2 T['g'][2,1] = T['g'][1,2] = -2*(y**3+x**2*(y-3*z)-y*z**2)/(r**3*np.sin(theta)) T['g'][2,0] = T['g'][0,2] = 
2*x*(z-3*y)/(r**2*np.sin(theta)) @@ -542,7 +542,7 @@ def test_interpolate_colatitude_tensor(Nphi, Ntheta, Nr, k, dealias, dtype, basi def test_interpolate_radius_tensor(Nphi, Ntheta, Nr, k, dealias, dtype, basis, r_interp): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) T = field.Field(dist=d, bases=(b,), tensorsig=(c,c), dtype=dtype) - T.preset_scales(b.domain.dealias) + T.preset_scales(dealias) T['g'][2,2] = (6*x**2+4*y*z)/r**2 T['g'][2,1] = T['g'][1,2] = -2*(y**3+x**2*(y-3*z)-y*z**2)/(r**3*np.sin(theta)) T['g'][2,0] = T['g'][0,2] = 2*x*(z-3*y)/(r**2*np.sin(theta)) @@ -574,7 +574,7 @@ def test_radial_component_vector(Nphi, Ntheta, Nr, k, dealias, dtype, basis, rad c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) ct, st, cp, sp = np.cos(theta), np.sin(theta), np.cos(phi), np.sin(phi) u = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - u.preset_scales(b.domain.dealias) + u.preset_scales(dealias) u['g'][2] = r**2*st*(2*ct**2*cp-r*ct**3*sp+r**3*cp**3*st**5*sp**3+r*ct*st**2*(cp**3+sp**3)) u['g'][1] = r**2*(2*ct**3*cp-r*cp**3*st**4+r**3*ct*cp**3*st**5*sp**3-1/16*r*np.sin(2*theta)**2*(-7*sp+np.sin(3*phi))) u['g'][0] = r**2*sp*(-2*ct**2+r*ct*cp*st**2*sp-r**3*cp**2*st**5*sp**3) @@ -594,7 +594,7 @@ def test_radial_component_vector(Nphi, Ntheta, Nr, k, dealias, dtype, basis, rad def test_radial_component_tensor(Nphi, Ntheta, Nr, k, dealias, dtype, basis, radius): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) T = field.Field(dist=d, bases=(b,), tensorsig=(c,c), dtype=dtype) - T.preset_scales(b.domain.dealias) + T.preset_scales(dealias) T['g'][2,2] = (6*x**2+4*y*z)/r**2 T['g'][2,1] = T['g'][1,2] = -2*(y**3+x**2*(y-3*z)-y*z**2)/(r**3*np.sin(theta)) T['g'][2,0] = T['g'][0,2] = 2*x*(z-3*y)/(r**2*np.sin(theta)) @@ -621,7 +621,7 @@ def test_angular_component_vector(Nphi, Ntheta, Nr, k, dealias, dtype, basis, ra c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, 
dealias, dtype) ct, st, cp, sp = np.cos(theta), np.sin(theta), np.cos(phi), np.sin(phi) u = field.Field(dist=d, bases=(b,), tensorsig=(c,), dtype=dtype) - u.preset_scales(b.domain.dealias) + u.preset_scales(dealias) u['g'][2] = r**2*st*(2*ct**2*cp-r*ct**3*sp+r**3*cp**3*st**5*sp**3+r*ct*st**2*(cp**3+sp**3)) u['g'][1] = r**2*(2*ct**3*cp-r*cp**3*st**4+r**3*ct*cp**3*st**5*sp**3-1/16*r*np.sin(2*theta)**2*(-7*sp+np.sin(3*phi))) u['g'][0] = r**2*sp*(-2*ct**2+r*ct*cp*st**2*sp-r**3*cp**2*st**5*sp**3) @@ -643,7 +643,7 @@ def test_angular_component_vector(Nphi, Ntheta, Nr, k, dealias, dtype, basis, ra def test_angular_component_tensor(Nphi, Ntheta, Nr, k, dealias, dtype, basis, radius): c, d, b, phi, theta, r, x, y, z = basis(Nphi, Ntheta, Nr, k, dealias, dtype) T = field.Field(dist=d, bases=(b,), tensorsig=(c,c), dtype=dtype) - T.preset_scales(b.domain.dealias) + T.preset_scales(dealias) T['g'][2,2] = (6*x**2+4*y*z)/r**2 T['g'][2,1] = T['g'][1,2] = -2*(y**3+x**2*(y-3*z)-y*z**2)/(r**3*np.sin(theta)) T['g'][2,0] = T['g'][0,2] = 2*x*(z-3*y)/(r**2*np.sin(theta)) diff --git a/dedalus/tests/test_transforms.py b/dedalus/tests/test_transforms.py index 2e6e799b..5d4f236e 100644 --- a/dedalus/tests/test_transforms.py +++ b/dedalus/tests/test_transforms.py @@ -64,7 +64,7 @@ def test_CF_scalar_roundtrip(N, dealias): c = coords.Coordinate('x') d = distributor.Distributor([c]) xb = basis.ComplexFourier(c, size=N, bounds=(0, 1), dealias=dealias) - x = d.local_grid(xb, scales=dealias) + x = d.local_grid(xb, scale=dealias) # Scalar transforms u = field.Field(dist=d, bases=(xb,), dtype=np.complex128) u.preset_scales(dealias) @@ -82,7 +82,7 @@ def test_RF_scalar_roundtrip(N, dealias): c = coords.Coordinate('x') d = distributor.Distributor([c]) xb = basis.RealFourier(c, size=N, bounds=(0, 1), dealias=dealias) - x = d.local_grid(xb, scales=dealias) + x = d.local_grid(xb, scale=dealias) # Scalar transforms u = field.Field(dist=d, bases=(xb,), dtype=np.float64) u.preset_scales(dealias) @@ -103,7 
+103,7 @@ def test_J_scalar_roundtrip(a, b, N, dealias, dtype): c = coords.Coordinate('x') d = distributor.Distributor([c]) xb = basis.Jacobi(c, a=a, b=b, size=N, bounds=(0, 1), dealias=dealias) - x = d.local_grid(xb, scales=dealias) + x = d.local_grid(xb, scale=dealias) # Scalar transforms u = field.Field(dist=d, bases=(xb,), dtype=dtype) u.preset_scales(dealias) @@ -164,8 +164,8 @@ def build_CF_CF(Nx, Ny, dealias_x, dealias_y): d = distributor.Distributor((c,)) xb = basis.ComplexFourier(c.coords[0], size=Nx, bounds=(0, np.pi), dealias=dealias_x) yb = basis.ComplexFourier(c.coords[1], size=Ny, bounds=(0, np.pi), dealias=dealias_y) - x = d.local_grid(xb, scales=dealias_x) - y = d.local_grid(yb, scales=dealias_y) + x = d.local_grid(xb, scale=dealias_x) + y = d.local_grid(yb, scale=dealias_y) return c, d, xb, yb, x, y @@ -188,8 +188,8 @@ def build_RF_RF(Nx, Ny, dealias_x, dealias_y): d = distributor.Distributor((c,)) xb = basis.RealFourier(c.coords[0], size=Nx, bounds=(0, np.pi), dealias=dealias_x) yb = basis.RealFourier(c.coords[1], size=Ny, bounds=(0, np.pi), dealias=dealias_y) - x = d.local_grid(xb, scales=dealias_x) - y = d.local_grid(yb, scales=dealias_y) + x = d.local_grid(xb, scale=dealias_x) + y = d.local_grid(yb, scale=dealias_y) return c, d, xb, yb, x, y @@ -212,8 +212,8 @@ def build_CF_J(a, b, Nx, Ny, dealias_x, dealias_y): d = distributor.Distributor((c,)) xb = basis.ComplexFourier(c.coords[0], size=Nx, bounds=(0, np.pi), dealias=dealias_x) yb = basis.Jacobi(c.coords[1], a=a, b=b, size=Ny, bounds=(0, 1), dealias=dealias_y) - x = d.local_grid(xb, scales=dealias_x) - y = d.local_grid(yb, scales=dealias_y) + x = d.local_grid(xb, scale=dealias_x) + y = d.local_grid(yb, scale=dealias_y) return c, d, xb, yb, x, y @@ -298,7 +298,7 @@ def test_sphere_complex_scalar_backward(Nphi, Ntheta, radius, basis, dealias): c, d, b, phi, theta = basis(Nphi, Ntheta, radius, dealias, np.complex128) f = field.Field(dist=d, bases=(b,), dtype=np.complex128) 
f.preset_scales(dealias) - m, ell, *_ = d.coeff_layout.local_group_arrays(b.domain, scales=1) + m, ell, *_ = d.coeff_layout.local_group_arrays(b.domain(d), scales=1) f['c'][(m == -2) * (ell == 2)] = 1 fg = np.sqrt(15) / 4 * np.sin(theta)**2 * np.exp(-2j*phi) assert np.allclose(fg, f['g']) @@ -312,7 +312,7 @@ def test_sphere_complex_scalar_forward(Nphi, Ntheta, radius, basis, dealias): c, d, b, phi, theta = basis(Nphi, Ntheta, radius, dealias, np.complex128) f = field.Field(dist=d, bases=(b,), dtype=np.complex128) f.preset_scales(dealias) - m, ell, *_ = d.coeff_layout.local_group_arrays(b.domain, scales=1) + m, ell, *_ = d.coeff_layout.local_group_arrays(b.domain(d), scales=1) f['g'] = np.sqrt(15) / 4 * np.sin(theta)**2 * np.exp(-2j*phi) fc = np.zeros_like(f['c']) fc[(m == -2) * (ell == 2)] = 1 @@ -327,7 +327,7 @@ def test_sphere_real_scalar_backward(Nphi, Ntheta, radius, basis, dealias): c, d, b, phi, theta = basis(Nphi, Ntheta, radius, dealias, np.float64) f = field.Field(dist=d, bases=(b,), dtype=np.float64) f.preset_scales(dealias) - m, ell, *_ = d.coeff_layout.local_group_arrays(b.domain, scales=1) + m, ell, *_ = d.coeff_layout.local_group_arrays(b.domain(d), scales=1) f['c'][(m == 2) * (ell == 2)] = 1 fg = np.sqrt(15) / 4 * np.sin(theta)**2 * (np.cos(2*phi) - np.sin(2*phi)) assert np.allclose(fg, f['g']) @@ -341,7 +341,7 @@ def test_sphere_real_scalar_forward(Nphi, Ntheta, radius, basis, dealias): c, d, b, phi, theta = basis(Nphi, Ntheta, radius, dealias, np.float64) f = field.Field(dist=d, bases=(b,), dtype=np.float64) f.preset_scales(dealias) - m, ell, *_ = d.coeff_layout.local_group_arrays(b.domain, scales=1) + m, ell, *_ = d.coeff_layout.local_group_arrays(b.domain(d), scales=1) f['g'] = np.sqrt(15) / 4 * np.sin(theta)**2 * (np.cos(2*phi) - np.sin(2*phi)) fc = np.zeros_like(f['c']) fc[(m == 2) * (ell == 2)] = 1 From b3c57019d87c856f5a86190d8b27c035551dbce2 Mon Sep 17 00:00:00 2001 From: "Keaton J. 
Burns" Date: Wed, 13 Dec 2023 15:36:06 -0500 Subject: [PATCH 05/19] Change spin basis to get spin recomb from coords. Complex cylinder transforms now passing. --- dedalus/core/basis.py | 54 +++++++--------- dedalus/core/coords.py | 106 ++++++++++++++++++++++++------- dedalus/tests/test_transforms.py | 78 +++++++++++++++++++++++ 3 files changed, 183 insertions(+), 55 deletions(-) diff --git a/dedalus/core/basis.py b/dedalus/core/basis.py index 3d9743ec..885a2b6f 100644 --- a/dedalus/core/basis.py +++ b/dedalus/core/basis.py @@ -1567,35 +1567,27 @@ def reduced_view_5(data, axis1, axis2): class SpinRecombinationBasis: @CachedMethod - def spin_recombination_matrices(self, tensorsig): - """Build matrices for appling spin recombination to each tensor rank.""" - # Setup unitary spin recombination - # [azimuth, colatitude] -> [-, +] - Us = {2:np.array([[-1j, 1], [1j, 1]]) / np.sqrt(2), - 3:np.array([[-1j, 1, 0], [1j, 1, 0], [0, 0, np.sqrt(2)]]) / np.sqrt(2)} - # Perform unitary spin recombination along relevant tensor indeces - U = [] + def spin_recombination_factors(self, tensorsig): + """Build matrices for applying spin recombination to each tensor rank.""" + # Spin recombinations are separable over tensor index + factors = [] + coord1 = self.coordsys.coords[1] for i, cs in enumerate(tensorsig): - if (cs is self.coordsys or - (type(cs) is SphericalCoordinates and self.coordsys is cs.S2coordsys) or - (type(self.coordsys) is SphericalCoordinates and self.coordsys.S2coordsys is cs)): - U.append(Us[cs.dim]) - #if self.coordsys is vs: # kludge before we decide how compound coordinate systems work - # Ui = np.identity(vs.dim, dtype=np.complex128) - # Ui[:self.dim, :self.dim] = Us - # U.append(Ui) - #elif self.coordsys in vs.spaces: - # n = vector_space.get_index(self.space) - # Ui = np.identity(vector_space.dim, dtype=np.complex128) - # Ui[n:n+self.dim, n:n+self.dim] = Us - # U.append(Ui) + if coord1 in cs.coords: + subaxis = cs.coords.index(coord1) + 
factors.append(cs.forward_intertwiner(subaxis, order=1, group=None)) # no group dependence else: - U.append(None) - return U + factors.append(None) + return factors def spin_recombination_matrix(self, tensorsig): - U = self.spin_recombination_matrices(tensorsig) - matrix = kron(*U) + # Combine factors, replacing None placeholders + factors = self.spin_recombination_factors(tensorsig) + for i, cs in enumerate(tensorsig): + if factors[i] is None: + factors[i] = np.identity(cs.dim) + matrix = kron(*factors) + # Expand for cos and msin parts if self.dtype == np.float64: #matrix = np.array([[matrix.real,-matrix.imag],[matrix.imag,matrix.real]]) matrix = (np.kron(matrix.real,np.array([[1,0],[0,1]])) @@ -1610,11 +1602,11 @@ def forward_spin_recombination(self, tensorsig, colat_axis, gdata, out): np.copyto(out, gdata) else: azimuth_axis = colat_axis - 1 - U = self.spin_recombination_matrices(tensorsig) + factors = self.spin_recombination_factors(tensorsig) if gdata.dtype == np.complex128: # HACK: just copying the data so we can apply_matrix repeatedly np.copyto(out, gdata) - for i, Ui in enumerate(U): + for i, Ui in enumerate(factors): if Ui is not None: # Directly apply U apply_matrix(Ui, out, axis=i, out=out) @@ -1622,7 +1614,7 @@ def forward_spin_recombination(self, tensorsig, colat_axis, gdata, out): # Recombinations alternate between using gdata/out as input/output # For an even number of transforms, we need a final copy num_recombinations = 0 - for i, Ui in enumerate(U): + for i, Ui in enumerate(factors): if Ui is not None: dim = Ui.shape[0] if num_recombinations % 2 == 0: @@ -1647,11 +1639,11 @@ def backward_spin_recombination(self, tensorsig, colat_axis, gdata, out): np.copyto(out, gdata) else: azimuth_axis = colat_axis - 1 - U = self.spin_recombination_matrices(tensorsig) + factors = self.spin_recombination_factors(tensorsig) if gdata.dtype == np.complex128: # HACK: just copying the data so we can apply_matrix repeatedly np.copyto(out, gdata) - for i, Ui in 
enumerate(U): + for i, Ui in enumerate(factors): if Ui is not None: # Directly apply U apply_matrix(Ui.T.conj(), out, axis=i, out=out) @@ -1659,7 +1651,7 @@ def backward_spin_recombination(self, tensorsig, colat_axis, gdata, out): # Recombinations alternate between using gdata/out as input/output # For an even number of transforms, we need a final copy num_recombinations = 0 - for i, Ui in enumerate(U): + for i, Ui in enumerate(factors): if Ui is not None: dim = Ui.shape[0] if num_recombinations % 2 == 0: diff --git a/dedalus/core/coords.py b/dedalus/core/coords.py index d20ac115..6aa472ec 100644 --- a/dedalus/core/coords.py +++ b/dedalus/core/coords.py @@ -4,7 +4,7 @@ from ..libraries.dedalus_sphere import jacobi from ..libraries import dedalus_sphere -from ..tools.array import nkron +from ..tools.array import nkron, sparse_block_diag from ..tools.cache import CachedMethod # Public interface @@ -39,8 +39,31 @@ def __getitem__(self, key): def check_bounds(self, coord, bounds): pass + def forward_intertwiner(self, subaxis, order, group): + raise NotImplementedError("Subclasses must implement.") + + def backward_intertwiner(self, subaxis, order, group): + raise NotImplementedError("Subclasses must implement.") + + +class SeparableIntertwiners: + + def forward_vector_intertwiner(self, subaxis, group): + raise NotImplementedError("Subclasses must implement.") + + def backward_vector_intertwiner(self, subaxis, group): + raise NotImplementedError("Subclasses must implement.") + + def forward_intertwiner(self, subaxis, order, group): + vector = self.forward_vector_intertwiner(subaxis, group) + return nkron(vector, order) -class Coordinate: + def backward_intertwiner(self, subaxis, order, group): + vector = self.backward_vector_intertwiner(subaxis, group) + return nkron(vector, order) + + +class Coordinate(SeparableIntertwiners): dim = 1 default_nonconst_groups = (1,) curvilinear = False @@ -64,16 +87,47 @@ def check_bounds(self, bounds): if self.cs == None: return else: 
self.cs.check_bounds(self, bounds) + def forward_vector_intertwiner(self, subaxis, group): + return np.array([[1]]) + + def backward_vector_intertwiner(self, subaxis, group): + return np.array([[1]]) -class DirectProduct(CoordinateSystem): + +class DirectProduct(SeparableIntertwiners, CoordinateSystem): def __init__(self, *coordsystems): + for cs in coordsystems: + if not isinstance(cs, SeparableIntertwiners): + raise NotImplementedError("Direct products only implemented for separable intertwiners.") self.coordsystems = coordsystems self.coords = sum((cs.coords for cs in coordsystems), ()) self.dim = sum(cs.dim for cs in coordsystems) + def forward_vector_intertwiner(self, subaxis, group): + factors = [] + start_axis = 0 + for cs in self.coordsystems: + if start_axis <= subaxis < start_axis + cs.dim: + factors.append(cs.forward_vector_intertwiner(subaxis-start_axis, group)) + else: + factors.append(np.identity(cs.dim)) + start_axis += cs.dim + return sparse_block_diag(factors).A + + def backward_vector_intertwiner(self, subaxis, group): + factors = [] + start_axis = 0 + for cs in self.coordsystems: + if start_axis <= subaxis < start_axis + cs.dim: + factors.append(cs.backward_vector_intertwiner(subaxis-start_axis, group)) + else: + factors.append(np.identity(cs.dim)) + start_axis += cs.dim + return sparse_block_diag(factors).A + -class CartesianCoordinates(CoordinateSystem): +class CartesianCoordinates(SeparableIntertwiners, CoordinateSystem): curvilinear = False @@ -87,11 +141,11 @@ def __init__(self, *names, right_handed=True): def __str__(self): return '{' + ','.join([c.name for c in self.coords]) + '}' - def forward_intertwiner(self, subaxis, order, group): - return np.identity(self.dim**order) + def forward_vector_intertwiner(self, subaxis, group): + return np.identity(self.dim) - def backward_intertwiner(self, subaxis, order, group): - return np.identity(self.dim**order) + def backward_vector_intertwiner(self, subaxis, group): + return np.identity(self.dim) 
@CachedMethod def unit_vector_fields(self, dist): @@ -112,7 +166,7 @@ class CurvilinearCoordinateSystem(CoordinateSystem): curvilinear = True -class S2Coordinates(CurvilinearCoordinateSystem): +class S2Coordinates(SeparableIntertwiners, CurvilinearCoordinateSystem): """ S2 coordinate system: (azimuth, colatitude) Coord component ordering: (azimuth, colatitude) @@ -136,35 +190,37 @@ def _U_forward(cls, order): Ui = {+1: np.array([+1j, 1]) / np.sqrt(2), -1: np.array([-1j, 1]) / np.sqrt(2)} U = np.array([Ui[spin] for spin in cls.spin_ordering]) - return nkron(U, order) + if order > 1: + U = nkron(U, order) + return U @classmethod def _U_backward(cls, order): """Unitary transform from spin to coord components.""" return cls._U_forward(order).T.conj() - def forward_intertwiner(self, subaxis, order, group): + def forward_vector_intertwiner(self, subaxis, group): if subaxis == 0: # Azimuth intertwiner is identity, independent of group - return np.identity(self.dim**order) + return np.identity(self.dim) elif subaxis == 1: # Colatitude intertwiner is spin-U, independent of group - return self._U_forward(order) + return self._U_forward(1) else: raise ValueError("Invalid axis") - def backward_intertwiner(self, subaxis, order, group): + def backward_vector_intertwiner(self, subaxis, group): if subaxis == 0: # Azimuth intertwiner is identity, independent of group - return np.identity(self.dim**order) + return np.identity(self.dim) elif subaxis == 1: # Colatitude intertwiner is spin-U, independent of group - return self._U_backward(order) + return self._U_backward(1) else: raise ValueError("Invalid axis") -class PolarCoordinates(CurvilinearCoordinateSystem): +class PolarCoordinates(SeparableIntertwiners, CurvilinearCoordinateSystem): """ Polar coordinate system: (azimuth, radius) Coord component ordering: (azimuth, radius) @@ -188,30 +244,32 @@ def _U_forward(cls, order): Ui = {+1: np.array([+1j, 1]) / np.sqrt(2), -1: np.array([-1j, 1]) / np.sqrt(2)} U = np.array([Ui[spin] for 
spin in cls.spin_ordering]) - return nkron(U, order) + if order > 1: + U = nkron(U, order) + return U @classmethod def _U_backward(cls, order): """Unitary transform from spin to coord components.""" return cls._U_forward(order).T.conj() - def forward_intertwiner(self, subaxis, order, group): + def forward_vector_intertwiner(self, subaxis, group): if subaxis == 0: # Azimuth intertwiner is identity, independent of group - return np.identity(self.dim**order) + return np.identity(self.dim) elif subaxis == 1: # Radial intertwiner is spin-U, independent of group - return self._U_forward(order) + return self._U_forward(1) else: raise ValueError("Invalid axis") - def backward_intertwiner(self, subaxis, order, group): + def backward_vector_intertwiner(self, subaxis, group): if subaxis == 0: # Azimuth intertwiner is identity, independent of group - return np.identity(self.dim**order) + return np.identity(self.dim) elif subaxis == 1: # Radial intertwiner is spin-U, independent of group - return self._U_backward(order) + return self._U_backward(1) else: raise ValueError("Invalid axis") diff --git a/dedalus/tests/test_transforms.py b/dedalus/tests/test_transforms.py index 5d4f236e..ac0186f0 100644 --- a/dedalus/tests/test_transforms.py +++ b/dedalus/tests/test_transforms.py @@ -533,6 +533,84 @@ def test_polar_tensor_roundtrip_mmax0(Nr, radius, alpha, k, dealias, dtype, buil assert np.allclose(f['g'], fg) +## Cylinders + +@CachedMethod +def build_periodic_cylinder(Nz, Nphi, Nr, length, radius, alpha, k, dealias, dtype): + cz = coords.Coordinate('z') + cp = coords.PolarCoordinates('phi', 'r') + c = coords.DirectProduct(cz, cp) + d = distributor.Distributor(c) + bz = basis.Fourier(cz, Nz, bounds=(0, length), dealias=dealias, dtype=dtype) + bp = basis.DiskBasis(cp, (Nphi, Nr), dtype=dtype, radius=radius, alpha=alpha, k=k, dealias=(dealias, dealias)) + return c, d, (bz, bp) + +@CachedMethod +def build_periodic_cylindrical_annulus(Nz, Nphi, Nr, length, radius, alpha, k, dealias, 
dtype): + cz = coords.Coordinate('z') + cp = coords.PolarCoordinates('phi', 'r') + c = coords.DirectProduct(cz, cp) + d = distributor.Distributor(c) + bz = basis.Fourier(cz, Nz, bounds=(0, length), dealias=dealias, dtype=dtype) + bp = basis.AnnulusBasis(cp, (Nphi, Nr), dtype=dtype, radii=(radius,radius+1.3), alpha=alpha, k=k, dealias=(dealias, dealias)) + return c, d, (bz, bp) + +Nz_range = [8] +Nphi_range = [16] +Nr_range = [16] +length_range = [1.7] +radius_range = [2.5] +alpha_range = [0, 1] +k_range = [0, 1, 2] +dealias_range = [1/2, 1, 3/2] +dtype_range = [np.float64, np.complex128] +layout_range = ['g', 'c'] +rank_range = [0, 1, 2] + +@pytest.mark.parametrize('Nz', Nr_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('length', length_range) +@pytest.mark.parametrize('radius', radius_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('layout', layout_range) +@pytest.mark.parametrize('rank', rank_range) +@pytest.mark.parametrize('build_basis', [build_periodic_cylinder, build_periodic_cylindrical_annulus]) +def test_cylinder_roundtrip_noise(Nz, Nphi, Nr, length, radius, alpha, k, dealias, dtype, layout, rank, build_basis): + c, d, b = build_basis(Nz, Nphi, Nr, length, radius, alpha, k, dealias, dtype) + tensorsig = (c,) * rank + f = field.Field(dist=d, bases=b, tensorsig=tensorsig, dtype=dtype) + f.preset_scales(dealias) + other = {'g':'c', 'c':'g'}[layout] + f[other] = np.random.randn(*f[other].shape) + f_layout = f[layout].copy() + f[other] + assert np.allclose(f_layout, f[layout]) + +@pytest.mark.parametrize('Nz', Nr_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('length', length_range) +@pytest.mark.parametrize('radius', radius_range) 
+@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('layout', layout_range) +@pytest.mark.parametrize('build_basis', [build_periodic_cylinder, build_periodic_cylindrical_annulus]) +def test_cylinder_axial_vector_roundtrip_noise(Nz, Nphi, Nr, length, radius, alpha, k, dealias, dtype, layout, build_basis): + c, d, b = build_basis(Nz, Nphi, Nr, length, radius, alpha, k, dealias, dtype) + tensorsig = (c,) + f = field.Field(dist=d, bases=b, tensorsig=tensorsig, dtype=dtype) + f.preset_scales(dealias) + other = {'g':'c', 'c':'g'}[layout] + f[other][0] = np.random.randn(*f[other][0].shape) + assert np.allclose(f[layout][1:], 0) + + ## Shell @CachedMethod From 6b3862928e328abcb46a9e7648b82f8bbe06d3fc Mon Sep 17 00:00:00 2001 From: "Keaton J. Burns" Date: Wed, 13 Dec 2023 16:04:23 -0500 Subject: [PATCH 06/19] Forward cylinder transforms not failing. Need actual tests though. 
--- dedalus/core/basis.py | 16 ++-- dedalus/libraries/spin_recombination.pyx | 94 ++++++++---------------- 2 files changed, 38 insertions(+), 72 deletions(-) diff --git a/dedalus/core/basis.py b/dedalus/core/basis.py index 885a2b6f..265ef059 100644 --- a/dedalus/core/basis.py +++ b/dedalus/core/basis.py @@ -1614,19 +1614,17 @@ def forward_spin_recombination(self, tensorsig, colat_axis, gdata, out): # Recombinations alternate between using gdata/out as input/output # For an even number of transforms, we need a final copy num_recombinations = 0 + coord0 = self.coordsys.coords[0] for i, Ui in enumerate(factors): if Ui is not None: - dim = Ui.shape[0] + subaxis = tensorsig[i].coords.index(coord0) if num_recombinations % 2 == 0: input_view = reduced_view_5(gdata, i, azimuth_axis+len(tensorsig)) output_view = reduced_view_5(out, i, azimuth_axis+len(tensorsig)) else: input_view = reduced_view_5(out, i, azimuth_axis+len(tensorsig)) output_view = reduced_view_5(gdata, i, azimuth_axis+len(tensorsig)) - if dim == 3: - spin_recombination.recombine_forward_dim3(input_view, output_view) - elif dim == 2: - spin_recombination.recombine_forward_dim2(input_view, output_view) + spin_recombination.recombine_forward(subaxis, input_view, output_view) num_recombinations += 1 if num_recombinations % 2 == 0: np.copyto(out, gdata) @@ -1651,19 +1649,17 @@ def backward_spin_recombination(self, tensorsig, colat_axis, gdata, out): # Recombinations alternate between using gdata/out as input/output # For an even number of transforms, we need a final copy num_recombinations = 0 + coord0 = self.coordsys.coords[0] for i, Ui in enumerate(factors): if Ui is not None: - dim = Ui.shape[0] + subaxis = tensorsig[i].coords.index(coord0) if num_recombinations % 2 == 0: input_view = reduced_view_5(gdata, i, azimuth_axis+len(tensorsig)) output_view = reduced_view_5(out, i, azimuth_axis+len(tensorsig)) else: input_view = reduced_view_5(out, i, azimuth_axis+len(tensorsig)) output_view = reduced_view_5(gdata, i, 
azimuth_axis+len(tensorsig)) - if dim == 3: - spin_recombination.recombine_backward_dim3(input_view, output_view) - elif dim == 2: - spin_recombination.recombine_backward_dim2(input_view, output_view) + spin_recombination.recombine_backward(subaxis, input_view, output_view) num_recombinations += 1 if num_recombinations % 2 == 0: np.copyto(out, gdata) diff --git a/dedalus/libraries/spin_recombination.pyx b/dedalus/libraries/spin_recombination.pyx index 8611331b..a584acea 100644 --- a/dedalus/libraries/spin_recombination.pyx +++ b/dedalus/libraries/spin_recombination.pyx @@ -3,85 +3,55 @@ cimport cython cdef double invsqr2 = 2**(-1/2) -@cython.boundscheck(False) -@cython.wraparound(False) -def recombine_forward_dim3(double [:,:,:,:,::1] input, double [:,:,:,:,::1] output): - - cdef int size0 = input.shape[0] - cdef int size2 = input.shape[2] - cdef int size3 = input.shape[3]//2 - cdef int size4 = input.shape[4] - cdef int i, j, k, l - - for i in range(size0): - for j in range(size2): - for k in range(size3): - for l in range(size4): - output[i,0,j,2*k ,l] = (input[i,1,j,2*k ,l] + input[i,0,j,2*k+1,l])*invsqr2 - output[i,1,j,2*k+1,l] = (input[i,1,j,2*k+1,l] + input[i,0,j,2*k ,l])*invsqr2 - output[i,1,j,2*k ,l] = (input[i,1,j,2*k ,l] - input[i,0,j,2*k+1,l])*invsqr2 - output[i,0,j,2*k+1,l] = (input[i,1,j,2*k+1,l] - input[i,0,j,2*k ,l])*invsqr2 - output[i,2,j,2*k ,l] = input[i,2,j,2*k ,l] - output[i,2,j,2*k+1,l] = input[i,2,j,2*k+1,l] - -@cython.boundscheck(False) -@cython.wraparound(False) -def recombine_forward_dim2(double [:,:,:,:,::1] input, double [:,:,:,:,::1] output): - - cdef int size0 = input.shape[0] - cdef int size2 = input.shape[2] - cdef int size3 = input.shape[3]//2 - cdef int size4 = input.shape[4] - cdef int i, j, k, l - - for i in range(size0): - for j in range(size2): - for k in range(size3): - for l in range(size4): - output[i,0,j,2*k ,l] = (input[i,1,j,2*k ,l] + input[i,0,j,2*k+1,l])*invsqr2 - output[i,1,j,2*k+1,l] = (input[i,1,j,2*k+1,l] + 
input[i,0,j,2*k ,l])*invsqr2 - output[i,1,j,2*k ,l] = (input[i,1,j,2*k ,l] - input[i,0,j,2*k+1,l])*invsqr2 - output[i,0,j,2*k+1,l] = (input[i,1,j,2*k+1,l] - input[i,0,j,2*k ,l])*invsqr2 - @cython.boundscheck(False) @cython.wraparound(False) -def recombine_backward_dim3(double [:,:,:,:,::1] input, double [:,:,:,:,::1] output): +def recombine_forward(int s, double [:,:,:,:,::1] input, double [:,:,:,:,::1] output): cdef int size0 = input.shape[0] + cdef int size1 = input.shape[1] cdef int size2 = input.shape[2] - cdef int size3 = input.shape[3]//2 + cdef int size3 = input.shape[3] cdef int size4 = input.shape[4] - cdef int i, j, k, l + cdef int size3_2 = size3 // 2 + cdef int i, j, k, l, m for i in range(size0): - for j in range(size2): - for k in range(size3): - for l in range(size4): - output[i,0,j,2*k ,l] = (input[i,1,j,2*k+1,l] - input[i,0,j,2*k+1,l])*invsqr2 - output[i,0,j,2*k+1,l] = (input[i,0,j,2*k ,l] - input[i,1,j,2*k ,l])*invsqr2 - output[i,1,j,2*k ,l] = (input[i,0,j,2*k ,l] + input[i,1,j,2*k ,l])*invsqr2 - output[i,1,j,2*k+1,l] = (input[i,0,j,2*k+1,l] + input[i,1,j,2*k+1,l])*invsqr2 - output[i,2,j,2*k ,l] = input[i,2,j,2*k ,l] - output[i,2,j,2*k+1,l] = input[i,2,j,2*k+1,l] + for j in range(s): + output[i,j,:,:,:] = input[i,j,:,:,:] + for k in range(size2): + for l in range(size3_2): + for m in range(size4): + output[i,s+0,k,2*l ,m] = (input[i,s+1,k,2*l ,m] + input[i,s+0,k,2*l+1,m])*invsqr2 + output[i,s+1,k,2*l+1,m] = (input[i,s+1,k,2*l+1,m] + input[i,s+0,k,2*l ,m])*invsqr2 + output[i,s+1,k,2*l ,m] = (input[i,s+1,k,2*l ,m] - input[i,s+0,k,2*l+1,m])*invsqr2 + output[i,s+0,k,2*l+1,m] = (input[i,s+1,k,2*l+1,m] - input[i,s+0,k,2*l ,m])*invsqr2 + for j in range(s+2, size1): + output[i,j,:,:,:] = input[i,j,:,:,:] @cython.boundscheck(False) @cython.wraparound(False) -def recombine_backward_dim2(double [:,:,:,:,::1] input, double [:,:,:,:,::1] output): +def recombine_backward(int s, double [:,:,:,:,::1] input, double [:,:,:,:,::1] output): cdef int size0 = 
input.shape[0] + cdef int size1 = input.shape[1] cdef int size2 = input.shape[2] - cdef int size3 = input.shape[3]//2 + cdef int size3 = input.shape[3] cdef int size4 = input.shape[4] - cdef int i, j, k, l + cdef int size3_2 = size3 // 2 + cdef int i, j, k, l, m for i in range(size0): - for j in range(size2): - for k in range(size3): - for l in range(size4): - output[i,0,j,2*k ,l] = (input[i,1,j,2*k+1,l] - input[i,0,j,2*k+1,l])*invsqr2 - output[i,0,j,2*k+1,l] = (input[i,0,j,2*k ,l] - input[i,1,j,2*k ,l])*invsqr2 - output[i,1,j,2*k ,l] = (input[i,0,j,2*k ,l] + input[i,1,j,2*k ,l])*invsqr2 - output[i,1,j,2*k+1,l] = (input[i,0,j,2*k+1,l] + input[i,1,j,2*k+1,l])*invsqr2 + for j in range(s): + output[i,j,:,:,:] = input[i,j,:,:,:] + for k in range(size2): + for l in range(size3_2): + for m in range(size4): + output[i,s+0,k,2*l ,m] = (input[i,s+1,k,2*l+1,m] - input[i,s+0,k,2*l+1,m])*invsqr2 + output[i,s+0,k,2*l+1,m] = (input[i,s+0,k,2*l ,m] - input[i,s+1,k,2*l ,m])*invsqr2 + output[i,s+1,k,2*l ,m] = (input[i,s+0,k,2*l ,m] + input[i,s+1,k,2*l ,m])*invsqr2 + output[i,s+1,k,2*l+1,m] = (input[i,s+0,k,2*l+1,m] + input[i,s+1,k,2*l+1,m])*invsqr2 + for j in range(s+2, size1): + output[i,j,:,:,:] = input[i,j,:,:,:] From dfbd83c483fde50afa9f9f1fc7948e9f56826332 Mon Sep 17 00:00:00 2001 From: "Keaton J. Burns" Date: Fri, 15 Dec 2023 15:36:07 -0500 Subject: [PATCH 07/19] Scalar gradient working in cylinder. Passes 6k tests. 
--- dedalus/core/basis.py | 84 +++++++++------------ dedalus/core/coords.py | 4 + dedalus/core/operators.py | 123 ++++++++++++++++++++++++++----- dedalus/tests/test_transforms.py | 18 ++--- 4 files changed, 152 insertions(+), 77 deletions(-) diff --git a/dedalus/core/basis.py b/dedalus/core/basis.py index 265ef059..fdea7470 100644 --- a/dedalus/core/basis.py +++ b/dedalus/core/basis.py @@ -232,7 +232,7 @@ def backward_transform(self, field, axis, cdata, gdata): # return matrix[flags, :] @classmethod - def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_basis, coeffs, ncc_comp, arg_comp, out_comp, cutoff): + def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_basis, coeffs, ncc_comp, arg_comp, out_comp, ncc_tensorsig, arg_tensorsig, out_tensorsig, cutoff): # Simple matrix method by default return cls.ncc_matrix(ncc_basis, arg_basis, out_basis, coeffs.ravel(), cutoff=cutoff) @@ -303,7 +303,7 @@ def enum_indices(tensorsig): raise NotImplementedError() matrix = coeffs[ncc_comp].ravel()[0] * sparse.eye(M, N) else: - matrix = cls._last_axis_component_ncc_matrix(subproblem, ncc_basis, arg_basis, out_basis, coeffs[ncc_comp].squeeze(), ncc_comp, arg_comp, out_comp, cutoff=ncc_cutoff) + matrix = cls._last_axis_component_ncc_matrix(subproblem, ncc_basis, arg_basis, out_basis, coeffs[ncc_comp].squeeze(), ncc_comp, arg_comp, out_comp, ncc.tensorsig, operand.tensorsig, product.tensorsig, cutoff=ncc_cutoff) # Domains with real Fourier bases require kroneckering the Jacobi NCC matrix up to match the subsystem shape including the sin and cos parts of RealFourier data # This fix assumes the Jacobi basis is on the last axis if matrix.shape != (M,N): @@ -605,10 +605,10 @@ def product_matrix(self, arg_basis, out_basis, i): return super().product_matrix(arg_basis, out_basis, i) coeffs = np.zeros(i+1) coeffs[i] = 1 - return self._last_axis_component_ncc_matrix(None, self, arg_basis, out_basis, coeffs, None, None, None, 0) + return 
self._last_axis_component_ncc_matrix(None, self, arg_basis, out_basis, coeffs, None, None, None, None, None, None, 0) @classmethod - def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_basis, coeffs, ncc_comp, arg_comp, out_comp, cutoff): + def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_basis, coeffs, ncc_comp, arg_comp, out_comp, ncc_tensorsig, arg_tensorsig, out_tensorsig, cutoff): if arg_basis is None: return super().ncc_matrix(ncc_basis, arg_basis, out_basis, coeffs.ravel(), cutoff=cutoff) # Jacobi parameters @@ -1703,27 +1703,20 @@ def local_elements(self): @CachedMethod def spin_weights(self, tensorsig): - # Spin-component ordering: [-, +, 0] - Ss = {2:np.array([-1, 1], dtype=int), 3:np.array([-1, 1, 0], dtype=int)} S = np.zeros([cs.dim for cs in tensorsig], dtype=int) + coord1 = self.coordsys.coords[1] + spin_order = np.array(self.coordsys.spin_ordering) for i, cs in enumerate(tensorsig): - if (self.coordsys == cs or - (type(cs) is SphericalCoordinates and self.coordsys == cs.S2coordsys) or - (type(self.coordsys) is SphericalCoordinates and self.coordsys.S2coordsys == cs)): - S[axslice(i, 0, cs.dim)] += reshape_vector(Ss[cs.dim], dim=len(tensorsig), axis=i) - #if self.coordsys is vs: # kludge before we decide how compound coordinate systems work - # S[axslice(i, 0, self.dim)] += reshape_vector(Ss, dim=len(tensorsig), axis=i) - #elif self.coordsys in vs: - # n = vs.get_index(self.coordsys) - # S[axslice(i, n, n+self.dim)] += reshape_vector(Ss, dim=len(tensorsig), axis=i) + if coord1 in cs.coords: + start = cs.coords.index(coord1) - 1 + S[axslice(i, start, start+self.coordsys.dim)] += reshape_vector(spin_order[:cs.dim], dim=len(tensorsig), axis=i) + # The slice in spin_order is a hack so that 2-vectors on S2 with 3D spherical coordinates work + # Really it is more like cs.spin_ordering, but that fails for DirectProducts return S - @staticmethod @CachedMethod - def spintotal(spinindex): - spinorder = 
[-1, 1, 0] - spin = lambda index: spinorder[index] - return sum(spin(index) for index in spinindex) + def spintotal(self, tensorsig, spinindex): + return self.spin_weights(tensorsig)[spinindex] class PolarBasis(SpinBasis): @@ -2285,9 +2278,9 @@ def conversion_matrix(self, m, spintotal, dk): return operator(self.n_size(m), self.k).square.astype(np.float64) @classmethod - def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_basis, coeffs, ncc_comp, arg_comp, out_comp, cutoff=1e-6): + def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_basis, coeffs, ncc_comp, arg_comp, out_comp, ncc_tensorsig, arg_tensorsig, out_tensorsig, cutoff=1e-6): m = subproblem.group[0] # HACK - spintotal_arg = cls.spintotal(arg_comp) + spintotal_arg = out_basis.spintotal(arg_tensorsig, arg_comp) # Jacobi parameters a_ncc = ncc_basis.k + ncc_basis.alpha[0] b_ncc = ncc_basis.k + ncc_basis.alpha[1] @@ -2562,11 +2555,11 @@ def radius_multiplication_matrix(self, m, spintotal, order, d): return operator(self.n_size(m), self.alpha + self.k, abs(m + spintotal)).square.astype(np.float64) @classmethod - def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_basis, coeffs, ncc_comp, arg_comp, out_comp, cutoff=1e-6): + def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_basis, coeffs, ncc_comp, arg_comp, out_comp, ncc_tensorsig, arg_tensorsig, out_tensorsig, cutoff=1e-6): m = subproblem.group[0] # HACK - spintotal_ncc = cls.spintotal(ncc_comp) - spintotal_arg = cls.spintotal(arg_comp) - spintotal_out = cls.spintotal(out_comp) + spintotal_ncc = out_basis.spintotal(ncc_tensorsig, ncc_comp) + spintotal_arg = out_basis.spintotal(arg_tensorsig, arg_comp) + spintotal_out = out_basis.spintotal(out_tensorsig, out_comp) regtotal_ncc = abs(spintotal_ncc) regtotal_arg = abs(m + spintotal_arg) regtotal_out = abs(m + spintotal_out) @@ -2613,7 +2606,7 @@ def spinindex_out(self, spinindex_in): def radial_matrix(self, 
spinindex_in, spinindex_out, m): radial_basis = self.input_basis - spintotal = radial_basis.spintotal(spinindex_in) + spintotal = radial_basis.spintotal(self.operand.tensorsig, spinindex_in) dk = self.output_basis.k - radial_basis.k if spinindex_in == spinindex_out: return radial_basis.conversion_matrix(m, spintotal, dk) @@ -2637,7 +2630,6 @@ def spinindex_out(self, spinindex_in): def radial_matrix(self, spinindex_in, spinindex_out, m): radial_basis = self.output_basis - spintotal = radial_basis.spintotal(spinindex_in) coeff_size = radial_basis.shape[-1] if m == 0 and spinindex_in == spinindex_out: unit_amplitude = 1 / self.output_basis.constant_mode_value @@ -2664,7 +2656,7 @@ def spinindex_out(self, spinindex_in): def radial_matrix(self, spinindex_in, spinindex_out, m): radial_basis = self.output_basis - spintotal = radial_basis.spintotal(spinindex_in) + spintotal = radial_basis.spintotal(self.operand.tensorsig, spinindex_in) coeff_size = radial_basis.shape[-1] if m == 0 and spinindex_in == spinindex_out: # Convert to k=0 @@ -3252,11 +3244,11 @@ def sine_multiplication_matrix(self, m, spintotal, order, size=None): return (-1)**(max(0,-order))*operator(size - 1 + max(abs(m), abs(spintotal)), m, spintotal).square.astype(np.float64) @classmethod - def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_basis, coeffs, ncc_comp, arg_comp, out_comp, cutoff): + def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_basis, coeffs, ncc_comp, arg_comp, out_comp, ncc_tensorsig, arg_tensorsig, out_tensorsig, cutoff): m = subproblem.group[0] # HACK - spintotal_arg = cls.spintotal(arg_comp) - spintotal_ncc = cls.spintotal(ncc_comp) - spintotal_out = cls.spintotal(out_comp) + spintotal_arg = out_basis.spintotal(arg_tensorsig, arg_comp) + spintotal_ncc = out_basis.spintotal(ncc_tensorsig, ncc_comp) + spintotal_out = out_basis.spintotal(out_tensorsig, out_comp) # Jacobi parameters a_ncc = abs(spintotal_ncc) b_ncc = abs(spintotal_ncc) 
@@ -3309,7 +3301,7 @@ def spinindex_out(self, spinindex_in): return (spinindex_in,) @staticmethod - def symbol(spinindex_in, spinindex_out, ell, radius): + def symbol(spinindex_in, spinindex_out, spintotal_in, spintotal_out, ell, radius): unit_amplitude = 1 / SphereBasis.constant_mode_value return unit_amplitude * (ell == 0) * (spinindex_in == spinindex_out) @@ -3352,8 +3344,8 @@ def spinindex_out(self, spinindex_in): return tuple() @staticmethod - def symbol(spinindex_in, spinindex_out, ell, radius): - return SphereGradient.symbol(spinindex_in, spinindex_out, ell, radius) + def symbol(spinindex_in, spinindex_out, spintotal_in, spintotal_out, ell, radius): + return SphereGradient.symbol(spinindex_in, spinindex_out, spintotal_in, spintotal_out, ell, radius) class SphereGradient(operators.Gradient, operators.SeparableSphereOperator): @@ -3387,9 +3379,7 @@ def spinindex_out(self, spinindex_in): return ((0,) + spinindex_in, (1,) + spinindex_in) @staticmethod - def symbol(spinindex_in, spinindex_out, ell, radius): - spintotal_in = SphereBasis.spintotal(spinindex_in) - spintotal_out = SphereBasis.spintotal(spinindex_out) + def symbol(spinindex_in, spinindex_out, spintotal_in, spintotal_out, ell, radius): mu = spintotal_out - spintotal_in k = SphereBasis.k(ell, spintotal_in, mu) k[np.abs(spintotal_in) > ell] = 0 @@ -3426,9 +3416,7 @@ def spinindex_out(self, spinindex_in): return (spinindex_in,) @staticmethod - def symbol(spinindex_in, spinindex_out, ell, radius): - spintotal_in = SphereBasis.spintotal(spinindex_in) - spintotal_out = SphereBasis.spintotal(spinindex_out) + def symbol(spinindex_in, spinindex_out, spintotal_in, spintotal_out, ell, radius): k = SphereBasis.k kp = k(ell, spintotal_in, +1) km = k(ell, spintotal_in, -1) @@ -3900,7 +3888,7 @@ def _nmin(ell): return 0 * ell # To have same array shape as ell @classmethod - def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_basis, coeffs, ncc_comp, arg_comp, out_comp, cutoff=1e-6): + def 
_last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_basis, coeffs, ncc_comp, arg_comp, out_comp, ncc_tensorsig, arg_tensorsig, out_tensorsig, cutoff=1e-6): ell = 0 # HACK, independent of ell for shell arg_radial_basis = arg_basis.radial_basis regtotal_arg = cls.regtotal(arg_comp) @@ -4115,7 +4103,7 @@ def _nmin(ell): return ell // 2 @classmethod - def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_basis, coeffs, ncc_comp, arg_comp, out_comp, cutoff=1e-6): + def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_basis, coeffs, ncc_comp, arg_comp, out_comp, ncc_tensorsig, arg_tensorsig, out_tensorsig, cutoff=1e-6): ell = subproblem.group[1] # HACK if isinstance(arg_basis, BallBasis): arg_radial_basis = arg_basis.radial_basis @@ -4599,8 +4587,8 @@ def reg_NCC_matrix(radial_index): subcoeff_vals = DeferredTuple(reg_NCC_matrix, size=len(subcoeff_norms)) # Call last axis Clenshaw via ShellRadialBasis subcoeffs = (subcoeff_vals, subcoeff_norms) - ncc_comp = arg_comp = out_comp = tuple() - return self.radial_basis._last_axis_component_ncc_matrix(subproblem, ncc_basis, arg_basis, out_basis, subcoeffs, ncc_comp, arg_comp, out_comp, cutoff=ncc_cutoff) + ncc_comp = arg_comp = out_comp = ncc_tensorsig = arg_tensorsig = out_tensorsig = tuple() + return self.radial_basis._last_axis_component_ncc_matrix(subproblem, ncc_basis, arg_basis, out_basis, subcoeffs, ncc_comp, arg_comp, out_comp, ncc_tensorsig, arg_tensorsig, out_tensorsig, cutoff=ncc_cutoff) class BallBasis(Spherical3DBasis, metaclass=CachedClass): @@ -4971,7 +4959,7 @@ def radial_matrix(self, spinindex_in, spinindex_out, m): position = self.position basis = self.input_basis if spinindex_in == spinindex_out: - return self._radial_matrix(basis, m, basis.spintotal(spinindex_in), position) + return self._radial_matrix(basis, m, basis.spintotal(self.operand.tensorsig, spinindex_in), position) else: return np.zeros((1,basis.n_size(m))) @@ -5356,7 
+5344,7 @@ def spinindex_out(self, spinindex_in): return (spinindex_in,) @staticmethod - def symbol(spinindex_in, spinindex_out, ell, radius): + def symbol(spinindex_in, spinindex_out, spintotal_in, spintotal_out, ell, radius): return 1.0 * (ell == 0) * (spinindex_in == spinindex_out) diff --git a/dedalus/core/coords.py b/dedalus/core/coords.py index 6aa472ec..1a2d5a52 100644 --- a/dedalus/core/coords.py +++ b/dedalus/core/coords.py @@ -102,6 +102,8 @@ def __init__(self, *coordsystems): raise NotImplementedError("Direct products only implemented for separable intertwiners.") self.coordsystems = coordsystems self.coords = sum((cs.coords for cs in coordsystems), ()) + if len(set(self.coords)) < len(self.coords): + raise ValueError("Cannot repeat coordinates in DirectProduct.") self.dim = sum(cs.dim for cs in coordsystems) def forward_vector_intertwiner(self, subaxis, group): @@ -132,6 +134,8 @@ class CartesianCoordinates(SeparableIntertwiners, CoordinateSystem): curvilinear = False def __init__(self, *names, right_handed=True): + if len(set(names)) < len(names): + raise ValueError("Must specify unique names.") self.names = names self.dim = len(names) self.coords = tuple(Coordinate(name, cs=self) for name in names) diff --git a/dedalus/core/operators.py b/dedalus/core/operators.py index 8d6957f3..ea8efdfd 100644 --- a/dedalus/core/operators.py +++ b/dedalus/core/operators.py @@ -2300,10 +2300,15 @@ def new_operand(self, operand, **kw): class CartesianGradient(Gradient): - cs_type = coords.CartesianCoordinates + cs_type = (coords.CartesianCoordinates, coords.Coordinate) def __init__(self, operand, coordsys, out=None): + # Wrap to handle gradient wrt single coordinate + if isinstance(coordsys, coords.Coordinate): + coordsys = coords.CartesianCoordinates(coordsys.name) + # Assemble partial derivatives along each coordinate args = [Differentiate(operand, coord) for coord in coordsys.coords] + # TODO: get rid of this hack for i in range(len(args)): if args[i] == 0: args[i] 
= 2*operand @@ -2364,6 +2369,86 @@ def operate(self, out): out.data[i] = 0 +class DirectProductGradient(Gradient): + + cs_type = coords.DirectProduct + + def __init__(self, operand, coordsys, out=None): + args = [Gradient(operand, cs) for cs in coordsys.coordsystems] + bases = self._build_bases(operand, *args) + args = [convert(arg, bases) for arg in args] + LinearOperator.__init__(self, *args, out=out) + self.coordsys = coordsys + # LinearOperator requirements + self.operand = operand + # FutureField requirements + self.domain = Domain(operand.dist, bases) + self.tensorsig = (coordsys,) + operand.tensorsig + self.dtype = operand.dtype + + def _build_bases(self, *args): + """Build output bases.""" + # Taken from Add operator + dist = unify_attributes(args, 'dist') + bases = [] + for coord in args[0].domain.bases_by_coord: + ax_bases = tuple(arg.domain.bases_by_coord.get(coord, None) for arg in args) + # All constant bases yields constant basis + if all(basis is None for basis in ax_bases): + bases.append(None) + # Combine any constant bases to avoid adding None to None + elif any(basis is None for basis in ax_bases): + ax_bases = [basis for basis in ax_bases if basis is not None] + bases.append(np.sum(ax_bases) + None) + # Add all bases + else: + bases.append(np.sum(ax_bases)) + return tuple(bases) + + def matrix_dependence(self, *vars): + arg_vals = [arg.matrix_dependence(self, *vars) for arg in self.args] + return np.logical_or.reduce(arg_vals) + + def matrix_coupling(self, *vars): + arg_vals = [arg.matrix_coupling(self, *vars) for arg in self.args] + return np.logical_or.reduce(arg_vals) + + def subproblem_matrix(self, subproblem): + """Build operator matrix for a specific subproblem.""" + return sparse.vstack(arg.expression_matrices(subproblem, [self.operand])[self.operand] for arg in self.args) + + def check_conditions(self): + """Check that operands are in a proper layout.""" + # Require operands to be in same layout + layouts = [operand.layout for operand in 
self.args if operand] + all_layouts_equal = (len(set(layouts)) == 1) + return all_layouts_equal + + def enforce_conditions(self): + """Require operands to be in a proper layout.""" + # Require operands to be in same layout + # Take coeff layout arbitrarily + layout = self.dist.coeff_layout + for arg in self.args: + if arg: + arg.change_layout(layout) + + def operate(self, out): + """Perform operation.""" + operands = self.args + layouts = [operand.layout for operand in self.args if operand] + # Set output layout + out.preset_layout(layouts[0]) + # Copy operand data to output components + i0 = 0 + for cs_grad, cs in zip(operands, self.coordsys.coordsystems): + if cs_grad: + out.data[i0:i0+cs.dim] = cs_grad.data + else: + out.data[i0:i0+cs.dim] = 0 + i0 += cs.dim + + # class S2Gradient(Gradient, SpectralOperator): # cs_type = coords.S2Coordinates @@ -2514,8 +2599,6 @@ def subproblem_matrix(self, subproblem): elif not m_dep: ell_list = [l] blocks = [] - spintotal_in = basis.spintotal(spinindex_in) - spintotal_out = basis.spintotal(spinindex_out) for ell in ell_list: if abs(spintotal_in) <= ell and abs(spintotal_out) <= ell: block = self.l_matrix(self.input_basis, self.output_basis, spinindex_in, spinindex_out, ell) @@ -2606,7 +2689,7 @@ class SeparableSphereOperator(SpectralOperator): subaxis_coupling = [False, False] # No coupling @CachedMethod - def local_symbols(self, layout, spinindex_in, spinindex_out): + def local_symbols(self, layout, spinindex_in, spinindex_out, spintotal_in, spintotal_out): # TODO: improve caching specificity (e.g. 
for operators that depend only on spintotals) operand = self.args[0] if self.input_basis is None: @@ -2620,12 +2703,12 @@ def local_symbols(self, layout, spinindex_in, spinindex_out): elif self.subaxis_dependence[1]: colat_axis = self.first_axis + 1 local_ell = layout.local_group_arrays(domain, scales=domain.dealias)[colat_axis] - return self.symbol(spinindex_in, spinindex_out, local_ell, radius) + return self.symbol(spinindex_in, spinindex_out, spintotal_in, spintotal_out, local_ell, radius) else: - return self.symbol(spinindex_in, spinindex_out, radius) + return self.symbol(spinindex_in, spinindex_out, spintotal_in, spintotal_out, radius) @staticmethod - def symbol(spinindex_in, spinindex_out, ell): + def symbol(spinindex_in, spinindex_out, spintotal_in, spintotal_out, ell): raise NotImplementedError() def subproblem_matrix(self, subproblem): @@ -2657,7 +2740,7 @@ def subproblem_matrix(self, subproblem): for spinindex_in, spintotal_in in np.ndenumerate(S_in): if (prod(subshape) > 0) and (spinindex_out in self.spinindex_out(spinindex_in)): # Get symbols for overlapping data - symbols = self.local_symbols(layout, spinindex_in, spinindex_out) + symbols = self.local_symbols(layout, spinindex_in, spinindex_out, spintotal_in, spintotal_out) if np.isscalar(symbols): symbols = symbols * np.ones(prod(subshape)) else: @@ -2721,7 +2804,8 @@ def operate(self, out): comp_in = data_in[spinindex_in] for spinindex_out in self.spinindex_out(spinindex_in): # Get symbols for overlapping data - symbols = self.local_symbols(layout, spinindex_in, spinindex_out) + spintotal_out = basis.spintotal(out.tensorsig, spinindex_out) + symbols = self.local_symbols(layout, spinindex_in, spinindex_out, spintotal_in, spintotal_out) if slices and not np.isscalar(symbols): symbols = symbols[slices] # Multiply by symbols @@ -2769,7 +2853,7 @@ def __init__(self, operand, coordsys, ell_r_func, out=None): self.tensorsig = operand.tensorsig self.dtype = operand.dtype - def symbol(self, spinindex_in, 
spinindex_out, local_ell, radius): + def symbol(self, spinindex_in, spinindex_out, spintotal_in, spintotal_out, local_ell, radius): return self.ell_r_func(local_ell, radius) def new_operand(self, operand, **kw): @@ -2907,9 +2991,9 @@ def new_operand(self, operand, **kw): @CachedMethod def radial_matrix(self, spinindex_in, spinindex_out, m): radial_basis = self.input_basis - spintotal = radial_basis.spintotal(spinindex_in) + spintotal_in = radial_basis.spintotal(self.operand.tensorsig, spinindex_in) if spinindex_out in self.spinindex_out(spinindex_in): - return self._radial_matrix(radial_basis.Lmax, spintotal, m, self.dtype) + return self._radial_matrix(radial_basis.Lmax, spintotal_in, m, self.dtype) else: raise ValueError("This should never happen") @@ -2960,9 +3044,9 @@ def spinindex_out(self, spinindex_in): @CachedMethod def radial_matrix(self, spinindex_in, spinindex_out, m): radial_basis = self.input_basis - spintotal = radial_basis.spintotal(spinindex_in) + spintotal_in = radial_basis.spintotal(self.operand.tensorsig, spinindex_in) if spinindex_out in self.spinindex_out(spinindex_in): - return self._radial_matrix(radial_basis, spinindex_out[0], spintotal, m) + return self._radial_matrix(radial_basis, spinindex_out[0], spintotal_in, m) else: raise ValueError("This should never happen") @@ -3411,9 +3495,9 @@ def spinindex_out(self, spinindex_in): @CachedMethod def radial_matrix(self, spinindex_in, spinindex_out, m): radial_basis = self.input_basis - spintotal = radial_basis.spintotal(spinindex_in) + spintotal_in = radial_basis.spintotal(self.operand.tensorsig, spinindex_in) if spinindex_in[0] != 2 and spinindex_in[1:] == spinindex_out: - return self._radial_matrix(radial_basis, spinindex_in[0], spintotal, m) + return self._radial_matrix(radial_basis, spinindex_in[0], spintotal_in, m) else: raise ValueError("This should never happen") @@ -3697,8 +3781,7 @@ def spinindex_out(self, spinindex_in): @CachedMethod def radial_matrix(self, spinindex_in, spinindex_out, 
m): radial_basis = self.input_basis - spintotal_in = radial_basis.spintotal(spinindex_in) - + spintotal_in = radial_basis.spintotal(self.operand.tensorsig, spinindex_in) if spinindex_in[1:] == spinindex_out: return self._radial_matrix(radial_basis, spinindex_in[0], spintotal_in, m) else: @@ -4004,9 +4087,9 @@ def spinindex_out(self, spinindex_in): @CachedMethod def radial_matrix(self, spinindex_in, spinindex_out, m): radial_basis = self.input_basis - spintotal = radial_basis.spintotal(spinindex_in) + spintotal_in = radial_basis.spintotal(self.operand.tensorsig, spinindex_in) if spinindex_in == spinindex_out: - return self._radial_matrix(radial_basis, spintotal, m) + return self._radial_matrix(radial_basis, spintotal_in, m) else: raise ValueError("This should never happen") diff --git a/dedalus/tests/test_transforms.py b/dedalus/tests/test_transforms.py index ac0186f0..c576ae95 100644 --- a/dedalus/tests/test_transforms.py +++ b/dedalus/tests/test_transforms.py @@ -2,7 +2,7 @@ import pytest import numpy as np from dedalus.core import coords, distributor, basis, field, operators -from dedalus.tools.cache import CachedMethod, CachedFunction +from dedalus.tools.cache import CachedFunction from mpi4py import MPI comm = MPI.COMM_WORLD @@ -264,7 +264,7 @@ def test_CF_J_1d_vector_roundtrip(a, b, Nx, Ny, dealias_x, dealias_y): ## Sphere -@CachedMethod +@CachedFunction def build_sphere_2d(Nphi, Ntheta, radius, dealias, dtype): c = coords.S2Coordinates('phi', 'theta') d = distributor.Distributor((c,)) @@ -272,7 +272,7 @@ def build_sphere_2d(Nphi, Ntheta, radius, dealias, dtype): phi, theta = d.local_grids(b, scales=(dealias, dealias)) return c, d, b, phi, theta -@CachedMethod +@CachedFunction def build_sphere_3d(Nphi, Ntheta, radius, dealias, dtype): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,)) @@ -377,7 +377,7 @@ def test_sphere_roundtrip_noise(Nphi, Ntheta, radius, basis, dealias, dtype, lay dealias_range = [0.5, 1, 1.5] 
-@CachedMethod +@CachedFunction def build_disk(Nphi, Nr, radius, alpha, k, dealias, dtype): c = coords.PolarCoordinates('phi', 'r') d = distributor.Distributor((c,)) @@ -385,7 +385,7 @@ def build_disk(Nphi, Nr, radius, alpha, k, dealias, dtype): return c, d, b -@CachedMethod +@CachedFunction def build_annulus(Nphi, Nr, radius, alpha, k, dealias, dtype): c = coords.PolarCoordinates('phi', 'r') d = distributor.Distributor((c,)) @@ -535,7 +535,7 @@ def test_polar_tensor_roundtrip_mmax0(Nr, radius, alpha, k, dealias, dtype, buil ## Cylinders -@CachedMethod +@CachedFunction def build_periodic_cylinder(Nz, Nphi, Nr, length, radius, alpha, k, dealias, dtype): cz = coords.Coordinate('z') cp = coords.PolarCoordinates('phi', 'r') @@ -545,7 +545,7 @@ def build_periodic_cylinder(Nz, Nphi, Nr, length, radius, alpha, k, dealias, dty bp = basis.DiskBasis(cp, (Nphi, Nr), dtype=dtype, radius=radius, alpha=alpha, k=k, dealias=(dealias, dealias)) return c, d, (bz, bp) -@CachedMethod +@CachedFunction def build_periodic_cylindrical_annulus(Nz, Nphi, Nr, length, radius, alpha, k, dealias, dtype): cz = coords.Coordinate('z') cp = coords.PolarCoordinates('phi', 'r') @@ -613,7 +613,7 @@ def test_cylinder_axial_vector_roundtrip_noise(Nz, Nphi, Nr, length, radius, alp ## Shell -@CachedMethod +@CachedFunction def build_shell(Nphi, Ntheta, Nr, radii, alpha, k, dealias, dtype): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,)) @@ -678,7 +678,7 @@ def test_shell_roundtrip_noise(Nphi, Ntheta, Nr, radii, alpha, k, dealias, dtype ## Ball -@CachedMethod +@CachedFunction def build_ball(Nphi, Ntheta, Nr, radius, alpha, k, dealias, dtype): c = coords.SphericalCoordinates('phi', 'theta', 'r') d = distributor.Distributor((c,)) From d722eca9d389f38b8d07af25858888f38abb6dbe Mon Sep 17 00:00:00 2001 From: "Keaton J. 
Burns" Date: Fri, 15 Dec 2023 17:12:12 -0500 Subject: [PATCH 08/19] Other direct product operators --- dedalus/core/basis.py | 23 ++- dedalus/core/coords.py | 6 +- dedalus/core/operators.py | 197 +++++++++++++++++++--- dedalus/tests/test_cartesian_operators.py | 6 +- 4 files changed, 197 insertions(+), 35 deletions(-) diff --git a/dedalus/core/basis.py b/dedalus/core/basis.py index fdea7470..5ad8859d 100644 --- a/dedalus/core/basis.py +++ b/dedalus/core/basis.py @@ -1580,12 +1580,16 @@ def spin_recombination_factors(self, tensorsig): factors.append(None) return factors - def spin_recombination_matrix(self, tensorsig): + def spin_recombination_matrix(self, tensorsig, n_upstream): # Combine factors, replacing None placeholders factors = self.spin_recombination_factors(tensorsig) for i, cs in enumerate(tensorsig): if factors[i] is None: factors[i] = np.identity(cs.dim) + # Add space for upstream groups + if n_upstream > 1: + # Create new list to avoid modifying cached list + factors = factors + [np.identity(n_upstream)] matrix = kron(*factors) # Expand for cos and msin parts if self.dtype == np.float64: @@ -4925,7 +4929,8 @@ def subproblem_matrix(self, subproblem): matrix = super().subproblem_matrix(subproblem) radial_basis = self.input_basis if self.tensorsig != (): - U = radial_basis.spin_recombination_matrix(self.tensorsig) + n_upstream = prod(subproblem.coeff_shape(self.domain)[:self.last_axis-1]) + U = radial_basis.spin_recombination_matrix(self.tensorsig, n_upstream=n_upstream) matrix = U.T.conj() @ matrix return matrix @@ -4987,14 +4992,14 @@ def subproblem_matrix(self, subproblem): S_out = radial_basis.spin_weights(self.tensorsig) # Should this use output_basis? 
radial_axis = self.dist.last_axis(self.output_basis) m = subproblem.group[radial_axis - 1] + subshape_in = subproblem.coeff_shape(self.operand.domain) + subshape_out = subproblem.coeff_shape(self.domain) # Loop over components submatrices = [] for spinindex_out, spintotal_out in np.ndenumerate(S_out): submatrix_row = [] for spinindex_in, spintotal_in in np.ndenumerate(S_in): # Build identity matrices for each axis - subshape_in = subproblem.coeff_shape(self.operand.domain) - subshape_out = subproblem.coeff_shape(self.domain) if spinindex_out in self.spinindex_out(spinindex_in): # Substitute factor for radial axis factors = [sparse.eye(i, j, format='csr') for i, j in zip(subshape_out, subshape_in)] @@ -5009,7 +5014,8 @@ def subproblem_matrix(self, subproblem): matrix.tocsr() # Convert tau to spin first if self.tensorsig: - U = radial_basis.spin_recombination_matrix(self.tensorsig) + n_upstream = prod(subshape_out[:radial_axis-1]) + U = radial_basis.spin_recombination_matrix(self.tensorsig, n_upstream=n_upstream) matrix = (matrix @ sparse.csr_matrix(U)).tocsr() return matrix @@ -5037,14 +5043,14 @@ def subproblem_matrix(self, subproblem): S_in = radial_basis.spin_weights(operand.tensorsig) S_out = radial_basis.spin_weights(self.tensorsig) # Should this use output_basis? 
m = subproblem.group[self.last_axis - 1] + subshape_in = subproblem.coeff_shape(self.operand.domain) + subshape_out = subproblem.coeff_shape(self.domain) # Loop over components submatrices = [] for spinindex_out, spintotal_out in np.ndenumerate(S_out): submatrix_row = [] for spinindex_in, spintotal_in in np.ndenumerate(S_in): # Build identity matrices for each axis - subshape_in = subproblem.coeff_shape(self.operand.domain) - subshape_out = subproblem.coeff_shape(self.domain) if spinindex_out in self.spinindex_out(spinindex_in): # Substitute factor for radial axis factors = [sparse.eye(i, j, format='csr') for i, j in zip(subshape_out, subshape_in)] @@ -5059,7 +5065,8 @@ def subproblem_matrix(self, subproblem): matrix.tocsr() # Convert tau to spin first if self.tensorsig: - U = radial_basis.spin_recombination_matrix(self.tensorsig) + n_upstream = prod(subshape_out[:self.last_axis-1]) + U = radial_basis.spin_recombination_matrix(self.tensorsig, n_upstream=n_upstream) matrix = (matrix @ sparse.csr_matrix(U)).tocsr() return matrix diff --git a/dedalus/core/coords.py b/dedalus/core/coords.py index 1a2d5a52..be9a135c 100644 --- a/dedalus/core/coords.py +++ b/dedalus/core/coords.py @@ -5,7 +5,7 @@ from ..libraries import dedalus_sphere from ..tools.array import nkron, sparse_block_diag -from ..tools.cache import CachedMethod +from ..tools.cache import CachedMethod, CachedAttribute # Public interface __all__ = ['Coordinate', @@ -128,6 +128,10 @@ def backward_vector_intertwiner(self, subaxis, group): start_axis += cs.dim return sparse_block_diag(factors).A + @CachedAttribute + def default_nonconst_groups(self): + return sum((cs.default_nonconst_groups for cs in self.coordsystems), ()) + class CartesianCoordinates(SeparableIntertwiners, CoordinateSystem): diff --git a/dedalus/core/operators.py b/dedalus/core/operators.py index ea8efdfd..c64a8be5 100644 --- a/dedalus/core/operators.py +++ b/dedalus/core/operators.py @@ -3253,29 +3253,13 @@ class CartCompBase(LinearOperator, 
metaclass=MultiClass): name = 'Comp' - def __init__(self, operand, index, coord, out=None): - super().__init__(operand, out=out) - self.index = index - self.coord = coord - self.coordsys = operand.tensorsig[index] - self.coord_subaxis = self.dist.get_axis(coord) - self.dist.get_axis(self.coordsys) - # LinearOperator requirements - self.operand = operand - # FutureField requirements - self.domain = operand.domain - self.tensorsig = operand.tensorsig[:index] + operand.tensorsig[index+1:] - self.dtype = operand.dtype - @classmethod - def _check_args(cls, operand, index, coord, out=None): + def _check_args(cls, operand, index, comp, out=None): # Dispatch by coordinate system return isinstance(operand.tensorsig[index], cls.cs_type) def new_operand(self, operand, **kw): - return CartCompBase(operand, self.index, self.coord, **kw) - - # def separability(self, *vars): - # return self.operand.separability(*vars) + return CartCompBase(operand, self.index, self.comp, **kw) def matrix_dependence(self, *vars): return self.operand.matrix_dependence(*vars) @@ -3286,7 +3270,20 @@ def matrix_coupling(self, *vars): class CartesianComponent(CartCompBase): - cs_type = coords.CartesianCoordinates + cs_type = (coords.CartesianCoordinates, coords.Coordinate) + + def __init__(self, operand, index, comp, out=None): + super().__init__(operand, out=out) + self.index = index + self.comp = comp + self.coordsys = operand.tensorsig[index] + self.coord_subaxis = self.dist.get_axis(comp) - self.dist.get_axis(self.coordsys) + # LinearOperator requirements + self.operand = operand + # FutureField requirements + self.domain = operand.domain + self.tensorsig = operand.tensorsig[:index] + operand.tensorsig[index+1:] + self.dtype = operand.dtype def check_conditions(self): """Check that operands are in a proper layout.""" @@ -3319,6 +3316,59 @@ def operate(self, out): out.data[:] = arg0.data[take_comp] +class DirectProductComponent(CartCompBase): + + cs_type = coords.DirectProduct + + def __init__(self, 
operand, index, comp, out=None): + super().__init__(operand, out=out) + self.index = index + self.comp = comp + self.coordsys = operand.tensorsig[index] + self.comp_subaxis = self.dist.get_axis(comp) - self.dist.get_axis(self.coordsys) + # LinearOperator requirements + self.operand = operand + # FutureField requirements + self.domain = operand.domain + tensorsig = list(operand.tensorsig) + tensorsig[index] = comp + self.tensorsig = tuple(tensorsig) + self.dtype = operand.dtype + # Slicing for component + comp_slice = slice(self.comp_subaxis, self.comp_subaxis+comp.dim) + self.comp_slices = tuple([None]*index + [comp_slice]) + + def check_conditions(self): + """Check that operands are in a proper layout.""" + # Any layout + return True + + def enforce_conditions(self): + """Require operands to be in a proper layout.""" + # Any layout + pass + + def subproblem_matrix(self, subproblem): + # Build identities for each tangent space + factors = [sparse.identity(cs.dim, format='csr') for cs in self.operand.tensorsig] + factors.append(sparse.identity(subproblem.coeff_size(self.domain), format='csr')) + # Build selection matrix for selected coord + index_factor = np.zeros((self.comp.dim, self.coordsys.dim)) + for i in range(self.comp.dim): + index_factor[i, self.comp_subaxis+i] = 1 + # Replace indexed factor with selection matrix + factors[self.index] = index_factor + return reduce(sparse.kron, factors, 1).tocsr() + + def operate(self, out): + """Perform operation.""" + arg0 = self.args[0] + # Set output layout + out.preset_layout(arg0.layout) + # Copy specified comonent + out.data[:] = arg0.data[self.comp_slices] + + @alias("div") class Divergence(LinearOperator, metaclass=MultiClass): @@ -3345,12 +3395,15 @@ def new_operand(self, operand, **kw): class CartesianDivergence(Divergence): - cs_type = coords.CartesianCoordinates + cs_type = (coords.CartesianCoordinates, coords.Coordinate) def __init__(self, operand, index=0, out=None): coordsys = operand.tensorsig[index] + # 
Wrap to handle gradient wrt single coordinate + if isinstance(coordsys, coords.Coordinate): + coordsys = coords.CartesianCoordinates(coordsys.name) # Get components - comps = [CartesianComponent(operand, index=index, coord=c) for c in coordsys.coords] + comps = [CartesianComponent(operand, index=index, comp=c) for c in coordsys.coords] comps = [Differentiate(comp, c) for comp, c in zip(comps, coordsys.coords)] arg = sum(comps) LinearOperator.__init__(self, arg, out=out) @@ -3392,6 +3445,55 @@ def operate(self, out): np.copyto(out.data, arg0.data) +class DirectProductDivergence(Divergence): + + cs_type = coords.DirectProduct + + def __init__(self, operand, index=0, out=None): + coordsys = operand.tensorsig[index] + # Get components + comps = [DirectProductComponent(operand, index=index, comp=cs) for cs in coordsys.coordsystems] + comps = [Divergence(comp, index) for comp, cs in zip(comps, coordsys.coordsystems)] + arg = sum(comps) + LinearOperator.__init__(self, arg, out=out) + self.index = index + self.coordsys = coordsys + # LinearOperator requirements + self.operand = operand + # FutureField requirements + self.domain = arg.domain + self.tensorsig = arg.tensorsig + self.dtype = arg.dtype + + def matrix_dependence(self, *vars): + return self.args[0].matrix_dependence(*vars) + + def matrix_coupling(self, *vars): + return self.args[0].matrix_coupling(*vars) + + def check_conditions(self): + """Check that operands are in a proper layout.""" + # Any layout (addition is done) + return True + + def enforce_conditions(self): + """Require operands to be in a proper layout.""" + # Any layout (addition is done) + pass + + def subproblem_matrix(self, subproblem): + """Build operator matrix for a specific subproblem.""" + return self.args[0].expression_matrices(subproblem, [self.operand])[self.operand] + + def operate(self, out): + """Perform operation.""" + # OPTIMIZE: this has an extra copy + arg0 = self.args[0] + # Set output layout + out.preset_layout(arg0.layout) + 
np.copyto(out.data, arg0.data) + + class SphericalDivergence(Divergence, SphericalEllOperator): cs_type = coords.SphericalCoordinates @@ -3534,6 +3636,7 @@ def _check_args(cls, operand, index=0, out=None): def new_operand(self, operand, **kw): return Curl(operand, index=self.index, **kw) + class CartesianCurl(Curl): cs_type = coords.CartesianCoordinates @@ -3543,7 +3646,7 @@ def __init__(self, operand, index=0, out=None): if coordsys.dim != 3: raise ValueError("CartesianCurl is only implemented for 3D vector fields. For 2D, use skew gradient.") # Get components - comps = [CartesianComponent(operand, index=index, coord=c) for c in coordsys.coords] + comps = [CartesianComponent(operand, index=index, comp=c) for c in coordsys.coords] x_comp = Differentiate(comps[2], coordsys.coords[1]) - Differentiate(comps[1], coordsys.coords[2]) y_comp = Differentiate(comps[0], coordsys.coords[2]) - Differentiate(comps[2], coordsys.coords[0]) z_comp = Differentiate(comps[1], coordsys.coords[0]) - Differentiate(comps[0], coordsys.coords[1]) @@ -3902,9 +4005,12 @@ def new_operand(self, operand, **kw): class CartesianLaplacian(Laplacian): - cs_type = coords.CartesianCoordinates + cs_type = (coords.CartesianCoordinates, coords.Coordinate) def __init__(self, operand, coordsys, out=None): + # Wrap to handle gradient wrt single coordinate + if isinstance(coordsys, coords.Coordinate): + coordsys = coords.CartesianCoordinates(coordsys.name) parts = [Differentiate(Differentiate(operand, c), c) for c in coordsys.coords] arg = sum(parts) LinearOperator.__init__(self, arg, out=out) @@ -3945,6 +4051,51 @@ def operate(self, out): np.copyto(out.data, arg0.data) +class DirectProductLaplacian(Laplacian): + + cs_type = coords.DirectProduct + + def __init__(self, operand, coordsys, out=None): + parts = [Laplacian(operand, cs) for cs in coordsys.coordsystems] + arg = sum(parts) + LinearOperator.__init__(self, arg, out=out) + self.coordsys = coordsys + # LinearOperator requirements + self.operand = 
operand + # FutureField requirements + self.domain = arg.domain + self.tensorsig = arg.tensorsig + self.dtype = arg.dtype + + def matrix_dependence(self, *vars): + return self.args[0].matrix_dependence(*vars) + + def matrix_coupling(self, *vars): + return self.args[0].matrix_coupling(*vars) + + def subproblem_matrix(self, subproblem): + """Build operator matrix for a specific subproblem.""" + return self.args[0].expression_matrices(subproblem, [self.operand])[self.operand] + + def check_conditions(self): + """Check that operands are in a proper layout.""" + # Any layout (addition is done) + return True + + def enforce_conditions(self): + """Require operands to be in a proper layout.""" + # Any layout (addition is done) + pass + + def operate(self, out): + """Perform operation.""" + # OPTIMIZE: this has an extra copy + arg0 = self.args[0] + # Set output layout + out.preset_layout(arg0.layout) + np.copyto(out.data, arg0.data) + + class SphericalLaplacian(Laplacian, SphericalEllOperator): cs_type = coords.SphericalCoordinates diff --git a/dedalus/tests/test_cartesian_operators.py b/dedalus/tests/test_cartesian_operators.py index 719b12e6..555ec547 100644 --- a/dedalus/tests/test_cartesian_operators.py +++ b/dedalus/tests/test_cartesian_operators.py @@ -301,9 +301,9 @@ def test_curl_implicit_FFF(basis, N, dealias, dtype): problem.add_equation("curl(u) + grad(phi) + tau1 = g") problem.add_equation("div(u) + tau2 = 0") problem.add_equation("integ(phi) = 0") - problem.add_equation("integ(comp(u,index=0,coord=c['x'])) = 0") - problem.add_equation("integ(comp(u,index=0,coord=c['y'])) = 0") - problem.add_equation("integ(comp(u,index=0,coord=c['z'])) = 0") + problem.add_equation("integ(comp(u,index=0,comp=c['x'])) = 0") + problem.add_equation("integ(comp(u,index=0,comp=c['y'])) = 0") + problem.add_equation("integ(comp(u,index=0,comp=c['z'])) = 0") solver = problem.build_solver() solver.solve() assert np.allclose(u['c'], f['c']) From d04cf48b9dd2d57892556833e9c54afc2308e7a5 
Mon Sep 17 00:00:00 2001 From: "Keaton J. Burns" Date: Fri, 15 Dec 2023 19:44:09 -0500 Subject: [PATCH 09/19] Minor tweaks for NCCs --- dedalus/core/basis.py | 3 ++- dedalus/core/coords.py | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/dedalus/core/basis.py b/dedalus/core/basis.py index 5ad8859d..a4c5852e 100644 --- a/dedalus/core/basis.py +++ b/dedalus/core/basis.py @@ -2560,7 +2560,8 @@ def radius_multiplication_matrix(self, m, spintotal, order, d): @classmethod def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_basis, coeffs, ncc_comp, arg_comp, out_comp, ncc_tensorsig, arg_tensorsig, out_tensorsig, cutoff=1e-6): - m = subproblem.group[0] # HACK + first_axis = subproblem.dist.first_axis(out_basis) + m = subproblem.group[first_axis] spintotal_ncc = out_basis.spintotal(ncc_tensorsig, ncc_comp) spintotal_arg = out_basis.spintotal(arg_tensorsig, arg_comp) spintotal_out = out_basis.spintotal(out_tensorsig, out_comp) diff --git a/dedalus/core/coords.py b/dedalus/core/coords.py index be9a135c..debdb533 100644 --- a/dedalus/core/coords.py +++ b/dedalus/core/coords.py @@ -96,6 +96,8 @@ def backward_vector_intertwiner(self, subaxis, group): class DirectProduct(SeparableIntertwiners, CoordinateSystem): + curvilinear = True + def __init__(self, *coordsystems): for cs in coordsystems: if not isinstance(cs, SeparableIntertwiners): From 6e46136dab53c9d08cc5e1742b6be5b268a69af2 Mon Sep 17 00:00:00 2001 From: "Keaton J. Burns" Date: Tue, 19 Dec 2023 20:45:03 -0500 Subject: [PATCH 10/19] Cylinder curl working. Some cylinder tests and NCC cleanup. 
--- dedalus/core/basis.py | 72 ++----- dedalus/core/coords.py | 34 +++- dedalus/core/distributor.py | 23 ++- dedalus/core/operators.py | 104 +++++++++-- dedalus/tests/test_cylinder_calculus.py | 228 +++++++++++++++++++++++ dedalus/tests/test_cylinder_operators.py | 174 +++++++++++++++++ dedalus/tests/test_jacobi_operators.py | 2 +- 7 files changed, 562 insertions(+), 75 deletions(-) create mode 100644 dedalus/tests/test_cylinder_calculus.py create mode 100644 dedalus/tests/test_cylinder_operators.py diff --git a/dedalus/core/basis.py b/dedalus/core/basis.py index a4c5852e..59b64300 100644 --- a/dedalus/core/basis.py +++ b/dedalus/core/basis.py @@ -117,6 +117,15 @@ def clone_with(self, **new_kw): def constant(self): return tuple(False for i in range(self.dim)) + def __matmul__(self, other): + # NCC (self) * operand (other) + if other is None: + # All multiplications by constants return same basis + return self + else: + # Call __rmatmul__ codepath for operand basis + return other.__rmatmul__(self) + # def __repr__(self): # return '<%s %i>' %(self.__class__.__name__, id(self)) @@ -537,13 +546,6 @@ def __mul__(self, other): return other.__mul__(self) return NotImplemented - def __matmul__(self, other): - # NCC (self) * operand (other) - if other is None: - return self.__rmatmul__(other) - else: - return other.__rmatmul__(self) - def __rmatmul__(self, other): # NCC (other) * operand (self) if other is None or other is self: @@ -872,12 +874,6 @@ def __mul__(self, other): # TODO: support different sizes return NotImplemented - def __matmul__(self, other): - if other is None: - return self.__rmatmul__(other) - else: - return other.__rmatmul__(self) - def __rmatmul__(self, other): if other is None or other is self: return self @@ -1720,7 +1716,7 @@ def spin_weights(self, tensorsig): @CachedMethod def spintotal(self, tensorsig, spinindex): - return self.spin_weights(tensorsig)[spinindex] + return int(self.spin_weights(tensorsig)[spinindex]) class PolarBasis(SpinBasis): @@ 
-2117,8 +2113,8 @@ def __mul__(self, other): return self.clone_with(shape=shape, k=k) return NotImplemented - def __matmul__(self, other): - # NCC (self) * operand (other) + def __rmatmul__(self, other): + # NCC (other) * operand (self) # Same as __mul__ since conversion only needs to be upwards in k return self.__mul__(other) @@ -2398,15 +2394,15 @@ def __mul__(self, other): return self.clone_with(shape=shape, k=k) return NotImplemented - def __matmul__(self, other): - # NCC (self) * operand (other) + def __rmatmul__(self, other): + # NCC (other) * operand (self) if other is None: return self if isinstance(other, DiskBasis): if self.grid_params == other.grid_params: # Everything matches except shape and k shape = tuple(np.maximum(self.shape, other.shape)) - k = other.k # use operand's k value to minimize conversions + k = self.k # use operand's k value to minimize conversions return self.clone_with(shape=shape, k=k) return NotImplemented @@ -2931,16 +2927,9 @@ def __mul__(self, other): return self.clone_with(shape=shape) return NotImplemented - def __matmul__(self, other): - """NCC is self. - - NB: This does not support NCCs with different number of modes than the fields. 
- """ - if other is None: - return self - if isinstance(other, type(self)): - return other - return NotImplemented + def __rmatmul__(self, other): + # NCC (other) * operand (self) + return self.__mul__(other) # @staticmethod # @CachedAttribute @@ -3766,10 +3755,8 @@ def __mul__(self, other): return ShellBasis(**args) return NotImplemented - def __matmul__(self, other): - return other.__rmatmul__(self) - def __rmatmul__(self, other): + # NCC (other) * operand (self) if other is None: return self if isinstance(other, ShellRadialBasis): @@ -3987,13 +3974,8 @@ def __mul__(self, other): return self.clone_with(radial_size=radial_size, k=k) return NotImplemented - def __matmul__(self, other): - if other is None: - return self - else: - return other.__rmatmul__(self) - def __rmatmul__(self, other): + # NCC (other) * operand (self) if other is None: return self if isinstance(other, BallRadialBasis): @@ -4472,13 +4454,6 @@ def __mul__(self, other): return self.clone_with(k=k) return NotImplemented - def __matmul__(self, other): - # NCC (self) * operand (other) - if other is None: - return self.__rmatmul__(other) - else: - return other.__rmatmul__(self) - def __rmatmul__(self, other): # NCC (other) * operand (self) if other is None: @@ -4696,13 +4671,6 @@ def __mul__(self, other): return self.clone_with(k=k) return NotImplemented - def __matmul__(self, other): - # NCC (self) * operand (other) - if other is None: - return self.__rmatmul__(other) - else: - return other.__rmatmul__(self) - def __rmatmul__(self, other): # NCC (other) * operand (self) if other is None: diff --git a/dedalus/core/coords.py b/dedalus/core/coords.py index debdb533..b4d78b6b 100644 --- a/dedalus/core/coords.py +++ b/dedalus/core/coords.py @@ -77,8 +77,10 @@ def __str__(self): return self.name def __eq__(self, other): - if self.name == other.name: return True - else: return False + if type(self) is type(other): + if self.name == other.name: + return True + return False def __hash__(self): return 
id(self) @@ -96,9 +98,7 @@ def backward_vector_intertwiner(self, subaxis, group): class DirectProduct(SeparableIntertwiners, CoordinateSystem): - curvilinear = True - - def __init__(self, *coordsystems): + def __init__(self, *coordsystems, right_handed=None): for cs in coordsystems: if not isinstance(cs, SeparableIntertwiners): raise NotImplementedError("Direct products only implemented for separable intertwiners.") @@ -107,6 +107,27 @@ def __init__(self, *coordsystems): if len(set(self.coords)) < len(self.coords): raise ValueError("Cannot repeat coordinates in DirectProduct.") self.dim = sum(cs.dim for cs in coordsystems) + if self.dim == 3: + if self.curvilinear: + if right_handed is None: + right_handed = False + else: + if right_handed is None: + right_handed = True + self.right_handed = right_handed + + @CachedAttribute + def subaxis_by_cs(self): + subaxis_dict = {} + subaxis = 0 + for cs in self.coordsystems: + subaxis_dict[cs] = subaxis + subaxis += cs.dim + return subaxis_dict + + @CachedAttribute + def curvilinear(self): + return any(cs.curvilinear for cs in self.coordsystems) def forward_vector_intertwiner(self, subaxis, group): factors = [] @@ -145,7 +166,8 @@ def __init__(self, *names, right_handed=True): self.names = names self.dim = len(names) self.coords = tuple(Coordinate(name, cs=self) for name in names) - self.right_handed = right_handed + if self.dim == 3: + self.right_handed = right_handed self.default_nonconst_groups = (1,) * self.dim def __str__(self): diff --git a/dedalus/core/distributor.py b/dedalus/core/distributor.py index 228cc62b..3ba95ed2 100644 --- a/dedalus/core/distributor.py +++ b/dedalus/core/distributor.py @@ -9,7 +9,7 @@ from collections import OrderedDict from math import prod -from .coords import CoordinateSystem +from .coords import CoordinateSystem, DirectProduct from ..tools.array import reshape_vector from ..tools.cache import CachedMethod, CachedAttribute from ..tools.config import config @@ -228,12 +228,25 @@ def 
TensorField(self, *args, **kw): from .field import TensorField return TensorField(self, *args, **kw) - def IdentityTensor(self, coordsys): + def IdentityTensor(self, coordsys_in, coordsys_out=None, dtype=None): """Identity tensor field.""" + if coordsys_out is None: + coordsys_out = coordsys_in from .field import TensorField - I = TensorField(self, (coordsys, coordsys)) - for i in range(coordsys.dim): - I['g'][i, i] = 1 + I = TensorField(self, (coordsys_out, coordsys_in), dtype=dtype) + if coordsys_in is coordsys_out: + for i in range(coordsys_in.dim): + I['g'][i, i] = 1 + elif isinstance(coordsys_in, DirectProduct) and (coordsys_out in coordsys_in.coordsystems): + i0 = coordsys_in.subaxis_by_cs[coordsys_out] + for i in range(coordsys_out.dim): + I['g'][i, i0+i] = 1 + elif isinstance(coordsys_out, DirectProduct) and (coordsys_in in coordsys_out.coordsystems): + i0 = coordsys_out.subaxis_by_cs[coordsys_in] + for i in range(coordsys_in.dim): + I['g'][i0+i, i] = 1 + else: + raise ValueError("Unsupported coordinate systems.") return I def local_grid(self, basis, scale=None): diff --git a/dedalus/core/operators.py b/dedalus/core/operators.py index c64a8be5..f4a98e68 100644 --- a/dedalus/core/operators.py +++ b/dedalus/core/operators.py @@ -27,7 +27,7 @@ from ..tools.exceptions import SymbolicParsingError from ..tools.exceptions import UndefinedParityError from ..tools.exceptions import SkipDispatchException -from ..tools.general import unify, unify_attributes +from ..tools.general import unify, unify_attributes, is_complex_dtype # Public interface __all__ = ['GeneralFunction', @@ -1141,6 +1141,9 @@ def _preprocess_args(cls, operand, coord=None): # Split Cartesian coordinates if isinstance(coord, coords.CartesianCoordinates): coord = coord.coords + # Split DirectProduct coordinates + if isinstance(coord, coords.DirectProduct): + coord = coord.coordsystems # Recurse over multiple coordinates if isinstance(coord, (tuple, list)): if len(coord) > 1: @@ -1830,6 +1833,11 @@ 
def subproblem_matrix(self, subproblem): return matrix +class DirectProductTrace(Trace): + + cs_type = coords.DirectProduct + + @alias("transpose", "trans") class TransposeComponents(LinearOperator, metaclass=MultiClass): @@ -2059,10 +2067,7 @@ def subproblem_matrix(self, subproblem): factors = [sparse.identity(cs.dim, format='csr') for cs in self.operand.tensorsig] factors.append(sparse.identity(subproblem.coeff_size(self.domain), format='csr')) # Substitute skew matrix - if self.coordsys.right_handed: - skew = np.array([[0, -1,], [1, 0]]) - else: - skew = np.array([[0, 1,], [-1, 0]]) + skew = np.array([[0, -1,], [1, 0]]) factors[self.index] = skew return reduce(sparse.kron, factors, 1).tocsr() @@ -2075,12 +2080,8 @@ def operate(self, out): if arg.data.size: sx = axslice(self.index, 0, 1) sy = axslice(self.index, 1, 2) - out.data[sx] = arg.data[sy] + out.data[sx] = - arg.data[sy] out.data[sy] = arg.data[sx] - if self.coordsys.right_handed: - out.data[sx] *= -1 - else: - out.data[sy] *= -1 class SpinSkew(Skew): @@ -2133,7 +2134,7 @@ def operate(self, out): arg_minus = arg.data[minus] out_plus = out.data[plus] out_minus = out.data[minus] - if np.iscomplexobj(self.dtype()): + if is_complex_dtype(self.dtype): # out = 1j * s * arg np.multiply(arg_plus, 1j, out=out_plus) np.multiply(arg_minus, -1j, out=out_minus) @@ -3694,7 +3695,88 @@ def operate(self, out): """Perform operation.""" # OPTIMIZE: this has an extra copy arg0 = self.args[0] + # Set output layout + out.preset_layout(arg0.layout) + np.copyto(out.data, arg0.data) + + +class DirectProductCurl(Curl): + cs_type = coords.DirectProduct + + def __init__(self, operand, index=0, out=None): + coordsys = operand.tensorsig[index] + if coordsys.dim != 3: + raise ValueError("DirectProductCurl is only implemented for 3D vector fields.") + if len(operand.tensorsig) > 1 or index != 0: + raise ValueError("DirectProductCurl is only implemented for vector fields.") + # Get components + comps = [DirectProductComponent(operand, 
index=index, comp=cs) for cs in coordsys.coordsystems] + if comps[0].tensorsig[index].dim == 1 and comps[1].tensorsig[index].dim == 2: + az = 0 + uz, uh = comps + cz, ch = coordsys.coordsystems + elif comps[0].dim == 2 and comps[1].dim == 1: + az = 2 + uh, uz = comps + ch, cz = coordsys.coordsystems + else: + raise ValueError("DirectProductCurl is only implemented for direct product of 1D and 2D coordinate systems.") + # Compute curl components + # curl = ex*(dy(uz) - dz(uy)) + ey*(dz(ux) - dx(uz)) + ez*(dx(uy) - dy(ux)) + # = ex*dy(uz) - ex*dz(uy) + ey*dz(ux) - ey*dx(uz) + ez*dx(uy) - ez*dy(ux) + # = dz(ux*ey - uy*ex) + (ex*dy - ey*dx)(uz) + # = dz(skew(uh)) - skew(grad_h(uz)) - ez*div(skew(uh)) + ez1 = operand.dist.VectorField(cz, name='ez', dtype=operand.dtype) + ez1['g'][0] = 1 + ez3 = operand.dist.VectorField(coordsys, name='ez', dtype=operand.dtype) + ez3['g'][az] = 1 + # This requires transposing different coordsystems, which is not yet supported + #curl_h = Differentiate(Skew(uh, index=index), cz) - ez1@TransposeComponents(Skew(Gradient(uz, ch), index=0), indices=(0,index+1)) + #curl_z = - Divergence(TransposeComponents(ez3*Skew(uh, index=index), indices=(0,index+1)), index=0) + curl_h = Differentiate(Skew(uh), cz) - Skew(Gradient(ez1@uz, ch), index=0) + curl_z = - ez3*Divergence(Skew(uh)) + I = operand.dist.IdentityTensor(ch, coordsys, dtype=operand.dtype) + arg = I@curl_h + curl_z + if coordsys.curvilinear == coordsys.right_handed: + # Skew implements the correct thing by default for left-handed curvilinear + # and right-handed Cartesian coordinate systems + arg *= -1 + LinearOperator.__init__(self, arg, out=out) + self.index = index + self.coordsys = coordsys + # LinearOperator requirements + self.operand = operand + # FutureField requirements + self.domain = arg.domain + self.tensorsig = arg.tensorsig + self.dtype = arg.dtype + self.expression_matrices = arg.expression_matrices + + def matrix_dependence(self, *vars): + return 
self.args[0].matrix_dependence(*vars) + + def matrix_coupling(self, *vars): + return self.args[0].matrix_coupling(*vars) + + def subproblem_matrix(self, subproblem): + """Build operator matrix for a specific subproblem.""" + pass + + def check_conditions(self): + """Check that operands are in a proper layout.""" + # Any layout (addition is done) + return True + + def enforce_conditions(self): + """Require operands to be in a proper layout.""" + # Any layout (addition is done) + pass + + def operate(self, out): + """Perform operation.""" + # OPTIMIZE: this has an extra copy + arg0 = self.args[0] # Set output layout out.preset_layout(arg0.layout) np.copyto(out.data, arg0.data) diff --git a/dedalus/tests/test_cylinder_calculus.py b/dedalus/tests/test_cylinder_calculus.py new file mode 100644 index 00000000..24376a5f --- /dev/null +++ b/dedalus/tests/test_cylinder_calculus.py @@ -0,0 +1,228 @@ +"""Cylinder tests for gradient, divergence, curl, laplacian.""" + +import pytest +import numpy as np +from dedalus.core import coords, distributor, basis, field, operators, arithmetic +from dedalus.tools.cache import CachedFunction + + +length = 1.88 +radius_disk = 1.5 +radii_annulus = (0.5, 3) + +@CachedFunction +def build_periodic_cylinder(Nz, Nphi, Nr, alpha, k, dealias, dtype): + cz = coords.Coordinate('z') + cp = coords.PolarCoordinates('phi', 'r') + c = coords.DirectProduct(cz, cp) + d = distributor.Distributor(c) + bz = basis.Fourier(cz, Nz, bounds=(0, length), dealias=dealias, dtype=dtype) + bp = basis.DiskBasis(cp, (Nphi, Nr), dtype=dtype, radius=radius_disk, alpha=alpha, k=k, dealias=dealias) + z, phi, r = d.local_grids(bz, bp, scales=dealias) + x, y = cp.cartesian(phi, r) + return c, d, (bz, bp), z, phi, r, x, y + +@CachedFunction +def build_periodic_cylindrical_annulus(Nz, Nphi, Nr, alpha, k, dealias, dtype): + cz = coords.Coordinate('z') + cp = coords.PolarCoordinates('phi', 'r') + c = coords.DirectProduct(cz, cp) + d = distributor.Distributor(c) + bz = 
basis.Fourier(cz, Nz, bounds=(0, length), dealias=dealias, dtype=dtype) + bp = basis.AnnulusBasis(cp, (Nphi, Nr), dtype=dtype, radii=radii_annulus, alpha=alpha, k=k, dealias=dealias) + z, phi, r = d.local_grids(bz, bp, scales=dealias) + x, y = cp.cartesian(phi, r) + return c, d, (bz, bp), z, phi, r, x, y + + +Nz_range = [8] +Nphi_range = [16] +Nr_range = [8] +alpha_range = [0] +k_range = [0] +dealias_range = [1, 3/2] +basis_range = [build_periodic_cylinder, build_periodic_cylindrical_annulus] +dtype_range = [np.float64, np.complex128] + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +def test_gradient_scalar(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + f = field.Field(dist=d, bases=b, dtype=dtype) + f.preset_scales(dealias) + kz = 4 * np.pi / length + f['g'] = 3*x**2 + 2*y + np.sin(kz*z) + u = operators.Gradient(f, c).evaluate() + zero_grid = np.zeros((z.size, phi.size, r.size)) + ex = [0, -np.sin(phi), np.cos(phi)] + ey = [0, np.cos(phi), np.sin(phi)] + ez = [1, 0, 0] + ex = np.array([zero_grid+i for i in ex]) + ey = np.array([zero_grid+i for i in ey]) + ez = np.array([zero_grid+i for i in ez]) + ug = 6*x*ex + 2*ey + kz*np.cos(kz*z)*ez + assert np.allclose(u['g'], ug) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +def test_gradient_vector(Nz, Nphi, Nr, 
alpha, k, dealias, dtype, basis): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + f = field.Field(dist=d, bases=b, dtype=dtype) + f.preset_scales(dealias) + kz = 4 * np.pi / length + f['g'] = 3*x**4 + 2*y*x + np.sin(kz*z)*x + grad = lambda A: operators.Gradient(A, c) + T = grad(grad(f)).evaluate() + zero_grid = np.zeros((z.size, phi.size, r.size)) + ex = [0, -np.sin(phi), np.cos(phi)] + ey = [0, np.cos(phi), np.sin(phi)] + ez = [1, 0, 0] + ex = np.array([zero_grid+i for i in ex]) + ey = np.array([zero_grid+i for i in ey]) + ez = np.array([zero_grid+i for i in ez]) + exex = ex[:,None, ...] * ex[None,...] + exey = ex[:,None, ...] * ey[None,...] + exez = ex[:,None, ...] * ez[None,...] + eyex = ey[:,None, ...] * ex[None,...] + eyey = ey[:,None, ...] * ey[None,...] + eyez = ey[:,None, ...] * ez[None,...] + ezex = ez[:,None, ...] * ex[None,...] + ezey = ez[:,None, ...] * ey[None,...] + ezez = ez[:,None, ...] * ez[None,...] + Tg = 36*x**2*exex + 2*(exey + eyex) + kz*np.cos(kz*z)*(exez+ezex) - kz**2*np.sin(kz*z)*x*ezez + assert np.allclose(T['g'], Tg) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +def test_divergence_vector(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + f = field.Field(dist=d, bases=b, dtype=dtype) + f.preset_scales(dealias) + kz = 4 * np.pi / length + f['g'] = 3*x**4 + 2*y*x + np.sin(kz*z)*x + grad = lambda A: operators.Gradient(A, c) + div = lambda A: operators.Divergence(A) + S = div(grad(f)).evaluate() + Sg = 36*x**2 - kz**2*np.sin(kz*z)*x + assert np.allclose(S['g'], Sg) + + +@pytest.mark.parametrize('Nz', Nz_range) 
+@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +def test_divergence_tensor(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + v = field.Field(dist=d, tensorsig=(c,), bases=b, dtype=dtype) + v.preset_scales(dealias) + zero_grid = np.zeros((z.size, phi.size, r.size)) + ex = [0, -np.sin(phi), np.cos(phi)] + ey = [0, np.cos(phi), np.sin(phi)] + ez = [1, 0, 0] + ex = np.array([zero_grid+i for i in ex]) + ey = np.array([zero_grid+i for i in ey]) + ez = np.array([zero_grid+i for i in ez]) + kz = 4 * np.pi / length + v['g'] = 4*x**3*ey + 3*y**2*ey + x*y*np.sin(kz*z)*ez + grad = lambda A: operators.Gradient(A, c) + div = lambda A: operators.Divergence(A) + U = div(grad(v)).evaluate() + Ug = (24*x + 6)*ey - kz**2*x*y*np.sin(kz*z)*ez + assert np.allclose(U['g'], Ug) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +def test_curl_vector(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + v = field.Field(dist=d, tensorsig=(c,), bases=b, dtype=dtype) + v.preset_scales(dealias) + zero_grid = np.zeros((z.size, phi.size, r.size)) + ex = [0, -np.sin(phi), np.cos(phi)] + ey = [0, np.cos(phi), np.sin(phi)] + ez = [1, 0, 0] + ex = np.array([zero_grid+i for i in ex]) + ey = np.array([zero_grid+i for i in ey]) + ez = np.array([zero_grid+i for i in ez]) + kz 
= 4 * np.pi / length + v['g'] = 4*x**3*ey + 3*y**2*ey + x*y*np.sin(kz*z)*ez + u = operators.Curl(v).evaluate() + u_true = 12*x**2*ez + x*np.sin(kz*z)*ex - y*np.sin(kz*z)*ey + assert np.allclose(u['g'], u_true) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +def test_laplacian_scalar(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + f = field.Field(dist=d, bases=b, dtype=dtype) + f.preset_scales(dealias) + kz = 4 * np.pi / length + f['g'] = x**4 + 2*y**4 + np.sin(kz*z)*x + h = operators.Laplacian(f, c).evaluate() + hg = 12*x**2 + 24*y**2 - kz**2*np.sin(kz*z)*x + assert np.allclose(h['g'], hg) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +def test_laplacian_vector(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + v = field.Field(dist=d, tensorsig=(c,), bases=b, dtype=dtype) + v.preset_scales(dealias) + zero_grid = np.zeros((z.size, phi.size, r.size)) + ex = [0, -np.sin(phi), np.cos(phi)] + ey = [0, np.cos(phi), np.sin(phi)] + ez = [1, 0, 0] + ex = np.array([zero_grid+i for i in ex]) + ey = np.array([zero_grid+i for i in ey]) + ez = np.array([zero_grid+i for i in ez]) + kz = 4 * np.pi / length + v['g'] = 4*x**3*ey + 3*y**2*ey + np.sin(kz*z)*x*ez + U = 
operators.Laplacian(v,c).evaluate() + Ug = (24*x + 6)*ey - kz**2*np.sin(kz*z)*x*ez + assert np.allclose(U['g'], Ug) + diff --git a/dedalus/tests/test_cylinder_operators.py b/dedalus/tests/test_cylinder_operators.py new file mode 100644 index 00000000..044fe73f --- /dev/null +++ b/dedalus/tests/test_cylinder_operators.py @@ -0,0 +1,174 @@ +"""Cylinder tests for trace, transpose, integrate, average.""" + +import pytest +import numpy as np +from dedalus.core import coords, distributor, basis, field, operators, arithmetic, problems, solvers +from dedalus.tools.cache import CachedFunction +from dedalus.core.basis import DiskBasis + + +length = 1.88 +radius_disk = 1.5 +radii_annulus = (0.5, 3) + + +@CachedFunction +def build_periodic_cylinder(Nz, Nphi, Nr, alpha, k, dealias, dtype): + cz = coords.Coordinate('z') + cp = coords.PolarCoordinates('phi', 'r') + c = coords.DirectProduct(cz, cp) + d = distributor.Distributor(c, dtype=dtype) + bz = basis.Fourier(cz, Nz, bounds=(0, length), dealias=dealias, dtype=dtype) + bp = basis.DiskBasis(cp, (Nphi, Nr), dtype=dtype, radius=radius_disk, alpha=alpha, k=k, dealias=dealias) + z, phi, r = d.local_grids(bz, bp, scales=dealias) + x, y = cp.cartesian(phi, r) + return c, d, (bz, bp), z, phi, r, x, y + + +@CachedFunction +def build_periodic_cylindrical_annulus(Nz, Nphi, Nr, alpha, k, dealias, dtype): + cz = coords.Coordinate('z') + cp = coords.PolarCoordinates('phi', 'r') + c = coords.DirectProduct(cz, cp) + d = distributor.Distributor(c, dtype=dtype) + bz = basis.Fourier(cz, Nz, bounds=(0, length), dealias=dealias, dtype=dtype) + bp = basis.AnnulusBasis(cp, (Nphi, Nr), dtype=dtype, radii=radii_annulus, alpha=alpha, k=k, dealias=dealias) + z, phi, r = d.local_grids(bz, bp, scales=dealias) + x, y = cp.cartesian(phi, r) + return c, d, (bz, bp), z, phi, r, x, y + + +Nz_range = [8] +Nphi_range = [16] +Nr_range = [8] +alpha_range = [0] +k_range = [0] +dealias_range = [1, 3/2] +basis_range = [build_periodic_cylinder, 
build_periodic_cylindrical_annulus] +dtype_range = [np.float64, np.complex128] + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +def test_explicit_trace_tensor(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + T = d.TensorField((c, c), bases=b) + T.fill_random('g') + T.low_pass_filter(scales=0.5) + f_true = T['g'][0,0] + T['g'][1,1] + T['g'][2,2] + f = operators.Trace(T).evaluate() + f.change_scales(1) + assert np.allclose(f['g'], f_true) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +def test_implicit_trace_tensor(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + I = d.IdentityTensor(c) + f = d.Field(bases=b) + g = d.Field(bases=b) + g.fill_random('g') + g.low_pass_filter(scales=0.5) + problem = problems.LBVP([f]) + problem.add_equation((operators.Trace(I*f), 3*g)) + solver = solvers.LinearBoundaryValueSolver(problem) + solver.solve() + assert np.allclose(f['c'], g['c']) + + +# @pytest.mark.parametrize('basis', [build_disk, build_annulus]) +# @pytest.mark.parametrize('Nphi', Nphi_range) +# @pytest.mark.parametrize('Nr', Nr_range) +# @pytest.mark.parametrize('k', k_range) +# @pytest.mark.parametrize('dealias', dealias_range) +# @pytest.mark.parametrize('dtype', [np.float64, 
np.complex128]) +# @pytest.mark.parametrize('layout', ['c', 'g']) +# def test_transpose_explicit(basis, Nphi, Nr, k, dealias, dtype, layout): +# c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) +# # Random tensor field +# f = d.TensorField((c, c), bases=b) +# f.fill_random(layout='g') +# f.low_pass_filter(scales=0.75) +# # Evaluate transpose +# f.change_layout(layout) +# g = operators.transpose(f).evaluate() +# assert np.allclose(g['g'], np.transpose(f['g'], (1,0,2,3))) + + +# @pytest.mark.parametrize('basis', [build_disk, build_annulus]) +# @pytest.mark.parametrize('Nphi', Nphi_range) +# @pytest.mark.parametrize('Nr', Nr_range) +# @pytest.mark.parametrize('k', k_range) +# @pytest.mark.parametrize('dealias', dealias_range) +# @pytest.mark.parametrize('dtype', [np.float64, np.complex128]) +# def test_transpose_implicit(basis, Nphi, Nr, k, dealias, dtype): +# c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) +# # Random tensor field +# f = d.TensorField((c, c), bases=b) +# f.fill_random(layout='g') +# f.low_pass_filter(scales=0.75) +# # Transpose LBVP +# u = d.TensorField((c, c), bases=b) +# problem = problems.LBVP([u], namespace=locals()) +# problem.add_equation("trans(u) = trans(f)") +# solver = problem.build_solver() +# solver.solve() +# u.change_scales(dealias) +# f.change_scales(dealias) +# assert np.allclose(u['g'], f['g']) + + +# @pytest.mark.parametrize('Nphi', [16]) +# @pytest.mark.parametrize('Nr', [10]) +# @pytest.mark.parametrize('k', [0, 1, 2, 5]) +# @pytest.mark.parametrize('dealias', dealias_range) +# @pytest.mark.parametrize('basis', [build_disk, build_annulus]) +# @pytest.mark.parametrize('dtype', [np.float64, np.complex128]) +# @pytest.mark.parametrize('n', [0, 1, 2]) +# def test_integrate_scalar(Nphi, Nr, k, dealias, dtype, basis, n): +# c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) +# f = field.Field(dist=d, bases=(b,), dtype=dtype) +# f.preset_scales(dealias) +# f['g'] = r**(2*n) +# h = operators.Integrate(f, 
c).evaluate() +# if isinstance(b, DiskBasis): +# r_inner, r_outer = 0, b.radius +# else: +# r_inner, r_outer = b.radii +# hg = 2 * np.pi * (r_outer**(2 + 2*n) - r_inner**(2 + 2*n)) / (2 + 2*n) +# assert np.allclose(h['g'], hg) + + +# @pytest.mark.parametrize('Nphi', [16]) +# @pytest.mark.parametrize('Nr', [10]) +# @pytest.mark.parametrize('k', [0, 1, 2, 5]) +# @pytest.mark.parametrize('dealias', dealias_range) +# @pytest.mark.parametrize('basis', [build_disk, build_annulus]) +# @pytest.mark.parametrize('dtype', [np.float64, np.complex128]) +# @pytest.mark.parametrize('n', [0, 1, 2]) +# def test_average_scalar(Nphi, Nr, k, dealias, dtype, basis, n): +# c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) +# f = field.Field(dist=d, bases=(b,), dtype=dtype) +# f.preset_scales(dealias) +# f['g'] = r**(2*n) +# h = operators.Integrate(f, c).evaluate() +# if isinstance(b, DiskBasis): +# r_inner, r_outer = 0, b.radius +# else: +# r_inner, r_outer = b.radii +# hg = 2 * np.pi * (r_outer**(2 + 2*n) - r_inner**(2 + 2*n)) / (2 + 2*n) +# assert np.allclose(h['g'], hg) + diff --git a/dedalus/tests/test_jacobi_operators.py b/dedalus/tests/test_jacobi_operators.py index 1f852a60..891d61a4 100644 --- a/dedalus/tests/test_jacobi_operators.py +++ b/dedalus/tests/test_jacobi_operators.py @@ -112,7 +112,7 @@ def test_jacobi_interpolate(N, a, b, k, dealias, dtype): @pytest.mark.parametrize('k', k_range) @pytest.mark.parametrize('dealias', dealias_range) @pytest.mark.parametrize('dtype', dtype_range) -def test_jacobi_intergrate(N, a, b, k, dealias, dtype): +def test_jacobi_integrate(N, a, b, k, dealias, dtype): """Test integration in Jacobi basis.""" c, d, b, x = build_jacobi(N, a, b, k, (0, 3), dealias, dtype) f = d.Field(bases=b) From 337ce6dccbde56f8645fe8c7db8f5e50b86e3621 Mon Sep 17 00:00:00 2001 From: "Keaton J. Burns" Date: Thu, 21 Dec 2023 14:06:25 -0500 Subject: [PATCH 11/19] Finish cylinder operator tests. 
Add PolarTrace, AverageDisk, IntegrateDisk --- dedalus/core/basis.py | 44 ++++++ dedalus/core/distributor.py | 4 +- dedalus/core/field.py | 1 + dedalus/core/operators.py | 170 ++++----------------- dedalus/tests/test_cylinder_operators.py | 181 ++++++++++++----------- dedalus/tests/test_polar_calculus.py | 3 +- dedalus/tests/test_polar_operators.py | 11 +- 7 files changed, 174 insertions(+), 240 deletions(-) diff --git a/dedalus/core/basis.py b/dedalus/core/basis.py index 59b64300..ba5e9e30 100644 --- a/dedalus/core/basis.py +++ b/dedalus/core/basis.py @@ -5425,6 +5425,26 @@ def _radial_matrix(basis, m): return matrix +class AverageDisk(operators.Average, IntegrateSpinBasis): + """Average DiskBasis scalar fields.""" + + input_basis_type = DiskBasis + + @staticmethod + @CachedMethod + def _radial_matrix(basis, m): + n_size = basis.n_size(m) + if m == 0: + N = basis.shape[1] + z0, w0 = dedalus_sphere.zernike.quadrature(2, N, k=0) + Qk = dedalus_sphere.zernike.polynomials(2, n_size, basis.alpha+basis.k, abs(m), z0) + matrix = (w0[None, :] @ Qk.T).astype(basis.dtype) + matrix *= 2 # Fourier contribution + else: + matrix= sparse.csr_matrix((0, n_size), dtype=basis.dtype) + return matrix + + class IntegrateAnnulus(operators.Integrate, IntegrateSpinBasis): """Integrate AnnulusBasis scalar fields.""" @@ -5448,6 +5468,30 @@ def _radial_matrix(basis, m): return matrix +class AverageAnnulus(operators.Average, IntegrateSpinBasis): + """Average AnnulusBasis scalar fields.""" + + input_basis_type = AnnulusBasis + + @staticmethod + @CachedMethod + def _radial_matrix(basis, m): + n_size = basis.n_size(m) + if m == 0: + N = 2 * basis.shape[1] # Add some dealiasing to help with large k + z0, w0 = dedalus_sphere.jacobi.quadrature(N, a=0, b=0) + r0 = basis.dR / 2 * (z0 + basis.rho) + Qk = dedalus_sphere.jacobi.polynomials(n_size, basis.alpha[0]+basis.k, basis.alpha[1]+basis.k, z0) + w0_geom = r0 * w0 * (r0 / basis.dR)**(-basis.k) + matrix = (w0_geom[None, :] @ 
Qk.T).astype(basis.dtype) + matrix *= basis.dR / 2 + matrix *= 2 * np.pi # Fourier contribution + matrix /= np.pi * (basis.radii[1]**2 - basis.radii[0]**2) + else: + matrix= sparse.csr_matrix((0, n_size), dtype=basis.dtype) + return matrix + + class IntegrateSpherical(operators.SphericalEllOperator): """Integrate spherical scalar fields.""" diff --git a/dedalus/core/distributor.py b/dedalus/core/distributor.py index 3ba95ed2..52aa964e 100644 --- a/dedalus/core/distributor.py +++ b/dedalus/core/distributor.py @@ -228,12 +228,12 @@ def TensorField(self, *args, **kw): from .field import TensorField return TensorField(self, *args, **kw) - def IdentityTensor(self, coordsys_in, coordsys_out=None, dtype=None): + def IdentityTensor(self, coordsys_in, coordsys_out=None, bases=None, dtype=None): """Identity tensor field.""" if coordsys_out is None: coordsys_out = coordsys_in from .field import TensorField - I = TensorField(self, (coordsys_out, coordsys_in), dtype=dtype) + I = TensorField(self, (coordsys_out, coordsys_in), bases=bases, dtype=dtype) if coordsys_in is coordsys_out: for i in range(coordsys_in.dim): I['g'][i, i] = 1 diff --git a/dedalus/core/field.py b/dedalus/core/field.py index 4ba1535d..6cd5a8f7 100644 --- a/dedalus/core/field.py +++ b/dedalus/core/field.py @@ -303,6 +303,7 @@ def T(self): def H(self): from .operators import TransposeComponents return TransposeComponents(np.conj(self)) + @CachedAttribute def is_complex(self): from ..tools.general import is_complex_dtype diff --git a/dedalus/core/operators.py b/dedalus/core/operators.py index f4a98e68..63dc98d6 100644 --- a/dedalus/core/operators.py +++ b/dedalus/core/operators.py @@ -1214,6 +1214,9 @@ def _preprocess_args(cls, operand, coord=None): # Split Cartesian coordinates if isinstance(coord, coords.CartesianCoordinates): coord = coord.coords + # Split DirectProduct coordinates + if isinstance(coord, coords.DirectProduct): + coord = coord.coordsystems # Recurse over multiple coordinates if 
isinstance(coord, (tuple, list)): if len(coord) > 1: @@ -1796,7 +1799,7 @@ def subproblem_matrix(self, subproblem): return matrix -class CylindricalTrace(Trace): +class PolarTrace(Trace): cs_type = coords.PolarCoordinates @@ -1810,19 +1813,18 @@ def subproblem_matrix(self, subproblem): # [ 1, 2] trace_spin = np.zeros(4) trace_spin[[1, 2]] = 1 - trace = sparse.kron(trace_spin, sparse.eye(2**len(self.tensorsig))) - # Assume all components have the same n_size - eye = sparse.identity(self.input_basis.n_size(m), self.dtype, format='csr') - matrix = sparse.kron(trace, eye) - # Block-diag for sin/cos parts for real dtype - if self.dtype == np.float64: - matrix = sparse.kron(matrix, sparse.identity(2, format='csr')).tocsr() + # Kronecker up identity for remaining tensor components + n_eye = prod(cs.dim for cs in self.tensorsig) + # Kronecker up identity for coeff size + n_eye *= subproblem.coeff_size(self.domain) + eye = sparse.identity(n_eye, self.dtype, format='csr') + matrix = sparse.kron(trace_spin, eye) return matrix class CartesianTrace(Trace): - cs_type = coords.CartesianCoordinates + cs_type = (coords.CartesianCoordinates, coords.Coordinate) def subproblem_matrix(self, subproblem): dim = self.coordsys.dim @@ -1837,6 +1839,11 @@ class DirectProductTrace(Trace): cs_type = coords.DirectProduct + def subproblem_matrix(self, subproblem): + comps = [DirectProductComponent(DirectProductComponent(self.operand, index=0, comp=cs), index=1, comp=cs) for cs in self.coordsys.coordsystems] + fulltrace = sum(Trace(comp) for comp in comps) + return fulltrace.expression_matrices(subproblem, [self.operand])[self.operand] + @alias("transpose", "trans") class TransposeComponents(LinearOperator, metaclass=MultiClass): @@ -1926,7 +1933,8 @@ class StandardTransposeComponents(TransposeComponents): cs_type = (coords.CartesianCoordinates, coords.PolarCoordinates, - coords.S2Coordinates) + coords.S2Coordinates, + coords.DirectProduct) def subproblem_matrix(self, subproblem): """Build 
operator matrix for a specific subproblem.""" @@ -3736,8 +3744,13 @@ def __init__(self, operand, index=0, out=None): #curl_z = - Divergence(TransposeComponents(ez3*Skew(uh, index=index), indices=(0,index+1)), index=0) curl_h = Differentiate(Skew(uh), cz) - Skew(Gradient(ez1@uz, ch), index=0) curl_z = - ez3*Divergence(Skew(uh)) - I = operand.dist.IdentityTensor(ch, coordsys, dtype=operand.dtype) - arg = I@curl_h + curl_z + # Hack to get multiplication by identity working for matrix constuction + if isinstance(ch, coords.PolarCoordinates): + bases = operand.domain.get_basis(ch).radial_basis + else: + bases = None + I = operand.dist.IdentityTensor(ch, coordsys, bases=bases, dtype=operand.dtype) + arg = I @ curl_h + curl_z if coordsys.curvilinear == coordsys.right_handed: # Skew implements the correct thing by default for left-handed curvilinear # and right-handed Cartesian coordinate systems @@ -3925,139 +3938,6 @@ def operate(self, out): comp_out[tuple(slices)][msin_slice] += vec_out_complex.imag -class PolarCurl(Curl, PolarMOperator): - - cs_type = coords.PolarCoordinates - - def __init__(self, operand, index=0, out=None): - Curl.__init__(self, operand, out=out) - if index != 0: - raise ValueError("Curl only implemented along index 0.") - self.index = index - coordsys = operand.tensorsig[index] - PolarMOperator.__init__(self, operand, coordsys) - # FutureField requirements - self.domain = operand.domain.substitute_basis(self.input_basis, self.output_basis) - self.tensorsig = operand.tensorsig[:index] + operand.tensorsig[index+1:] - self.dtype = operand.dtype - - @staticmethod - def _output_basis(input_basis): - return input_basis.derivative_basis(1) - - def check_conditions(self): - """Check that operands are in a proper layout.""" - # Require radius to be in coefficient space - layout = self.args[0].layout - return (not layout.grid_space[self.radius_axis]) and (layout.local[self.radius_axis]) - - def enforce_conditions(self): - """Require operands to be in a proper 
layout.""" - # Require radius to be in coefficient space - self.args[0].require_coeff_space(self.radius_axis) - self.args[0].require_local(self.radius_axis) - - def spinindex_out(self, spinindex_in): - # Spinorder: -, +, 0 - # - and + map to 0 - if spinindex_in[0] in (0, 1): - return (spinindex_in[1:],) - - @CachedMethod - def radial_matrix(self, spinindex_in, spinindex_out, m): - radial_basis = self.input_basis - spintotal_in = radial_basis.spintotal(self.operand.tensorsig, spinindex_in) - if spinindex_in[1:] == spinindex_out: - return self._radial_matrix(radial_basis, spinindex_in[0], spintotal_in, m) - else: - raise ValueError("This should never happen") - - @staticmethod - @CachedMethod - def _radial_matrix(radial_basis, spinindex_in0, spintotal_in, m): - # NB: the sign here is different than Vasil et al. (2019) eqn 84 - # because det(Q) = -1 - if spinindex_in0 == 0: - return 1j * 1/np.sqrt(2) * radial_basis.operator_matrix('D+', m, spintotal_in) - elif spinindex_in0 == 1: - return -1j * 1/np.sqrt(2) * radial_basis.operator_matrix('D-', m, spintotal_in) - else: - raise ValueError("This should never happen") - - def subproblem_matrix(self, subproblem): - raise NotImplementedError("subproblem_matrix is not implemented.") - if self.dtype == np.complex128: - return super().subproblem_matrix(subproblem) - elif self.dtype == np.float64: - operand = self.args[0] - radial_basis = self.radial_basis - R_in = radial_basis.regularity_classes(operand.tensorsig) - R_out = radial_basis.regularity_classes(self.tensorsig) # Should this use output_basis? 
- ell = subproblem.group[self.last_axis - 1] - # Loop over components - submatrices = [] - for spinindex_out, spintotal_out in np.ndenumerate(R_out): - submatrix_row = [] - for spinindex_in, spintotal_in in np.ndenumerate(R_in): - # Build identity matrices for each axis - subshape_in = subproblem.coeff_shape(self.operand.domain) - subshape_out = subproblem.coeff_shape(self.domain) - # Check if regularity component exists for this ell - if (spinindex_out in self.spinindex_out(spinindex_in)) and radial_basis.regularity_allowed(ell, spinindex_in) and radial_basis.regularity_allowed(ell, spinindex_out): - factors = [sparse.eye(m, n, format='csr') for m, n in zip(subshape_out, subshape_in)] - radial_matrix = self.radial_matrix(spinindex_in, spinindex_out, ell) - # Real part - factors[self.last_axis] = radial_matrix.real - comp_matrix_real = reduce(sparse.kron, factors, 1).tocsr() - # Imaginary pary - m_size = subshape_in[self.first_axis] - mult_1j = np.array([[0, -1], [1, 0]]) - m_blocks = sparse.eye(m_size//2, m_size//2, format='csr') - factors[self.first_axis] = sparse.kron(mult_1j, m_blocks) - factors[self.last_axis] = radial_matrix.imag - comp_matrix_imag = reduce(sparse.kron, factors, 1).tocsr() - comp_matrix = comp_matrix_real + comp_matrix_imag - else: - # Build zero matrix - comp_matrix = sparse.csr_matrix((prod(subshape_out), prod(subshape_in))) - submatrix_row.append(comp_matrix) - submatrices.append(submatrix_row) - matrix = sparse.bmat(submatrices) - matrix.tocsr() - return matrix - - def operate(self, out): - """Perform operation.""" - if self.dtype == np.complex128: - return super().operate(out) - operand = self.args[0] - input_basis = self.input_basis - axis = self.radius_axis - # Set output layout - out.preset_layout(operand.layout) - out.data[:] = 0 - # Apply operator - S_in = input_basis.spin_weights(operand.tensorsig) - slices = [slice(None) for i in range(self.dist.dim)] - for spinindex_in, spintotal_in in np.ndenumerate(S_in): - for spinindex_out in 
self.spinindex_out(spinindex_in): - comp_in = operand.data[spinindex_in] - comp_out = out.data[spinindex_out] - - for m, mg_slice, mc_slice, n_slice in input_basis.m_maps(self.dist): - slices[axis-1] = mc_slice - slices[axis] = n_slice - cos_slice = axslice(axis-1, 0, None, 2) - msin_slice = axslice(axis-1, 1, None, 2) - vec_in_cos = comp_in[tuple(slices)][cos_slice] - vec_in_msin = comp_in[tuple(slices)][msin_slice] - vec_in_complex = vec_in_cos + 1j*vec_in_msin - A = self.radial_matrix(spinindex_in, spinindex_out, m) - vec_out_complex = apply_matrix(A, vec_in_complex, axis=axis) - comp_out[tuple(slices)][cos_slice] += vec_out_complex.real - comp_out[tuple(slices)][msin_slice] += vec_out_complex.imag - - @alias("lap") class Laplacian(LinearOperator, metaclass=MultiClass): diff --git a/dedalus/tests/test_cylinder_operators.py b/dedalus/tests/test_cylinder_operators.py index 044fe73f..8663012d 100644 --- a/dedalus/tests/test_cylinder_operators.py +++ b/dedalus/tests/test_cylinder_operators.py @@ -39,9 +39,9 @@ def build_periodic_cylindrical_annulus(Nz, Nphi, Nr, alpha, k, dealias, dtype): Nz_range = [8] -Nphi_range = [16] +Nphi_range = [8] Nr_range = [8] -alpha_range = [0] +alpha_range = [0, 1] k_range = [0] dealias_range = [1, 3/2] basis_range = [build_periodic_cylinder, build_periodic_cylindrical_annulus] @@ -77,98 +77,107 @@ def test_explicit_trace_tensor(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis): @pytest.mark.parametrize('basis', basis_range) def test_implicit_trace_tensor(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis): c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) - I = d.IdentityTensor(c) f = d.Field(bases=b) g = d.Field(bases=b) g.fill_random('g') g.low_pass_filter(scales=0.5) + I = d.IdentityTensor(c, bases=b[1].radial_basis) problem = problems.LBVP([f]) problem.add_equation((operators.Trace(I*f), 3*g)) - solver = solvers.LinearBoundaryValueSolver(problem) + solver = solvers.LinearBoundaryValueSolver(problem, 
matrix_coupling=[False, False, True]) solver.solve() assert np.allclose(f['c'], g['c']) -# @pytest.mark.parametrize('basis', [build_disk, build_annulus]) -# @pytest.mark.parametrize('Nphi', Nphi_range) -# @pytest.mark.parametrize('Nr', Nr_range) -# @pytest.mark.parametrize('k', k_range) -# @pytest.mark.parametrize('dealias', dealias_range) -# @pytest.mark.parametrize('dtype', [np.float64, np.complex128]) -# @pytest.mark.parametrize('layout', ['c', 'g']) -# def test_transpose_explicit(basis, Nphi, Nr, k, dealias, dtype, layout): -# c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) -# # Random tensor field -# f = d.TensorField((c, c), bases=b) -# f.fill_random(layout='g') -# f.low_pass_filter(scales=0.75) -# # Evaluate transpose -# f.change_layout(layout) -# g = operators.transpose(f).evaluate() -# assert np.allclose(g['g'], np.transpose(f['g'], (1,0,2,3))) - - -# @pytest.mark.parametrize('basis', [build_disk, build_annulus]) -# @pytest.mark.parametrize('Nphi', Nphi_range) -# @pytest.mark.parametrize('Nr', Nr_range) -# @pytest.mark.parametrize('k', k_range) -# @pytest.mark.parametrize('dealias', dealias_range) -# @pytest.mark.parametrize('dtype', [np.float64, np.complex128]) -# def test_transpose_implicit(basis, Nphi, Nr, k, dealias, dtype): -# c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) -# # Random tensor field -# f = d.TensorField((c, c), bases=b) -# f.fill_random(layout='g') -# f.low_pass_filter(scales=0.75) -# # Transpose LBVP -# u = d.TensorField((c, c), bases=b) -# problem = problems.LBVP([u], namespace=locals()) -# problem.add_equation("trans(u) = trans(f)") -# solver = problem.build_solver() -# solver.solve() -# u.change_scales(dealias) -# f.change_scales(dealias) -# assert np.allclose(u['g'], f['g']) - - -# @pytest.mark.parametrize('Nphi', [16]) -# @pytest.mark.parametrize('Nr', [10]) -# @pytest.mark.parametrize('k', [0, 1, 2, 5]) -# @pytest.mark.parametrize('dealias', dealias_range) -# @pytest.mark.parametrize('basis', [build_disk, 
build_annulus]) -# @pytest.mark.parametrize('dtype', [np.float64, np.complex128]) -# @pytest.mark.parametrize('n', [0, 1, 2]) -# def test_integrate_scalar(Nphi, Nr, k, dealias, dtype, basis, n): -# c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) -# f = field.Field(dist=d, bases=(b,), dtype=dtype) -# f.preset_scales(dealias) -# f['g'] = r**(2*n) -# h = operators.Integrate(f, c).evaluate() -# if isinstance(b, DiskBasis): -# r_inner, r_outer = 0, b.radius -# else: -# r_inner, r_outer = b.radii -# hg = 2 * np.pi * (r_outer**(2 + 2*n) - r_inner**(2 + 2*n)) / (2 + 2*n) -# assert np.allclose(h['g'], hg) - - -# @pytest.mark.parametrize('Nphi', [16]) -# @pytest.mark.parametrize('Nr', [10]) -# @pytest.mark.parametrize('k', [0, 1, 2, 5]) -# @pytest.mark.parametrize('dealias', dealias_range) -# @pytest.mark.parametrize('basis', [build_disk, build_annulus]) -# @pytest.mark.parametrize('dtype', [np.float64, np.complex128]) -# @pytest.mark.parametrize('n', [0, 1, 2]) -# def test_average_scalar(Nphi, Nr, k, dealias, dtype, basis, n): -# c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) -# f = field.Field(dist=d, bases=(b,), dtype=dtype) -# f.preset_scales(dealias) -# f['g'] = r**(2*n) -# h = operators.Integrate(f, c).evaluate() -# if isinstance(b, DiskBasis): -# r_inner, r_outer = 0, b.radius -# else: -# r_inner, r_outer = b.radii -# hg = 2 * np.pi * (r_outer**(2 + 2*n) - r_inner**(2 + 2*n)) / (2 + 2*n) -# assert np.allclose(h['g'], hg) +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +@pytest.mark.parametrize('layout', ['c', 'g']) +def test_transpose_explicit(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis, layout): + c, d, b, z, phi, r, x, y = 
basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + # Random tensor field + f = d.TensorField((c, c), bases=b) + f.fill_random(layout='g') + f.low_pass_filter(scales=0.75) + # Evaluate transpose + f.change_layout(layout) + g = operators.transpose(f).evaluate() + assert np.allclose(g['g'], np.transpose(f['g'], (1,0,2,3,4))) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +@pytest.mark.parametrize('layout', ['c', 'g']) +def test_transpose_implicit(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis, layout): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + # Random tensor field + f = d.TensorField((c, c), bases=b) + f.fill_random(layout='g') + f.low_pass_filter(scales=0.75) + # Transpose LBVP + u = d.TensorField((c, c), bases=b) + problem = problems.LBVP([u], namespace=locals()) + problem.add_equation("trans(u) = trans(f)") + solver = problem.build_solver() + solver.solve() + u.change_scales(dealias) + f.change_scales(dealias) + assert np.allclose(u['g'], f['g']) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +@pytest.mark.parametrize('n', [0, 1, 2]) +def test_integrate_scalar(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis, n): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + f = field.Field(dist=d, bases=b, dtype=dtype) + f.preset_scales(dealias) + f['g'] = r**(2*n) + 
np.sin(4*np.pi*z/length) + h = operators.Integrate(f, c).evaluate() + if isinstance(b[1], DiskBasis): + r_inner, r_outer = 0, b[1].radius + else: + r_inner, r_outer = b[1].radii + hg = 2 * np.pi * length * (r_outer**(2 + 2*n) - r_inner**(2 + 2*n)) / (2 + 2*n) + assert np.allclose(h['g'], hg) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +@pytest.mark.parametrize('n', [0, 1, 2]) +def test_average_scalar(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis, n): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + f = field.Field(dist=d, bases=b, dtype=dtype) + f.preset_scales(dealias) + f['g'] = r**(2*n) + np.sin(4*np.pi*z/length) + h = operators.Average(f, c).evaluate() + if isinstance(b[1], DiskBasis): + r_inner, r_outer = 0, b[1].radius + else: + r_inner, r_outer = b[1].radii + hg = 2 * (r_outer**(2 + 2*n) - r_inner**(2 + 2*n)) / (2 + 2*n) / (r_outer**2 - r_inner**2) + assert np.allclose(h['g'], hg) diff --git a/dedalus/tests/test_polar_calculus.py b/dedalus/tests/test_polar_calculus.py index da7f430c..518d24ba 100644 --- a/dedalus/tests/test_polar_calculus.py +++ b/dedalus/tests/test_polar_calculus.py @@ -172,7 +172,8 @@ def test_curl_vector(Nphi, Nr, dealias, basis, dtype): ex = np.array([-np.sin(phi)+0.*r,np.cos(phi)+0.*r]) ey = np.array([np.cos(phi)+0.*r,np.sin(phi)+0.*r]) v['g'] = 4*x**3*ey + 3*y**2*ey - u = operators.Curl(v).evaluate() + curl = lambda A: - operators.Divergence(operators.Skew(A)) + u = curl(v).evaluate() ug = 12*x**2 assert np.allclose(u['g'], ug) diff --git a/dedalus/tests/test_polar_operators.py b/dedalus/tests/test_polar_operators.py index d74243b6..7ac3217c 100644 --- 
a/dedalus/tests/test_polar_operators.py +++ b/dedalus/tests/test_polar_operators.py @@ -227,13 +227,11 @@ def test_implicit_trace_tensor(Nphi, Nr, k, dealias, basis, dtype): c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) g = field.Field(dist=d, bases=(b,), dtype=dtype) - g.preset_scales(g.domain.dealias) - g['g'] = 3*x**2 + 2*y - I = field.Field(dist=d, bases=(b.clone_with(shape=(1,Nr), k=0),), tensorsig=(c,c), dtype=dtype) - I['g'][0,0] = I['g'][1,1] = 1 - trace = lambda A: operators.Trace(A) + g.fill_random('g') + g.low_pass_filter(scales=0.5) + I = d.IdentityTensor(c, bases=b.radial_basis) problem = problems.LBVP([f]) - problem.add_equation((trace(I*f), 2*g)) + problem.add_equation((operators.Trace(I*f), 2*g)) solver = solvers.LinearBoundaryValueSolver(problem, matrix_coupling=[False, True]) solver.solve() assert np.allclose(f['c'], g['c']) @@ -305,6 +303,7 @@ def test_azimuthal_average_scalar(Nphi, Nr, k, dealias, dtype, basis): @pytest.mark.parametrize('dtype', [np.float64, np.complex128]) @pytest.mark.parametrize('n', [0, 1, 2]) def test_integrate_scalar(Nphi, Nr, k, dealias, dtype, basis, n): + # Need to test if this fails for alpha != 0? c, d, b, phi, r, x, y = basis(Nphi, Nr, k, dealias, dtype) f = field.Field(dist=d, bases=(b,), dtype=dtype) f.preset_scales(dealias) From fe802b4dd1ca9c37f413eac86f163c237b9a69bf Mon Sep 17 00:00:00 2001 From: "Keaton J. 
Burns" Date: Thu, 21 Dec 2023 14:16:13 -0500 Subject: [PATCH 12/19] Include NCC tests in standard test suite, except spherical ones, which need dealiasing updates --- dedalus/tests/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dedalus/tests/__init__.py b/dedalus/tests/__init__.py index 71b1827d..3430177b 100644 --- a/dedalus/tests/__init__.py +++ b/dedalus/tests/__init__.py @@ -11,7 +11,7 @@ def base_cmd(): workers = os.getenv("PYTEST_WORKERS", "auto") - return ["-k", "not ncc", f"--workers={workers}"] + return ["--ignore=test_spherical_ncc.py", f"--workers={workers}"] def test(report=False): """Run tests.""" From 433bd979186337797aa9fbfbcbc011a2f91df0c1 Mon Sep 17 00:00:00 2001 From: "Keaton J. Burns" Date: Thu, 21 Dec 2023 14:35:14 -0500 Subject: [PATCH 13/19] Remove some dead files --- dedalus/core/evaluator.py.master | 658 ------------------------------- dedalus/core/spaces.py | 275 ------------- dedalus/core/vectorspaces.py | 38 -- 3 files changed, 971 deletions(-) delete mode 100644 dedalus/core/evaluator.py.master delete mode 100644 dedalus/core/spaces.py delete mode 100644 dedalus/core/vectorspaces.py diff --git a/dedalus/core/evaluator.py.master b/dedalus/core/evaluator.py.master deleted file mode 100644 index 828cb877..00000000 --- a/dedalus/core/evaluator.py.master +++ /dev/null @@ -1,658 +0,0 @@ -""" -Class for centralized evaluation of expression trees. 
- -""" - -import os -import re -from collections import defaultdict -import pathlib -import h5py -import shutil -import uuid -import numpy as np -from mpi4py import MPI -from math import prod - -from .system import FieldSystem -from .operators import FieldCopy -from .future import Future, FutureField -from .field import Field -from ..tools.array import reshape_vector -from ..tools.general import OrderedSet -from ..tools.general import oscillate -from ..tools.parallel import Sync - -from ..tools.config import config -FILEHANDLER_MODE_DEFAULT = config['analysis'].get('FILEHANDLER_MODE_DEFAULT') -FILEHANDLER_PARALLEL_DEFAULT = config['analysis'].getboolean('FILEHANDLER_PARALLEL_DEFAULT') -FILEHANDLER_TOUCH_TMPFILE = config['analysis'].getboolean('FILEHANDLER_TOUCH_TMPFILE') - -import logging -logger = logging.getLogger(__name__.split('.')[-1]) - - -class Evaluator: - """ - Coordinates evaluation of operator trees through various handlers. - - Parameters - ---------- - domain : domain object - Problem domain - vars : dict - Variables for parsing task expression strings - - """ - - def __init__(self, domain, vars): - - self.domain = domain - self.vars = vars - self.handlers = [] - self.groups = defaultdict(list) - - def add_dictionary_handler(self, **kw): - """Create a dictionary handler and add to evaluator.""" - - DH = DictionaryHandler(self.domain, self.vars, **kw) - return self.add_handler(DH) - - def add_system_handler(self, **kw): - """Create a system handler and add to evaluator.""" - - SH = SystemHandler(self.domain, self.vars, **kw) - return self.add_handler(SH) - - def add_file_handler(self, filename, **kw): - """Create a file handler and add to evaluator.""" - - FH = FileHandler(filename, self.domain, self.vars, **kw) - return self.add_handler(FH) - - def add_handler(self, handler): - """Add a handler to evaluator.""" - - self.handlers.append(handler) - # Register with group - if handler.group is not None: - self.groups[handler.group].append(handler) - return 
handler - - def evaluate_group(self, group, **kw): - """Evaluate all handlers in a group.""" - handlers = self.groups[group] - self.evaluate_handlers(handlers, **kw) - - def evaluate_scheduled(self, wall_time, sim_time, iteration, **kw): - """Evaluate all scheduled handlers.""" - - scheduled_handlers = [] - for handler in self.handlers: - # Get cadence devisors - wall_div = wall_time // handler.wall_dt - sim_div = sim_time // handler.sim_dt - iter_div = iteration // handler.iter - # Compare to divisor at last evaluation - wall_up = (wall_div > handler.last_wall_div) - sim_up = (sim_div > handler.last_sim_div) - iter_up = (iter_div > handler.last_iter_div) - - if any((wall_up, sim_up, iter_up)): - scheduled_handlers.append(handler) - # Update all divisors - handler.last_wall_div = wall_div - handler.last_sim_div = sim_div - handler.last_iter_div = iter_div - - self.evaluate_handlers(scheduled_handlers, wall_time=wall_time, sim_time=sim_time, iteration=iteration, **kw) - - def evaluate_handlers(self, handlers, id=None, **kw): - """Evaluate a collection of handlers.""" - - # Default to uuid to cache within evaluation, but not across evaluations - if id is None: - id = uuid.uuid4() - - tasks = [t for h in handlers for t in h.tasks] - for task in tasks: - task['out'] = None - - # Attempt initial evaluation - tasks = self.attempt_tasks(tasks, id=id) - - # Move all fields to coefficient layout - fields = self.get_fields(tasks) - self.require_coeff_space(fields) - tasks = self.attempt_tasks(tasks, id=id) - - # Oscillate through layouts until all tasks are evaluated - n_layouts = len(self.domain.dist.layouts) - oscillate_indices = oscillate(range(n_layouts)) - current_index = next(oscillate_indices) - while tasks: - next_index = next(oscillate_indices) - # Transform fields - fields = self.get_fields(tasks) - if current_index < next_index: - self.domain.dist.paths[current_index].increment(fields) - else: - self.domain.dist.paths[next_index].decrement(fields) - current_index 
= next_index - # Attempt evaluation - tasks = self.attempt_tasks(tasks, id=id) - - # Transform all outputs to coefficient layout to dealias - outputs = OrderedSet([t['out'] for h in handlers for t in h.tasks]) - self.require_coeff_space(outputs) - - # Copy redundant outputs so processing is independent - outputs = set() - for handler in handlers: - for task in handler.tasks: - if task['out'] in outputs: - task['out'] = task['out'].copy() - else: - outputs.add(task['out']) - - # Process - for handler in handlers: - handler.process(**kw) - - def require_coeff_space(self, fields): - """Move all fields to coefficient layout.""" - # Build dictionary of starting layout indices - layouts = defaultdict(list, {0:[]}) - for f in fields: - layouts[f.layout.index].append(f) - # Decrement all fields down to layout 0 - max_index = max(layouts.keys()) - current_fields = [] - for index in range(max_index, 0, -1): - current_fields.extend(layouts[index]) - self.domain.dist.paths[index-1].decrement(current_fields) - - @staticmethod - def get_fields(tasks): - """Get field set for a collection of tasks.""" - fields = OrderedSet() - for task in tasks: - fields.update(task['operator'].atoms(Field)) - return fields - - @staticmethod - def attempt_tasks(tasks, **kw): - """Attempt tasks and return the unfinished ones.""" - unfinished = [] - for task in tasks: - output = task['operator'].attempt(**kw) - if output is None: - unfinished.append(task) - else: - task['out'] = output - return unfinished - - -class Handler: - """ - Group of tasks with associated scheduling data. 
- - Parameters - ---------- - domain : domain object - Problem domain - vars : dict - Variables for parsing task expression strings - group : str, optional - Group name for forcing selected handelrs (default: None) - wall_dt : float, optional - Wall time cadence for evaluating tasks (default: infinite) - sim_dt : float, optional - Simulation time cadence for evaluating tasks (default: infinite) - iter : int, optional - Iteration cadence for evaluating tasks (default: infinite) - - """ - - def __init__(self, domain, vars, group=None, wall_dt=np.inf, sim_dt=np.inf, iter=np.inf): - - # Attributes - self.domain = domain - self.vars = vars - self.group = group - self.wall_dt = wall_dt - self.sim_dt = sim_dt - self.iter = iter - - self.tasks = [] - # Set initial divisors to be scheduled for sim_time, iteration = 0 - self.last_wall_div = -1 - self.last_sim_div = -1 - self.last_iter_div = -1 - - def add_task(self, task, layout='g', name=None, scales=None): - """Add task to handler.""" - - # Default name - if name is None: - name = str(task) - - # Create futurefield operator - if isinstance(task, str): - op = FutureField.parse(task, self.vars, self.domain) - else: - op = FutureField.cast(task, self.domain) - - # Build task dictionary - task = dict() - task['operator'] = op - task['layout'] = self.domain.distributor.get_layout_object(layout) - task['name'] = name - task['scales'] = self.domain.remedy_scales(scales) - - self.tasks.append(task) - - def add_tasks(self, tasks, **kw): - """Add multiple tasks.""" - - name = kw.pop('name', '') - for task in tasks: - tname = name + str(task) - self.add_task(task, name=tname, **kw) - - def add_system(self, system, **kw): - """Add fields from a FieldSystem.""" - - self.add_tasks(system.fields, **kw) - - -class DictionaryHandler(Handler): - """Handler that stores outputs in a dictionary.""" - - def __init__(self, *args, **kw): - Handler.__init__(self, *args, **kw) - self.fields = dict() - - def __getitem__(self, item): - return 
self.fields[item] - - def process(self, **kw): - """Reference fields from dictionary.""" - for task in self.tasks: - task['out'].set_scales(task['scales'], keep_data=True) - task['out'].require_layout(task['layout']) - self.fields[task['name']] = task['out'] - - -class SystemHandler(Handler): - """Handler that sets fields in a FieldSystem.""" - - def build_system(self): - """Build FieldSystem and set task outputs.""" - - nfields = len(self.tasks) - names = ['sys'+str(i) for i in range(nfields)] - fields = [self.domain.new_field(name=name) for name in names] - self.system = FieldSystem(fields) - - for i, task in enumerate(self.tasks): - task['operator'].out = self.system.fields[i] - - return self.system - - def process(self, **kw): - """Gather fields into system.""" - self.system.gather() - - -class FileHandler(Handler): - """ - Handler that writes tasks to an HDF5 file. - - Parameters - ---------- - base_path : str - Base path for analyis output folder - max_writes : int, optional - Maximum number of writes per set (default: infinite) - max_size : int, optional - Maximum file size to write to, in bytes (default: 2**30 = 1 GB). - (Note: files may be larger after final write.) - parallel : bool, optional - Perform parallel writes from each process to single file (True), or - separately write to individual process files (False). - Default behavior set by config option. - mode : str, optional - 'overwrite' to delete any present analysis output with the same base path. - 'append' to begin with set number incremented past any present analysis output. - Default behavior set by config option. 
- - """ - - def __init__(self, base_path, *args, max_writes=np.inf, max_size=2**30, parallel=None, mode=None, **kw): - - Handler.__init__(self, *args, **kw) - - # Resolve defaults from config - if parallel is None: - parallel = FILEHANDLER_PARALLEL_DEFAULT - if mode is None: - mode = FILEHANDLER_MODE_DEFAULT - - # Check base_path - base_path = pathlib.Path(base_path).resolve() - if any(base_path.suffixes): - raise ValueError("base_path should indicate a folder for storing HDF5 files.") - - # Attributes - self.base_path = base_path - self.max_writes = max_writes - self.max_size = max_size - self.parallel = parallel - self._sl_array = np.zeros(1, dtype=int) - - # Resolve mode - mode = mode.lower() - if mode not in ['overwrite', 'append']: - raise ValueError("Write mode {} not defined.".format(mode)) - - comm = self.domain.dist.comm_cart - if comm.rank == 0: - set_pattern = '%s_s*' % (self.base_path.stem) - sets = list(self.base_path.glob(set_pattern)) - if mode == "overwrite": - for set in sets: - if set.is_dir(): - shutil.rmtree(str(set)) - else: - set.unlink() - set_num = 1 - total_write_num = 0 - elif mode == "append": - set_nums = [] - if sets: - for set in sets: - m = re.match("{}_s(\d+)$".format(base_path.stem), set.stem) - if m: - set_nums.append(int(m.groups()[0])) - max_set = max(set_nums) - joined_file = base_path.joinpath("{}_s{}.h5".format(base_path.stem,max_set)) - p0_file = base_path.joinpath("{0}_s{1}/{0}_s{1}_p0.h5".format(base_path.stem,max_set)) - if os.path.exists(str(joined_file)): - with h5py.File(str(joined_file),'r') as testfile: - last_write_num = testfile['/scales/write_number'][-1] - elif os.path.exists(str(p0_file)): - with h5py.File(str(p0_file),'r') as testfile: - last_write_num = testfile['/scales/write_number'][-1] - else: - last_write_num = 0 - logger.warning("Cannot determine write num from files. 
Restarting count.") - else: - max_set = 0 - last_write_num = 0 - set_num = max_set + 1 - total_write_num = last_write_num - else: - set_num = None - total_write_num = None - # Communicate set and write numbers - self.set_num = comm.bcast(set_num, root=0) - self.total_write_num = comm.bcast(total_write_num, root=0) - - # Create output folder - with Sync(comm): - if comm.rank == 0: - base_path.mkdir(exist_ok=True) - - if parallel: - # Set HDF5 property list for collective writing - self._property_list = h5py.h5p.create(h5py.h5p.DATASET_XFER) - self._property_list.set_dxpl_mpio(h5py.h5fd.MPIO_COLLECTIVE) - - def check_file_limits(self): - """Check if write or size limits have been reached.""" - write_limit = (self.file_write_num >= self.max_writes) - size_limit = (self.current_path.stat().st_size >= self.max_size) - if not self.parallel: - # reduce(size_limit, or) across processes - comm = self.domain.distributor.comm_cart - self._sl_array[0] = size_limit - comm.Allreduce(MPI.IN_PLACE, self._sl_array, op=MPI.LOR) - size_limit = self._sl_array[0] - return (write_limit or size_limit) - - def get_file(self): - """Return current HDF5 file, creating if necessary.""" - # Create new file if necessary - if os.path.exists(str(self.current_path)): - if self.check_file_limits(): - self.set_num += 1 - self.create_current_file() - else: - self.create_current_file() - # Open current file - if self.parallel: - comm = self.domain.distributor.comm_cart - h5file = h5py.File(str(self.current_path), 'r+', driver='mpio', comm=comm) - else: - h5file = h5py.File(str(self.current_path), 'r+') - self.file_write_num = h5file['/scales/write_number'].shape[0] - return h5file - - @property - def current_path(self): - domain = self.domain - comm = domain.distributor.comm_cart - set_num = self.set_num - if self.parallel: - # Save in base directory - file_name = '%s_s%i.hdf5' %(self.base_path.stem, set_num) - return self.base_path.joinpath(file_name) - else: - # Save in folders for each filenum in 
base directory - folder_name = '%s_s%i' %(self.base_path.stem, set_num) - folder_path = self.base_path.joinpath(folder_name) - file_name = '%s_s%i_p%i.h5' %(self.base_path.stem, set_num, comm.rank) - return folder_path.joinpath(file_name) - - def create_current_file(self): - """Generate new HDF5 file in current_path.""" - self.file_write_num = 0 - comm = self.domain.distributor.comm_cart - if self.parallel: - file = h5py.File(str(self.current_path), 'w-', driver='mpio', comm=comm) - else: - # Create set folder - with Sync(comm): - if comm.rank == 0: - self.current_path.parent.mkdir() - if FILEHANDLER_TOUCH_TMPFILE: - tmpfile = self.base_path.joinpath('tmpfile_p%i' %(comm.rank)) - tmpfile.touch() - file = h5py.File(str(self.current_path), 'w-') - if FILEHANDLER_TOUCH_TMPFILE: - tmpfile.unlink() - self.setup_file(file) - file.close() - - def setup_file(self, file): - - domain = self.domain - - # Metadeta - file.attrs['set_number'] = self.set_num - file.attrs['handler_name'] = self.base_path.stem - file.attrs['writes'] = self.file_write_num - if not self.parallel: - file.attrs['mpi_rank'] = domain.distributor.comm_cart.rank - file.attrs['mpi_size'] = domain.distributor.comm_cart.size - - # Scales - scale_group = file.create_group('scales') - # Start time scales with shape=(0,) to chunk across writes - scale_group.create_dataset(name='sim_time', shape=(0,), maxshape=(None,), dtype=np.float64) - scale_group.create_dataset(name='timestep', shape=(0,), maxshape=(None,), dtype=np.float64) - scale_group.create_dataset(name='world_time', shape=(0,), maxshape=(None,), dtype=np.float64) - scale_group.create_dataset(name='wall_time', shape=(0,), maxshape=(None,), dtype=np.float64) - scale_group.create_dataset(name='iteration', shape=(0,), maxshape=(None,), dtype=np.int) - scale_group.create_dataset(name='write_number', shape=(0,), maxshape=(None,), dtype=np.int) - const = scale_group.create_dataset(name='constant', data=np.array([0.], dtype=np.float64)) - for axis, basis in 
enumerate(domain.bases): - coeff_name = basis.element_label + basis.name - scale_group.create_dataset(name=coeff_name, data=basis.elements) - scale_group.create_group(basis.name) - - # Tasks - task_group = file.create_group('tasks') - for task_num, task in enumerate(self.tasks): - layout = task['layout'] - constant = task['operator'].meta[:]['constant'] - scales = task['scales'] - gnc_shape, gnc_start, write_shape, write_start, write_count = self.get_write_stats(layout, scales, constant, index=0) - if prod(write_shape) <= 1: - # Start with shape[0] = 0 to chunk across writes for scalars - file_shape = (0,) + tuple(write_shape) - else: - # Start with shape[0] = 1 to chunk within writes - file_shape = (1,) + tuple(write_shape) - file_max = (None,) + tuple(write_shape) - dset = task_group.create_dataset(name=task['name'], shape=file_shape, maxshape=file_max, dtype=layout.dtype) - if not self.parallel: - dset.attrs['global_shape'] = gnc_shape - dset.attrs['start'] = gnc_start - dset.attrs['count'] = write_count - - # Metadata and scales - dset.attrs['task_number'] = task_num - dset.attrs['constant'] = constant - dset.attrs['grid_space'] = layout.grid_space - dset.attrs['scales'] = scales - - # Time scales - dset.dims[0].label = 't' - for sn in ['sim_time', 'world_time', 'wall_time', 'timestep', 'iteration', 'write_number']: - scale = scale_group[sn] - dset.dims.create_scale(scale, sn) - dset.dims[0].attach_scale(scale) - - # Spatial scales - for axis, basis in enumerate(domain.bases): - if constant[axis]: - sn = lookup = 'constant' - else: - if layout.grid_space[axis]: - sn = basis.name - axscale = scales[axis] - if str(axscale) not in scale_group[sn]: - scale_group[sn].create_dataset(name=str(axscale), data=basis.grid(axscale)) - lookup = '/'.join((sn, str(axscale))) - else: - sn = lookup = basis.element_label + basis.name - scale = scale_group[lookup] - dset.dims.create_scale(scale, lookup) - dset.dims[axis+1].label = sn - dset.dims[axis+1].attach_scale(scale) - - 
def process(self, world_time, wall_time, sim_time, timestep, iteration, **kw): - """Save task outputs to HDF5 file.""" - - file = self.get_file() - self.total_write_num += 1 - self.file_write_num += 1 - file.attrs['writes'] = self.file_write_num - index = self.file_write_num - 1 - - # Update time scales - sim_time_dset = file['scales/sim_time'] - world_time_dset = file['scales/world_time'] - wall_time_dset = file['scales/wall_time'] - timestep_dset = file['scales/timestep'] - iteration_dset = file['scales/iteration'] - write_num_dset = file['scales/write_number'] - - sim_time_dset.resize(index+1, axis=0) - sim_time_dset[index] = sim_time - world_time_dset.resize(index+1, axis=0) - world_time_dset[index] = world_time - wall_time_dset.resize(index+1, axis=0) - wall_time_dset[index] = wall_time - timestep_dset.resize(index+1, axis=0) - timestep_dset[index] = timestep - iteration_dset.resize(index+1, axis=0) - iteration_dset[index] = iteration - write_num_dset.resize(index+1, axis=0) - write_num_dset[index] = self.total_write_num - - # Create task datasets - for task_num, task in enumerate(self.tasks): - out = task['out'] - out.set_scales(task['scales'], keep_data=True) - out.require_layout(task['layout']) - - dset = file['tasks'][task['name']] - dset.resize(index+1, axis=0) - - memory_space, file_space = self.get_hdf5_spaces(out.layout, task['scales'], out.meta[:]['constant'], index) - if self.parallel: - dset.id.write(memory_space, file_space, out.data, dxpl=self._property_list) - else: - dset.id.write(memory_space, file_space, out.data) - - file.close() - - def get_write_stats(self, layout, scales, constant, index): - """Determine write parameters for nonconstant subspace of a field.""" - - constant = np.array(constant) - # References - gshape = layout.global_shape(scales) - lshape = layout.local_shape(scales) - start = layout.start(scales) - first = (start == 0) - - # Build counts, taking just the first entry along constant axes - write_count = lshape.copy() - 
write_count[constant & first] = 1 - write_count[constant & ~first] = 0 - - # Collectively writing global data - global_nc_shape = gshape.copy() - global_nc_shape[constant] = 1 - global_nc_start = start.copy() - global_nc_start[constant & ~first] = 1 - - if self.parallel: - # Collectively writing global data - write_shape = global_nc_shape - write_start = global_nc_start - else: - # Independently writing local data - write_shape = write_count - write_start = 0 * start - - return global_nc_shape, global_nc_start, write_shape, write_start, write_count - - def get_hdf5_spaces(self, layout, scales, constant, index): - """Create HDF5 space objects for writing nonconstant subspace of a field.""" - - constant = np.array(constant) - # References - lshape = layout.local_shape(scales) - start = layout.start(scales) - gnc_shape, gnc_start, write_shape, write_start, write_count = self.get_write_stats(layout, scales, constant, index) - - # Build HDF5 spaces - memory_shape = tuple(lshape) - memory_start = tuple(0 * start) - memory_count = tuple(write_count) - memory_space = h5py.h5s.create_simple(memory_shape) - memory_space.select_hyperslab(memory_start, memory_count) - - file_shape = (index+1,) + tuple(write_shape) - file_start = (index,) + tuple(write_start) - file_count = (1,) + tuple(write_count) - file_space = h5py.h5s.create_simple(file_shape) - file_space.select_hyperslab(file_start, file_count) - - return memory_space, file_space - diff --git a/dedalus/core/spaces.py b/dedalus/core/spaces.py deleted file mode 100644 index 45794066..00000000 --- a/dedalus/core/spaces.py +++ /dev/null @@ -1,275 +0,0 @@ -""" -Space class definitions. 
-""" - -import numpy as np - -from ..tools import jacobi -from ..tools.array import reshape_vector -from ..tools.cache import CachedMethod, CachedAttribute - - -class Space: - """Base class for spaces.""" - - def _check_coords(self): - if not len(self.coords) == self.dim: - raise ValueError("Wrong number of coordinates.") - - def grids(self, scale): - """Flat global grids.""" - raise NotImplementedError() - - @CachedAttribute - def domain(self): - from .domain import Domain - return Domain(self.dist, [self]) - - # @classmethod - # def check_shape(cls, space_shape): - # """Check compatibility between space shape and group shape.""" - # for ss, gs in zip(space_shape, cls.group_shape): - # if (ss % gs) != 0: - # raise ValueError("Space shape must be multiple of group shape.") - - # def grid_shape(self, scales): - # """Scaled grid shape.""" - # scales = self.dist.remedy_scales(scales) - # subscales = scales[self.axis:self.axis+self.dim] - # return tuple(int(s*n) for s,n in zip(subscales, self.shape)) - - # def local_grids(self, scales): - # """Local grid vectors by axis.""" - # scales = self.dist.remedy_scales(scales) - # # Get grid slices for relevant axes - # slices = self.dist.grid_layout.slices(self.domain, scales) - # subslices = slices[self.axis:self.axis+self.dim] - # # Select local portion of global grids - # grids = self.grids(scales) - # local_grids = tuple(g[s] for g,s in zip(grids, subslices)) - # # Reshape as vectors - # return tuple(reshape_vector(g, self.dist.dim, i) for g,i in zip(local_grids, self.axes)) - - # @CachedMethod - # def grid_field(self, scales=None): - # """Return field object representing grid.""" - # from .field import Field - # grid = Field(name=self.name, dist=self.dist, bases=[self.grid_basis]) - # grid.preset_scales(scales) - # grid['g'] = self.local_grid(scales) - # return grid - - # @CachedAttribute - # def operators(self): - # from .operators import prefixes - # return {prefix+self.name: partial(op, **{self.name:1}) for prefix, op 
in prefixes.items()} - - -# class Constant(Space): -# """Constant spaces.""" - -# constant = True -# dim = 1 -# group_shape = (1,) -# shape = (1,) -# dealias = 1 - -# def __init__(self, dist, axis): -# self.dist = dist -# self.axes = (axis,) -# self.grid_basis = self.Constant - -# def grid_shape(self, scale): -# """Compute scaled grid size.""" -# # No scaling for constant spaces -# return self.shape - -# def grids(self, scales): -# # No scaling for constant spaces -# return (np.array([0.]),) - -# def Constant(self): -# return basis.Constant(self) - - -class Interval(Space): - """Base class for 1D intervals.""" - pass - - -class ParityInterval(Interval): - """Definite-parity periodic interval for Sine and Cosine series.""" - - native_bounds = (0, np.pi) - - def __init__(self, *args, **kw): - super().__init__(*args, **kw) - self.kmax = self.size - 1 - - def _native_grid(self, scales): - """Evenly spaced interior grid: cos(N*x) = 0""" - N, = self.grid_shape(scales) - return (np.pi / N) * (np.arange(N) + 1/2) - - def Sine(self): - return basis.Sine(self) - - def Cosine(self): - return basis.Cosine(self) - - def grid_basis(self,): - return self.Cosine() - - - -class Disk(Space): - """2D disk (azimuth, radius).""" - - dim = 2 - group_shape = (1, 1) - - def __init__(self, coords, shape, radius, dist, axis, dealias=1, k0=0): - if radius <= 0: - raise ValueError("Radius must be positive.") - self.coords = coords - self.shape = shape - self.radius = radius - self.dist = dist - self.axis = axis - self.dealias = dealias - self.k0 = k0 - self._check_coords() - self.azimuth_space = PeriodicInterval(coords[0], size=self.shape[0], - bounds=(0, 2*np.pi), dist=dist, axis=axis, dealias=dealias) - self.radial_COV = AffineCOV((0, 1), (0, radius)) - - def grids(self, scale): - N0, N1 = self.grid_shape(scale) - azimuth_grid, = self.azimuth_space.grids(scale) - radial_grid = self._radial_grid(N1) - return (azimuth_grid, radial_grid) - - def _radial_grid(self, Ng): - from ..libraries 
import dedalus_sphere - z_grid, weights = dedalus_sphere.disk128.quadrature(Ng-1, k=k0, niter=3) - # z = 2*r**2 - 1 - return np.sqrt((z_grid + 1)/2).astype(np.float64) - - -class Annulus(Space): - """2D annulus (azimuth, radius).""" - - dim = 2 - group_shape = (1, 1) - - def __init__(self, coords, shape, radial_bounds, dist, axis, dealias=1): - r0, r1 = radial_bounds - if r0 <= 0: - raise ValueError("Inner radius must be positive.") - if r1 <= r0: - raise ValueError("Outer radius must be larger than inner radius.") - self.coords = coords - self.shape = shape - self.radial_bounds = radial_bounds - self.r0, self.r1 = r0, r1 - self.dist = dist - self.axis = axis - self.dealias = dealias - self._check_coords() - self.azimuth_space = PeriodicInterval(coords[0], size=self.shape[0], - bounds=(0, 2*np.pi), dist=dist, axis=axis, dealias=dealias) - self.radial_space = FiniteInterval(coords[1], size=shape[1], - bounds=radial_bounds, dist=dist, axis=axis+1, dealias=dealias) - - def grids(self, scale): - azimuth_grid, = self.azimuth_space.grids(scale) - radial_grid, = self.radial_space.grids(scale) - return (azimuth_grid, radial_grid) - - -class SphericalShell(Space): - """Spherical shell/annulus (azimuth, colatitude, radius).""" - - dim = 3 - group_shape = (1, 1, 1) - - def __init__(self, coords, shape, radial_bounds, dist, axis, dealias=1): - r0, r1 = radial_bounds - if r0 <= 0: - raise ValueError("Inner radius must be positive.") - if r1 <= r0: - raise ValueError("Outer radius must be larger than inner radius.") - self.coords = coords - self.shape = shape - self.radial_bounds = radial_bounds - self.r0, self.r1 = r0, r1 - self.dist = dist - self.axis = axis - self.dealias = dealias - self._check_coords() - self.inner_sphere_space = Sphere(coords[:2], shape[:2], r0, dist, axis, dealias=dealias) - self.outer_sphere_space = Sphere(coords[:2], shape[:2], r1, dist, axis, dealias=dealias) - self.radial_space = FiniteInterval(coords[2], shape[2], radial_bounds, dist, axis+2, 
dealias=dealias) - - def grids(self, scale): - azimuth_grid, colatitude_grid = self.inner_sphere_space.grids(scale) - radial_grid, = self.radial_space.grids(scale) - return (azimuth_grid, colatitude_grid, radial_grid) - - - - - - - - - -# class Sheet(Space): -# """Base class for 1-dimensional spaces.""" - -# dim = 1 - -# def __init__(self, names, shape, bounds, domain, axis, dealias=1): -# self.name = name -# self.base_grid_size = base_grid_size -# self.domain = domain -# self.axis = axis -# self.axes = np.arange(axis, axis+self.dim) -# self.dealias = dealias - -# for index in range(self.dim): -# domain.spaces[axis+index].append(self) -# domain.space_dict[name] = self - -# def grid_size(self, scale): -# """Compute scaled grid size.""" -# grid_size = float(scale) * self.base_grid_size -# if not grid_size.is_integer(): -# raise ValueError("Scaled grid size is not an integer: %f" %grid_size) -# return int(grid_size) - - - -# @CachedMethod -# def local_grid(self, scales=None): -# """Return local grid along one axis.""" -# scales = self.domain.remedy_scales(scales) -# axis = self.axis -# # Get local part of global basis grid -# elements = np.ix_(*self.domain.dist.grid_layout.local_elements(self.subdomain, scales)) -# grid = self.grid(scales[axis]) -# local_grid = grid[elements[axis]] -# # Reshape as multidimensional vector -# #local_grid = reshape_vector(local_grid, self.domain.dim, axis) - -# return local_grid - -# @CachedMethod -# def grid_field(self, scales=None): -# """Return field object representing grid.""" -# from .field import Field -# grid = Field(name=self.name, domain=self.domain, bases=[self.grid_basis]) -# grid.preset_scales(scales) -# grid.set_local_data(self.local_grid(scales)) -# return grid - diff --git a/dedalus/core/vectorspaces.py b/dedalus/core/vectorspaces.py deleted file mode 100644 index ec8eafaa..00000000 --- a/dedalus/core/vectorspaces.py +++ /dev/null @@ -1,38 +0,0 @@ - - - -import numpy as np - - -""" -Idea: just make VS based on string 
coordinates? -Eliminates issues with 2sphere basis with 3sphere vectors? - -""" - - -class VectorSpace: - """Collection of coordinates forming a vector space.""" - - def __init__(self, spaces): - from .domain import Domain - self.domain = Domain(spaces[0].dist, spaces) - self.spaces = self.domain.spaces - space_dims = [space.dim for space in self.spaces] - space_indeces = np.cumsum(space_dims) - space_dims[0] - self.indeces = dict(zip(self.spaces, space_indeces)) - self.dim = sum(space_dims) - - def get_index(self, space): - return self.indeces[space] - - -class TensorSignature: - - def __init__(self, vector_spaces): - self.vector_spaces = vector_spaces - self.tensor_order = len(vector_spaces) - self.tensor_shape = tuple(vs.dim for vs in vector_spaces) - self.n_components = prod(self.tensor_shape) - - From 38096b8c7063276aac356a92dfa395828f29f362 Mon Sep 17 00:00:00 2001 From: "Keaton J. Burns" Date: Thu, 21 Dec 2023 14:42:14 -0500 Subject: [PATCH 14/19] Fix test selection --- dedalus/tests/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dedalus/tests/__init__.py b/dedalus/tests/__init__.py index 3430177b..1ad802b8 100644 --- a/dedalus/tests/__init__.py +++ b/dedalus/tests/__init__.py @@ -11,7 +11,7 @@ def base_cmd(): workers = os.getenv("PYTEST_WORKERS", "auto") - return ["--ignore=test_spherical_ncc.py", f"--workers={workers}"] + return [f"--ignore={testpath}/test_spherical_ncc.py", f"--workers={workers}"] def test(report=False): """Run tests.""" From f3a154f16207e6b5008a66dd4401576159513f7f Mon Sep 17 00:00:00 2001 From: "Keaton J. 
Burns" Date: Thu, 21 Dec 2023 17:00:28 -0500 Subject: [PATCH 15/19] DirectProduct CFL and change basis grid methods to take basis-sized scales --- dedalus/core/basis.py | 150 +++++++++++++++++++----------------- dedalus/core/distributor.py | 7 +- dedalus/core/operators.py | 7 +- dedalus/tests/test_cfl.py | 12 +-- 4 files changed, 92 insertions(+), 84 deletions(-) diff --git a/dedalus/core/basis.py b/dedalus/core/basis.py index ba5e9e30..b28bca48 100644 --- a/dedalus/core/basis.py +++ b/dedalus/core/basis.py @@ -15,7 +15,7 @@ from ..tools.array import reshape_vector, axindex, axslice, interleave_matrices from ..tools.dispatch import MultiClass, SkipDispatchException from ..tools.general import unify, DeferredTuple -from .coords import Coordinate, CartesianCoordinates, S2Coordinates, SphericalCoordinates, PolarCoordinates, AzimuthalCoordinate +from .coords import Coordinate, CartesianCoordinates, S2Coordinates, SphericalCoordinates, PolarCoordinates, AzimuthalCoordinate, DirectProduct from .domain import Domain from .field import Operand, LockedField from .future import FutureLockedField @@ -359,8 +359,7 @@ def matrix_dependence(self, matrix_coupling): def global_grids(self, dist, scales): """Global grids.""" - scale = scales[dist.first_axis(self)] - return (self.global_grid(dist, scale),) + return (self.global_grid(dist, scales[0]),) def global_grid(self, dist, scale): """Global grid.""" @@ -370,8 +369,7 @@ def global_grid(self, dist, scale): def local_grids(self, dist, scales): """Local grids.""" - scale = scales[dist.first_axis(self)] - return (self.local_grid(dist, scale),) + return (self.local_grid(dist, scales[0]),) def local_grid(self, dist, scale): """Local grid.""" @@ -380,15 +378,13 @@ def local_grid(self, dist, scale): problem_grid = self.COV.problem_coord(native_grid) return reshape_vector(problem_grid, dim=dist.dim, axis=dist.get_basis_axis(self)) - def global_grid_spacing(self, dist, scale=None): + def global_grid_spacing(self, dist, scale): """Global 
grid spacings.""" grid = self.global_grid(dist, scale=scale) - return np.gradient(grid, axis=dist.get_basis_axis(self), edge_order=2) + return np.gradient(grid, axis=dist.first_axis(self), edge_order=2) - def local_grid_spacing(self, dist, scale=None): + def local_grid_spacing(self, dist, scale): """Local grids spacings.""" - if scale is None: - scale = 1 global_spacing = self.global_grid_spacing(dist, scale=scale) local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale) local_spacing_flat = np.ravel(global_spacing)[local_elements[dist.get_basis_axis(self)]] @@ -1942,32 +1938,30 @@ def ell_reversed(self, dist): return ell_reversed def global_grids(self, dist, scales): - first_axis = dist.first_axis(self) - return (self.global_grid_azimuth(dist, scales[first_axis]), - self.global_grid_radius(dist, scales[first_axis+1])) + return (self.global_grid_azimuth(dist, scales[0]), + self.global_grid_radius(dist, scales[1])) def global_grid_radius(self, dist, scale): r = self.radial_COV.problem_coord(self._native_radius_grid(scale)) return reshape_vector(r, dim=dist.dim, axis=dist.get_basis_axis(self)+1) @CachedMethod - def global_grid_spacing(self, dist, axis, scales=None): + def global_grid_spacing(self, dist, subaxis, scales): """Global grids spacings.""" - if scales is None: scales = (1,1) - return np.gradient(self.global_grids(dist, scales=scales)[axis], axis=axis, edge_order=2) + axis = dist.first_axis(self) + subaxis + return np.gradient(self.global_grids(dist, scales=scales)[subaxis], axis=axis, edge_order=2) @CachedMethod - def local_grid_spacing(self, dist, axis, scales=None): + def local_grid_spacing(self, dist, axis, scales): """Local grids spacings.""" + subaxis = axis - dist.first_axis(self) global_spacing = self.global_grid_spacing(dist, axis, scales=scales) - if scales is None: scales = (1,1) - local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scales[axis])[axis] + local_elements = 
dist.grid_layout.local_elements(self.domain(dist), scales=scales[subaxis])[axis] return reshape_vector(np.ravel(global_spacing)[local_elements], dim=dist.dim, axis=axis) def local_grids(self, dist, scales): - first_axis = dist.first_axis(self) - return (self.local_grid_azimuth(dist, scales[first_axis]), - self.local_grid_radius(dist, scales[first_axis+1])) + return (self.local_grid_azimuth(dist, scales[0]), + self.local_grid_radius(dist, scales[1])) def forward_transform_azimuth_Mmax0(self, field, axis, gdata, cdata): # slice_axis = axis + len(field.tensorsig) @@ -2120,11 +2114,11 @@ def __rmatmul__(self, other): def global_grid_radius(self, dist, scale): r = self._radius_grid(scale) - return reshape_vector(r, dim=dist.dim, axis=dist.get_basis_axis(self)+1) + return reshape_vector(r, dim=dist.dim, axis=dist.first_axis(self)+1) def local_grid_radius(self, dist, scale): r = self._radius_grid(scale) - local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale)[dist.get_basis_axis(self)+1] + local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale)[dist.first_axis(self)+1] return reshape_vector(r[local_elements], dim=dist.dim, axis=dist.get_basis_axis(self)+1) @CachedMethod @@ -2623,8 +2617,11 @@ class ConvertConstantDisk(operators.ConvertConstant, operators.PolarMOperator): def __init__(self, operand, output_basis, out=None): super().__init__(operand, output_basis, out=out) - if self.coords in operand.tensorsig: - raise ValueError("Tensors not yet supported.") + for coordsys in operand.tensorsig: + if self.coords == coordsys: + raise ValueError("Tensors not yet supported.") + if isinstance(coordsys, DirectProduct) and self.coords in coordsys.coordsystems: + raise ValueError("DirectProduct tensors not yet supported.") def spinindex_out(self, spinindex_in): return (spinindex_in,) @@ -3013,32 +3010,30 @@ def ell_maps(self, dist): return tuple(ell_maps) def global_grids(self, dist, scales): - first_axis = dist.first_axis(self) 
- return (self.global_grid_azimuth(dist, scales[first_axis]), - self.global_grid_colatitude(dist, scales[first_axis+1])) + return (self.global_grid_azimuth(dist, scales[0]), + self.global_grid_colatitude(dist, scales[1])) def global_grid_colatitude(self, dist, scale): theta = self._native_colatitude_grid(scale) return reshape_vector(theta, dim=dist.dim, axis=dist.get_basis_axis(self)+1) @CachedMethod - def global_grid_spacing(self, dist, axis, scales=None): + def global_grid_spacing(self, dist, subaxis, scales): """Global grids spacings.""" - if scales is None: scales = (1,1) - return np.gradient(self.global_grids(dist, scales=scales)[axis], axis=axis, edge_order=2) + axis = dist.first_axis(self) + subaxis + return np.gradient(self.global_grids(dist, scales=scales)[subaxis], axis=axis, edge_order=2) @CachedMethod - def local_grid_spacing(self, dist, axis, scales=None): + def local_grid_spacing(self, dist, subaxis, scales): """Local grids spacings.""" - global_spacing = self.global_grid_spacing(dist, axis, scales=scales) - if scales is None: scales = (1,1) - local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scales[axis])[axis] + axis = dist.first_axis(self) + subaxis + global_spacing = self.global_grid_spacing(dist, subaxis, scales=scales) + local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scales[subaxis])[axis] return reshape_vector(np.ravel(global_spacing)[local_elements], dim=dist.dim, axis=axis) def local_grids(self, dist, scales): - first_axis = dist.first_axis(self) - return (self.local_grid_azimuth(dist, scales[first_axis]), - self.local_grid_colatitude(dist, scales[first_axis+1])) + return (self.local_grid_azimuth(dist, scales[0]), + self.local_grid_colatitude(dist, scales[1])) def local_grid_colatitude(self, dist, scale): local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scale)[dist.get_basis_axis(self)+1] @@ -4209,33 +4204,31 @@ def constant_mode_value(self): return 
self.radial_basis.constant_mode_value / np.sqrt(2) def global_grids(self, dist, scales): - first_axis = dist.first_axis(self) - return (self.global_grid_azimuth(dist, scales[first_axis]), - self.global_grid_colatitude(dist, scales[first_axis+1]), - self.global_grid_radius(dist, scales[first_axis+2])) + return (self.global_grid_azimuth(dist, scales[0]), + self.global_grid_colatitude(dist, scales[1]), + self.global_grid_radius(dist, scales[2])) def local_grids(self, dist, scales): - first_axis = dist.first_axis(self) - return (self.local_grid_azimuth(dist, scales[first_axis]), - self.local_grid_colatitude(dist, scales[first_axis+1]), - self.local_grid_radius(dist, scales[first_axis+2])) + return (self.local_grid_azimuth(dist, scales[0]), + self.local_grid_colatitude(dist, scales[1]), + self.local_grid_radius(dist, scales[2])) @CachedMethod - def global_grid_spacing(self, dist, axis, scales=None): + def global_grid_spacing(self, dist, subaxis, scales): """Global grids spacings.""" - if scales is None: scales = (1,1,1) - grid = self.global_grids(dist, scales=scales)[axis] + grid = self.global_grids(dist, scales=scales)[subaxis] if grid.size == 1: return np.array([np.inf,], dtype=grid.dtype) else: + axis = dist.first_axis(self) + subaxis return np.gradient(grid, axis=axis, edge_order=2) @CachedMethod - def local_grid_spacing(self, dist, axis, scales=None): + def local_grid_spacing(self, dist, subaxis, scales): """Local grids spacings.""" - if scales is None: scales = (1,1,1) - global_spacing = self.global_grid_spacing(dist, axis, scales=scales) - local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scales[axis])[axis] + axis = dist.first_axis(self) + subaxis + global_spacing = self.global_grid_spacing(dist, subaxis, scales=scales) + local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scales[subaxis])[axis] return reshape_vector(np.ravel(global_spacing)[local_elements], dim=dist.dim, axis=axis) def local_elements(self): @@ 
-6091,11 +6084,11 @@ def operate(self, out): class CartesianAdvectiveCFL(operators.AdvectiveCFL): - input_coord_type = CartesianCoordinates - input_basis_type = (ComplexFourier, RealFourier, Jacobi) + input_coord_type = (CartesianCoordinates, Coordinate) @CachedMethod - def cfl_spacing(self, velocity): + def cfl_spacing(self): + velocity = self.operand coordsys = velocity.tensorsig[0] spacing = [] for i, c in enumerate(coordsys.coords): @@ -6120,16 +6113,30 @@ def cfl_spacing(self, velocity): return spacing def compute_cfl_frequency(self, velocity, out): - u_mag = np.abs(velocity.data) - out.data[:] = 0 - for i, dx in enumerate(self.cfl_spacing(velocity)): - out.data += u_mag[i] / dx + u_mag = np.abs(velocity) + for i, dx in enumerate(self.cfl_spacing()): + out += u_mag[i] / dx + + +class DirectProductAdvectiveCFL(operators.AdvectiveCFL): + + input_coord_type = DirectProduct + + def __init__(self, operand, coords): + super().__init__(operand, coords) + self.comps = [operators.DirectProductComponent(operand, index=0, comp=cs) for cs in coords.coordsystems] + self.comp_cfls = [operators.AdvectiveCFL(comp, comp.comp) for comp in self.comps] + + def compute_cfl_frequency(self, velocity, out): + # Add contributions from each component + for comp, comp_cfl in zip(self.comps, self.comp_cfls): + comp_velocity = velocity[comp.comp_slices] + comp_cfl.compute_cfl_frequency(comp_velocity, out) class PolarAdvectiveCFL(operators.AdvectiveCFL): input_coord_type = PolarCoordinates - input_basis_type = (DiskBasis, AnnulusBasis) @CachedMethod def cfl_spacing(self): @@ -6148,22 +6155,22 @@ def cfl_spacing(self): def compute_cfl_frequency(self, velocity, out): #Assumes velocity is a 2-length vector over polar coordinates - u_theta, u_r = np.abs(velocity.data) - out.data[:] = u_theta / self.cfl_spacing()[0] - out.data += u_r / self.cfl_spacing()[1] + u_theta, u_r = np.abs(velocity) + out += u_theta / self.cfl_spacing()[0] + out += u_r / self.cfl_spacing()[1] class 
S2AdvectiveCFL(operators.AdvectiveCFL): input_coord_type = S2Coordinates - input_basis_type = SphereBasis @CachedMethod def cfl_spacing(self, r=None): #Assumes velocity is a 2-length vector over spherical coordinates basis = self.input_basis dealias = basis.dealias - if r is None: r = basis.radius + if r is None: + r = basis.radius s2_spacing = basis.local_grid_spacing(self.dist, 0, scales=dealias) if basis.Lmax == 0: s2_spacing[:] = np.inf @@ -6174,16 +6181,15 @@ def cfl_spacing(self, r=None): def compute_cfl_frequency(self, velocity, out): #compute u_mag * sqrt(ell*(ell+1)) / r #Assumes velocity is a 2-length vector over spherical coordinates - u_phi, u_theta = velocity.data[0], velocity.data[1] + u_phi, u_theta = velocity[0], velocity[1] u_mag = np.sqrt(u_phi**2 + u_theta**2) #Again assumes that this field only has an S2 basis - out.data[:] = u_mag / self.cfl_spacing()[0] + out += u_mag / self.cfl_spacing()[0] class Spherical3DAdvectiveCFL(operators.AdvectiveCFL): input_coord_type = SphericalCoordinates - input_basis_type = (BallBasis, ShellBasis) @CachedMethod def cfl_spacing(self): @@ -6201,8 +6207,8 @@ def cfl_spacing(self): def compute_cfl_frequency(self, velocity, out): #Assumes velocity is a 3-length vector in spherical coordinates S2AdvectiveCFL.compute_cfl_frequency(self, velocity, out) - u_r = np.abs(velocity.data[2]) - out.data += u_r / self.cfl_spacing()[1] + u_r = np.abs(velocity[2]) + out += u_r / self.cfl_spacing()[1] from . import transforms diff --git a/dedalus/core/distributor.py b/dedalus/core/distributor.py index 52aa964e..36eff31b 100644 --- a/dedalus/core/distributor.py +++ b/dedalus/core/distributor.py @@ -288,8 +288,11 @@ def local_grid(self, basis, scale=None): def local_grids(self, *bases, scales=None): scales = self.remedy_scales(scales) - # TODO: remove from bases and do it all here? 
- return sum((basis.local_grids(self, scales=scales) for basis in bases), ()) + grids = [] + for basis in bases: + basis_scales = scales[self.first_axis(basis):self.last_axis(basis)+1] + grids.extend(basis.local_grids(self, scales=basis_scales)) + return grids def local_modes(self, basis): # TODO: remove from bases and do it all here? diff --git a/dedalus/core/operators.py b/dedalus/core/operators.py index 63dc98d6..f762846b 100644 --- a/dedalus/core/operators.py +++ b/dedalus/core/operators.py @@ -4319,9 +4319,7 @@ def _check_args(cls, operand, coords): # Dispatch by operand basis if isinstance(operand, Operand): if isinstance(coords, cls.input_coord_type): - basis = operand.domain.get_basis(coords) - if isinstance(basis, cls.input_basis_type): - return True + return True return False def __init__(self, operand, coords): @@ -4357,7 +4355,8 @@ def operate(self, out): # Set output lock out.lock_axis_to_grid(0) # Compute CFL frequencies - self.compute_cfl_frequency(arg, out) + out.data[:] = 0 + self.compute_cfl_frequency(arg.data, out.data) def compute_cfl_frequency(self, velocity, out): """Return a scalar multi-D field of the cfl frequency everywhere in the domain.""" diff --git a/dedalus/tests/test_cfl.py b/dedalus/tests/test_cfl.py index 09742144..11f5ac32 100644 --- a/dedalus/tests/test_cfl.py +++ b/dedalus/tests/test_cfl.py @@ -87,8 +87,8 @@ def test_full_cfl_fourier_chebyshev(Nx, Nz, dealias, dtype, timestepper, safety, solver.step(1) dt_cfl = cfl.compute_timestep() cfl_op = d3.AdvectiveCFL(u, c) - cfl_freq = np.abs(u['g'][0] / cfl_op.cfl_spacing(u)[0]) - cfl_freq += np.abs(u['g'][1] / cfl_op.cfl_spacing(u)[1]) + cfl_freq = np.abs(u['g'][0] / cfl_op.cfl_spacing()[0]) + cfl_freq += np.abs(u['g'][1] / cfl_op.cfl_spacing()[1]) cfl_freq = np.max(cfl_freq) dt_target = safety / cfl_freq assert np.allclose(dt_cfl, dt_target) @@ -108,7 +108,7 @@ def test_cfl_fourier(N, L, dealias, dtype): u.fill_random(layout='g') cfl = d3.AdvectiveCFL(u, c) cfl_freq = 
cfl.evaluate()['g'] - target_freq = np.abs(u['g']) / cfl.cfl_spacing(u)[0] + target_freq = np.abs(u['g']) / cfl.cfl_spacing()[0] assert np.allclose(cfl_freq, target_freq) @@ -126,7 +126,7 @@ def test_cfl_chebyshev(N, L, dealias, dtype): u.fill_random(layout='g') cfl = d3.AdvectiveCFL(u, c) cfl_freq = cfl.evaluate()['g'] - target_freq = np.abs(u['g']) / cfl.cfl_spacing(u)[0] + target_freq = np.abs(u['g']) / cfl.cfl_spacing()[0] assert np.allclose(cfl_freq, target_freq) @@ -146,8 +146,8 @@ def test_cfl_fourier_chebyshev(Nx, Nz, dealias, dtype, z_velocity_mag): u.fill_random(layout='g') cfl = d3.AdvectiveCFL(u, c) cfl_freq = cfl.evaluate()['g'] - target_freq = np.abs(u['g'][0]) / cfl.cfl_spacing(u)[0] - target_freq += np.abs(u['g'][1]) / cfl.cfl_spacing(u)[1] + target_freq = np.abs(u['g'][0]) / cfl.cfl_spacing()[0] + target_freq += np.abs(u['g'][1]) / cfl.cfl_spacing()[1] assert np.allclose(cfl_freq, target_freq) From c13699d7cd8674a3f06c70e9d39e9c61c36d12c3 Mon Sep 17 00:00:00 2001 From: "Keaton J. Burns" Date: Thu, 21 Dec 2023 17:19:01 -0500 Subject: [PATCH 16/19] Fix basis axis logic --- dedalus/core/domain.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dedalus/core/domain.py b/dedalus/core/domain.py index 97764768..551882a5 100644 --- a/dedalus/core/domain.py +++ b/dedalus/core/domain.py @@ -105,7 +105,7 @@ def get_basis_subaxis(self, coord): axis = self.dist.get_axis(coord) for basis in self.bases: basis_axis = self.dist.get_basis_axis(basis) - if (axis >= basis_axis) and (axis <= basis_axis + basis.dim): + if basis_axis <= axis < basis_axis + basis.dim: return axis - basis_axis def get_coord(self, name): From 55230c91b61dc108e132024c27dbe8daf0b6e121 Mon Sep 17 00:00:00 2001 From: "Keaton J. 
Burns" Date: Thu, 21 Dec 2023 18:01:04 -0500 Subject: [PATCH 17/19] Fix grid spacing axis bug --- dedalus/core/basis.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/dedalus/core/basis.py b/dedalus/core/basis.py index b28bca48..acd4f55b 100644 --- a/dedalus/core/basis.py +++ b/dedalus/core/basis.py @@ -1952,10 +1952,10 @@ def global_grid_spacing(self, dist, subaxis, scales): return np.gradient(self.global_grids(dist, scales=scales)[subaxis], axis=axis, edge_order=2) @CachedMethod - def local_grid_spacing(self, dist, axis, scales): + def local_grid_spacing(self, dist, subaxis, scales): """Local grids spacings.""" - subaxis = axis - dist.first_axis(self) - global_spacing = self.global_grid_spacing(dist, axis, scales=scales) + axis = dist.first_axis(self) + subaxis + global_spacing = self.global_grid_spacing(dist, subaxis, scales=scales) local_elements = dist.grid_layout.local_elements(self.domain(dist), scales=scales[subaxis])[axis] return reshape_vector(np.ravel(global_spacing)[local_elements], dim=dist.dim, axis=axis) @@ -2353,7 +2353,6 @@ def __init__(self, coordsys, shape, dtype, radius=1, k=0, alpha=0, dealias=(1,1) self.radial_COV = AffineCOV((0, 1), (0, radius)) if self.mmax > 2*self.Nmax: logger.warning("You are using more azimuthal modes than can be resolved with your current radial resolution") - #raise ValueError("shape[0] cannot be more than twice shape[1].") self.grid_params = (coordsys, dtype, radius, alpha, dealias, azimuth_library, radius_library) self.edge = self.S1_basis(radius) From 5aa1a837aa97f88f3a5da0d00e9714a746361681 Mon Sep 17 00:00:00 2001 From: "Keaton J. 
Burns" Date: Fri, 22 Dec 2023 16:03:58 -0500 Subject: [PATCH 18/19] Cylinder NCC tests --- dedalus/core/basis.py | 4 +- dedalus/tests/__init__.py | 2 +- dedalus/tests/test_cylinder_ncc.py | 393 +++++++++++++++++++++++++++++ dedalus/tests/test_polar_ncc.py | 2 + 4 files changed, 398 insertions(+), 3 deletions(-) create mode 100644 dedalus/tests/test_cylinder_ncc.py diff --git a/dedalus/core/basis.py b/dedalus/core/basis.py index acd4f55b..bd06be95 100644 --- a/dedalus/core/basis.py +++ b/dedalus/core/basis.py @@ -2273,7 +2273,8 @@ def conversion_matrix(self, m, spintotal, dk): @classmethod def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_basis, coeffs, ncc_comp, arg_comp, out_comp, ncc_tensorsig, arg_tensorsig, out_tensorsig, cutoff=1e-6): - m = subproblem.group[0] # HACK + first_axis = subproblem.dist.first_axis(out_basis) + m = subproblem.group[first_axis] spintotal_arg = out_basis.spintotal(arg_tensorsig, arg_comp) # Jacobi parameters a_ncc = ncc_basis.k + ncc_basis.alpha[0] @@ -2563,7 +2564,6 @@ def _last_axis_component_ncc_matrix(cls, subproblem, ncc_basis, arg_basis, out_b b_ncc = regtotal_ncc N = ncc_basis.n_size(m) d = regtotal_ncc - abs(diff_regtotal) - if (d >= 0) and (d % 2 == 0): J = arg_basis.operator_matrix('Z', m, spintotal_arg) A, B = clenshaw.jacobi_recursion(N, a_ncc, b_ncc, J) diff --git a/dedalus/tests/__init__.py b/dedalus/tests/__init__.py index 1ad802b8..ce02ae0f 100644 --- a/dedalus/tests/__init__.py +++ b/dedalus/tests/__init__.py @@ -11,7 +11,7 @@ def base_cmd(): workers = os.getenv("PYTEST_WORKERS", "auto") - return [f"--ignore={testpath}/test_spherical_ncc.py", f"--workers={workers}"] + return [f"--ignore={testpath}/test_spherical_ncc.py", f"--ignore={testpath}/test_cylinder_ncc.py", f"--workers={workers}"] def test(report=False): """Run tests.""" diff --git a/dedalus/tests/test_cylinder_ncc.py b/dedalus/tests/test_cylinder_ncc.py new file mode 100644 index 00000000..e6130bc7 --- /dev/null +++ 
b/dedalus/tests/test_cylinder_ncc.py @@ -0,0 +1,393 @@ +"""Tests for NCCs that depend only on radius in the cylinder.""" + +import pytest +import numpy as np +from dedalus.core import coords, distributor, basis, field, operators, problems, solvers, arithmetic +from dedalus.core import future +from dedalus.tools.array import apply_matrix +from dedalus.tools.cache import CachedFunction + + +dot = arithmetic.DotProduct +length = 1.88 +radius_disk = 1.5 +radii_annulus = (0.5, 3) + +@CachedFunction +def build_periodic_cylinder(Nz, Nphi, Nr, alpha, k, dealias, dtype): + cz = coords.Coordinate('z') + cp = coords.PolarCoordinates('phi', 'r') + c = coords.DirectProduct(cz, cp) + d = distributor.Distributor(c, dtype=dtype) + bz = basis.Fourier(cz, Nz, bounds=(0, length), dealias=dealias, dtype=dtype) + bp = basis.DiskBasis(cp, (Nphi, Nr), dtype=dtype, radius=radius_disk, alpha=alpha, k=k, dealias=dealias) + z, phi, r = d.local_grids(bz, bp, scales=dealias) + x, y = cp.cartesian(phi, r) + return c, d, (bz, bp), z, phi, r, x, y + +@CachedFunction +def build_periodic_cylindrical_annulus(Nz, Nphi, Nr, alpha, k, dealias, dtype): + cz = coords.Coordinate('z') + cp = coords.PolarCoordinates('phi', 'r') + c = coords.DirectProduct(cz, cp) + d = distributor.Distributor(c, dtype=dtype) + bz = basis.Fourier(cz, Nz, bounds=(0, length), dealias=dealias, dtype=dtype) + bp = basis.AnnulusBasis(cp, (Nphi, Nr), dtype=dtype, radii=radii_annulus, alpha=alpha, k=k, dealias=dealias) + z, phi, r = d.local_grids(bz, bp, scales=dealias) + x, y = cp.cartesian(phi, r) + return c, d, (bz, bp), z, phi, r, x, y + + +Nz_range = [8] +Nphi_range = [16] +Nr_range = [16] +alpha_range = [0] +k_range = [0, 1] +dealias_range = [1, 3/2] +basis_range = [build_periodic_cylinder, build_periodic_cylindrical_annulus] +dtype_range = [np.float64, np.complex128] + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) 
+@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +@pytest.mark.parametrize('ncc_first', [True, False]) +def test_scalar_prod_scalar(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis, ncc_first): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + f = d.Field(bases=b[1].radial_basis) + g = d.Field(bases=b) + f.preset_scales(dealias) + g.preset_scales(dealias) + f['g'] = r**4 + g['g'] = 3*x**2 + 2*y + x*np.sin(4*np.pi*z/length) + vars = [g] + if ncc_first: + w0 = f * g + else: + w0 = g * f + w1 = w0.reinitialize(ncc=True, ncc_vars=vars) + problem = problems.LBVP(vars) + problem.add_equation((w1, 0)) + solver = solvers.LinearBoundaryValueSolver(problem, matsolver='SuperluNaturalSpsolve', matrix_coupling=[False,False,True]) + w1.store_ncc_matrices(vars, solver.subproblems) + w0 = w0.evaluate() + w0.change_scales(1) + w1 = w1.evaluate_as_ncc() + assert np.allclose(w0['g'], w1['g']) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +@pytest.mark.parametrize('ncc_first', [True, False]) +def test_scalar_prod_vector(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis, ncc_first): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + f = d.Field(bases=b[1].radial_basis) + g = d.Field(bases=b) + f.preset_scales(dealias) + g.preset_scales(dealias) + f['g'] = r**4 + g['g'] = 3*x**2 + 2*y + x*np.sin(4*np.pi*z/length) + u = operators.Gradient(g, c).evaluate() + vars = [u] + if ncc_first: + w0 = f * u + else: + w0 = u * f + w1 = 
w0.reinitialize(ncc=True, ncc_vars=vars) + problem = problems.LBVP(vars) + problem.add_equation((w1, 0)) + solver = solvers.LinearBoundaryValueSolver(problem, matsolver='SuperluNaturalSpsolve', matrix_coupling=[False,False,True]) + w1.store_ncc_matrices(vars, solver.subproblems) + w0 = w0.evaluate() + w0.change_scales(1) + w1 = w1.evaluate_as_ncc() + assert np.allclose(w0['g'], w1['g']) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +@pytest.mark.parametrize('ncc_first', [True, False]) +def test_scalar_prod_tensor(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis, ncc_first): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + f = d.Field(bases=b[1].radial_basis) + g = d.Field(bases=b) + f.preset_scales(dealias) + g.preset_scales(dealias) + f['g'] = r**4 + g['g'] = 3*x**2 + 2*y + x*np.sin(4*np.pi*z/length) + T = operators.Gradient(operators.Gradient(g, c), c).evaluate() + vars = [T] + if ncc_first: + w0 = f * T + else: + w0 = T * f + w1 = w0.reinitialize(ncc=True, ncc_vars=vars) + problem = problems.LBVP(vars) + problem.add_equation((w1, 0)) + solver = solvers.LinearBoundaryValueSolver(problem, matsolver='SuperluNaturalSpsolve', matrix_coupling=[False,False,True]) + w1.store_ncc_matrices(vars, solver.subproblems) + w0 = w0.evaluate() + w0.change_scales(1) + w1 = w1.evaluate_as_ncc() + assert np.allclose(w0['g'], w1['g']) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', 
dtype_range) +@pytest.mark.parametrize('basis', basis_range) +@pytest.mark.parametrize('ncc_first', [True, False]) +def test_vector_prod_scalar(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis, ncc_first): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + f = d.Field(bases=b[1].radial_basis) + g = d.Field(bases=b) + f.preset_scales(dealias) + g.preset_scales(dealias) + f['g'] = r**4 + g['g'] = 3*x**2 + 2*y + x*np.sin(4*np.pi*z/length) + u = operators.Gradient(f, c).evaluate() + vars = [g] + if ncc_first: + w0 = u * g + else: + w0 = g * u + w1 = w0.reinitialize(ncc=True, ncc_vars=vars) + problem = problems.LBVP(vars) + problem.add_equation((dot(u,u)*g, 0)) + solver = solvers.LinearBoundaryValueSolver(problem, matsolver='SuperluNaturalSpsolve', matrix_coupling=[False,False,True]) + w1.store_ncc_matrices(vars, solver.subproblems) + w0 = w0.evaluate() + w0.change_scales(1) + w1 = w1.evaluate_as_ncc() + assert np.allclose(w0['g'], w1['g']) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +@pytest.mark.parametrize('ncc_first', [True, False]) +def test_vector_prod_vector(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis, ncc_first): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + f = d.Field(bases=b[1].radial_basis) + g = d.Field(bases=b) + f.preset_scales(dealias) + g.preset_scales(dealias) + f['g'] = r**4 + g['g'] = 3*x**2 + 2*y + x*np.sin(4*np.pi*z/length) + u = operators.Gradient(f, c).evaluate() + v = operators.Gradient(g, c).evaluate() + vars = [v] + if ncc_first: + w0 = u * v + else: + w0 = v * u + w1 = w0.reinitialize(ncc=True, ncc_vars=vars) + problem = problems.LBVP(vars) + 
problem.add_equation((dot(u,u)*v, 0)) + solver = solvers.LinearBoundaryValueSolver(problem, matsolver='SuperluNaturalSpsolve', matrix_coupling=[False,False,True]) + w1.store_ncc_matrices(vars, solver.subproblems) + w0 = w0.evaluate() + w0.change_scales(1) + w1 = w1.evaluate_as_ncc() + assert np.allclose(w0['g'], w1['g']) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +@pytest.mark.parametrize('ncc_first', [True, False]) +def test_vector_dot_vector(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis, ncc_first): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + f = d.Field(bases=b[1].radial_basis) + g = d.Field(bases=b) + f.preset_scales(dealias) + g.preset_scales(dealias) + f['g'] = r**4 + g['g'] = 3*x**2 + 2*y + x*np.sin(4*np.pi*z/length) + u = operators.Gradient(f, c).evaluate() + v = operators.Gradient(g, c).evaluate() + vars = [v] + if ncc_first: + w0 = dot(u, v) + else: + w0 = dot(v, u) + w1 = w0.reinitialize(ncc=True, ncc_vars=vars) + problem = problems.LBVP(vars) + problem.add_equation((dot(u,u)*v, 0)) + solver = solvers.LinearBoundaryValueSolver(problem, matsolver='SuperluNaturalSpsolve', matrix_coupling=[False,False,True]) + w1.store_ncc_matrices(vars, solver.subproblems) + w0 = w0.evaluate() + w0.change_scales(1) + w1 = w1.evaluate_as_ncc() + assert np.allclose(w0['g'], w1['g']) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) 
+@pytest.mark.parametrize('basis', basis_range) +@pytest.mark.parametrize('ncc_first', [True, False]) +def test_vector_dot_tensor(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis, ncc_first): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + f = d.Field(bases=b[1].radial_basis) + g = d.Field(bases=b) + f.preset_scales(dealias) + g.preset_scales(dealias) + f['g'] = r**4 + g['g'] = 3*x**2 + 2*y + x*np.sin(4*np.pi*z/length) + u = operators.Gradient(f, c).evaluate() + T = operators.Gradient(operators.Gradient(g, c), c).evaluate() + vars = [T] + if ncc_first: + w0 = dot(u, T) + else: + w0 = dot(T, u) + w1 = w0.reinitialize(ncc=True, ncc_vars=vars) + problem = problems.LBVP(vars) + problem.add_equation((dot(u,u)*T, 0)) + solver = solvers.LinearBoundaryValueSolver(problem, matsolver='SuperluNaturalSpsolve', matrix_coupling=[False,False,True]) + w1.store_ncc_matrices(vars, solver.subproblems) + w0 = w0.evaluate() + w0.change_scales(1) + w1 = w1.evaluate_as_ncc() + assert np.allclose(w0['g'], w1['g']) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +@pytest.mark.parametrize('ncc_first', [True, False]) +def test_tensor_prod_scalar(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis, ncc_first): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + f = d.Field(bases=b[1].radial_basis) + g = d.Field(bases=b) + f.preset_scales(dealias) + g.preset_scales(dealias) + f['g'] = r**4 + g['g'] = 3*x**2 + 2*y + x*np.sin(4*np.pi*z/length) + T = operators.Gradient(operators.Gradient(f, c), c).evaluate() + vars = [g] + if ncc_first: + U0 = T * g + else: + U0 = g * T + U1 = U0.reinitialize(ncc=True, ncc_vars=vars) + problem = 
problems.LBVP(vars) + problem.add_equation((f*g, 0)) + solver = solvers.LinearBoundaryValueSolver(problem, matsolver='SuperluNaturalSpsolve', matrix_coupling=[False,False,True]) + U1.store_ncc_matrices(vars, solver.subproblems) + U0 = U0.evaluate() + U0.change_scales(1) + U1 = U1.evaluate_as_ncc() + assert np.allclose(U0['g'], U1['g']) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', dtype_range) +@pytest.mark.parametrize('basis', basis_range) +@pytest.mark.parametrize('ncc_first', [True, False]) +def test_tensor_dot_vector(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis, ncc_first): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + f = d.Field(bases=b[1].radial_basis) + g = d.Field(bases=b) + f.preset_scales(dealias) + g.preset_scales(dealias) + f['g'] = r**4 + g['g'] = 3*x**2 + 2*y + x*np.sin(4*np.pi*z/length) + T = operators.Gradient(operators.Gradient(f, c), c).evaluate() + u = operators.Gradient(g, c).evaluate() + vars = [u] + if ncc_first: + w0 = dot(T, u) + else: + w0 = dot(u, T) + w1 = w0.reinitialize(ncc=True, ncc_vars=vars) + problem = problems.LBVP(vars) + problem.add_equation((f*u, 0)) + solver = solvers.LinearBoundaryValueSolver(problem, matsolver='SuperluNaturalSpsolve', matrix_coupling=[False,False,True]) + w1.store_ncc_matrices(vars, solver.subproblems) + w0 = w0.evaluate() + w0.change_scales(1) + w1 = w1.evaluate_as_ncc() + assert np.allclose(w0['g'], w1['g']) + + +@pytest.mark.parametrize('Nz', Nz_range) +@pytest.mark.parametrize('Nphi', Nphi_range) +@pytest.mark.parametrize('Nr', Nr_range) +@pytest.mark.parametrize('alpha', alpha_range) +@pytest.mark.parametrize('k', k_range) +@pytest.mark.parametrize('dealias', dealias_range) +@pytest.mark.parametrize('dtype', 
dtype_range) +@pytest.mark.parametrize('basis', basis_range) +@pytest.mark.parametrize('ncc_first', [True, False]) +def test_tensor_dot_tensor(Nz, Nphi, Nr, alpha, k, dealias, dtype, basis, ncc_first): + c, d, b, z, phi, r, x, y = basis(Nz, Nphi, Nr, alpha, k, dealias, dtype) + f = d.Field(bases=b[1].radial_basis) + g = d.Field(bases=b) + f.preset_scales(dealias) + g.preset_scales(dealias) + f['g'] = r**4 + g['g'] = 3*x**2 + 2*y + x*np.sin(4*np.pi*z/length) + U = operators.Gradient(operators.Gradient(f, c), c).evaluate() + T = operators.Gradient(operators.Gradient(g, c), c).evaluate() + vars = [T] + if ncc_first: + W0 = dot(T, U) + else: + W0 = dot(U, T) + W1 = W0.reinitialize(ncc=True, ncc_vars=vars) + problem = problems.LBVP(vars) + problem.add_equation((f*T, 0)) + solver = solvers.LinearBoundaryValueSolver(problem, matsolver='SuperluNaturalSpsolve', matrix_coupling=[False,False,True]) + W1.store_ncc_matrices(vars, solver.subproblems) + W0 = W0.evaluate() + W0.change_scales(1) + W1 = W1.evaluate_as_ncc() + assert np.allclose(W0['g'], W1['g']) + diff --git a/dedalus/tests/test_polar_ncc.py b/dedalus/tests/test_polar_ncc.py index eaa7e526..301406e5 100644 --- a/dedalus/tests/test_polar_ncc.py +++ b/dedalus/tests/test_polar_ncc.py @@ -6,6 +6,8 @@ from dedalus.tools.array import apply_matrix from dedalus.tools.cache import CachedFunction +# TODO: add in alpha and k +# Seem to need higher resolution and allclose tolerance for k=1 dot = arithmetic.DotProduct dtypes = [np.float64, np.complex128] From f7a9f2f0104c2d04dc1d55f5e2cc7c48eaf7b8d3 Mon Sep 17 00:00:00 2001 From: "Keaton J. 
Burns" Date: Fri, 22 Dec 2023 16:51:16 -0500 Subject: [PATCH 19/19] NCC test tweaks --- dedalus/tests/test_cylinder_ncc.py | 2 +- dedalus/tests/test_polar_ncc.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/dedalus/tests/test_cylinder_ncc.py b/dedalus/tests/test_cylinder_ncc.py index e6130bc7..69201a16 100644 --- a/dedalus/tests/test_cylinder_ncc.py +++ b/dedalus/tests/test_cylinder_ncc.py @@ -11,7 +11,7 @@ dot = arithmetic.DotProduct length = 1.88 radius_disk = 1.5 -radii_annulus = (0.5, 3) +radii_annulus = (0.5, 1.1) @CachedFunction def build_periodic_cylinder(Nz, Nphi, Nr, alpha, k, dealias, dtype): diff --git a/dedalus/tests/test_polar_ncc.py b/dedalus/tests/test_polar_ncc.py index 301406e5..dddce84b 100644 --- a/dedalus/tests/test_polar_ncc.py +++ b/dedalus/tests/test_polar_ncc.py @@ -8,6 +8,7 @@ # TODO: add in alpha and k # Seem to need higher resolution and allclose tolerance for k=1 +# or lowering annulus radius dot = arithmetic.DotProduct dtypes = [np.float64, np.complex128]