Sun, 11 Dec 2022 23:25:53 +0200
Support arbitrary regularisation terms; implement regularisation without positivity constraints.
* Fixes the conditional gradient methods, which were incorrectly solving
  positivity-constrained subproblems even though the infinite-dimensional
  versions do not include such constraints.
* Introduces the `ExperimentV2` struct, which has `regularisation` in place
  of `α` (see the regularisation sketch after this list). The `Experiment`
  struct is now deprecated.
* The L^2-squared experiments were switched to be unconstrained, as the
  Frank-Wolfe implementations do not support constraints. (This would
be easy to add for the “fully corrective” variant, but is not
immediate for the “relaxed” variant.)
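
For illustration, here is a minimal sketch of what the `regularisation` term carried by `ExperimentV2` could look like: a Radon-norm weight with or without the positivity constraint. The enum name and variants below are assumptions made for exposition; the change itself only names `ExperimentV2` and its `regularisation` field.

/// Sketch only: a regularisation term α‖μ‖_ℳ(Ω), optionally combined with the
/// positivity constraint δ_{≥0}(μ).
pub enum Regularisation<F : Float> {
    /// α‖μ‖_ℳ(Ω) (unconstrained)
    Radon(F),
    /// α‖μ‖_ℳ(Ω) + δ_{≥0}(μ) (positivity-constrained)
    NonnegRadon(F),
}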
/*! Forward models from discrete measures to observations. */

use numeric_literals::replace_float_literals;
use nalgebra::base::{
    DMatrix,
    DVector
};
use std::iter::Zip;
use std::ops::RangeFrom;
use std::marker::PhantomData;

pub use alg_tools::linops::*;
use alg_tools::euclidean::Euclidean;
use alg_tools::norms::{
    L1, Linfinity, Norm
};
use alg_tools::bisection_tree::*;
use alg_tools::mapping::RealMapping;
use alg_tools::lingrid::*;
use alg_tools::iter::{MapX, Mappable};
use alg_tools::nalgebra_support::ToNalgebraRealField;
use alg_tools::tabledump::write_csv;
use alg_tools::error::DynError;

use crate::types::*;
use crate::measures::*;
use crate::seminorms::{
    Lipschitz,
    ConvolutionOp,
    SimpleConvolutionKernel,
};
use crate::kernels::{
    Convolution,
    AutoConvolution,
    BoundedBy,
};

pub type RNDM<F, const N : usize> = DiscreteMeasure<Loc<F,N>, F>;

/// `ForwardModel`s are bounded preadjointable linear operators $A ∈ 𝕃(𝒵(Ω); E)$
/// where $𝒵(Ω) ⊂ ℳ(Ω)$ is the space of sums of delta measures, presented by
/// [`DiscreteMeasure`], and $E$ is a [`Euclidean`] space.
pub trait ForwardModel<Domain, F : Float + ToNalgebraRealField>
: BoundedLinear<DiscreteMeasure<Domain, F>, Codomain=Self::Observable, FloatType=F>
+ GEMV<F, DiscreteMeasure<Domain, F>, Self::Observable>
+ Linear<DeltaMeasure<Domain, F>, Codomain=Self::Observable>
+ Preadjointable<DiscreteMeasure<Domain, F>, Self::Observable> {
    /// The codomain or value space (of “observables”) for this operator.
    /// It is assumed to be a [`Euclidean`] space, and therefore also (identified with)
    /// the domain of the preadjoint.
    type Observable : Euclidean<F, Output=Self::Observable>
                      + AXPY<F>
                      + Clone;

    /// Return A_*A and A_* b
    fn findim_quadratic_model(
        &self,
        μ : &DiscreteMeasure<Domain, F>,
        b : &Self::Observable
    ) -> (DMatrix<F::MixedType>, DVector<F::MixedType>);

    /// Write an observable into a file.
    fn write_observable(&self, b : &Self::Observable, prefix : String) -> DynError;

    /// Returns a zero observable
    fn zero_observable(&self) -> Self::Observable;

    /// Returns an empty (uninitialised) observable.
    ///
    /// This is used as a placeholder for temporary [`std::mem::replace`] move operations.
    fn empty_observable(&self) -> Self::Observable;
}

pub type ShiftedSensor<F, S, P, const N : usize> = Shift<Convolution<S, P>, F, N>;

/// Trait for physical convolution models. Has blanket implementation for all cases.
pub trait Spread<F : Float, const N : usize>
: 'static + Clone + Support<F, N> + RealMapping<F, N> + Bounded<F> {}

impl<F, T, const N : usize> Spread<F, N> for T
where F : Float,
      T : 'static + Clone + Support<F, N> + Bounded<F> + RealMapping<F, N> {}

/// Trait for compactly supported sensors. Has blanket implementation for all cases.
pub trait Sensor<F : Float, const N : usize> : Spread<F, N> + Norm<F, L1> + Norm<F, Linfinity> {}

impl<F, T, const N : usize> Sensor<F, N> for T
where F : Float,
      T : Spread<F, N> + Norm<F, L1> + Norm<F, Linfinity> {}

pub trait SensorGridBT<F, S, P, const N : usize> :
    Clone + BTImpl<F, N, Data=usize, Agg=Bounds<F>>
where F : Float,
      S : Sensor<F, N>,
      P : Spread<F, N> {}

impl<F, S, P, T, const N : usize> SensorGridBT<F, S, P, N> for T
where T : Clone + BTImpl<F, N, Data=usize, Agg=Bounds<F>>,
      F : Float,
      S : Sensor<F, N>,
      P : Spread<F, N> {}

// We need type alias bounds to access associated types
#[allow(type_alias_bounds)]
type SensorGridBTFN<F, S, P, BT : SensorGridBT<F, S, P, N>, const N : usize>
= BTFN<F, SensorGridSupportGenerator<F, S, P, N>, BT, N>;

/// Sensor grid forward model
#[derive(Clone)]
pub struct SensorGrid<F, S, P, BT, const N : usize>
where F : Float,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N>,
      BT : SensorGridBT<F, S, P, N>, {
    domain : Cube<F, N>,
    sensor_count : [usize; N],
    sensor : S,
    spread : P,
    base_sensor : Convolution<S, P>,
    bt : BT,
}

impl<F, S, P, BT, const N : usize> SensorGrid<F, S, P, BT, N>
where F : Float,
      BT : SensorGridBT<F, S, P, N>,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N>,
      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {

    pub fn new(
        domain : Cube<F, N>,
        sensor_count : [usize; N],
        sensor : S,
        spread : P,
        depth : BT::Depth
    ) -> Self {
        let base_sensor = Convolution(sensor.clone(), spread.clone());
        let bt = BT::new(domain, depth);
        let mut sensorgrid = SensorGrid {
            domain,
            sensor_count,
            sensor,
            spread,
            base_sensor,
            bt,
        };

        for (x, id) in sensorgrid.grid().into_iter().zip(0usize..) {
            let s = sensorgrid.shifted_sensor(x);
            sensorgrid.bt.insert(id, &s);
        }

        sensorgrid
    }

    pub fn grid(&self) -> LinGrid<F, N> {
        lingrid_centered(&self.domain, &self.sensor_count)
    }

    pub fn n_sensors(&self) -> usize {
        self.sensor_count.iter().product()
    }

    #[inline]
    fn shifted_sensor(&self, x : Loc<F, N>) -> ShiftedSensor<F, S, P, N> {
        self.base_sensor.clone().shift(x)
    }

    #[inline]
    fn _zero_observable(&self) -> DVector<F> {
        DVector::zeros(self.n_sensors())
    }
}

impl<F, S, P, BT, const N : usize> Apply<RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
where F : Float,
      BT : SensorGridBT<F, S, P, N>,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N>,
      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {

    type Output = DVector<F>;

    #[inline]
    fn apply(&self, μ : RNDM<F, N>) -> DVector<F> {
        self.apply(&μ)
    }
}

impl<'a, F, S, P, BT, const N : usize> Apply<&'a RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
where F : Float,
      BT : SensorGridBT<F, S, P, N>,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N>,
      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {

    type Output = DVector<F>;

    fn apply(&self, μ : &'a RNDM<F, N>) -> DVector<F> {
        let mut res = self._zero_observable();
        self.apply_add(&mut res, μ);
        res
    }
}

impl<F, S, P, BT, const N : usize> Linear<RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
where F : Float,
      BT : SensorGridBT<F, S, P, N>,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N>,
      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
    type Codomain = DVector<F>;
}

#[replace_float_literals(F::cast_from(literal))]
impl<F, S, P, BT, const N : usize> GEMV<F, RNDM<F, N>, DVector<F>> for SensorGrid<F, S, P, BT, N>
where F : Float,
      BT : SensorGridBT<F, S, P, N>,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N>,
      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>
{
    fn gemv(&self, y : &mut DVector<F>, α : F, μ : &RNDM<F, N>, β : F) {
        let grid = self.grid();
        if β == 0.0 {
            y.fill(0.0)
        } else if β != 1.0 {
            *y *= β; // Need to multiply first, as we have to be able to add to y.
        }
        if α == 1.0 {
            self.apply_add(y, μ)
        } else {
            for δ in μ.iter_spikes() {
                for &d in self.bt.iter_at(&δ.x) {
                    let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
                    y[d] += sensor.apply(&δ.x) * (α * δ.α);
                }
            }
        }
    }

    fn apply_add(&self, y : &mut DVector<F>, μ : &RNDM<F, N>) {
        let grid = self.grid();
        for δ in μ.iter_spikes() {
            for &d in self.bt.iter_at(&δ.x) {
                let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
                y[d] += sensor.apply(&δ.x) * δ.α;
            }
        }
    }
}

impl<F, S, P, BT, const N : usize> Apply<DeltaMeasure<Loc<F, N>, F>> for SensorGrid<F, S, P, BT, N>
where F : Float,
      BT : SensorGridBT<F, S, P, N>,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N>,
      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {

    type Output = DVector<F>;

    #[inline]
    fn apply(&self, δ : DeltaMeasure<Loc<F, N>, F>) -> DVector<F> {
        self.apply(&δ)
    }
}

impl<'a, F, S, P, BT, const N : usize> Apply<&'a DeltaMeasure<Loc<F, N>, F>> for SensorGrid<F, S, P, BT, N>
where F : Float,
      BT : SensorGridBT<F, S, P, N>,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N>,
      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {

    type Output = DVector<F>;

    fn apply(&self, δ : &DeltaMeasure<Loc<F, N>, F>) -> DVector<F> {
        let mut res = DVector::zeros(self.n_sensors());
        let grid = self.grid();
        for &d in self.bt.iter_at(&δ.x) {
            let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
            res[d] += sensor.apply(&δ.x) * δ.α;
        }
        res
    }
}

impl<F, S, P, BT, const N : usize> Linear<DeltaMeasure<Loc<F, N>, F>> for SensorGrid<F, S, P, BT, N>
where F : Float,
      BT : SensorGridBT<F, S, P, N>,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N>,
      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
    type Codomain = DVector<F>;
}

impl<F, S, P, BT, const N : usize> BoundedLinear<RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
where F : Float,
      BT : SensorGridBT<F, S, P, N, Agg=Bounds<F>>,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N>,
      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
    type FloatType = F;

    /// An estimate on the operator norm in $𝕃(ℳ(Ω); ℝ^n)$ with $ℳ(Ω)$ equipped
    /// with the Radon norm, and $ℝ^n$ with the Euclidean norm.
    fn opnorm_bound(&self) -> F {
        // With {x_i}_{i=1}^n the grid centres and φ the kernel, we have
        // |Aμ|_2 = sup_{|z|_2 ≤ 1} ⟨z,Αμ⟩ = sup_{|z|_2 ≤ 1} ⟨A^*z|μ⟩
        //        ≤ sup_{|z|_2 ≤ 1} |A^*z|_∞ |μ|_ℳ
        //        = sup_{|z|_2 ≤ 1} |∑ φ(· - x_i)z_i|_∞ |μ|_ℳ
        //        ≤ sup_{|z|_2 ≤ 1} |φ|_∞ ∑ |z_i| |μ|_ℳ
        //        ≤ sup_{|z|_2 ≤ 1} |φ|_∞ √n |z|_2 |μ|_ℳ
        //        = |φ|_∞ √n |μ|_ℳ.
        // Hence
        let n = F::cast_from(self.n_sensors());
        self.base_sensor.bounds().uniform() * n.sqrt()
    }
}

type SensorGridPreadjoint<'a, A, F, const N : usize> = PreadjointHelper<'a, A, RNDM<F,N>>;

impl<F, S, P, BT, const N : usize> Preadjointable<RNDM<F, N>, DVector<F>>
for SensorGrid<F, S, P, BT, N>
where F : Float,
      BT : SensorGridBT<F, S, P, N>,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N>,
      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {

    type PreadjointCodomain = BTFN<F, SensorGridSupportGenerator<F, S, P, N>, BT, N>;
    type Preadjoint<'a> = SensorGridPreadjoint<'a, Self, F, N> where Self : 'a;

    fn preadjoint(&self) -> Self::Preadjoint<'_> {
        PreadjointHelper::new(self)
    }
}

#[derive(Clone,Debug)]
pub struct SensorGridSupportGenerator<F, S, P, const N : usize>
where F : Float,
      S : Sensor<F, N>,
      P : Spread<F, N> {
    base_sensor : Convolution<S, P>,
    grid : LinGrid<F, N>,
    weights : DVector<F>
}

impl<F, S, P, const N : usize> SensorGridSupportGenerator<F, S, P, N>
where F : Float,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N> {

    #[inline]
    fn construct_sensor(&self, id : usize, w : F) -> Weighted<ShiftedSensor<F, S, P, N>, F> {
        let x = self.grid.entry_linear_unchecked(id);
        self.base_sensor.clone().shift(x).weigh(w)
    }

    #[inline]
    fn construct_sensor_and_id<'a>(&'a self, (id, w) : (usize, &'a F))
    -> (usize, Weighted<ShiftedSensor<F, S, P, N>, F>) {
        (id.into(), self.construct_sensor(id, *w))
    }
}

impl<F, S, P, const N : usize> SupportGenerator<F, N>
for SensorGridSupportGenerator<F, S, P, N>
where F : Float,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N> {

    type Id = usize;
    type SupportType = Weighted<ShiftedSensor<F, S, P, N>, F>;
    type AllDataIter<'a> = MapX<'a,
                                Zip<RangeFrom<usize>, std::slice::Iter<'a, F>>,
                                Self,
                                (Self::Id, Self::SupportType)>
                           where Self : 'a;

    #[inline]
    fn support_for(&self, d : Self::Id) -> Self::SupportType {
        self.construct_sensor(d, self.weights[d])
    }

    #[inline]
    fn support_count(&self) -> usize {
        self.weights.len()
    }

    #[inline]
    fn all_data(&self) -> Self::AllDataIter<'_> {
        (0..).zip(self.weights.as_slice().iter()).mapX(self, Self::construct_sensor_and_id)
    }
}

/// Helper structure for constructing preadjoints of `S` where `S : Linear<X>`.
/// [`Linear`] needs to be implemented for each instance, but [`Adjointable`]
/// and [`BoundedLinear`] have blanket implementations.
#[derive(Clone,Debug)]
pub struct PreadjointHelper<'a, S : 'a, X> {
    forward_op : &'a S,
    _domain : PhantomData<X>
}

impl<'a, S : 'a, X> PreadjointHelper<'a, S, X> {
    pub fn new(forward_op : &'a S) -> Self {
        PreadjointHelper { forward_op, _domain: PhantomData }
    }
}

impl<'a, X, Ypre, S> Adjointable<Ypre, X>
for PreadjointHelper<'a, S, X>
where Self : Linear<Ypre>,
      S : Clone + Linear<X> {

    type AdjointCodomain = S::Codomain;
    type Adjoint<'b> = S where Self : 'b;

    fn adjoint(&self) -> Self::Adjoint<'_> {
        self.forward_op.clone()
    }
}

impl<'a, X, Ypre, S> BoundedLinear<Ypre>
for PreadjointHelper<'a, S, X>
where Self : Linear<Ypre>,
      S : 'a + Clone + BoundedLinear<X> {

    type FloatType = S::FloatType;

    fn opnorm_bound(&self) -> Self::FloatType {
        self.forward_op.opnorm_bound()
    }
}

impl<'a, 'b, F, S, P, BT, const N : usize> Apply<&'b DVector<F>>
for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F,N>>
where F : Float,
      BT : SensorGridBT<F, S, P, N>,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N>,
      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {

    type Output = SensorGridBTFN<F, S, P, BT, N>;

    fn apply(&self, x : &'b DVector<F>) -> Self::Output {
        self.apply(x.clone())
    }
}

impl<'a, F, S, P, BT, const N : usize> Apply<DVector<F>>
for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F,N>>
where F : Float,
      BT : SensorGridBT<F, S, P, N>,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N>,
      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {

    type Output = SensorGridBTFN<F, S, P, BT, N>;

    fn apply(&self, x : DVector<F>) -> Self::Output {
        let fwd = &self.forward_op;
        let generator = SensorGridSupportGenerator{
            base_sensor : fwd.base_sensor.clone(),
            grid : fwd.grid(),
            weights : x
        };
        BTFN::new_refresh(&fwd.bt, generator)
    }
}

impl<'a, F, S, P, BT, const N : usize> Linear<DVector<F>>
for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F,N>>
where F : Float,
      BT : SensorGridBT<F, S, P, N>,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N>,
      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {
    type Codomain = SensorGridBTFN<F, S, P, BT, N>;
}

impl<F, S, P, BT, const N : usize> ForwardModel<Loc<F, N>, F>
for SensorGrid<F, S, P, BT, N>
where F : Float + ToNalgebraRealField<MixedType=F> + nalgebra::RealField,
      BT : SensorGridBT<F, S, P, N>,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N>,
      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {

    type Observable = DVector<F>;

    fn findim_quadratic_model(
        &self,
        μ : &DiscreteMeasure<Loc<F, N>, F>,
        b : &Self::Observable
    ) -> (DMatrix<F::MixedType>, DVector<F::MixedType>) {
        assert_eq!(b.len(), self.n_sensors());
        let mut mA = DMatrix::zeros(self.n_sensors(), μ.len());
        let grid = self.grid();
        for (mut mAcol, δ) in mA.column_iter_mut().zip(μ.iter_spikes()) {
            for &d in self.bt.iter_at(&δ.x) {
                let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
                mAcol[d] += sensor.apply(&δ.x);
            }
        }
        let mAt = mA.transpose();
        (&mAt * mA, &mAt * b)
    }

    fn write_observable(&self, b : &Self::Observable, prefix : String) -> DynError {
        let it = self.grid().into_iter().zip(b.iter()).map(|(x, &v)| (x, v));
        write_csv(it, prefix + ".txt")
    }

    #[inline]
    fn zero_observable(&self) -> Self::Observable {
        self._zero_observable()
    }

    #[inline]
    fn empty_observable(&self) -> Self::Observable {
        DVector::zeros(0)
    }
}

/// Implements the calculation of a factor $L$ such that $A_*A ≤ L 𝒟$ for $A$ the forward model
/// and $𝒟$ a seminorm of suitable form.
///
/// **This assumes (but does not check) that the sensors are not overlapping.**
#[replace_float_literals(F::cast_from(literal))]
impl<F, BT, S, P, K, const N : usize> Lipschitz<ConvolutionOp<F, K, BT, N>>
for SensorGrid<F, S, P, BT, N>
where F : Float + nalgebra::RealField + ToNalgebraRealField,
      BT : SensorGridBT<F, S, P, N>,
      S : Sensor<F, N>,
      P : Spread<F, N>,
      Convolution<S, P> : Spread<F, N>,
      K : SimpleConvolutionKernel<F, N>,
      AutoConvolution<P> : BoundedBy<F, K> {

    type FloatType = F;

    fn lipschitz_factor(&self, seminorm : &ConvolutionOp<F, K, BT, N>) -> Option<F> {
        // Sensors should not take on negative values to allow
        // A_*A to be upper bounded by a simple convolution of `spread`.
        if self.sensor.bounds().lower() < 0.0 {
            return None
        }

        // Calculate the factor $L_1$ such that $ℱ[ψ * ψ] ≤ L_1 ℱ[ρ]$ for $ψ$ the base spread
        // and $ρ$ the kernel of the seminorm.
        let l1 = AutoConvolution(self.spread.clone()).bounding_factor(seminorm.kernel())?;

        // Calculate the factor for transitioning from $A_*A$ to `AutoConvolution<P>`, where A
        // consists of several `Convolution<S, P>` for the physical model `P` and the sensor `S`.
        let l0 = self.sensor.norm(Linfinity) * self.sensor.norm(L1);

        // The final transition factor is:
        Some(l0 * l1)
    }
}

macro_rules! make_sensorgridsupportgenerator_scalarop_rhs {
    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
        impl<F, S, P, const N : usize>
        std::ops::$trait_assign<F>
        for SensorGridSupportGenerator<F, S, P, N>
        where F : Float,
              S : Sensor<F, N>,
              P : Spread<F, N>,
              Convolution<S, P> : Spread<F, N> {
            fn $fn_assign(&mut self, t : F) {
                self.weights.$fn_assign(t);
            }
        }

        impl<F, S, P, const N : usize>
        std::ops::$trait<F>
        for SensorGridSupportGenerator<F, S, P, N>
        where F : Float,
              S : Sensor<F, N>,
              P : Spread<F, N>,
              Convolution<S, P> : Spread<F, N> {
            type Output = SensorGridSupportGenerator<F, S, P, N>;
            fn $fn(mut self, t : F) -> Self::Output {
                std::ops::$trait_assign::$fn_assign(&mut self.weights, t);
                self
            }
        }

        impl<'a, F, S, P, const N : usize>
        std::ops::$trait<F>
        for &'a SensorGridSupportGenerator<F, S, P, N>
        where F : Float,
              S : Sensor<F, N>,
              P : Spread<F, N>,
              Convolution<S, P> : Spread<F, N> {
            type Output = SensorGridSupportGenerator<F, S, P, N>;
            fn $fn(self, t : F) -> Self::Output {
                SensorGridSupportGenerator{
                    base_sensor : self.base_sensor.clone(),
                    grid : self.grid,
                    weights : (&self.weights).$fn(t)
                }
            }
        }
    }
}

make_sensorgridsupportgenerator_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
make_sensorgridsupportgenerator_scalarop_rhs!(Div, div, DivAssign, div_assign);

macro_rules! make_sensorgridsupportgenerator_unaryop {
    ($trait:ident, $fn:ident) => {
        impl<F, S, P, const N : usize>
        std::ops::$trait
        for SensorGridSupportGenerator<F, S, P, N>
        where F : Float,
              S : Sensor<F, N>,
              P : Spread<F, N>,
              Convolution<S, P> : Spread<F, N> {
            type Output = SensorGridSupportGenerator<F, S, P, N>;
            fn $fn(mut self) -> Self::Output {
                self.weights = self.weights.$fn();
                self
            }
        }

        impl<'a, F, S, P, const N : usize>
        std::ops::$trait
        for &'a SensorGridSupportGenerator<F, S, P, N>
        where F : Float,
              S : Sensor<F, N>,
              P : Spread<F, N>,
              Convolution<S, P> : Spread<F, N> {
            type Output = SensorGridSupportGenerator<F, S, P, N>;
            fn $fn(self) -> Self::Output {
                SensorGridSupportGenerator{
                    base_sensor : self.base_sensor.clone(),
                    grid : self.grid,
                    weights : (&self.weights).$fn()
                }
            }
        }
    }
}

make_sensorgridsupportgenerator_unaryop!(Neg, neg);
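
As a closing illustration, here is a minimal sketch of how the `ForwardModel` interface above is meant to be consumed by the optimisation code: it evaluates Aμ through the GEMV-style interface and forms the finite-dimensional quadratic model (A_*A, A_*b) used by the conditional gradient subproblems. The helper name `example_quadratic_model` and its exact bounds are ours, not part of this module; it only calls methods whose signatures appear above.

/// Sketch only (not part of this module): evaluate Aμ and the finite-dimensional
/// quadratic model (A_*A, A_*b) for the spike weights of μ, using nothing but the
/// `ForwardModel` interface defined above.
#[allow(dead_code)]
fn example_quadratic_model<Domain, F, A>(
    opA : &A,
    μ : &DiscreteMeasure<Domain, F>,
    b : &A::Observable
) -> (A::Observable, DMatrix<F::MixedType>, DVector<F::MixedType>)
where F : Float + ToNalgebraRealField,
      A : ForwardModel<Domain, F> {
    // Aμ, accumulated into a zero observable via the GEMV-style `apply_add`.
    let mut y = opA.zero_observable();
    opA.apply_add(&mut y, μ);
    // (A_*A, A_*b), i.e. the data of the quadratic model ½⟨A_*A w, w⟩ - ⟨A_*b, w⟩
    // in the spike weights w of μ.
    let (quad, lin) = opA.findim_quadratic_model(μ, b);
    (y, quad, lin)
}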