diff -r 9738b51d90d7 -r 4f468d35fa29 src/kernels/base.rs --- a/src/kernels/base.rs Sun Apr 27 15:03:51 2025 -0500 +++ b/src/kernels/base.rs Thu Feb 26 11:38:43 2026 -0500 @@ -1,28 +1,20 @@ - //! Things for constructing new kernels from component kernels and traits for analysing them -use serde::Serialize; use numeric_literals::replace_float_literals; +use serde::Serialize; -use alg_tools::types::*; +use alg_tools::bisection_tree::Support; +use alg_tools::bounds::{Bounded, Bounds, GlobalAnalysis, LocalAnalysis}; +use alg_tools::instance::{Instance, Space}; +use alg_tools::loc::Loc; +use alg_tools::mapping::{ + DifferentiableImpl, DifferentiableMapping, LipschitzDifferentiableImpl, Mapping, +}; +use alg_tools::maputil::{array_init, map1_indexed, map2}; use alg_tools::norms::*; -use alg_tools::loc::Loc; use alg_tools::sets::Cube; -use alg_tools::bisection_tree::{ - Support, - Bounds, - LocalAnalysis, - GlobalAnalysis, - Bounded, -}; -use alg_tools::mapping::{ - Mapping, - DifferentiableImpl, - DifferentiableMapping, - Differential, -}; -use alg_tools::instance::{Instance, Space}; -use alg_tools::maputil::{array_init, map2, map1_indexed}; use alg_tools::sets::SetOrd; +use alg_tools::types::*; +use anyhow::anyhow; use crate::fourier::Fourier; use crate::types::*; @@ -32,134 +24,129 @@ /// The kernels typically implement [`Support`] and [`Mapping`]. /// /// The implementation [`Support`] only uses the [`Support::support_hint`] of the first parameter! 
-#[derive(Copy,Clone,Serialize,Debug)] +#[derive(Copy, Clone, Serialize, Debug)] pub struct SupportProductFirst( /// First kernel pub A, /// Second kernel - pub B + pub B, ); -impl Mapping> -for SupportProductFirst +impl Mapping> for SupportProductFirst where - A : Mapping, Codomain = F>, - B : Mapping, Codomain = F>, + A: Mapping, Codomain = F>, + B: Mapping, Codomain = F>, { type Codomain = F; #[inline] - fn apply>>(&self, x : I) -> Self::Codomain { - self.0.apply(x.ref_instance()) * self.1.apply(x) + fn apply>>(&self, x: I) -> Self::Codomain { + x.eval_ref(|r| self.0.apply(r)) * self.1.apply(x) } } -impl DifferentiableImpl> -for SupportProductFirst +impl DifferentiableImpl> for SupportProductFirst where - A : DifferentiableMapping< - Loc, - DerivativeDomain=Loc, - Codomain = F - >, - B : DifferentiableMapping< - Loc, - DerivativeDomain=Loc, - Codomain = F, - > + A: DifferentiableMapping, DerivativeDomain = Loc, Codomain = F>, + B: DifferentiableMapping, DerivativeDomain = Loc, Codomain = F>, { - type Derivative = Loc; + type Derivative = Loc; #[inline] - fn differential_impl>>(&self, x : I) -> Self::Derivative { - let xr = x.ref_instance(); - self.0.differential(xr) * self.1.apply(xr) + self.1.differential(xr) * self.0.apply(x) + fn differential_impl>>(&self, x: I) -> Self::Derivative { + x.eval_ref(|xr| { + self.0.differential(xr) * self.1.apply(xr) + self.1.differential(xr) * self.0.apply(xr) + }) } } -impl Lipschitz -for SupportProductFirst -where A : Lipschitz + Bounded, - B : Lipschitz + Bounded { +impl Lipschitz for SupportProductFirst +where + A: Lipschitz + Bounded, + B: Lipschitz + Bounded, +{ type FloatType = F; #[inline] - fn lipschitz_factor(&self, m : M) -> Option { + fn lipschitz_factor(&self, m: M) -> DynResult { // f(x)g(x) - f(y)g(y) = f(x)[g(x)-g(y)] - [f(y)-f(x)]g(y) let &SupportProductFirst(ref f, ref g) = self; - f.lipschitz_factor(m).map(|l| l * g.bounds().uniform()) - .zip(g.lipschitz_factor(m).map(|l| l * f.bounds().uniform())) - 
.map(|(a, b)| a + b) + Ok(f.lipschitz_factor(m)? * g.bounds().uniform() + + g.lipschitz_factor(m)? * f.bounds().uniform()) } } -impl<'a, A, B, M : Copy, Domain, F : Float> Lipschitz -for Differential<'a, Domain, SupportProductFirst> +impl<'a, A, B, M: Copy, Domain, F: Float> LipschitzDifferentiableImpl + for SupportProductFirst where - Domain : Space, - A : Clone + DifferentiableMapping + Lipschitz + Bounded, - B : Clone + DifferentiableMapping + Lipschitz + Bounded, - SupportProductFirst : DifferentiableMapping, - for<'b> A::Differential<'b> : Lipschitz + NormBounded, - for<'b> B::Differential<'b> : Lipschitz + NormBounded + Domain: Space, + Self: DifferentiableImpl, + A: DifferentiableMapping + Lipschitz + Bounded, + B: DifferentiableMapping + Lipschitz + Bounded, + SupportProductFirst: DifferentiableMapping, + for<'b> A::Differential<'b>: Lipschitz + NormBounded, + for<'b> B::Differential<'b>: Lipschitz + NormBounded, { type FloatType = F; #[inline] - fn lipschitz_factor(&self, m : M) -> Option { + fn diff_lipschitz_factor(&self, m: M) -> DynResult { // ∇[gf] = f∇g + g∇f // ⟹ ∇[gf](x) - ∇[gf](y) = f(x)∇g(x) + g(x)∇f(x) - f(y)∇g(y) + g(y)∇f(y) // = f(x)[∇g(x)-∇g(y)] + g(x)∇f(x) - [f(y)-f(x)]∇g(y) + g(y)∇f(y) // = f(x)[∇g(x)-∇g(y)] + g(x)[∇f(x)-∇f(y)] // - [f(y)-f(x)]∇g(y) + [g(y)-g(x)]∇f(y) - let &SupportProductFirst(ref f, ref g) = self.base_fn(); + let &SupportProductFirst(ref f, ref g) = self; let (df, dg) = (f.diff_ref(), g.diff_ref()); - [ - df.lipschitz_factor(m).map(|l| l * g.bounds().uniform()), - dg.lipschitz_factor(m).map(|l| l * f.bounds().uniform()), - f.lipschitz_factor(m).map(|l| l * dg.norm_bound(L2)), - g.lipschitz_factor(m).map(|l| l * df.norm_bound(L2)) - ].into_iter().sum() + Ok([ + df.lipschitz_factor(m)? * g.bounds().uniform(), + dg.lipschitz_factor(m)? * f.bounds().uniform(), + f.lipschitz_factor(m)? * dg.norm_bound(L2), + g.lipschitz_factor(m)? 
* df.norm_bound(L2), + ] + .into_iter() + .sum()) } } - -impl<'a, A, B, F : Float, const N : usize> Support -for SupportProductFirst +impl<'a, A, B, F: Float, const N: usize> Support for SupportProductFirst where - A : Support, - B : Support + A: Support, + B: Support, { #[inline] - fn support_hint(&self) -> Cube { + fn support_hint(&self) -> Cube { self.0.support_hint() } #[inline] - fn in_support(&self, x : &Loc) -> bool { + fn in_support(&self, x: &Loc) -> bool { self.0.in_support(x) } #[inline] - fn bisection_hint(&self, cube : &Cube) -> [Option; N] { + fn bisection_hint(&self, cube: &Cube) -> [Option; N] { self.0.bisection_hint(cube) } } -impl<'a, A, B, F : Float> GlobalAnalysis> -for SupportProductFirst -where A : GlobalAnalysis>, - B : GlobalAnalysis> { +impl<'a, A, B, F: Float> GlobalAnalysis> for SupportProductFirst +where + A: GlobalAnalysis>, + B: GlobalAnalysis>, +{ #[inline] fn global_analysis(&self) -> Bounds { self.0.global_analysis() * self.1.global_analysis() } } -impl<'a, A, B, F : Float, const N : usize> LocalAnalysis, N> -for SupportProductFirst -where A : LocalAnalysis, N>, - B : LocalAnalysis, N> { +impl<'a, A, B, F: Float, const N: usize> LocalAnalysis, N> + for SupportProductFirst +where + A: LocalAnalysis, N>, + B: LocalAnalysis, N>, +{ #[inline] - fn local_analysis(&self, cube : &Cube) -> Bounds { + fn local_analysis(&self, cube: &Cube) -> Bounds { self.0.local_analysis(cube) * self.1.local_analysis(cube) } } @@ -169,125 +156,116 @@ /// The kernels typically implement [`Support`] and [`Mapping`]. /// /// The implementation [`Support`] only uses the [`Support::support_hint`] of the first parameter! 
-#[derive(Copy,Clone,Serialize,Debug)] +#[derive(Copy, Clone, Serialize, Debug)] pub struct SupportSum( /// First kernel pub A, /// Second kernel - pub B + pub B, ); -impl<'a, A, B, F : Float, const N : usize> Mapping> -for SupportSum +impl<'a, A, B, F: Float, const N: usize> Mapping> for SupportSum where - A : Mapping, Codomain = F>, - B : Mapping, Codomain = F>, + A: Mapping, Codomain = F>, + B: Mapping, Codomain = F>, { type Codomain = F; #[inline] - fn apply>>(&self, x : I) -> Self::Codomain { - self.0.apply(x.ref_instance()) + self.1.apply(x) + fn apply>>(&self, x: I) -> Self::Codomain { + x.eval_ref(|r| self.0.apply(r)) + self.1.apply(x) } } -impl<'a, A, B, F : Float, const N : usize> DifferentiableImpl> -for SupportSum +impl<'a, A, B, F: Float, const N: usize> DifferentiableImpl> for SupportSum where - A : DifferentiableMapping< - Loc, - DerivativeDomain = Loc - >, - B : DifferentiableMapping< - Loc, - DerivativeDomain = Loc, - > + A: DifferentiableMapping, DerivativeDomain = Loc>, + B: DifferentiableMapping, DerivativeDomain = Loc>, { - - type Derivative = Loc; + type Derivative = Loc; #[inline] - fn differential_impl>>(&self, x : I) -> Self::Derivative { - self.0.differential(x.ref_instance()) + self.1.differential(x) + fn differential_impl>>(&self, x: I) -> Self::Derivative { + x.eval_ref(|r| self.0.differential(r)) + self.1.differential(x) } } - -impl<'a, A, B, F : Float, const N : usize> Support -for SupportSum -where A : Support, - B : Support, - Cube : SetOrd { - +impl<'a, A, B, F: Float, const N: usize> Support for SupportSum +where + A: Support, + B: Support, + Cube: SetOrd, +{ #[inline] - fn support_hint(&self) -> Cube { + fn support_hint(&self) -> Cube { self.0.support_hint().common(&self.1.support_hint()) } #[inline] - fn in_support(&self, x : &Loc) -> bool { + fn in_support(&self, x: &Loc) -> bool { self.0.in_support(x) || self.1.in_support(x) } #[inline] - fn bisection_hint(&self, cube : &Cube) -> [Option; N] { - 
map2(self.0.bisection_hint(cube), - self.1.bisection_hint(cube), - |a, b| a.or(b)) + fn bisection_hint(&self, cube: &Cube) -> [Option; N] { + map2( + self.0.bisection_hint(cube), + self.1.bisection_hint(cube), + |a, b| a.or(b), + ) } } -impl<'a, A, B, F : Float> GlobalAnalysis> -for SupportSum -where A : GlobalAnalysis>, - B : GlobalAnalysis> { +impl<'a, A, B, F: Float> GlobalAnalysis> for SupportSum +where + A: GlobalAnalysis>, + B: GlobalAnalysis>, +{ #[inline] fn global_analysis(&self) -> Bounds { self.0.global_analysis() + self.1.global_analysis() } } -impl<'a, A, B, F : Float, const N : usize> LocalAnalysis, N> -for SupportSum -where A : LocalAnalysis, N>, - B : LocalAnalysis, N>, - Cube : SetOrd { +impl<'a, A, B, F: Float, const N: usize> LocalAnalysis, N> for SupportSum +where + A: LocalAnalysis, N>, + B: LocalAnalysis, N>, + Cube: SetOrd, +{ #[inline] - fn local_analysis(&self, cube : &Cube) -> Bounds { + fn local_analysis(&self, cube: &Cube) -> Bounds { self.0.local_analysis(cube) + self.1.local_analysis(cube) } } -impl Lipschitz for SupportSum -where A : Lipschitz, - B : Lipschitz { +impl Lipschitz for SupportSum +where + A: Lipschitz, + B: Lipschitz, +{ type FloatType = F; - fn lipschitz_factor(&self, m : M) -> Option { - match (self.0.lipschitz_factor(m), self.1.lipschitz_factor(m)) { - (Some(l0), Some(l1)) => Some(l0 + l1), - _ => None - } + fn lipschitz_factor(&self, m: M) -> DynResult { + Ok(self.0.lipschitz_factor(m)? + self.1.lipschitz_factor(m)?) 
} } -impl<'b, F : Float, M : Copy, A, B, Domain> Lipschitz -for Differential<'b, Domain, SupportSum> +impl<'b, F: Float, M: Copy, A, B, Domain> LipschitzDifferentiableImpl + for SupportSum where - Domain : Space, - A : Clone + DifferentiableMapping, - B : Clone + DifferentiableMapping, - SupportSum : DifferentiableMapping, - for<'a> A :: Differential<'a> : Lipschitz, - for<'a> B :: Differential<'a> : Lipschitz + Domain: Space, + Self: DifferentiableImpl, + A: DifferentiableMapping, + B: DifferentiableMapping, + SupportSum: DifferentiableMapping, + for<'a> A::Differential<'a>: Lipschitz, + for<'a> B::Differential<'a>: Lipschitz, { type FloatType = F; - fn lipschitz_factor(&self, m : M) -> Option { - let base = self.base_fn(); - base.0.diff_ref().lipschitz_factor(m) - .zip(base.1.diff_ref().lipschitz_factor(m)) - .map(|(a, b)| a + b) + fn diff_lipschitz_factor(&self, m: M) -> DynResult { + Ok(self.0.diff_ref().lipschitz_factor(m)? + self.1.diff_ref().lipschitz_factor(m)?) } } @@ -296,48 +274,52 @@ /// The kernels typically implement [`Support`]s and [`Mapping`]. // /// Trait implementations have to be on a case-by-case basis. -#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)] +#[derive(Copy, Clone, Serialize, Debug, Eq, PartialEq)] pub struct Convolution( /// First kernel pub A, /// Second kernel - pub B + pub B, ); -impl Lipschitz for Convolution -where A : Norm , - B : Lipschitz { +impl Lipschitz for Convolution +where + A: Norm, + B: Lipschitz, +{ type FloatType = F; - fn lipschitz_factor(&self, m : M) -> Option { + fn lipschitz_factor(&self, m: M) -> DynResult { // For [f * g](x) = ∫ f(x-y)g(y) dy we have // [f * g](x) - [f * g](z) = ∫ [f(x-y)-f(z-y)]g(y) dy. // Hence |[f * g](x) - [f * g](z)| ≤ ∫ |f(x-y)-f(z-y)|g(y)| dy. // ≤ L|x-z| ∫ |g(y)| dy, // where L is the Lipschitz factor of f. - self.1.lipschitz_factor(m).map(|l| l * self.0.norm(L1)) + Ok(self.1.lipschitz_factor(m)? 
* self.0.norm(L1)) } } -impl<'b, F : Float, M, A, B, Domain> Lipschitz -for Differential<'b, Domain, Convolution> +impl<'b, F: Float, M, A, B, Domain> LipschitzDifferentiableImpl for Convolution where - Domain : Space, - A : Clone + Norm , - Convolution : DifferentiableMapping, - B : Clone + DifferentiableMapping, - for<'a> B :: Differential<'a> : Lipschitz + Domain: Space, + Self: DifferentiableImpl, + A: Norm, + Convolution: DifferentiableMapping, + B: DifferentiableMapping, + for<'a> B::Differential<'a>: Lipschitz, { type FloatType = F; - fn lipschitz_factor(&self, m : M) -> Option { + fn diff_lipschitz_factor(&self, m: M) -> DynResult { // For [f * g](x) = ∫ f(x-y)g(y) dy we have // ∇[f * g](x) - ∇[f * g](z) = ∫ [∇f(x-y)-∇f(z-y)]g(y) dy. // Hence |∇[f * g](x) - ∇[f * g](z)| ≤ ∫ |∇f(x-y)-∇f(z-y)|g(y)| dy. // ≤ L|x-z| ∫ |g(y)| dy, // where L is the Lipschitz factor of ∇f. - let base = self.base_fn(); - base.1.diff_ref().lipschitz_factor(m).map(|l| l * base.0.norm(L1)) + self.1 + .diff_ref() + .lipschitz_factor(m) + .map(|l| l * self.0.norm(L1)) } } @@ -346,78 +328,76 @@ /// The kernel typically implements [`Support`] and [`Mapping`]. /// /// Trait implementations have to be on a case-by-case basis. 
-#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)] +#[derive(Copy, Clone, Serialize, Debug, Eq, PartialEq)] pub struct AutoConvolution( /// The kernel to be autoconvolved - pub A + pub A, ); -impl Lipschitz for AutoConvolution -where C : Lipschitz + Norm { +impl Lipschitz for AutoConvolution +where + C: Lipschitz + Norm, +{ type FloatType = F; - fn lipschitz_factor(&self, m : M) -> Option { + fn lipschitz_factor(&self, m: M) -> DynResult { self.0.lipschitz_factor(m).map(|l| l * self.0.norm(L1)) } } -impl<'b, F : Float, M, C, Domain> Lipschitz -for Differential<'b, Domain, AutoConvolution> +impl<'b, F: Float, M, C, Domain> LipschitzDifferentiableImpl for AutoConvolution where - Domain : Space, - C : Clone + Norm + DifferentiableMapping, - AutoConvolution : DifferentiableMapping, - for<'a> C :: Differential<'a> : Lipschitz + Domain: Space, + Self: DifferentiableImpl, + C: Norm + DifferentiableMapping, + AutoConvolution: DifferentiableMapping, + for<'a> C::Differential<'a>: Lipschitz, { type FloatType = F; - fn lipschitz_factor(&self, m : M) -> Option { - let base = self.base_fn(); - base.0.diff_ref().lipschitz_factor(m).map(|l| l * base.0.norm(L1)) + fn diff_lipschitz_factor(&self, m: M) -> DynResult { + self.0 + .diff_ref() + .lipschitz_factor(m) + .map(|l| l * self.0.norm(L1)) } } - /// Representation a multi-dimensional product of a one-dimensional kernel. /// /// For $G: ℝ → ℝ$, this is the function $F(x\_1, …, x\_n) := \prod_{i=1}^n G(x\_i)$. /// The kernel $G$ typically implements [`Support`] and [`Mapping`] -/// on [`Loc`]. Then the product implements them on [`Loc`]. -#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)] +/// on [`Loc<1, F>`]. Then the product implements them on [`Loc`]. 
+#[derive(Copy, Clone, Serialize, Debug, Eq, PartialEq)] #[allow(dead_code)] -struct UniformProduct( +struct UniformProduct( /// The one-dimensional kernel - G + G, ); -impl<'a, G, F : Float, const N : usize> Mapping> -for UniformProduct +impl<'a, G, F: Float, const N: usize> Mapping> for UniformProduct where - G : Mapping, Codomain = F> + G: Mapping, Codomain = F>, { type Codomain = F; #[inline] - fn apply>>(&self, x : I) -> F { - x.cow().iter().map(|&y| self.0.apply(Loc([y]))).product() + fn apply>>(&self, x: I) -> F { + x.decompose() + .iter() + .map(|&y| self.0.apply(Loc([y]))) + .product() } } - - -impl<'a, G, F : Float, const N : usize> DifferentiableImpl> -for UniformProduct +impl<'a, G, F: Float, const N: usize> DifferentiableImpl> for UniformProduct where - G : DifferentiableMapping< - Loc, - DerivativeDomain = F, - Codomain = F, - > + G: DifferentiableMapping, DerivativeDomain = F, Codomain = F>, { - type Derivative = Loc; + type Derivative = Loc; #[inline] - fn differential_impl>>(&self, x0 : I) -> Loc { + fn differential_impl>>(&self, x0: I) -> Loc { x0.eval(|x| { let vs = x.map(|y| self.0.apply(Loc([y]))); product_differential(x, &vs, |y| self.0.differential(Loc([y]))) @@ -430,17 +410,19 @@ /// The vector `x` is the location, `vs` consists of the values `g(x_i)`, and /// `gd` calculates the derivative `g'`. #[inline] -pub(crate) fn product_differential F, const N : usize>( - x : &Loc, - vs : &Loc, - gd : G -) -> Loc { +pub(crate) fn product_differential F, const N: usize>( + x: &Loc, + vs: &Loc, + gd: G, +) -> Loc { map1_indexed(x, |i, &y| { - gd(y) * vs.iter() - .zip(0..) - .filter_map(|(v, j)| (j != i).then_some(*v)) - .product() - }).into() + gd(y) + * vs.iter() + .zip(0..) + .filter_map(|(v, j)| (j != i).then_some(*v)) + .product() + }) + .into() } /// Helper function to calulate the Lipschitz factor of $∇f$ for $f(x)=∏_{i=1}^N g(x_i)$. 
@@ -448,12 +430,12 @@ /// The parameter `bound` is a bound on $|g|_∞$, `lip` is a Lipschitz factor for $g$, /// `dbound` is a bound on $|∇g|_∞$, and `dlip` a Lipschitz factor for $∇g$. #[inline] -pub(crate) fn product_differential_lipschitz_factor( - bound : F, - lip : F, - dbound : F, - dlip : F -) -> F { +pub(crate) fn product_differential_lipschitz_factor( + bound: F, + lip: F, + dbound: F, + dlip: F, +) -> DynResult { // For arbitrary ψ(x) = ∏_{i=1}^n ψ_i(x_i), we have // ψ(x) - ψ(y) = ∑_i [ψ_i(x_i)-ψ_i(y_i)] ∏_{j ≠ i} ψ_j(x_j) // by a simple recursive argument. In particular, if ψ_i=g for all i, j, we have @@ -470,31 +452,33 @@ // = n [L_{∇g} M_g^{n-1} + (n-1) L_g M_g^{n-2} M_{∇g}]. // = n M_g^{n-2}[L_{∇g} M_g + (n-1) L_g M_{∇g}]. if N >= 2 { - F::cast_from(N) * bound.powi((N-2) as i32) - * (dlip * bound + F::cast_from(N-1) * lip * dbound) - } else if N==1 { - dlip + Ok(F::cast_from(N) + * bound.powi((N - 2) as i32) + * (dlip * bound + F::cast_from(N - 1) * lip * dbound)) + } else if N == 1 { + Ok(dlip) } else { - panic!("Invalid dimension") + Err(anyhow!("Invalid dimension")) } } -impl Support -for UniformProduct -where G : Support { +impl Support for UniformProduct +where + G: Support<1, F>, +{ #[inline] - fn support_hint(&self) -> Cube { - let [a] : [[F; 2]; 1] = self.0.support_hint().into(); + fn support_hint(&self) -> Cube { + let [a]: [[F; 2]; 1] = self.0.support_hint().into(); array_init(|| a.clone()).into() } #[inline] - fn in_support(&self, x : &Loc) -> bool { + fn in_support(&self, x: &Loc) -> bool { x.iter().all(|&y| self.0.in_support(&Loc([y]))) } #[inline] - fn bisection_hint(&self, cube : &Cube) -> [Option; N] { + fn bisection_hint(&self, cube: &Cube) -> [Option; N] { cube.map(|a, b| { let [h] = self.0.bisection_hint(&[[a, b]].into()); h @@ -502,9 +486,10 @@ } } -impl GlobalAnalysis> -for UniformProduct -where G : GlobalAnalysis> { +impl GlobalAnalysis> for UniformProduct +where + G: GlobalAnalysis>, +{ #[inline] fn global_analysis(&self) -> 
Bounds { let g = self.0.global_analysis(); @@ -512,88 +497,91 @@ } } -impl LocalAnalysis, N> -for UniformProduct -where G : LocalAnalysis, 1> { +impl LocalAnalysis, N> for UniformProduct +where + G: LocalAnalysis, 1>, +{ #[inline] - fn local_analysis(&self, cube : &Cube) -> Bounds { - cube.iter_coords().map( - |&[a, b]| self.0.local_analysis(&([[a, b]].into())) - ).product() + fn local_analysis(&self, cube: &Cube) -> Bounds { + cube.iter_coords() + .map(|&[a, b]| self.0.local_analysis(&([[a, b]].into()))) + .product() } } macro_rules! product_lpnorm { ($lp:ident) => { - impl Norm - for UniformProduct - where G : Norm { + impl Norm<$lp, F> for UniformProduct + where + G: Norm<$lp, F>, + { #[inline] - fn norm(&self, lp : $lp) -> F { + fn norm(&self, lp: $lp) -> F { self.0.norm(lp).powi(N as i32) } } - } + }; } product_lpnorm!(L1); product_lpnorm!(L2); product_lpnorm!(Linfinity); - /// Trait for bounding one kernel with respect to another. /// /// The type `F` is the scalar field, and `T` another kernel to which `Self` is compared. -pub trait BoundedBy { +pub trait BoundedBy { /// Calclate a bounding factor $c$ such that the Fourier transforms $ℱ\[v\] ≤ c ℱ\[u\]$ for /// $v$ `self` and $u$ `other`. /// /// If no such factors exits, `None` is returned. - fn bounding_factor(&self, other : &T) -> Option; + fn bounding_factor(&self, other: &T) -> DynResult; } /// This [`BoundedBy`] implementation bounds $(uv) * (uv)$ by $(ψ * ψ) u$. 
#[replace_float_literals(F::cast_from(literal))] -impl -BoundedBy, BaseP>> -for AutoConvolution> -where F : Float, - C : Clone + PartialEq, - BaseP : Fourier + PartialOrd, // TODO: replace by BoundedBy, - >::Transformed : Bounded + Norm { - - fn bounding_factor(&self, kernel : &SupportProductFirst, BaseP>) -> Option { +impl BoundedBy, BaseP>> + for AutoConvolution> +where + F: Float, + C: PartialEq, + BaseP: Fourier + PartialOrd, // TODO: replace by BoundedBy, + >::Transformed: Bounded + Norm, +{ + fn bounding_factor( + &self, + kernel: &SupportProductFirst, BaseP>, + ) -> DynResult { let SupportProductFirst(AutoConvolution(ref cutoff2), base_spread2) = kernel; let AutoConvolution(SupportProductFirst(ref cutoff, ref base_spread)) = self; let v̂ = base_spread.fourier(); // Verify that the cut-off and ideal physical model (base spread) are the same. - if cutoff == cutoff2 - && base_spread <= base_spread2 - && v̂.bounds().lower() >= 0.0 { + if cutoff == cutoff2 && base_spread <= base_spread2 && v̂.bounds().lower() >= 0.0 { // Calculate the factor between the convolution approximation // `AutoConvolution>` of $A_*A$ and the // kernel of the seminorm. This depends on the physical model P being // `SupportProductFirst` with the kernel `K` being // a `SupportSum` involving `SupportProductFirst, BaseP>`. 
- Some(v̂.norm(L1)) + Ok(v̂.norm(L1)) } else { // We cannot compare - None + Err(anyhow!("Incomparable kernels")) } } } -impl BoundedBy> for A -where A : BoundedBy, - C : Bounded { - +impl BoundedBy> for A +where + A: BoundedBy, + C: Bounded, +{ #[replace_float_literals(F::cast_from(literal))] - fn bounding_factor(&self, SupportSum(ref kernel1, kernel2) : &SupportSum) -> Option { + fn bounding_factor(&self, SupportSum(ref kernel1, kernel2): &SupportSum) -> DynResult { if kernel2.bounds().lower() >= 0.0 { self.bounding_factor(kernel1) } else { - None + Err(anyhow!("Component kernel not lower-bounded by zero")) } } } @@ -603,7 +591,7 @@ /// It will attempt to place the subdivision point at $-r$ or $r$. /// If neither of these points lies within $[a, b]$, `None` is returned. #[inline] -pub(super) fn symmetric_interval_hint(r : F, a : F, b : F) -> Option { +pub(super) fn symmetric_interval_hint(r: F, a: F, b: F) -> Option { if a < -r && -r < b { Some(-r) } else if a < r && r < b { Some(r) } @@ -622,7 +610,7 @@ /// returned. #[replace_float_literals(F::cast_from(literal))] #[inline] -pub(super) fn symmetric_peak_hint(r : F, a : F, b : F) -> Option { +pub(super) fn symmetric_peak_hint(r: F, a: F, b: F) -> Option { let stage1 = if a < -r { if b <= -r { None } @@ -648,7 +636,7 @@ // Ignore stage1 hint if either side of subdivision would be just a small fraction of the // interval match stage1 { - Some(h) if (h - a).min(b-h) >= 0.3 * r => Some(h), - _ => None + Some(h) if (h - a).min(b - h) >= 0.3 * r => Some(h), + _ => None, } }