src/kernels/base.rs

branch
dev
changeset 61
4f468d35fa29
parent 35
b087e3eab191
--- a/src/kernels/base.rs	Sun Apr 27 15:03:51 2025 -0500
+++ b/src/kernels/base.rs	Thu Feb 26 11:38:43 2026 -0500
@@ -1,28 +1,20 @@
-
 //! Things for constructing new kernels from component kernels and traits for analysing them
-use serde::Serialize;
 use numeric_literals::replace_float_literals;
+use serde::Serialize;
 
-use alg_tools::types::*;
+use alg_tools::bisection_tree::Support;
+use alg_tools::bounds::{Bounded, Bounds, GlobalAnalysis, LocalAnalysis};
+use alg_tools::instance::{Instance, Space};
+use alg_tools::loc::Loc;
+use alg_tools::mapping::{
+    DifferentiableImpl, DifferentiableMapping, LipschitzDifferentiableImpl, Mapping,
+};
+use alg_tools::maputil::{array_init, map1_indexed, map2};
 use alg_tools::norms::*;
-use alg_tools::loc::Loc;
 use alg_tools::sets::Cube;
-use alg_tools::bisection_tree::{
-    Support,
-    Bounds,
-    LocalAnalysis,
-    GlobalAnalysis,
-    Bounded,
-};
-use alg_tools::mapping::{
-    Mapping,
-    DifferentiableImpl,
-    DifferentiableMapping,
-    Differential,
-};
-use alg_tools::instance::{Instance, Space};
-use alg_tools::maputil::{array_init, map2, map1_indexed};
 use alg_tools::sets::SetOrd;
+use alg_tools::types::*;
+use anyhow::anyhow;
 
 use crate::fourier::Fourier;
 use crate::types::*;
@@ -32,134 +24,129 @@
 /// The kernels typically implement [`Support`] and [`Mapping`].
 ///
 /// The implementation [`Support`] only uses the [`Support::support_hint`] of the first parameter!
-#[derive(Copy,Clone,Serialize,Debug)]
+#[derive(Copy, Clone, Serialize, Debug)]
 pub struct SupportProductFirst<A, B>(
     /// First kernel
     pub A,
     /// Second kernel
-    pub B
+    pub B,
 );
 
-impl<A, B, F : Float, const N : usize> Mapping<Loc<F, N>>
-for SupportProductFirst<A, B>
+impl<A, B, F: Float, const N: usize> Mapping<Loc<N, F>> for SupportProductFirst<A, B>
 where
-    A : Mapping<Loc<F, N>, Codomain = F>,
-    B : Mapping<Loc<F, N>, Codomain = F>,
+    A: Mapping<Loc<N, F>, Codomain = F>,
+    B: Mapping<Loc<N, F>, Codomain = F>,
 {
     type Codomain = F;
 
     #[inline]
-    fn apply<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Codomain {
-        self.0.apply(x.ref_instance()) * self.1.apply(x)
+    fn apply<I: Instance<Loc<N, F>>>(&self, x: I) -> Self::Codomain {
+        x.eval_ref(|r| self.0.apply(r)) * self.1.apply(x)
     }
 }
 
-impl<A, B, F : Float, const N : usize> DifferentiableImpl<Loc<F, N>>
-for SupportProductFirst<A, B>
+impl<A, B, F: Float, const N: usize> DifferentiableImpl<Loc<N, F>> for SupportProductFirst<A, B>
 where
-    A : DifferentiableMapping<
-        Loc<F, N>,
-        DerivativeDomain=Loc<F, N>,
-        Codomain = F
-    >,
-    B : DifferentiableMapping<
-        Loc<F, N>,
-        DerivativeDomain=Loc<F, N>,
-        Codomain = F,
-    >
+    A: DifferentiableMapping<Loc<N, F>, DerivativeDomain = Loc<N, F>, Codomain = F>,
+    B: DifferentiableMapping<Loc<N, F>, DerivativeDomain = Loc<N, F>, Codomain = F>,
 {
-    type Derivative = Loc<F, N>;
+    type Derivative = Loc<N, F>;
 
     #[inline]
-    fn differential_impl<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Derivative {
-        let xr = x.ref_instance();
-        self.0.differential(xr) * self.1.apply(xr) + self.1.differential(xr) * self.0.apply(x)
+    fn differential_impl<I: Instance<Loc<N, F>>>(&self, x: I) -> Self::Derivative {
+        x.eval_ref(|xr| {
+            self.0.differential(xr) * self.1.apply(xr) + self.1.differential(xr) * self.0.apply(xr)
+        })
     }
 }
 
-impl<A, B, M : Copy, F : Float> Lipschitz<M>
-for SupportProductFirst<A, B>
-where A : Lipschitz<M, FloatType = F> + Bounded<F>,
-      B : Lipschitz<M, FloatType = F> + Bounded<F> {
+impl<A, B, M: Copy, F: Float> Lipschitz<M> for SupportProductFirst<A, B>
+where
+    A: Lipschitz<M, FloatType = F> + Bounded<F>,
+    B: Lipschitz<M, FloatType = F> + Bounded<F>,
+{
     type FloatType = F;
     #[inline]
-    fn lipschitz_factor(&self, m : M) -> Option<F> {
+    fn lipschitz_factor(&self, m: M) -> DynResult<F> {
         // f(x)g(x) - f(y)g(y) = f(x)[g(x)-g(y)] - [f(y)-f(x)]g(y)
         let &SupportProductFirst(ref f, ref g) = self;
-        f.lipschitz_factor(m).map(|l| l * g.bounds().uniform())
-         .zip(g.lipschitz_factor(m).map(|l| l * f.bounds().uniform()))
-         .map(|(a, b)| a + b)
+        Ok(f.lipschitz_factor(m)? * g.bounds().uniform()
+            + g.lipschitz_factor(m)? * f.bounds().uniform())
     }
 }
 
-impl<'a, A, B, M : Copy, Domain, F : Float> Lipschitz<M>
-for Differential<'a, Domain, SupportProductFirst<A, B>>
+impl<'a, A, B, M: Copy, Domain, F: Float> LipschitzDifferentiableImpl<Domain, M>
+    for SupportProductFirst<A, B>
 where
-    Domain : Space,
-    A : Clone + DifferentiableMapping<Domain> + Lipschitz<M, FloatType = F> + Bounded<F>,
-    B : Clone + DifferentiableMapping<Domain> + Lipschitz<M, FloatType = F> + Bounded<F>,
-    SupportProductFirst<A, B> :  DifferentiableMapping<Domain>,
-    for<'b> A::Differential<'b> : Lipschitz<M, FloatType = F> + NormBounded<L2, FloatType=F>,
-    for<'b> B::Differential<'b> : Lipschitz<M, FloatType = F> + NormBounded<L2, FloatType=F>
+    Domain: Space,
+    Self: DifferentiableImpl<Domain>,
+    A: DifferentiableMapping<Domain> + Lipschitz<M, FloatType = F> + Bounded<F>,
+    B: DifferentiableMapping<Domain> + Lipschitz<M, FloatType = F> + Bounded<F>,
+    SupportProductFirst<A, B>: DifferentiableMapping<Domain>,
+    for<'b> A::Differential<'b>: Lipschitz<M, FloatType = F> + NormBounded<L2, FloatType = F>,
+    for<'b> B::Differential<'b>: Lipschitz<M, FloatType = F> + NormBounded<L2, FloatType = F>,
 {
     type FloatType = F;
     #[inline]
-    fn lipschitz_factor(&self, m : M) -> Option<F> {
+    fn diff_lipschitz_factor(&self, m: M) -> DynResult<F> {
         // ∇[gf] = f∇g + g∇f
         // ⟹ ∇[gf](x) - ∇[gf](y) = f(x)∇g(x) + g(x)∇f(x) - f(y)∇g(y) + g(y)∇f(y)
         //                        = f(x)[∇g(x)-∇g(y)] + g(x)∇f(x) - [f(y)-f(x)]∇g(y) + g(y)∇f(y)
         //                        = f(x)[∇g(x)-∇g(y)] + g(x)[∇f(x)-∇f(y)]
         //                          - [f(y)-f(x)]∇g(y) + [g(y)-g(x)]∇f(y)
-        let &SupportProductFirst(ref f, ref g) = self.base_fn();
+        let &SupportProductFirst(ref f, ref g) = self;
         let (df, dg) = (f.diff_ref(), g.diff_ref());
-        [
-            df.lipschitz_factor(m).map(|l| l * g.bounds().uniform()),
-            dg.lipschitz_factor(m).map(|l| l * f.bounds().uniform()),
-            f.lipschitz_factor(m).map(|l| l * dg.norm_bound(L2)),
-            g.lipschitz_factor(m).map(|l| l * df.norm_bound(L2))
-        ].into_iter().sum()
+        Ok([
+            df.lipschitz_factor(m)? * g.bounds().uniform(),
+            dg.lipschitz_factor(m)? * f.bounds().uniform(),
+            f.lipschitz_factor(m)? * dg.norm_bound(L2),
+            g.lipschitz_factor(m)? * df.norm_bound(L2),
+        ]
+        .into_iter()
+        .sum())
     }
 }
 
-
-impl<'a, A, B, F : Float, const N : usize> Support<F, N>
-for SupportProductFirst<A, B>
+impl<'a, A, B, F: Float, const N: usize> Support<N, F> for SupportProductFirst<A, B>
 where
-    A : Support<F, N>,
-    B : Support<F, N>
+    A: Support<N, F>,
+    B: Support<N, F>,
 {
     #[inline]
-    fn support_hint(&self) -> Cube<F, N> {
+    fn support_hint(&self) -> Cube<N, F> {
         self.0.support_hint()
     }
 
     #[inline]
-    fn in_support(&self, x : &Loc<F, N>) -> bool {
+    fn in_support(&self, x: &Loc<N, F>) -> bool {
         self.0.in_support(x)
     }
 
     #[inline]
-    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
+    fn bisection_hint(&self, cube: &Cube<N, F>) -> [Option<F>; N] {
         self.0.bisection_hint(cube)
     }
 }
 
-impl<'a, A, B, F : Float> GlobalAnalysis<F, Bounds<F>>
-for SupportProductFirst<A, B>
-where A : GlobalAnalysis<F, Bounds<F>>,
-      B : GlobalAnalysis<F, Bounds<F>> {
+impl<'a, A, B, F: Float> GlobalAnalysis<F, Bounds<F>> for SupportProductFirst<A, B>
+where
+    A: GlobalAnalysis<F, Bounds<F>>,
+    B: GlobalAnalysis<F, Bounds<F>>,
+{
     #[inline]
     fn global_analysis(&self) -> Bounds<F> {
         self.0.global_analysis() * self.1.global_analysis()
     }
 }
 
-impl<'a, A, B, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N>
-for SupportProductFirst<A, B>
-where A : LocalAnalysis<F, Bounds<F>, N>,
-      B : LocalAnalysis<F, Bounds<F>, N> {
+impl<'a, A, B, F: Float, const N: usize> LocalAnalysis<F, Bounds<F>, N>
+    for SupportProductFirst<A, B>
+where
+    A: LocalAnalysis<F, Bounds<F>, N>,
+    B: LocalAnalysis<F, Bounds<F>, N>,
+{
     #[inline]
-    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+    fn local_analysis(&self, cube: &Cube<N, F>) -> Bounds<F> {
         self.0.local_analysis(cube) * self.1.local_analysis(cube)
     }
 }
@@ -169,125 +156,116 @@
 /// The kernels typically implement [`Support`] and [`Mapping`].
 ///
 /// The implementation [`Support`] only uses the [`Support::support_hint`] of the first parameter!
-#[derive(Copy,Clone,Serialize,Debug)]
+#[derive(Copy, Clone, Serialize, Debug)]
 pub struct SupportSum<A, B>(
     /// First kernel
     pub A,
     /// Second kernel
-    pub B
+    pub B,
 );
 
-impl<'a, A, B, F : Float, const N : usize> Mapping<Loc<F, N>>
-for SupportSum<A, B>
+impl<'a, A, B, F: Float, const N: usize> Mapping<Loc<N, F>> for SupportSum<A, B>
 where
-    A : Mapping<Loc<F, N>, Codomain = F>,
-    B : Mapping<Loc<F, N>, Codomain = F>,
+    A: Mapping<Loc<N, F>, Codomain = F>,
+    B: Mapping<Loc<N, F>, Codomain = F>,
 {
     type Codomain = F;
 
     #[inline]
-    fn apply<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Codomain {
-        self.0.apply(x.ref_instance()) + self.1.apply(x)
+    fn apply<I: Instance<Loc<N, F>>>(&self, x: I) -> Self::Codomain {
+        x.eval_ref(|r| self.0.apply(r)) + self.1.apply(x)
     }
 }
 
-impl<'a, A, B, F : Float, const N : usize> DifferentiableImpl<Loc<F, N>>
-for SupportSum<A, B>
+impl<'a, A, B, F: Float, const N: usize> DifferentiableImpl<Loc<N, F>> for SupportSum<A, B>
 where
-    A : DifferentiableMapping<
-        Loc<F, N>,
-        DerivativeDomain = Loc<F, N>
-    >,
-    B : DifferentiableMapping<
-        Loc<F, N>,
-        DerivativeDomain = Loc<F, N>,
-    >
+    A: DifferentiableMapping<Loc<N, F>, DerivativeDomain = Loc<N, F>>,
+    B: DifferentiableMapping<Loc<N, F>, DerivativeDomain = Loc<N, F>>,
 {
-
-    type Derivative = Loc<F, N>;
+    type Derivative = Loc<N, F>;
 
     #[inline]
-    fn differential_impl<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Derivative {
-        self.0.differential(x.ref_instance()) + self.1.differential(x)
+    fn differential_impl<I: Instance<Loc<N, F>>>(&self, x: I) -> Self::Derivative {
+        x.eval_ref(|r| self.0.differential(r)) + self.1.differential(x)
     }
 }
 
-
-impl<'a, A, B, F : Float, const N : usize> Support<F, N>
-for SupportSum<A, B>
-where A : Support<F, N>,
-      B : Support<F, N>,
-      Cube<F, N> : SetOrd {
-
+impl<'a, A, B, F: Float, const N: usize> Support<N, F> for SupportSum<A, B>
+where
+    A: Support<N, F>,
+    B: Support<N, F>,
+    Cube<N, F>: SetOrd,
+{
     #[inline]
-    fn support_hint(&self) -> Cube<F, N> {
+    fn support_hint(&self) -> Cube<N, F> {
         self.0.support_hint().common(&self.1.support_hint())
     }
 
     #[inline]
-    fn in_support(&self, x : &Loc<F, N>) -> bool {
+    fn in_support(&self, x: &Loc<N, F>) -> bool {
         self.0.in_support(x) || self.1.in_support(x)
     }
 
     #[inline]
-    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
-        map2(self.0.bisection_hint(cube),
-             self.1.bisection_hint(cube),
-             |a, b| a.or(b))
+    fn bisection_hint(&self, cube: &Cube<N, F>) -> [Option<F>; N] {
+        map2(
+            self.0.bisection_hint(cube),
+            self.1.bisection_hint(cube),
+            |a, b| a.or(b),
+        )
     }
 }
 
-impl<'a, A, B, F : Float> GlobalAnalysis<F, Bounds<F>>
-for SupportSum<A, B>
-where A : GlobalAnalysis<F, Bounds<F>>,
-      B : GlobalAnalysis<F, Bounds<F>> {
+impl<'a, A, B, F: Float> GlobalAnalysis<F, Bounds<F>> for SupportSum<A, B>
+where
+    A: GlobalAnalysis<F, Bounds<F>>,
+    B: GlobalAnalysis<F, Bounds<F>>,
+{
     #[inline]
     fn global_analysis(&self) -> Bounds<F> {
         self.0.global_analysis() + self.1.global_analysis()
     }
 }
 
-impl<'a, A, B, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N>
-for SupportSum<A, B>
-where A : LocalAnalysis<F, Bounds<F>, N>,
-      B : LocalAnalysis<F, Bounds<F>, N>,
-      Cube<F, N> : SetOrd {
+impl<'a, A, B, F: Float, const N: usize> LocalAnalysis<F, Bounds<F>, N> for SupportSum<A, B>
+where
+    A: LocalAnalysis<F, Bounds<F>, N>,
+    B: LocalAnalysis<F, Bounds<F>, N>,
+    Cube<N, F>: SetOrd,
+{
     #[inline]
-    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+    fn local_analysis(&self, cube: &Cube<N, F>) -> Bounds<F> {
         self.0.local_analysis(cube) + self.1.local_analysis(cube)
     }
 }
 
-impl<F : Float, M : Copy, A, B> Lipschitz<M> for SupportSum<A, B>
-where A : Lipschitz<M, FloatType = F>,
-      B : Lipschitz<M, FloatType = F> {
+impl<F: Float, M: Copy, A, B> Lipschitz<M> for SupportSum<A, B>
+where
+    A: Lipschitz<M, FloatType = F>,
+    B: Lipschitz<M, FloatType = F>,
+{
     type FloatType = F;
 
-    fn lipschitz_factor(&self, m : M) -> Option<F> {
-        match (self.0.lipschitz_factor(m), self.1.lipschitz_factor(m)) {
-            (Some(l0), Some(l1)) => Some(l0 + l1),
-            _ => None
-        }
+    fn lipschitz_factor(&self, m: M) -> DynResult<F> {
+        Ok(self.0.lipschitz_factor(m)? + self.1.lipschitz_factor(m)?)
     }
 }
 
-impl<'b, F : Float, M : Copy, A, B, Domain> Lipschitz<M>
-for Differential<'b, Domain, SupportSum<A, B>>
+impl<'b, F: Float, M: Copy, A, B, Domain> LipschitzDifferentiableImpl<Domain, M>
+    for SupportSum<A, B>
 where
-    Domain : Space,
-    A : Clone + DifferentiableMapping<Domain, Codomain=F>,
-    B : Clone + DifferentiableMapping<Domain, Codomain=F>,
-    SupportSum<A, B> : DifferentiableMapping<Domain, Codomain=F>,
-    for<'a> A :: Differential<'a> : Lipschitz<M, FloatType = F>,
-    for<'a> B :: Differential<'a> : Lipschitz<M, FloatType = F>
+    Domain: Space,
+    Self: DifferentiableImpl<Domain>,
+    A: DifferentiableMapping<Domain, Codomain = F>,
+    B: DifferentiableMapping<Domain, Codomain = F>,
+    SupportSum<A, B>: DifferentiableMapping<Domain, Codomain = F>,
+    for<'a> A::Differential<'a>: Lipschitz<M, FloatType = F>,
+    for<'a> B::Differential<'a>: Lipschitz<M, FloatType = F>,
 {
     type FloatType = F;
 
-    fn lipschitz_factor(&self, m : M) -> Option<F> {
-        let base = self.base_fn();
-        base.0.diff_ref().lipschitz_factor(m)
-            .zip(base.1.diff_ref().lipschitz_factor(m))
-            .map(|(a, b)| a + b)
+    fn diff_lipschitz_factor(&self, m: M) -> DynResult<F> {
+        Ok(self.0.diff_ref().lipschitz_factor(m)? + self.1.diff_ref().lipschitz_factor(m)?)
     }
 }
 
@@ -296,48 +274,52 @@
 /// The kernels typically implement [`Support`]s and [`Mapping`].
 //
 /// Trait implementations have to be on a case-by-case basis.
-#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
+#[derive(Copy, Clone, Serialize, Debug, Eq, PartialEq)]
 pub struct Convolution<A, B>(
     /// First kernel
     pub A,
     /// Second kernel
-    pub B
+    pub B,
 );
 
-impl<F : Float, M, A, B> Lipschitz<M> for Convolution<A, B>
-where A : Norm<F, L1> ,
-      B : Lipschitz<M, FloatType = F> {
+impl<F: Float, M, A, B> Lipschitz<M> for Convolution<A, B>
+where
+    A: Norm<L1, F>,
+    B: Lipschitz<M, FloatType = F>,
+{
     type FloatType = F;
 
-    fn lipschitz_factor(&self, m : M) -> Option<F> {
+    fn lipschitz_factor(&self, m: M) -> DynResult<F> {
         // For [f * g](x) = ∫ f(x-y)g(y) dy we have
         // [f * g](x) - [f * g](z) = ∫ [f(x-y)-f(z-y)]g(y) dy.
         // Hence |[f * g](x) - [f * g](z)| ≤ ∫ |f(x-y)-f(z-y)|g(y)| dy.
         //                                 ≤ L|x-z| ∫ |g(y)| dy,
         // where L is the Lipschitz factor of f.
-        self.1.lipschitz_factor(m).map(|l| l * self.0.norm(L1))
+        Ok(self.1.lipschitz_factor(m)? * self.0.norm(L1))
     }
 }
 
-impl<'b, F : Float, M, A, B, Domain> Lipschitz<M>
-for Differential<'b, Domain, Convolution<A, B>>
+impl<'b, F: Float, M, A, B, Domain> LipschitzDifferentiableImpl<Domain, M> for Convolution<A, B>
 where
-    Domain : Space,
-    A : Clone + Norm<F, L1> ,
-    Convolution<A, B> : DifferentiableMapping<Domain, Codomain=F>,
-    B : Clone + DifferentiableMapping<Domain, Codomain=F>,
-    for<'a> B :: Differential<'a> : Lipschitz<M, FloatType = F>
+    Domain: Space,
+    Self: DifferentiableImpl<Domain>,
+    A: Norm<L1, F>,
+    Convolution<A, B>: DifferentiableMapping<Domain, Codomain = F>,
+    B: DifferentiableMapping<Domain, Codomain = F>,
+    for<'a> B::Differential<'a>: Lipschitz<M, FloatType = F>,
 {
     type FloatType = F;
 
-    fn lipschitz_factor(&self, m : M) -> Option<F> {
+    fn diff_lipschitz_factor(&self, m: M) -> DynResult<F> {
         // For [f * g](x) = ∫ f(x-y)g(y) dy we have
         // ∇[f * g](x) - ∇[f * g](z) = ∫ [∇f(x-y)-∇f(z-y)]g(y) dy.
         // Hence |∇[f * g](x) - ∇[f * g](z)| ≤ ∫ |∇f(x-y)-∇f(z-y)|g(y)| dy.
         //                                 ≤ L|x-z| ∫ |g(y)| dy,
         // where L is the Lipschitz factor of ∇f.
-        let base = self.base_fn();
-        base.1.diff_ref().lipschitz_factor(m).map(|l| l * base.0.norm(L1))
+        self.1
+            .diff_ref()
+            .lipschitz_factor(m)
+            .map(|l| l * self.0.norm(L1))
     }
 }
 
@@ -346,78 +328,76 @@
 /// The kernel typically implements [`Support`] and [`Mapping`].
 ///
 /// Trait implementations have to be on a case-by-case basis.
-#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
+#[derive(Copy, Clone, Serialize, Debug, Eq, PartialEq)]
 pub struct AutoConvolution<A>(
     /// The kernel to be autoconvolved
-    pub A
+    pub A,
 );
 
-impl<F : Float, M, C> Lipschitz<M> for AutoConvolution<C>
-where C : Lipschitz<M, FloatType = F> + Norm<F, L1> {
+impl<F: Float, M, C> Lipschitz<M> for AutoConvolution<C>
+where
+    C: Lipschitz<M, FloatType = F> + Norm<L1, F>,
+{
     type FloatType = F;
 
-    fn lipschitz_factor(&self, m : M) -> Option<F> {
+    fn lipschitz_factor(&self, m: M) -> DynResult<F> {
         self.0.lipschitz_factor(m).map(|l| l * self.0.norm(L1))
     }
 }
 
-impl<'b, F : Float, M, C, Domain> Lipschitz<M>
-for Differential<'b, Domain, AutoConvolution<C>>
+impl<'b, F: Float, M, C, Domain> LipschitzDifferentiableImpl<Domain, M> for AutoConvolution<C>
 where
-    Domain : Space,
-    C : Clone + Norm<F, L1> + DifferentiableMapping<Domain, Codomain=F>,
-    AutoConvolution<C> : DifferentiableMapping<Domain, Codomain=F>,
-    for<'a> C :: Differential<'a> : Lipschitz<M, FloatType = F>
+    Domain: Space,
+    Self: DifferentiableImpl<Domain>,
+    C: Norm<L1, F> + DifferentiableMapping<Domain, Codomain = F>,
+    AutoConvolution<C>: DifferentiableMapping<Domain, Codomain = F>,
+    for<'a> C::Differential<'a>: Lipschitz<M, FloatType = F>,
 {
     type FloatType = F;
 
-    fn lipschitz_factor(&self, m : M) -> Option<F> {
-        let base = self.base_fn();
-        base.0.diff_ref().lipschitz_factor(m).map(|l| l * base.0.norm(L1))
+    fn diff_lipschitz_factor(&self, m: M) -> DynResult<F> {
+        self.0
+            .diff_ref()
+            .lipschitz_factor(m)
+            .map(|l| l * self.0.norm(L1))
     }
 }
 
-
 /// Representation a multi-dimensional product of a one-dimensional kernel.
 ///
 /// For $G: ℝ → ℝ$, this is the function $F(x\_1, …, x\_n) := \prod_{i=1}^n G(x\_i)$.
 /// The kernel $G$ typically implements [`Support`] and [`Mapping`]
-/// on [`Loc<F, 1>`]. Then the product implements them on [`Loc<F, N>`].
-#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
+/// on [`Loc<1, F>`]. Then the product implements them on [`Loc<N, F>`].
+#[derive(Copy, Clone, Serialize, Debug, Eq, PartialEq)]
 #[allow(dead_code)]
-struct UniformProduct<G, const N : usize>(
+struct UniformProduct<G, const N: usize>(
     /// The one-dimensional kernel
-    G
+    G,
 );
 
-impl<'a, G, F : Float, const N : usize> Mapping<Loc<F, N>>
-for UniformProduct<G, N>
+impl<'a, G, F: Float, const N: usize> Mapping<Loc<N, F>> for UniformProduct<G, N>
 where
-    G : Mapping<Loc<F, 1>, Codomain = F>
+    G: Mapping<Loc<1, F>, Codomain = F>,
 {
     type Codomain = F;
 
     #[inline]
-    fn apply<I : Instance<Loc<F, N>>>(&self, x : I) -> F {
-        x.cow().iter().map(|&y| self.0.apply(Loc([y]))).product()
+    fn apply<I: Instance<Loc<N, F>>>(&self, x: I) -> F {
+        x.decompose()
+            .iter()
+            .map(|&y| self.0.apply(Loc([y])))
+            .product()
     }
 }
 
-
-
-impl<'a, G, F : Float, const N : usize> DifferentiableImpl<Loc<F, N>>
-for UniformProduct<G, N>
+impl<'a, G, F: Float, const N: usize> DifferentiableImpl<Loc<N, F>> for UniformProduct<G, N>
 where
-    G : DifferentiableMapping<
-        Loc<F, 1>,
-        DerivativeDomain = F,
-        Codomain = F,
-    >
+    G: DifferentiableMapping<Loc<1, F>, DerivativeDomain = F, Codomain = F>,
 {
-    type Derivative = Loc<F, N>;
+    type Derivative = Loc<N, F>;
 
     #[inline]
-    fn differential_impl<I : Instance<Loc<F, N>>>(&self, x0 : I) -> Loc<F, N> {
+    fn differential_impl<I: Instance<Loc<N, F>>>(&self, x0: I) -> Loc<N, F> {
         x0.eval(|x| {
             let vs = x.map(|y| self.0.apply(Loc([y])));
             product_differential(x, &vs, |y| self.0.differential(Loc([y])))
@@ -430,17 +410,19 @@
 /// The vector `x` is the location, `vs` consists of the values `g(x_i)`, and
 /// `gd` calculates the derivative `g'`.
 #[inline]
-pub(crate) fn product_differential<F : Float, G : Fn(F) -> F, const N : usize>(
-    x : &Loc<F, N>,
-    vs : &Loc<F, N>,
-    gd : G
-) -> Loc<F, N> {
+pub(crate) fn product_differential<F: Float, G: Fn(F) -> F, const N: usize>(
+    x: &Loc<N, F>,
+    vs: &Loc<N, F>,
+    gd: G,
+) -> Loc<N, F> {
     map1_indexed(x, |i, &y| {
-        gd(y) * vs.iter()
-                  .zip(0..)
-                  .filter_map(|(v, j)| (j != i).then_some(*v))
-                  .product()
-    }).into()
+        gd(y)
+            * vs.iter()
+                .zip(0..)
+                .filter_map(|(v, j)| (j != i).then_some(*v))
+                .product()
+    })
+    .into()
 }
 
 /// Helper function to calulate the Lipschitz factor of $∇f$ for $f(x)=∏_{i=1}^N g(x_i)$.
@@ -448,12 +430,12 @@
 /// The parameter `bound` is a bound on $|g|_∞$, `lip` is a Lipschitz factor for $g$,
 /// `dbound` is a bound on $|∇g|_∞$, and `dlip` a Lipschitz factor for $∇g$.
 #[inline]
-pub(crate) fn product_differential_lipschitz_factor<F : Float, const N : usize>(
-    bound : F,
-    lip : F,
-    dbound : F,
-    dlip : F
-) -> F {
+pub(crate) fn product_differential_lipschitz_factor<F: Float, const N: usize>(
+    bound: F,
+    lip: F,
+    dbound: F,
+    dlip: F,
+) -> DynResult<F> {
     // For arbitrary ψ(x) = ∏_{i=1}^n ψ_i(x_i), we have
     // ψ(x) - ψ(y) = ∑_i [ψ_i(x_i)-ψ_i(y_i)] ∏_{j ≠ i} ψ_j(x_j)
     // by a simple recursive argument. In particular, if ψ_i=g for all i, j, we have
@@ -470,31 +452,33 @@
     //                 = n [L_{∇g} M_g^{n-1} + (n-1) L_g M_g^{n-2} M_{∇g}].
     //                 = n M_g^{n-2}[L_{∇g} M_g + (n-1) L_g M_{∇g}].
     if N >= 2 {
-        F::cast_from(N) * bound.powi((N-2) as i32)
-                        * (dlip * bound  + F::cast_from(N-1) * lip * dbound)
-    } else if N==1 {
-        dlip
+        Ok(F::cast_from(N)
+            * bound.powi((N - 2) as i32)
+            * (dlip * bound + F::cast_from(N - 1) * lip * dbound))
+    } else if N == 1 {
+        Ok(dlip)
     } else {
-        panic!("Invalid dimension")
+        Err(anyhow!("Invalid dimension"))
     }
 }
 
-impl<G, F : Float, const N : usize> Support<F, N>
-for UniformProduct<G, N>
-where G : Support<F, 1> {
+impl<G, F: Float, const N: usize> Support<N, F> for UniformProduct<G, N>
+where
+    G: Support<1, F>,
+{
     #[inline]
-    fn support_hint(&self) -> Cube<F, N> {
-        let [a] : [[F; 2]; 1] = self.0.support_hint().into();
+    fn support_hint(&self) -> Cube<N, F> {
+        let [a]: [[F; 2]; 1] = self.0.support_hint().into();
         array_init(|| a.clone()).into()
     }
 
     #[inline]
-    fn in_support(&self, x : &Loc<F, N>) -> bool {
+    fn in_support(&self, x: &Loc<N, F>) -> bool {
         x.iter().all(|&y| self.0.in_support(&Loc([y])))
     }
 
     #[inline]
-    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
+    fn bisection_hint(&self, cube: &Cube<N, F>) -> [Option<F>; N] {
         cube.map(|a, b| {
             let [h] = self.0.bisection_hint(&[[a, b]].into());
             h
@@ -502,9 +486,10 @@
     }
 }
 
-impl<G, F : Float, const N : usize> GlobalAnalysis<F, Bounds<F>>
-for UniformProduct<G, N>
-where G : GlobalAnalysis<F, Bounds<F>> {
+impl<G, F: Float, const N: usize> GlobalAnalysis<F, Bounds<F>> for UniformProduct<G, N>
+where
+    G: GlobalAnalysis<F, Bounds<F>>,
+{
     #[inline]
     fn global_analysis(&self) -> Bounds<F> {
         let g = self.0.global_analysis();
@@ -512,88 +497,91 @@
     }
 }
 
-impl<G, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N>
-for UniformProduct<G, N>
-where G : LocalAnalysis<F, Bounds<F>, 1> {
+impl<G, F: Float, const N: usize> LocalAnalysis<F, Bounds<F>, N> for UniformProduct<G, N>
+where
+    G: LocalAnalysis<F, Bounds<F>, 1>,
+{
     #[inline]
-    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
-        cube.iter_coords().map(
-            |&[a, b]| self.0.local_analysis(&([[a, b]].into()))
-        ).product()
+    fn local_analysis(&self, cube: &Cube<N, F>) -> Bounds<F> {
+        cube.iter_coords()
+            .map(|&[a, b]| self.0.local_analysis(&([[a, b]].into())))
+            .product()
     }
 }
 
 macro_rules! product_lpnorm {
     ($lp:ident) => {
-        impl<G, F : Float, const N : usize> Norm<F, $lp>
-        for UniformProduct<G, N>
-        where G : Norm<F, $lp> {
+        impl<G, F: Float, const N: usize> Norm<$lp, F> for UniformProduct<G, N>
+        where
+            G: Norm<$lp, F>,
+        {
             #[inline]
-            fn norm(&self, lp : $lp) -> F {
+            fn norm(&self, lp: $lp) -> F {
                 self.0.norm(lp).powi(N as i32)
             }
         }
-    }
+    };
 }
 
 product_lpnorm!(L1);
 product_lpnorm!(L2);
 product_lpnorm!(Linfinity);
 
-
 /// Trait for bounding one kernel with respect to another.
 ///
 /// The type `F` is the scalar field, and `T` another kernel to which `Self` is compared.
-pub trait BoundedBy<F : Num, T> {
+pub trait BoundedBy<F: Num, T> {
     /// Calclate a bounding factor $c$ such that the Fourier transforms $ℱ\[v\] ≤ c ℱ\[u\]$ for
     /// $v$ `self` and $u$ `other`.
     ///
     /// If no such factors exits, `None` is returned.
-    fn bounding_factor(&self, other : &T) -> Option<F>;
+    fn bounding_factor(&self, other: &T) -> DynResult<F>;
 }
 
 /// This [`BoundedBy`] implementation bounds $(uv) * (uv)$ by $(ψ * ψ) u$.
 #[replace_float_literals(F::cast_from(literal))]
-impl<F, C, BaseP>
-BoundedBy<F, SupportProductFirst<AutoConvolution<C>, BaseP>>
-for AutoConvolution<SupportProductFirst<C, BaseP>>
-where F : Float,
-      C : Clone + PartialEq,
-      BaseP :  Fourier<F> + PartialOrd, // TODO: replace by BoundedBy,
-      <BaseP as Fourier<F>>::Transformed : Bounded<F> + Norm<F, L1> {
-
-    fn bounding_factor(&self, kernel : &SupportProductFirst<AutoConvolution<C>, BaseP>) -> Option<F> {
+impl<F, C, BaseP> BoundedBy<F, SupportProductFirst<AutoConvolution<C>, BaseP>>
+    for AutoConvolution<SupportProductFirst<C, BaseP>>
+where
+    F: Float,
+    C: PartialEq,
+    BaseP: Fourier<F> + PartialOrd, // TODO: replace by BoundedBy,
+    <BaseP as Fourier<F>>::Transformed: Bounded<F> + Norm<L1, F>,
+{
+    fn bounding_factor(
+        &self,
+        kernel: &SupportProductFirst<AutoConvolution<C>, BaseP>,
+    ) -> DynResult<F> {
         let SupportProductFirst(AutoConvolution(ref cutoff2), base_spread2) = kernel;
         let AutoConvolution(SupportProductFirst(ref cutoff, ref base_spread)) = self;
         let v̂ = base_spread.fourier();
 
         // Verify that the cut-off and ideal physical model (base spread) are the same.
-        if cutoff == cutoff2
-           && base_spread <= base_spread2
-           && v̂.bounds().lower() >= 0.0 {
+        if cutoff == cutoff2 && base_spread <= base_spread2 && v̂.bounds().lower() >= 0.0 {
             // Calculate the factor between the convolution approximation
             // `AutoConvolution<SupportProductFirst<C, BaseP>>` of $A_*A$ and the
             // kernel of the seminorm. This depends on the physical model P being
             // `SupportProductFirst<C, BaseP>` with the kernel `K` being
             // a `SupportSum` involving `SupportProductFirst<AutoConvolution<C>, BaseP>`.
-            Some(v̂.norm(L1))
+            Ok(v̂.norm(L1))
         } else {
             // We cannot compare
-            None
+            Err(anyhow!("Incomparable kernels"))
         }
     }
 }
 
-impl<F : Float, A, B, C> BoundedBy<F, SupportSum<B, C>> for A
-where A : BoundedBy<F, B>,
-      C : Bounded<F> {
-
+impl<F: Float, A, B, C> BoundedBy<F, SupportSum<B, C>> for A
+where
+    A: BoundedBy<F, B>,
+    C: Bounded<F>,
+{
     #[replace_float_literals(F::cast_from(literal))]
-    fn bounding_factor(&self, SupportSum(ref kernel1, kernel2) : &SupportSum<B, C>) -> Option<F> {
+    fn bounding_factor(&self, SupportSum(ref kernel1, kernel2): &SupportSum<B, C>) -> DynResult<F> {
         if kernel2.bounds().lower() >= 0.0 {
             self.bounding_factor(kernel1)
         } else {
-            None
+            Err(anyhow!("Component kernel not lower-bounded by zero"))
         }
     }
 }
@@ -603,7 +591,7 @@
 /// It will attempt to place the subdivision point at $-r$ or $r$.
 /// If neither of these points lies within $[a, b]$, `None` is returned.
 #[inline]
-pub(super) fn symmetric_interval_hint<F : Float>(r : F, a : F, b : F) -> Option<F> {
+pub(super) fn symmetric_interval_hint<F: Float>(r: F, a: F, b: F) -> Option<F> {
     if a < -r && -r < b {
         Some(-r)
     } else if a < r && r < b {
@@ -622,7 +610,7 @@
 /// returned.
 #[replace_float_literals(F::cast_from(literal))]
 #[inline]
-pub(super) fn symmetric_peak_hint<F : Float>(r : F, a : F, b : F) -> Option<F> {
+pub(super) fn symmetric_peak_hint<F: Float>(r: F, a: F, b: F) -> Option<F> {
     let stage1 = if a < -r {
         if b <= -r {
             None
@@ -648,7 +636,7 @@
     // Ignore stage1 hint if either side of subdivision would be just a small fraction of the
     // interval
     match stage1 {
-        Some(h) if (h - a).min(b-h) >= 0.3 * r => Some(h),
-        _ => None
+        Some(h) if (h - a).min(b - h) >= 0.3 * r => Some(h),
+        _ => None,
     }
 }

mercurial