src/kernels/base.rs

changeset 0:eb3c7813b67a
child 3:0778a71cbb6a
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/kernels/base.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,404 @@
+
+//! Tools for constructing new kernels from component kernels, and traits for analysing them.
+use serde::Serialize;
+use numeric_literals::replace_float_literals;
+
+use alg_tools::types::*;
+use alg_tools::norms::*;
+use alg_tools::loc::Loc;
+use alg_tools::sets::Cube;
+use alg_tools::bisection_tree::{
+    Support,
+    Bounds,
+    LocalAnalysis,
+    GlobalAnalysis,
+    Bounded,
+};
+use alg_tools::mapping::Apply;
+use alg_tools::maputil::{array_init, map2};
+use alg_tools::sets::SetOrd;
+
+use crate::fourier::Fourier;
+
+/// Representation of the product of two kernels.
+///
+/// The kernels typically implement [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
+///
+/// The implementation of [`Support`] delegates entirely to the first kernel;
+/// in particular, only the [`Support::support_hint`] of the first parameter is used!
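+///
+/// A minimal sketch of the intended use (hypothetical kernels `a` and `b` and a point
+/// `x`, not a doctest from this crate):
+/// ```ignore
+/// let w = SupportProductFirst(a, b);
+/// // Pointwise product of the two component kernels:
+/// assert_eq!(w.apply(&x), a.apply(&x) * b.apply(&x));
+/// ```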
+#[derive(Copy,Clone,Serialize,Debug)]
+pub struct SupportProductFirst<A, B>(
+    /// First kernel
+    pub A,
+    /// Second kernel
+    pub B
+);
+
+impl<A, B, F : Float, const N : usize> Apply<Loc<F, N>>
+for SupportProductFirst<A, B>
+where A : for<'a> Apply<&'a Loc<F, N>, Output=F>,
+      B : for<'a> Apply<&'a Loc<F, N>, Output=F> {
+    type Output = F;
+    #[inline]
+    fn apply(&self, x : Loc<F, N>) -> Self::Output {
+        self.0.apply(&x) * self.1.apply(&x)
+    }
+}
+
+impl<'a, A, B, F : Float, const N : usize> Apply<&'a Loc<F, N>>
+for SupportProductFirst<A, B>
+where A : Apply<&'a Loc<F, N>, Output=F>,
+      B : Apply<&'a Loc<F, N>, Output=F> {
+    type Output = F;
+    #[inline]
+    fn apply(&self, x : &'a Loc<F, N>) -> Self::Output {
+        self.0.apply(x) * self.1.apply(x)
+    }
+}
+
+impl<A, B, F : Float, const N : usize> Support<F, N>
+for SupportProductFirst<A, B>
+where A : Support<F, N>,
+      B : Support<F, N> {
+    #[inline]
+    fn support_hint(&self) -> Cube<F, N> {
+        self.0.support_hint()
+    }
+
+    #[inline]
+    fn in_support(&self, x : &Loc<F, N>) -> bool {
+        self.0.in_support(x)
+    }
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
+        self.0.bisection_hint(cube)
+    }
+}
+
+impl<A, B, F : Float> GlobalAnalysis<F, Bounds<F>>
+for SupportProductFirst<A, B>
+where A : GlobalAnalysis<F, Bounds<F>>,
+      B : GlobalAnalysis<F, Bounds<F>> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<F> {
+        self.0.global_analysis() * self.1.global_analysis()
+    }
+}
+
+impl<A, B, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N>
+for SupportProductFirst<A, B>
+where A : LocalAnalysis<F, Bounds<F>, N>,
+      B : LocalAnalysis<F, Bounds<F>, N> {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+        self.0.local_analysis(cube) * self.1.local_analysis(cube)
+    }
+}
+
+/// Representation of the sum of two kernels.
+///
+/// The kernels typically implement [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
+///
+/// The implementation of [`Support`] requires `Cube<F, N> : SetOrd`; its
+/// [`Support::support_hint`] is a common cube covering the hints of both kernels.
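+///
+/// A minimal sketch of the intended use (hypothetical kernels `a` and `b` and a point
+/// `x`, not a doctest from this crate):
+/// ```ignore
+/// let w = SupportSum(a, b);
+/// // Pointwise sum of the two component kernels:
+/// assert_eq!(w.apply(&x), a.apply(&x) + b.apply(&x));
+/// ```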
+#[derive(Copy,Clone,Serialize,Debug)]
+pub struct SupportSum<A, B>(
+    /// First kernel
+    pub A,
+    /// Second kernel
+    pub B
+);
+
+impl<'a, A, B, F : Float, const N : usize> Apply<&'a Loc<F, N>>
+for SupportSum<A, B>
+where A : Apply<&'a Loc<F, N>, Output=F>,
+      B : Apply<&'a Loc<F, N>, Output=F> {
+    type Output = F;
+    #[inline]
+    fn apply(&self, x : &'a Loc<F, N>) -> Self::Output {
+        self.0.apply(x) + self.1.apply(x)
+    }
+}
+
+impl<A, B, F : Float, const N : usize> Apply<Loc<F, N>>
+for SupportSum<A, B>
+where A : for<'a> Apply<&'a Loc<F, N>, Output=F>,
+      B : for<'a> Apply<&'a Loc<F, N>, Output=F> {
+    type Output = F;
+    #[inline]
+    fn apply(&self, x : Loc<F, N>) -> Self::Output {
+        self.0.apply(&x) + self.1.apply(&x)
+    }
+}
+
+impl<A, B, F : Float, const N : usize> Support<F, N>
+for SupportSum<A, B>
+where A : Support<F, N>,
+      B : Support<F, N>,
+      Cube<F, N> : SetOrd {
+    #[inline]
+    fn support_hint(&self) -> Cube<F, N> {
+        self.0.support_hint().common(&self.1.support_hint())
+    }
+
+    #[inline]
+    fn in_support(&self, x : &Loc<F, N>) -> bool {
+        self.0.in_support(x) || self.1.in_support(x)
+    }
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
+        map2(self.0.bisection_hint(cube),
+             self.1.bisection_hint(cube),
+             |a, b| a.or(b))
+    }
+}
+
+impl<A, B, F : Float> GlobalAnalysis<F, Bounds<F>>
+for SupportSum<A, B>
+where A : GlobalAnalysis<F, Bounds<F>>,
+      B : GlobalAnalysis<F, Bounds<F>> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<F> {
+        self.0.global_analysis() + self.1.global_analysis()
+    }
+}
+
+impl<A, B, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N>
+for SupportSum<A, B>
+where A : LocalAnalysis<F, Bounds<F>, N>,
+      B : LocalAnalysis<F, Bounds<F>, N>,
+      Cube<F, N> : SetOrd {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+        self.0.local_analysis(cube) + self.1.local_analysis(cube)
+    }
+}
+
+/// Representation of the convolution of two kernels.
+///
+/// The kernels typically implement [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
+///
+/// Trait implementations have to be provided on a case-by-case basis.
+#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
+pub struct Convolution<A, B>(
+    /// First kernel
+    pub A,
+    /// Second kernel
+    pub B
+);
+
+/// Representation of the autoconvolution of a kernel.
+///
+/// The kernel typically implements [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
+///
+/// Trait implementations have to be provided on a case-by-case basis.
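+///
+/// Both this and [`Convolution`] are plain combinators: for instance, the kernel
+/// $(uv) * (uv)$ of the [`BoundedBy`] implementation below is constructed as
+/// `AutoConvolution(SupportProductFirst(cutoff, base_spread))`.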
+#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
+pub struct AutoConvolution<A>(
+    /// The kernel to be autoconvolved
+    pub A
+);
+
+/// Representation of a multi-dimensional product of a one-dimensional kernel.
+///
+/// For $G: ℝ → ℝ$, this is the function $F(x\_1, …, x\_n) := \prod_{i=1}^n G(x\_i)$.
+/// The kernel $G$ typically implements [`Support`] and [`Mapping`][alg_tools::mapping::Mapping]
+/// on [`Loc<F, 1>`]. Then the product implements them on [`Loc<F, N>`].
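+///
+/// A minimal sketch of the intended use (hypothetical one-dimensional kernel `g` and
+/// coordinates `x1`, `x2`, `x3`, not a doctest from this crate):
+/// ```ignore
+/// // f(x₁, x₂, x₃) = g(x₁)g(x₂)g(x₃)
+/// let f = UniformProduct::<_, 3>(g);
+/// let value = f.apply(&Loc([x1, x2, x3]));
+/// ```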
+#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
+struct UniformProduct<G, const N : usize>(
+    /// The one-dimensional kernel
+    G
+);
+
+impl<'a, G, F : Float, const N : usize> Apply<&'a Loc<F, N>>
+for UniformProduct<G, N>
+where G : Apply<Loc<F, 1>, Output=F> {
+    type Output = F;
+    #[inline]
+    fn apply(&self, x : &'a Loc<F, N>) -> F {
+        x.iter().map(|&y| self.0.apply(Loc([y]))).product()
+    }
+}
+
+impl<G, F : Float, const N : usize> Apply<Loc<F, N>>
+for UniformProduct<G, N>
+where G : Apply<Loc<F, 1>, Output=F> {
+    type Output = F;
+    #[inline]
+    fn apply(&self, x : Loc<F, N>) -> F {
+        x.into_iter().map(|y| self.0.apply(Loc([y]))).product()
+    }
+}
+
+impl<G, F : Float, const N : usize> Support<F, N>
+for UniformProduct<G, N>
+where G : Support<F, 1> {
+    #[inline]
+    fn support_hint(&self) -> Cube<F, N> {
+        let [a] : [[F; 2]; 1] = self.0.support_hint().into();
+        array_init(|| a.clone()).into()
+    }
+
+    #[inline]
+    fn in_support(&self, x : &Loc<F, N>) -> bool {
+        x.iter().all(|&y| self.0.in_support(&Loc([y])))
+    }
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
+        cube.map(|a, b| {
+            let [h] = self.0.bisection_hint(&[[a, b]].into());
+            h
+        })
+    }
+}
+
+impl<G, F : Float, const N : usize> GlobalAnalysis<F, Bounds<F>>
+for UniformProduct<G, N>
+where G : GlobalAnalysis<F, Bounds<F>> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<F> {
+        let g = self.0.global_analysis();
+        (0..N).map(|_| g).product()
+    }
+}
+
+impl<G, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N>
+for UniformProduct<G, N>
+where G : LocalAnalysis<F, Bounds<F>, 1> {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+        cube.iter_coords().map(
+            |&[a, b]| self.0.local_analysis(&([[a, b]].into()))
+        ).product()
+    }
+}
+
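+// The norm of a uniform product factorises across coordinates: $‖F‖ = ‖G‖^N$ holds for
+// the $L^1$ and $L^2$ norms by Fubini's theorem, and for $L^∞$ because the supremum of
+// a product over independent coordinates is the product of the suprema.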
+macro_rules! product_lpnorm {
+    ($lp:ident) => {
+        impl<G, F : Float, const N : usize> Norm<F, $lp>
+        for UniformProduct<G, N>
+        where G : Norm<F, $lp> {
+            #[inline]
+            fn norm(&self, lp : $lp) -> F {
+                self.0.norm(lp).powi(N as i32)
+            }
+        }
+    }
+}
+
+product_lpnorm!(L1);
+product_lpnorm!(L2);
+product_lpnorm!(Linfinity);
+
+
+/// Trait for bounding one kernel with respect to another.
+///
+/// The type `F` is the scalar field, and `T` another kernel to which `Self` is compared.
+pub trait BoundedBy<F : Num, T> {
+    /// Calculate a bounding factor $c$ such that $ℱ\[v\] ≤ c ℱ\[u\]$ for the Fourier
+    /// transforms, where $v$ is `self` and $u$ is `other`.
+    ///
+    /// If no such factor exists, `None` is returned.
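+    ///
+    /// A minimal usage sketch (hypothetical kernels `v` and `u`, not a doctest from
+    /// this crate):
+    /// ```ignore
+    /// if let Some(c) = v.bounding_factor(&u) {
+    ///     // Here ℱ[v] ≤ c ℱ[u] pointwise.
+    /// }
+    /// ```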
+    fn bounding_factor(&self, other : &T) -> Option<F>;
+}
+
+/// This [`BoundedBy`] implementation bounds $(uv) * (uv)$ by $(u * u) v$, where $u$ is
+/// the cut-off kernel `C` and $v$ the base spread `BaseP`.
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, C, BaseP>
+BoundedBy<F, SupportProductFirst<AutoConvolution<C>, BaseP>>
+for AutoConvolution<SupportProductFirst<C, BaseP>>
+where F : Float,
+      C : Clone + PartialEq,
+      BaseP :  Fourier<F> + PartialOrd, // TODO: replace by BoundedBy,
+      <BaseP as Fourier<F>>::Transformed : Bounded<F> + Norm<F, L1> {
+
+    fn bounding_factor(&self, kernel : &SupportProductFirst<AutoConvolution<C>, BaseP>) -> Option<F> {
+        let SupportProductFirst(AutoConvolution(ref cutoff2), base_spread2) = kernel;
+        let AutoConvolution(SupportProductFirst(ref cutoff, ref base_spread)) = self;
+        let v̂ = base_spread.fourier();
+
+        // Verify that the cut-off and ideal physical model (base spread) are the same.
+        if cutoff == cutoff2
+           && base_spread <= base_spread2
+           && v̂.bounds().lower() >= 0.0 {
+            // Calculate the factor between the convolution approximation
+            // `AutoConvolution<SupportProductFirst<C, BaseP>>` of $A_*A$ and the
+            // kernel of the seminorm. This depends on the physical model P being
+            // `SupportProductFirst<C, BaseP>` with the kernel `K` being
+            // a `SupportSum` involving `SupportProductFirst<AutoConvolution<C>, BaseP>`.
+            Some(v̂.norm(L1))
+        } else {
+            // We cannot compare
+            None
+        }
+    }
+}
+
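+/// Bounds a kernel $A$ by a sum $B + C$: if $A$ is bounded by $B$, and $C$ has a
+/// non-negative lower bound, the bounding factor for $B$ is reused; otherwise `None`
+/// is returned.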
+impl<F : Float, A, B, C> BoundedBy<F, SupportSum<B, C>> for A
+where A : BoundedBy<F, B>,
+      C : Bounded<F> {
+
+    #[replace_float_literals(F::cast_from(literal))]
+    fn bounding_factor(&self, SupportSum(ref kernel1, kernel2) : &SupportSum<B, C>) -> Option<F> {
+        if kernel2.bounds().lower() >= 0.0 {
+            self.bounding_factor(kernel1)
+        } else {
+            None
+        }
+    }
+}
+
+/// Generates on $[a, b]$ a [`Support::bisection_hint`] for a symmetric interval $[-r, r]$.
+///
+/// It will attempt to place the subdivision point at $-r$ or $r$.
+/// If neither of these points lies in the open interval $(a, b)$, `None` is returned.
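+///
+/// For instance, with $r = 1$ the point $-r$ lies in the open interval $(-2, 0)$:
+/// ```ignore
+/// assert_eq!(symmetric_interval_hint(1.0, -2.0, 0.0), Some(-1.0));
+/// ```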
+#[inline]
+pub(super) fn symmetric_interval_hint<F : Float>(r : F, a : F, b : F) -> Option<F> {
+    if a < -r && -r < b {
+        Some(-r)
+    } else if a < r && r < b {
+        Some(r)
+    } else {
+        None
+    }
+}
+
+/// Generates on $[a, b]$ a [`Support::bisection_hint`] for a function with monotone
+/// derivative, support on $[-r, r]$, and peak at $0$.
+///
+/// It will attempt to place the subdivision point at $-r$, $0$, or $r$, preferring the
+/// point that maximises the length of the shorter of the two resulting subintervals.
+/// If none of these points lies in the open interval $(a, b)$, or the shorter
+/// subinterval would have length below $0.3r$, `None` is returned.
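+///
+/// For instance, with $r = 1$ the peak at $0$ splits $(-2, 2)$ evenly, and both sides
+/// exceed $0.3r$:
+/// ```ignore
+/// assert_eq!(symmetric_peak_hint(1.0, -2.0, 2.0), Some(0.0));
+/// ```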
+#[replace_float_literals(F::cast_from(literal))]
+#[inline]
+pub(super) fn symmetric_peak_hint<F : Float>(r : F, a : F, b : F) -> Option<F> {
+    let stage1 = if a < -r {
+        if b <= -r {
+            None
+        } else if a + r < -b {
+            Some(-r)
+        } else {
+            Some(0.0)
+        }
+    } else if a < 0.0 {
+        if b <= 0.0 {
+            None
+        } else if a < r - b {
+            Some(0.0)
+        } else {
+            Some(r)
+        }
+    } else if a < r && b > r {
+        Some(r)
+    } else {
+        None
+    };
+
+    // Ignore the stage-1 hint if either side of the subdivision would be shorter
+    // than 0.3r.
+    match stage1 {
+        Some(h) if (h - a).min(b-h) >= 0.3 * r => Some(h),
+        _ => None
+    }
+}
