src/kernels/hat_convolution.rs

branch
dev
changeset 32
56c8adc32b09
parent 0
eb3c7813b67a
child 34
efa60bc4f743
--- a/src/kernels/hat_convolution.rs	Fri Apr 28 13:15:19 2023 +0300
+++ b/src/kernels/hat_convolution.rs	Tue Dec 31 09:34:24 2024 -0500
@@ -14,9 +14,10 @@
     GlobalAnalysis,
     Bounded,
 };
-use alg_tools::mapping::Apply;
+use alg_tools::mapping::{Apply, Differentiable};
 use alg_tools::maputil::array_init;
 
+use crate::types::Lipschitz;
 use super::base::*;
 use super::ball_indicator::CubeIndicator;
 
@@ -81,6 +82,59 @@
     }
 }
 
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<S, const N : usize> Lipschitz<L1> for HatConv<S, N>
+where S : Constant {
+    type FloatType = S::Type;
+    #[inline]
+    fn lipschitz_factor(&self, L1 : L1) -> Option<Self::FloatType> {
+        // For any ψ_i, we have
+        // ∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i)
+        // = [ψ_1(x_1)-ψ_1(y_1)] ∏_{i=2}^N ψ_i(x_i)
+        //   + ψ_1(y_1)[ ∏_{i=2}^N ψ_i(x_i) - ∏_{i=2}^N ψ_i(y_i)]
+        // = ∑_{j=1}^N [ψ_j(x_j)-ψ_j(y_j)]∏_{i > j} ψ_i(x_i) ∏_{i < j} ψ_i(y_i)
+        // Thus
+        // |∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i)|
+        // ≤ ∑_{j=1}^N |ψ_j(x_j)-ψ_j(y_j)| ∏_{i ≠ j} \max_i |ψ_i|,  i.e., N-1 maxima appear.
+        let σ = self.radius();
+        Some((self.lipschitz_1d_σ1() / (σ*σ)) * (self.value_1d_σ1(0.0) / σ).powi(N as i32 - 1))
+    }
+}
+
+impl<S, const N : usize> Lipschitz<L2> for HatConv<S, N>
+where S : Constant {
+    type FloatType = S::Type;
+    #[inline]
+    fn lipschitz_factor(&self, L2 : L2) -> Option<Self::FloatType> {
+        self.lipschitz_factor(L1).map(|l1| l1 * <S::Type>::cast_from(N).sqrt())
+    }
+}
+
+
+impl<'a, S, const N : usize> Differentiable<&'a Loc<S::Type, N>> for HatConv<S, N>
+where S : Constant {
+    type Output = Loc<S::Type, N>;
+    #[inline]
+    fn differential(&self, y : &'a Loc<S::Type, N>) -> Self::Output {
+        let σ = self.radius();
+        let σ2 = σ * σ;
+        let vs = y.map(|x| {
+            self.value_1d_σ1(x  / σ) / σ
+        });
+        product_differential(y, &vs, |x| {
+            self.diff_1d_σ1(x  / σ) / σ2
+        })
+    }
+}
+
+impl<'a, S, const N : usize> Differentiable<Loc<S::Type, N>> for HatConv<S, N>
+where S : Constant {
+    type Output = Loc<S::Type, N>;
+    #[inline]
+    fn differential(&self, y : Loc<S::Type, N>) -> Self::Output {
+        self.differential(&y)
+    }
+}
 
 #[replace_float_literals(S::Type::cast_from(literal))]
 impl<'a, F : Float, S, const N : usize> HatConv<S, N>
@@ -97,6 +151,26 @@
             (4.0/3.0) + 8.0 * y * y * (y - 1.0)
         }
     }
+
+    /// Computes the differential of the kernel for $n=1$ with $σ=1$.
+    /// The kernel is even, so the sign factors make the derivative odd in $x$.
+    #[inline]
+    fn diff_1d_σ1(&self, x : F) -> F {
+        let y = x.abs();
+        if y >= 1.0 {
+            0.0
+        } else if y > 0.5 {
+            - x.signum() * 8.0 * (y - 1.0).powi(2)
+        } else /* 0 ≤ y ≤ 0.5 */ {
+            (24.0 * y - 16.0) * x
+        }
+    }
+
+    /// Computes the Lipschitz factor of the kernel for $n=1$ with $σ=1$.
+    #[inline]
+    fn lipschitz_1d_σ1(&self) -> F {
+        // Maximal absolute differential 8/3 is achieved at ±1/3 by diff_1d_σ1.
+        8.0/3.0
+    }
 }
 
 impl<'a, S, const N : usize> Support<S::Type, N> for HatConv<S, N>
@@ -201,11 +275,77 @@
     }
 }
 
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, R, C, const N : usize> Differentiable<&'a Loc<F, N>>
+for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+
+    type Output = Loc<F, N>;
+
+    #[inline]
+    fn differential(&self, y : &'a Loc<F, N>) -> Loc<F, N> {
+        let Convolution(ref ind, ref hatconv) = self;
+        let β = ind.r.value();
+        let σ = hatconv.radius();
+        let σ2 = σ * σ;
+
+        let vs = y.map(|x| {
+            self.value_1d_σ1(x / σ, β / σ)
+        });
+        product_differential(y, &vs, |x| {
+            self.diff_1d_σ1(x  / σ, β / σ) / σ2
+        })
+    }
+}
+
+impl<'a, F : Float, R, C, const N : usize> Differentiable<Loc<F, N>>
+for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+
+    type Output = Loc<F, N>;
+
+    #[inline]
+    fn differential(&self, y : Loc<F, N>) -> Loc<F, N> {
+        self.differential(&y)
+    }
+}
+
+/// Integrate $f$, whose support is $[c, d]$, on $[a, b]$.
+/// If $b > d$, add $g()$ to the result.
+#[inline]
+#[replace_float_literals(F::cast_from(literal))]
+fn i<F: Float>(a : F, b : F, c : F, d : F, f : impl Fn(F) -> F,
+                g : impl Fn() -> F) -> F {
+    if b < c {
+        0.0
+    } else if b <= d {
+        if a <= c {
+            f(b) - f(c)
+        } else {
+            f(b) - f(a)
+        }
+    } else /* b > d */ {
+        g() + if a <= c {
+            f(d) - f(c)
+        } else if a < d {
+            f(d) - f(a)
+        } else {
+            0.0
+        }
+    }
+}
 
 #[replace_float_literals(F::cast_from(literal))]
 impl<F : Float, C, R, const N : usize> Convolution<CubeIndicator<R, N>, HatConv<C, N>>
 where R : Constant<Type=F>,
       C : Constant<Type=F> {
+      
+    /// Calculates the value of the 1D hat convolution further convolved by an interval indicator.
+    /// As both functions are piecewise polynomials, this is implemented by explicit integral over
+    /// all subintervals of polynomiality of the cube indicator, using easily formed
+    /// antiderivatives.
     #[inline]
     pub fn value_1d_σ1(&self, x : F, β : F) -> F {
         // The integration interval
@@ -218,34 +358,10 @@
             y * y
         }
         
-        /// Integrate $f$, whose support is $[c, d]$, on $[a, b]$.
-        /// If $b > d$, add $g()$ to the result.
-        #[inline]
-        fn i<F: Float>(a : F, b : F, c : F, d : F, f : impl Fn(F) -> F,
-                       g : impl Fn() -> F) -> F {
-            if b < c {
-                0.0
-            } else if b <= d {
-                if a <= c {
-                    f(b) - f(c)
-                } else {
-                    f(b) - f(a)
-                }
-            } else /* b > d */ {
-                g() + if a <= c {
-                    f(d) - f(c)
-                } else if a < d {
-                    f(d) - f(a)
-                } else {
-                    0.0
-                }
-            }
-        }
-
         // Observe the factor 1/6 at the front from the antiderivatives below.
         // The factor 4 is from normalisation of the original function.
         (4.0/6.0) * i(a, b, -1.0, -0.5,
-                // (2/3) (y+1)^3  on  -1 < y ≤ - 1/2
+                // (2/3) (y+1)^3  on  -1 < y ≤ -1/2
                 // The antiderivative is  (2/12)(y+1)^4 = (1/6)(y+1)^4
                 |y| pow4(y+1.0),
                 || i(a, b, -0.5, 0.0,
@@ -266,6 +382,35 @@
                 )
         )
     }
+
+    /// Calculates the derivative of the 1D hat convolution further convolved by an interval
+    /// indicator. The implementation is similar to [`Self::value_1d_σ1`], using the fact that
+    /// $(θ * ψ)' = θ * ψ'$.
+    #[inline]
+    pub fn diff_1d_σ1(&self, x : F, β : F) -> F {
+        // The integration interval
+        let a = x - β;
+        let b = x + β;
+
+        // The factor 4 is from normalisation of the original function.
+        4.0 * i(a, b, -1.0, -0.5,
+                // (2/3) (y+1)^3  on  -1 < y ≤ -1/2
+                |y| (2.0/3.0) * (y + 1.0).powi(3),
+                || i(a, b, -0.5, 0.0,
+                    // -2 y^3 - 2 y^2 + 1/3  on  -1/2 < y ≤ 0
+                    |y| -2.0*(y + 1.0) * y * y + (1.0/3.0),
+                    || i(a, b, 0.0, 0.5,
+                            // 2 y^3 - 2 y^2 + 1/3 on 0 < y < 1/2
+                            |y| 2.0*(y - 1.0) * y * y + (1.0/3.0),
+                            || i(a, b, 0.5, 1.0,
+                                // -(2/3) (y-1)^3  on  1/2 < y ≤ 1
+                                |y| -(2.0/3.0) * (y - 1.0).powi(3),
+                                || 0.0
+                            )
+                    )
+                )
+        )
+    }
 }
 
 impl<F : Float, R, C, const N : usize>

mercurial