diff -r efa60bc4f743 -r b087e3eab191 src/kernels/hat_convolution.rs
--- a/src/kernels/hat_convolution.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/kernels/hat_convolution.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -14,7 +14,12 @@
     GlobalAnalysis,
     Bounded,
 };
-use alg_tools::mapping::{Apply, Differentiable};
+use alg_tools::mapping::{
+    Mapping,
+    Instance,
+    DifferentiableImpl,
+    Differential,
+};
 use alg_tools::maputil::array_init;
 
 use crate::types::Lipschitz;
@@ -39,6 +44,31 @@
 ///         -\frac{2}{3} (y-1)^3 & \frac{1}{2}\leq y<1. \\\\
 ///     \end{cases}
 /// $$
+// Hence
+// $$
+//     (h\*h)'(y) =
+//     \begin{cases}
+//         2 (y+1)^2 & -1<y\leq -\frac{1}{2}, \\\\
+//         -6 y^2 - 4 y & -\frac{1}{2}<y\leq 0, \\\\
+//         6 y^2 - 4 y & 0<y<\frac{1}{2}, \\\\
+//         -2 (y-1)^2 & \frac{1}{2}\leq y<1,
+//     \end{cases}
+// $$
+// and
+// $$
+//     (h\*h)''(y) =
+//     \begin{cases}
+//         4 (y+1) & -1<y\leq -\frac{1}{2}, \\\\
+//         -12 y - 4 & -\frac{1}{2}<y\leq 0, \\\\
+//         12 y - 4 & 0<y<\frac{1}{2}, \\\\
+//         -4 (y-1) & \frac{1}{2}\leq y<1.
+//     \end{cases}
+// $$
+
 #[derive(Copy,Clone,Debug,Serialize)]
 pub struct HatConv<S : Constant, const N : usize> {
     /// The parameter $σ$ of the kernel.
@@ -61,27 +91,19 @@
     }
 }
 
-impl<'a, S, const N : usize> Apply<&'a Loc<S::Type, N>> for HatConv<S, N>
+impl<'a, S, const N : usize> Mapping<Loc<S::Type, N>> for HatConv<S, N>
 where S : Constant {
-    type Output = S::Type;
+    type Codomain = S::Type;
+
     #[inline]
-    fn apply(&self, y : &'a Loc<S::Type, N>) -> Self::Output {
+    fn apply<I : Instance<Loc<S::Type, N>>>(&self, y : I) -> Self::Codomain {
         let σ = self.radius();
-        y.product_map(|x| {
+        y.cow().product_map(|x| {
             self.value_1d_σ1(x / σ) / σ
         })
     }
 }
 
-impl<'a, S, const N : usize> Apply<Loc<S::Type, N>> for HatConv<S, N>
-where S : Constant {
-    type Output = S::Type;
-    #[inline]
-    fn apply(&self, y : Loc<S::Type, N>) -> Self::Output {
-        self.apply(&y)
-    }
-}
-
 #[replace_float_literals(S::Type::cast_from(literal))]
 impl<S, const N : usize> Lipschitz<L1> for HatConv<S, N>
 where S : Constant {
@@ -95,7 +117,7 @@
         //  = ∑_{j=1}^N [ψ_j(x_j)-ψ_j(y_j)]∏_{i > j} ψ_i(x_i) ∏_{i < j} ψ_i(y_i)
         // Thus
         // |∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i)|
-        // ≤ ∑_{j=1}^N |ψ_j(x_j)-ψ_j(y_j)| ∏_{j ≠ i} \max_i |ψ_i|
+        // ≤ ∑_{j=1}^N |ψ_j(x_j)-ψ_j(y_j)| ∏_{j ≠ i} \max_j |ψ_j|
         let σ = self.radius();
         let l1d = self.lipschitz_1d_σ1() / (σ*σ);
         let m1d = self.value_1d_σ1(0.0) / σ;
@@ -113,31 +135,45 @@
 
 }
 
-impl<'a, S, const N : usize> Differentiable<&'a Loc<S::Type, N>> for HatConv<S, N>
+impl<'a, S, const N : usize> DifferentiableImpl<Loc<S::Type, N>> for HatConv<S, N>
 where S : Constant {
-    type Output = Loc<S::Type, N>;
+    type Derivative = Loc<S::Type, N>;
+
     #[inline]
-    fn differential(&self, y : &'a Loc<S::Type, N>) -> Self::Output {
+    fn differential_impl<I : Instance<Loc<S::Type, N>>>(&self, y0 : I) -> Self::Derivative {
+        let y = y0.cow();
         let σ = self.radius();
         let σ2 = σ * σ;
         let vs = y.map(|x| {
             self.value_1d_σ1(x / σ) / σ
         });
-        product_differential(y, &vs, |x| {
+        product_differential(&*y, &vs, |x| {
            self.diff_1d_σ1(x / σ) / σ2
        })
     }
 }
 
-impl<'a, S, const N : usize> Differentiable<Loc<S::Type, N>> for HatConv<S, N>
-where S : Constant {
-    type Output = Loc<S::Type, N>;
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<'a, F : Float, S, const N : usize> Lipschitz<L2>
+for Differential<'a, Loc<F, N>, HatConv<S, N>>
+where S : Constant<Type=F> {
+    type FloatType = F;
+
     #[inline]
-    fn differential(&self, y : Loc<S::Type, N>) -> Self::Output {
-        self.differential(&y)
+    fn lipschitz_factor(&self, _l2 : L2) -> Option<F> {
+        let h = self.base_fn();
+        let σ = h.radius();
+        Some(product_differential_lipschitz_factor::<F, N>(
+            h.value_1d_σ1(0.0) / σ,
+            h.lipschitz_1d_σ1() / (σ*σ),
+            h.maxabsdiff_1d_σ1() / (σ*σ),
+            h.lipschitz_diff_1d_σ1() / (σ*σ),
+        ))
     }
 }
 
+
 #[replace_float_literals(S::Type::cast_from(literal))]
 impl<'a, F : Float, S, const N : usize> HatConv<S, N>
 where S : Constant<Type=F> {
@@ -173,6 +209,34 @@
         // Maximal absolute differential achieved at ±0.5 by diff_1d_σ1 analysis
         2.0
     }
+
+    /// Computes the maximum absolute differential of the kernel for $n=1$ with $σ=1$.
+    #[inline]
+    fn maxabsdiff_1d_σ1(&self) -> F {
+        // Maximal absolute differential achieved at ±0.5 by diff_1d_σ1 analysis
+        2.0
+    }
+
+    /// Computes the second differential of the kernel for $n=1$ with $σ=1$.
+    #[inline]
+    #[allow(dead_code)]
+    fn diff2_1d_σ1(&self, x : F) -> F {
+        let y = x.abs();
+        if y >= 1.0 {
+            0.0
+        } else if y > 0.5 {
+            - 16.0 * (y - 1.0)
+        } else /* 0 ≤ y ≤ 0.5 */ {
+            48.0 * y - 16.0
+        }
+    }
+
+    /// Computes the Lipschitz factor of the differential of the kernel for $n=1$ with $σ=1$.
+    #[inline]
+    fn lipschitz_diff_1d_σ1(&self) -> F {
+        // Maximal absolute second differential achieved at 0 by diff2_1d_σ1 analysis
+        16.0
+    }
 }
 
 impl<'a, S, const N : usize> Support<S::Type, N> for HatConv<S, N>
 where S : Constant {
@@ -235,21 +299,21 @@
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<'a, F : Float, R, C, const N : usize> Apply<&'a Loc<F, N>>
+impl<'a, F : Float, R, C, const N : usize> Mapping<Loc<F, N>>
 for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
 where R : Constant<Type=F>,
       C : Constant<Type=F> {
 
-    type Output = F;
+    type Codomain = F;
 
     #[inline]
-    fn apply(&self, y : &'a Loc<F, N>) -> F {
+    fn apply<I : Instance<Loc<F, N>>>(&self, y : I) -> F {
         let Convolution(ref ind, ref hatconv) = self;
         let β = ind.r.value();
         let σ = hatconv.radius();
 
         // This is just a product of one-dimensional versions
-        y.product_map(|x| {
+        y.cow().product_map(|x| {
             // With $u_σ(x) = u_1(x/σ)/σ$ the normalised hat convolution
             // we have
             // $$
@@ -264,29 +328,17 @@
     }
 }
 
-impl<'a, F : Float, R, C, const N : usize> Apply<Loc<F, N>>
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, R, C, const N : usize> DifferentiableImpl<Loc<F, N>>
 for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
 where R : Constant<Type=F>,
       C : Constant<Type=F> {
 
-    type Output = F;
+    type Derivative = Loc<F, N>;
 
     #[inline]
-    fn apply(&self, y : Loc<F, N>) -> F {
-        self.apply(&y)
-    }
-}
-
-#[replace_float_literals(F::cast_from(literal))]
-impl<'a, F : Float, R, C, const N : usize> Differentiable<&'a Loc<F, N>>
-for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
-where R : Constant<Type=F>,
-      C : Constant<Type=F> {
-
-    type Output = Loc<F, N>;
-
-    #[inline]
-    fn differential(&self, y : &'a Loc<F, N>) -> Loc<F, N> {
+    fn differential_impl<I : Instance<Loc<F, N>>>(&self, y0 : I) -> Loc<F, N> {
+        let y = y0.cow();
         let Convolution(ref ind, ref hatconv) = self;
         let β = ind.r.value();
         let σ = hatconv.radius();
@@ -295,24 +347,12 @@
         let vs = y.map(|x| {
             self.value_1d_σ1(x / σ, β / σ)
         });
-        product_differential(y, &vs, |x| {
+        product_differential(&*y, &vs, |x| {
             self.diff_1d_σ1(x / σ, β / σ) / σ2
         })
     }
 }
 
-impl<'a, F : Float, R, C, const N : usize> Differentiable<Loc<F, N>>
-for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
-where R : Constant<Type=F>,
-      C : Constant<Type=F> {
-
-    type Output = Loc<F, N>;
-
-    #[inline]
-    fn differential(&self, y : Loc<F, N>) -> Loc<F, N> {
-        self.differential(&y)
-    }
-}
 
 /// Integrate $f$, whose support is $[c, d]$, on $[a, b]$.
 /// If $b > d$, add $g()$ to the result.
@@ -415,6 +455,22 @@
     }
 }
 
+/*
+impl<'a, F : Float, R, C, const N : usize> Lipschitz<L2>
+for Differential<Loc<F, N>, Convolution<CubeIndicator<R, N>, HatConv<C, N>>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+
+    type FloatType = F;
+
+    #[inline]
+    fn lipschitz_factor(&self, _l2 : L2) -> Option<F> {
+        dbg!("unimplemented");
+        None
+    }
+}
+*/
+
 impl<F : Float, R, C, const N : usize> Convolution<CubeIndicator<R, N>, HatConv<C, N>>
 where R : Constant<Type=F>,
       C : Constant<Type=F> {
@@ -556,7 +612,7 @@
 #[cfg(test)]
 mod tests {
     use alg_tools::lingrid::linspace;
-    use alg_tools::mapping::Apply;
+    use alg_tools::mapping::Mapping;
     use alg_tools::norms::Linfinity;
     use alg_tools::loc::Loc;
     use crate::kernels::{BallIndicator, CubeIndicator, Convolution};
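
Note (not part of the changeset): the piecewise formula for $(h*h)$ in the existing doc comment and the formula for $(h*h)'$ in the comment block added above can be cross-checked numerically. The sketch below is a standalone Rust program doing that with plain f64; the names hh and hh_prime are illustrative only and do not appear in the crate.

// Standalone cross-check of the piecewise formulas quoted in the comments;
// no dependency on alg_tools.

/// (h*h)(y) for the hat function h(y) = max(0, 1 - |2y|), as in the doc comment on HatConv.
fn hh(y: f64) -> f64 {
    if y <= -1.0 || y >= 1.0 {
        0.0
    } else if y <= -0.5 {
        (2.0 / 3.0) * (y + 1.0).powi(3)
    } else if y <= 0.0 {
        -2.0 * y.powi(3) - 2.0 * y * y + 1.0 / 3.0
    } else if y < 0.5 {
        2.0 * y.powi(3) - 2.0 * y * y + 1.0 / 3.0
    } else {
        -(2.0 / 3.0) * (y - 1.0).powi(3)
    }
}

/// (h*h)'(y) as in the comment block added by the patch.
fn hh_prime(y: f64) -> f64 {
    if y <= -1.0 || y >= 1.0 {
        0.0
    } else if y <= -0.5 {
        2.0 * (y + 1.0).powi(2)
    } else if y <= 0.0 {
        -6.0 * y * y - 4.0 * y
    } else if y < 0.5 {
        6.0 * y * y - 4.0 * y
    } else {
        -2.0 * (y - 1.0).powi(2)
    }
}

fn main() {
    // Compare (h*h)' against a central finite difference of (h*h) on a grid
    // that avoids the breakpoints ±1, ±1/2 and 0 of the piecewise definition.
    let h = 1e-6;
    let mut worst = 0.0_f64;
    for i in 0..2000 {
        let y = -0.9995 + 0.001 * (i as f64);
        let fd = (hh(y + h) - hh(y - h)) / (2.0 * h);
        worst = worst.max((fd - hh_prime(y)).abs());
    }
    assert!(worst < 1e-5);
    println!("max |finite difference - (h*h)'| = {worst:.2e}");
}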