diff -r 000000000000 -r eb3c7813b67a src/kernels/hat_convolution.rs
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/kernels/hat_convolution.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,450 @@
+//! Implementation of the convolution of two hat functions,
+//! and its convolution with a [`CubeIndicator`].
+use numeric_literals::replace_float_literals;
+use serde::Serialize;
+use alg_tools::types::*;
+use alg_tools::norms::*;
+use alg_tools::loc::Loc;
+use alg_tools::sets::Cube;
+use alg_tools::bisection_tree::{
+    Support,
+    Constant,
+    Bounds,
+    LocalAnalysis,
+    GlobalAnalysis,
+    Bounded,
+};
+use alg_tools::mapping::Apply;
+use alg_tools::maputil::array_init;
+
+use super::base::*;
+use super::ball_indicator::CubeIndicator;
+
+/// Hat convolution kernel.
+///
+/// This struct represents the function
+/// $$
+/// f(x\_1, …, x\_n) = \prod\_{i=1}^n \frac{4}{σ} (h\*h)(x\_i/σ)
+/// $$
+/// where the “hat function” $h(y)= \max(0, 1 - |2y|)$.
+/// The factor $4/σ$ normalises $∫ f d x = 1$.
+/// We have
+/// $$
+/// (h*h)(y) =
+/// \begin{cases}
+/// \frac{2}{3} (y+1)^3 & -1<y\leq -\frac{1}{2}, \\
+/// -2 y^3-2 y^2+\frac{1}{3} & -\frac{1}{2}<y\leq 0, \\
+/// 2 y^3-2 y^2+\frac{1}{3} & 0<y<\frac{1}{2}, \\
+/// -\frac{2}{3} (y-1)^3 & \frac{1}{2}<y\leq 1, \\
+/// 0 & \text{otherwise}.
+/// \end{cases}
+/// $$
+#[derive(Copy,Clone,Debug,Serialize,Eq)]
+pub struct HatConv<S : Constant, const N : usize> {
+    /// The parameter $σ$ of the kernel.
+    pub radius : S,
+}
+
+impl<S1, S2, const N : usize> PartialEq<HatConv<S2, N>> for HatConv<S1, N>
+where S1 : Constant,
+      S2 : Constant<Type=S1::Type> {
+    fn eq(&self, other : &HatConv<S2, N>) -> bool {
+        self.radius.value() == other.radius.value()
+    }
+}
+
+impl<'a, S, const N : usize> HatConv<S, N> where S : Constant {
+    /// Returns the $σ$ parameter of the kernel.
+    #[inline]
+    pub fn radius(&self) -> S::Type {
+        self.radius.value()
+    }
+}
+
+impl<'a, S, const N : usize> Apply<&'a Loc<S::Type, N>> for HatConv<S, N>
+where S : Constant {
+    type Output = S::Type;
+    #[inline]
+    fn apply(&self, y : &'a Loc<S::Type, N>) -> Self::Output {
+        let σ = self.radius();
+        y.product_map(|x| {
+            self.value_1d_σ1(x / σ) / σ
+        })
+    }
+}
+
+impl<'a, S, const N : usize> Apply<Loc<S::Type, N>> for HatConv<S, N>
+where S : Constant {
+    type Output = S::Type;
+    #[inline]
+    fn apply(&self, y : Loc<S::Type, N>) -> Self::Output {
+        self.apply(&y)
+    }
+}
+
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<'a, F : Float, S, const N : usize> HatConv<S, N>
+where S : Constant<Type=F> {
+    /// Computes the value of the kernel for $n=1$ with $σ=1$.
+    #[inline]
+    fn value_1d_σ1(&self, x : F) -> F {
+        let y = x.abs();
+        if y >= 1.0 {
+            0.0
+        } else if y > 0.5 {
+            - (8.0/3.0) * (y - 1.0).powi(3)
+        } else /* 0 ≤ y ≤ 0.5 */ {
+            (4.0/3.0) + 8.0 * y * y * (y - 1.0)
+        }
+    }
+}
+
+impl<'a, S, const N : usize> Support<S::Type, N> for HatConv<S, N>
+where S : Constant {
+    #[inline]
+    fn support_hint(&self) -> Cube<S::Type, N> {
+        let σ = self.radius();
+        array_init(|| [-σ, σ]).into()
+    }
+
+    #[inline]
+    fn in_support(&self, y : &Loc<S::Type, N>) -> bool {
+        let σ = self.radius();
+        y.iter().all(|x| x.abs() <= σ)
+    }
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<S::Type, N>) -> [Option<S::Type>; N] {
+        let σ = self.radius();
+        cube.map(|c, d| symmetric_peak_hint(σ, c, d))
+    }
+}
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<S, const N : usize> GlobalAnalysis<S::Type, Bounds<S::Type>> for HatConv<S, N>
+where S : Constant {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<S::Type> {
+        Bounds(0.0, self.apply(Loc::ORIGIN))
+    }
+}
+
+impl<S, const N : usize> LocalAnalysis<S::Type, Bounds<S::Type>, N> for HatConv<S, N>
+where S : Constant {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<S::Type, N>) -> Bounds<S::Type> {
+        // The function is maximised/minimised where the 2-norm is minimised/maximised.
+        let lower = self.apply(cube.maxnorm_point());
+        let upper = self.apply(cube.minnorm_point());
+        Bounds(lower, upper)
+    }
+}
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize> Norm<C::Type, L1>
+for HatConv<C, N> {
+    #[inline]
+    fn norm(&self, _ : L1) -> C::Type {
+        1.0
+    }
+}
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize> Norm<C::Type, Linfinity>
+for HatConv<C, N> {
+    #[inline]
+    fn norm(&self, _ : Linfinity) -> C::Type {
+        self.bounds().upper()
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, R, C, const N : usize> Apply<&'a Loc<F, N>>
+for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+
+    type Output = F;
+
+    #[inline]
+    fn apply(&self, y : &'a Loc<F, N>) -> F {
+        let Convolution(ref ind, ref hatconv) = self;
+        let β = ind.r.value();
+        let σ = hatconv.radius();
+
+        // This is just a product of one-dimensional versions
+        y.product_map(|x| {
+            // With $u_σ(x) = u_1(x/σ)/σ$ the normalised hat convolution
+            // we have
+            // $$
+            //   [χ_{-β,β} * u_σ](x)
+            //   = ∫_{x-β}^{x+β} u_σ(z) d z
+            //   = (1/σ)∫_{x-β}^{x+β} u_1(z/σ) d z
+            //   = ∫_{(x-β)/σ}^{(x+β)/σ} u_1(z) d z
+            //   = [χ_{-β/σ, β/σ} * u_1](x/σ)
+            // $$
+            self.value_1d_σ1(x / σ, β / σ)
+        })
+    }
+}
+
+impl<'a, F : Float, R, C, const N : usize> Apply<Loc<F, N>>
+for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+
+    type Output = F;
+
+    #[inline]
+    fn apply(&self, y : Loc<F, N>) -> F {
+        self.apply(&y)
+    }
+}
+
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float, R, C, const N : usize> Convolution<CubeIndicator<R, N>, HatConv<C, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+    #[inline]
+    pub fn value_1d_σ1(&self, x : F, β : F) -> F {
+        // The integration interval
+        let a = x - β;
+        let b = x + β;
+
+        #[inline]
+        fn pow4<F : Float>(x : F) -> F {
+            let y = x * x;
+            y * y
+        }
+
+        /// Integrate $f$, whose support is $[c, d]$, on $[a, b]$.
+        /// If $b > d$, add $g()$ to the result.
+        #[inline]
+        fn i<F : Float>(a : F, b : F, c : F, d : F, f : impl Fn(F) -> F,
+                        g : impl Fn() -> F) -> F {
+            if b < c {
+                0.0
+            } else if b <= d {
+                if a <= c {
+                    f(b) - f(c)
+                } else {
+                    f(b) - f(a)
+                }
+            } else /* b > d */ {
+                g() + if a <= c {
+                    f(d) - f(c)
+                } else if a < d {
+                    f(d) - f(a)
+                } else {
+                    0.0
+                }
+            }
+        }
+
+        // Observe the factor 1/6 at the front from the antiderivatives below.
+        // The factor 4 is from normalisation of the original function.
+        (4.0/6.0) * i(a, b, -1.0, -0.5,
+                      // (2/3) (y+1)^3 on -1 < y ≤ -1/2
+                      // The antiderivative is (2/12)(y+1)^4 = (1/6)(y+1)^4
+                      |y| pow4(y+1.0),
+                      || i(a, b, -0.5, 0.0,
+                           // -2 y^3 - 2 y^2 + 1/3 on -1/2 < y ≤ 0
+                           // The antiderivative is -1/2 y^4 - 2/3 y^3 + 1/3 y
+                           |y| y*(-y*y*(y*3.0 + 4.0) + 2.0),
+                           || i(a, b, 0.0, 0.5,
+                                // 2 y^3 - 2 y^2 + 1/3 on 0 < y < 1/2
+                                // The antiderivative is 1/2 y^4 - 2/3 y^3 + 1/3 y
+                                |y| y*(y*y*(y*3.0 - 4.0) + 2.0),
+                                || i(a, b, 0.5, 1.0,
+                                     // -(2/3) (y-1)^3 on 1/2 < y ≤ 1
+                                     // The antiderivative is -(2/12)(y-1)^4 = -(1/6)(y-1)^4
+                                     |y| -pow4(y-1.0),
+                                     || 0.0
+                                )
+                           )
+                      )
+        )
+    }
+}
+
+impl<F : Float, R, C, const N : usize>
+Convolution<CubeIndicator<R, N>, HatConv<C, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+
+    #[inline]
+    fn get_r(&self) -> F {
+        let Convolution(ref ind, ref hatconv) = self;
+        ind.r.value() + hatconv.radius()
+    }
+}
+
+impl<F : Float, R, C, const N : usize> Support<F, N>
+for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+
+    #[inline]
+    fn support_hint(&self) -> Cube<F, N> {
+        let r = self.get_r();
+        array_init(|| [-r, r]).into()
+    }
+
+    #[inline]
+    fn in_support(&self, y : &Loc<F, N>) -> bool {
+        let r = self.get_r();
+        y.iter().all(|x| x.abs() <= r)
+    }
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
+        // It is not difficult to verify that [`HatConv`] is C^2.
+        // Therefore, so is [`Convolution<CubeIndicator<R, N>, HatConv<C, N>>`] so that a finer
+        // subdivision for the hint than this is not particularly useful.
+        let r = self.get_r();
+        cube.map(|c, d| symmetric_peak_hint(r, c, d))
+    }
+}
+
+impl<F : Float, R, C, const N : usize> GlobalAnalysis<F, Bounds<F>>
+for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<F> {
+        Bounds(F::ZERO, self.apply(Loc::ORIGIN))
+    }
+}
+
+impl<F : Float, R, C, const N : usize> LocalAnalysis<F, Bounds<F>, N>
+for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+        // The function is maximised/minimised where the absolute value is minimised/maximised.
+        let lower = self.apply(cube.maxnorm_point());
+        let upper = self.apply(cube.minnorm_point());
+        //assert!(upper >= lower);
+        if upper < lower {
+            let Convolution(ref ind, ref hatconv) = self;
+            let β = ind.r.value();
+            let σ = hatconv.radius();
+            eprintln!("WARNING: Hat convolution {β} {σ} upper bound {upper} < lower bound {lower} on {cube:?} with min-norm point {:?} and max-norm point {:?}", cube.minnorm_point(), cube.maxnorm_point());
+            Bounds(upper, lower)
+        } else {
+            Bounds(lower, upper)
+        }
+    }
+}
+
+
+/// This [`BoundedBy`] implementation bounds $u * u$ by $(ψ * ψ) u$ for $u$ a hat convolution and
+/// $ψ = χ_{[-a,a]^N}$ for some $a>0$.
+///
+/// This is based on the general formula for bounding $(uχ) * (uχ)$ by $(ψ * ψ) u$,
+/// where we take $ψ = χ_{[-a,a]^N}$ and $χ = χ_{[-σ,σ]^N}$ for $σ$ the width of the hat
+/// convolution.
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, C, S, const N : usize>
+BoundedBy<F, SupportProductFirst<AutoConvolution<CubeIndicator<C, N>>, HatConv<S, N>>>
+for AutoConvolution<HatConv<S, N>>
+where F : Float,
+      C : Constant<Type=F>,
+      S : Constant<Type=F> {
+
+    fn bounding_factor(
+        &self,
+        kernel : &SupportProductFirst<AutoConvolution<CubeIndicator<C, N>>, HatConv<S, N>>
+    ) -> Option<F> {
+        // We use the comparison $ℱ[𝒜(ψ v)] ≤ L_1 ℱ[𝒜(ψ)u] ⟺ I_{v̂} v̂ ≤ L_1 û$ with
+        // $ψ = χ_{[-w, w]}$ satisfying $supp v ⊂ [-w, w]$, i.e. $w ≥ σ$. Here $v̂ = ℱ[v]$ and
+        // $I_{v̂} = ∫ v̂ d ξ$. For this relationship to be valid, we need $v̂ ≥ 0$, which is guaranteed
+        // by $v̂ = u_σ$ being an autoconvolution. With $u = v$, therefore $L_1 = I_{v̂} = ∫ u_σ(ξ) d ξ$.
+        let SupportProductFirst(AutoConvolution(ref ind), hatconv2) = kernel;
+        let σ = self.0.radius();
+        let a = ind.r.value();
+        let bounding_1d = 4.0 / (3.0 * σ);
+
+        // Check that the cutting indicator of the comparison
+        // `SupportProductFirst<AutoConvolution<CubeIndicator<C, N>>, HatConv<S, N>>`
+        // is wide enough, and that the hat convolution has the same radius as ours.
+        if σ <= a && hatconv2 == &self.0 {
+            Some(bounding_1d.powi(N as i32))
+        } else {
+            // We cannot compare
+            None
+        }
+    }
+}
+
+/// This [`BoundedBy`] implementation bounds $u * u$ by $u$ for $u$ a hat convolution.
+///
+/// This is based on Example 3.3 in the manuscript.
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, C, const N : usize>
+BoundedBy<F, HatConv<C, N>>
+for AutoConvolution<HatConv<C, N>>
+where F : Float,
+      C : Constant<Type=F> {
+
+    /// Returns an estimate of the factor $L_1$.
+    ///
+    /// Returns `None` if `kernel` does not have the same width as the hat convolution that
+    /// `self` is based on.
+    fn bounding_factor(
+        &self,
+        kernel : &HatConv<C, N>
+    ) -> Option<F> {
+        if kernel == &self.0 {
+            Some(1.0)
+        } else {
+            // We cannot compare
+            None
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use alg_tools::lingrid::linspace;
+    use alg_tools::mapping::Apply;
+    use alg_tools::norms::Linfinity;
+    use alg_tools::loc::Loc;
+    use crate::kernels::{BallIndicator, CubeIndicator, Convolution};
+    use super::HatConv;
+
+    /// Tests numerically that [`HatConv`] is monotone.
+    #[test]
+    fn hatconv_monotonicity() {
+        let grid = linspace(0.0, 1.0, 100000);
+        let hatconv : HatConv<f64, 1> = HatConv{ radius : 1.0 };
+        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
+        let first = vals.next().unwrap();
+        // Carry the previous sample forward so that consecutive values are compared.
+        let monotone = vals.fold((first, true), |(prev, ok), t| (t, ok && prev >= t)).1;
+        assert!(monotone);
+    }
+
+    /// Tests numerically that [`Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>`] is monotone.
+    #[test]
+    fn convolution_cubeind_hatconv_monotonicity() {
+        let grid = linspace(-2.0, 0.0, 100000);
+        let hatconv : Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>
+            = Convolution(BallIndicator { r : 0.5, exponent : Linfinity },
+                          HatConv{ radius : 1.0 } );
+        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
+        let first = vals.next().unwrap();
+        // Non-decreasing on [-2, 0].
+        let monotone = vals.fold((first, true), |(prev, ok), t| (t, ok && prev <= t)).1;
+        assert!(monotone);
+
+        let grid = linspace(0.0, 2.0, 100000);
+        let hatconv : Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>
+            = Convolution(BallIndicator { r : 0.5, exponent : Linfinity },
+                          HatConv{ radius : 1.0 } );
+        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
+        let first = vals.next().unwrap();
+        // Non-increasing on [0, 2].
+        let monotone = vals.fold((first, true), |(prev, ok), t| (t, ok && prev >= t)).1;
+        assert!(monotone);
+    }
+}
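A possible further check, sketched in the style of the `tests` module above and assuming only its existing imports (`HatConv`, `Loc`, `Apply`), not part of the changeset itself: the normalisation stated in the doc comment, a one-dimensional peak value of $4/(3σ)$ at the origin, is the same constant that appears as `bounding_1d` in the first `BoundedBy` implementation, and can be verified numerically.

    /// Numerically checks the normalisation claimed in the documentation of [`HatConv`]:
    /// in one dimension the kernel attains its maximum 4/(3σ) at the origin.
    #[test]
    fn hatconv_peak_value() {
        let σ = 2.0;
        let hatconv : HatConv<f64, 1> = HatConv{ radius : σ };
        // value_1d_σ1(0) = 4/3, so apply(0) = (4/3)/σ = 4/(3σ).
        let expected = 4.0 / (3.0 * σ);
        assert!((hatconv.apply(Loc::from(0.0)) - expected).abs() < 1e-12);
    }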