src/kernels/hat_convolution.rs

//! Implementation of the convolution of two hat functions,
//! and its convolution with a [`CubeIndicator`].
use numeric_literals::replace_float_literals;
use serde::Serialize;
use alg_tools::types::*;
use alg_tools::norms::*;
use alg_tools::loc::Loc;
use alg_tools::sets::Cube;
use alg_tools::bisection_tree::{
    Support,
    Constant,
    Bounds,
    LocalAnalysis,
    GlobalAnalysis,
    Bounded,
};
use alg_tools::mapping::Apply;
use alg_tools::maputil::array_init;

use super::base::*;
use super::ball_indicator::CubeIndicator;

/// Hat convolution kernel.
///
/// This struct represents the function
/// $$
///     f(x\_1, …, x\_n) = \prod\_{i=1}^n \frac{4}{σ} (h\*h)(x\_i/σ)
/// $$
/// where $h(y) = \max(0, 1 - |2y|)$ is the “hat function”.
/// The factor $4/σ$ normalises the kernel so that $∫ f d x = 1$.
/// We have
/// $$
///     (h*h)(y) =
///     \begin{cases}
///         \frac{2}{3} (y+1)^3 & -1<y\leq -\frac{1}{2}, \\\\
///         -2 y^3-2 y^2+\frac{1}{3} & -\frac{1}{2}<y\leq 0, \\\\
///         2 y^3-2 y^2+\frac{1}{3} & 0<y<\frac{1}{2}, \\\\
///         -\frac{2}{3} (y-1)^3 & \frac{1}{2}\leq y<1. \\\\
///     \end{cases}
/// $$
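///
/// For example (assuming, as in the tests below, that `f64 : Constant<Type = f64>`):
/// ```ignore
/// let u : HatConv<f64, 1> = HatConv{ radius : 1.0 };
/// // The peak value is (4/σ) (h*h)(0) = 4/3 for σ = 1.
/// assert!((u.apply(Loc::from(0.0)) - 4.0/3.0).abs() < 1e-12);
/// ```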
#[derive(Copy,Clone,Debug,Serialize,Eq)]
pub struct HatConv<S : Constant, const N : usize> {
    /// The parameter $σ$ of the kernel; the kernel's support is $[-σ, σ]^N$.
    pub radius : S,
}

impl<S1, S2, const N : usize> PartialEq<HatConv<S2, N>> for HatConv<S1, N>
where S1 : Constant,
      S2 : Constant<Type=S1::Type> {
    fn eq(&self, other : &HatConv<S2, N>) -> bool {
        self.radius.value() == other.radius.value()
    }
}

impl<S, const N : usize> HatConv<S, N> where S : Constant {
    /// Returns the $σ$ parameter of the kernel.
    #[inline]
    pub fn radius(&self) -> S::Type {
        self.radius.value()
    }
}

impl<'a, S, const N : usize> Apply<&'a Loc<S::Type, N>> for HatConv<S, N>
where S : Constant {
    type Output = S::Type;
    #[inline]
    fn apply(&self, y : &'a Loc<S::Type, N>) -> Self::Output {
        let σ = self.radius();
        y.product_map(|x| {
            self.value_1d_σ1(x / σ) / σ
        })
    }
}

impl<S, const N : usize> Apply<Loc<S::Type, N>> for HatConv<S, N>
where S : Constant {
    type Output = S::Type;
    #[inline]
    fn apply(&self, y : Loc<S::Type, N>) -> Self::Output {
        self.apply(&y)
    }
}


#[replace_float_literals(S::Type::cast_from(literal))]
impl<F : Float, S, const N : usize> HatConv<S, N>
where S : Constant<Type=F> {
    /// Computes the value of the kernel for $n=1$ with $σ=1$.
    #[inline]
    fn value_1d_σ1(&self, x : F) -> F {
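        // By symmetry, work with y = |x|; the branches below are 4 (h*h)(y)
        // for the pieces listed in the struct documentation.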
        let y = x.abs();
        if y >= 1.0 {
            0.0
        } else if y > 0.5 {
            - (8.0/3.0) * (y - 1.0).powi(3)
        } else /* 0 ≤ y ≤ 0.5 */ {
            (4.0/3.0) + 8.0 * y * y * (y - 1.0)
        }
    }
}

impl<'a, S, const N : usize> Support<S::Type, N> for HatConv<S, N>
where S : Constant {
    #[inline]
    fn support_hint(&self) -> Cube<S::Type,N> {
        let σ = self.radius();
        array_init(|| [-σ, σ]).into()
    }

    #[inline]
    fn in_support(&self, y : &Loc<S::Type,N>) -> bool {
        let σ = self.radius();
        y.iter().all(|x| x.abs() <= σ)
    }

    #[inline]
    fn bisection_hint(&self, cube : &Cube<S::Type, N>) -> [Option<S::Type>; N] {
        let σ = self.radius();
        cube.map(|c, d| symmetric_peak_hint(σ, c, d))
    }
}

#[replace_float_literals(S::Type::cast_from(literal))]
impl<S, const N : usize> GlobalAnalysis<S::Type, Bounds<S::Type>> for HatConv<S, N>
where S : Constant {
    #[inline]
    fn global_analysis(&self) -> Bounds<S::Type> {
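        // The kernel is nonnegative and attains its maximum at the origin.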
        Bounds(0.0, self.apply(Loc::ORIGIN))
    }
}

impl<S, const N : usize> LocalAnalysis<S::Type, Bounds<S::Type>, N> for HatConv<S, N>
where S : Constant {
    #[inline]
    fn local_analysis(&self, cube : &Cube<S::Type, N>) -> Bounds<S::Type> {
        // The kernel is separable and decreasing in each coordinate's absolute value,
        // so it is maximised/minimised where those are minimised/maximised.
        let lower = self.apply(cube.maxnorm_point());
        let upper = self.apply(cube.minnorm_point());
        Bounds(lower, upper)
    }
}

#[replace_float_literals(C::Type::cast_from(literal))]
impl<C : Constant, const N : usize> Norm<C::Type, L1>
for HatConv<C, N> {
    #[inline]
    fn norm(&self, _ : L1) -> C::Type {
        1.0
    }
}

#[replace_float_literals(C::Type::cast_from(literal))]
impl<C : Constant, const N : usize> Norm<C::Type, Linfinity>
for HatConv<C, N> {
    #[inline]
    fn norm(&self, _ : Linfinity) -> C::Type {
        self.bounds().upper()
    }
}

#[replace_float_literals(F::cast_from(literal))]
impl<'a, F : Float, R, C, const N : usize> Apply<&'a Loc<F, N>>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    type Output = F;

    #[inline]
    fn apply(&self, y : &'a Loc<F, N>) -> F {
        let Convolution(ref ind, ref hatconv) = self;
        let β = ind.r.value();
        let σ = hatconv.radius();

        // This is just a product of one-dimensional versions
        y.product_map(|x| {
            // With $u_σ(x) = u_1(x/σ)/σ$ the normalised hat convolution
            // we have
            // $$
            //      [χ_{-β,β} * u_σ](x)
            //      = ∫_{x-β}^{x+β} u_σ(z) d z
            //      = (1/σ)∫_{x-β}^{x+β} u_1(z/σ) d z
            //      = ∫_{(x-β)/σ}^{(x+β)/σ} u_1(z) d z
            //      = [χ_{-β/σ, β/σ} * u_1](x/σ)
            // $$
            self.value_1d_σ1(x / σ, β / σ)
        })
    }
}

impl<F : Float, R, C, const N : usize> Apply<Loc<F, N>>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    type Output = F;

    #[inline]
    fn apply(&self, y : Loc<F, N>) -> F {
        self.apply(&y)
    }
}


#[replace_float_literals(F::cast_from(literal))]
impl<F : Float, C, R, const N : usize> Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {
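    /// Computes $[χ_{[-β,β]} * u_1](x)$ in one dimension, i.e., the convolution
    /// rescaled to $σ = 1$ as in the [`Apply`] implementation above.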
    #[inline]
    pub fn value_1d_σ1(&self, x : F, β : F) -> F {
        // The integration interval
        let a = x - β;
        let b = x + β;

        #[inline]
        fn pow4<F : Float>(x : F) -> F {
            let y = x * x;
            y * y
        }
        
        /// Integrate a function with support $[c, d]$ over $[a, b]$, given its
        /// antiderivative $f$. If $b > d$, add $g()$ (the integral over the pieces
        /// to the right) to the result.
        #[inline]
        fn i<F: Float>(a : F, b : F, c : F, d : F, f : impl Fn(F) -> F,
                       g : impl Fn() -> F) -> F {
            if b < c {
                0.0
            } else if b <= d {
                if a <= c {
                    f(b) - f(c)
                } else {
                    f(b) - f(a)
                }
            } else /* b > d */ {
                g() + if a <= c {
                    f(d) - f(c)
                } else if a < d {
                    f(d) - f(a)
                } else {
                    0.0
                }
            }
        }
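
        // The integral below is assembled by chaining the four pieces of 4 (h*h):
        // when the upper limit b extends past the current piece, the continuation
        // g adds the integral over the remaining pieces to the right.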

        // Each antiderivative below is scaled by 6 so that the common factor 1/6 can be
        // taken out front; the factor 4 comes from the normalisation of the kernel.
        (4.0/6.0) * i(a, b, -1.0, -0.5,
                // (2/3) (y+1)^3  on  -1 < y ≤ - 1/2
                // The antiderivative is  (2/12)(y+1)^4 = (1/6)(y+1)^4
                |y| pow4(y+1.0),
                || i(a, b, -0.5, 0.0,
                    // -2 y^3 - 2 y^2 + 1/3  on  -1/2 < y ≤ 0
                    // The antiderivative is -1/2 y^4 - 2/3 y^3 + 1/3 y
                    |y| y*(-y*y*(y*3.0 + 4.0) + 2.0),
                    || i(a, b, 0.0, 0.5,
                            // 2 y^3 - 2 y^2 + 1/3 on 0 < y < 1/2
                            // The antiderivative is 1/2 y^4 - 2/3 y^3 + 1/3 y
                            |y| y*(y*y*(y*3.0 - 4.0) + 2.0),
                            || i(a, b, 0.5, 1.0,
                                // -(2/3) (y-1)^3  on  1/2 ≤ y < 1
                                // The antiderivative is  -(2/12)(y-1)^4 = -(1/6)(y-1)^4
                                |y| -pow4(y-1.0),
                                || 0.0
                            )
                    )
                )
        )
    }
}

impl<F : Float, R, C, const N : usize>
Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    #[inline]
    fn get_r(&self) -> F {
        let Convolution(ref ind, ref hatconv) = self;
        ind.r.value() + hatconv.radius()
    }
}

impl<F : Float, R, C, const N : usize> Support<F, N>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {
    
    #[inline]
    fn support_hint(&self) -> Cube<F, N> {
        let r = self.get_r();
        array_init(|| [-r, r]).into()
    }

    #[inline]
    fn in_support(&self, y : &Loc<F, N>) -> bool {
        let r = self.get_r();
        y.iter().all(|x| x.abs() <= r)
    }

    #[inline]
    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
        // It is not difficult to verify that [`HatConv`] is C^2. Therefore, so is
        // [`Convolution<CubeIndicator<R, N>, HatConv<C, N>>`], so a subdivision hint
        // finer than this would not be particularly useful.
        let r = self.get_r();
        cube.map(|c, d| symmetric_peak_hint(r, c, d))
    }
}

impl<F : Float, R, C, const N : usize> GlobalAnalysis<F, Bounds<F>>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {
    #[inline]
    fn global_analysis(&self) -> Bounds<F> {
        Bounds(F::ZERO, self.apply(Loc::ORIGIN))
    }
}

impl<F : Float, R, C, const N : usize> LocalAnalysis<F, Bounds<F>, N>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {
    #[inline]
    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
        // The function is maximised/minimised where each coordinate's absolute value
        // is minimised/maximised.
        let lower = self.apply(cube.maxnorm_point());
        let upper = self.apply(cube.minnorm_point());
        //assert!(upper >= lower);
        if upper < lower {
            let Convolution(ref ind, ref hatconv) = self;
            let β = ind.r.value();
            let σ = hatconv.radius();
            eprintln!("WARNING: Hat convolution {β} {σ} upper bound {upper} < lower bound {lower} on {cube:?} with min-norm point {:?} and max-norm point {:?}", cube.minnorm_point(), cube.maxnorm_point());
            Bounds(upper, lower)
        } else {
            Bounds(lower, upper)
        }
    }
}


/// This [`BoundedBy`] implementation bounds $u * u$ by $(ψ * ψ) u$ for $u$ a hat convolution and
/// $ψ = χ_{[-a,a]^N}$ for some $a>0$.
///
/// This is based on the general formula for bounding $(uχ) * (uχ)$ by $(ψ * ψ) u$,
/// where we take $ψ = χ_{[-a,a]^N}$ and $χ = χ_{[-σ,σ]^N}$ for $σ$ the width of the hat
/// convolution.
#[replace_float_literals(F::cast_from(literal))]
impl<F, C, S, const N : usize>
BoundedBy<F, SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>>
for AutoConvolution<HatConv<C, N>>
where F : Float,
      C : Constant<Type=F>,
      S : Constant<Type=F> {

    fn bounding_factor(
        &self,
        kernel : &SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>
    ) -> Option<F> {
        // We use the comparison $ℱ[𝒜(ψ v)] ≤ L_1 ℱ[𝒜(ψ)u] ⟺ I_{v̂} v̂ ≤ L_1 û$ with
        // $ψ = χ_{[-w, w]}$ satisfying $supp v ⊂ [-w, w]$, i.e. $w ≥ σ$. Here $v̂ = ℱ[v]$ and
        // $I_{v̂} = ∫ v̂ d ξ$. For this relationship to be valid, we need $v̂ ≥ 0$, which is
        // guaranteed by $v = u_σ$ itself being an autoconvolution (of a scaled hat function).
        // With $u = v$, therefore $L_1 = I_{v̂} = ∫ û_σ d ξ = u_σ(0)$.
        let SupportProductFirst(AutoConvolution(ref ind), hatconv2) = kernel;
        let σ = self.0.radius();
        let a = ind.r.value();
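        // In one dimension the bounding factor is u_σ(0) = 4/(3σ).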
        let bounding_1d = 4.0 / (3.0 * σ);

        // Check that the cutting indicator of the comparison
        // `SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>`
        // is wide enough, and that the hat convolution has the same radius as ours.
        if σ <= a && hatconv2 == &self.0 {
            Some(bounding_1d.powi(N as i32))
        } else {
            // We cannot compare
            None
        }
    }
}

/// This [`BoundedBy`] implementation bounds $u * u$ by $u$ for $u$ a hat convolution.
///
/// This is based on Example 3.3 in the manuscript.
#[replace_float_literals(F::cast_from(literal))]
impl<F, C, const N : usize>
BoundedBy<F, HatConv<C, N>>
for AutoConvolution<HatConv<C, N>>
where F : Float,
      C : Constant<Type=F> {

    /// Returns an estimate of the factor $L_1$.
    ///
    /// Returns `None` if `kernel` does not have the same width as the hat convolution
    /// that `self` is based on.
    fn bounding_factor(
        &self,
        kernel : &HatConv<C, N>
    ) -> Option<F> {
        if kernel == &self.0 {
            Some(1.0)
        } else {
            // We cannot compare
            None
        }
    }
}

#[cfg(test)]
mod tests {
    use alg_tools::lingrid::linspace;
    use alg_tools::mapping::Apply;
    use alg_tools::norms::Linfinity;
    use alg_tools::loc::Loc;
    use crate::kernels::{BallIndicator, CubeIndicator, Convolution};
    use super::HatConv;

    /// Tests numerically that [`HatConv<f64, 1>`] is monotone decreasing on $[0, 1]$.
    #[test]
    fn hatconv_monotonicity() {
        let grid = linspace(0.0, 1.0, 100000);
        let hatconv : HatConv<f64, 1> = HatConv{ radius : 1.0 };
        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
        let first = vals.next().unwrap();
        let monotone = vals.fold((first, true), |(prev, ok), t| (t, ok && prev >= t)).1;
        assert!(monotone);
    }
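
    /// A sanity check of a few point values of [`HatConv<f64, 1>`] against the
    /// piecewise formula in the struct documentation.
    #[test]
    fn hatconv_point_values() {
        let u : HatConv<f64, 1> = HatConv{ radius : 1.0 };
        // Peak value 4 (h*h)(0) = 4/3.
        assert!((u.apply(Loc::from(0.0)) - 4.0/3.0).abs() < 1e-12);
        // The kernel is even.
        assert!((u.apply(Loc::from(0.75)) - u.apply(Loc::from(-0.75))).abs() < 1e-12);
        // It vanishes outside [-σ, σ].
        assert_eq!(u.apply(Loc::from(1.25)), 0.0);
    }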

    /// Tests numerically that [`Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>`]
    /// is monotone increasing on $[-2, 0]$ and decreasing on $[0, 2]$.
    #[test]
    fn convolution_cubeind_hatconv_monotonicity() {
        let grid = linspace(-2.0, 0.0, 100000);
        let hatconv : Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>
            = Convolution(BallIndicator { r : 0.5, exponent : Linfinity },
                          HatConv{ radius : 1.0 } );
        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
        let first = vals.next().unwrap();
        let monotone = vals.fold((first, true), |(prev, ok), t| (t, ok && prev <= t)).1;
        assert!(monotone);

        let grid = linspace(0.0, 2.0, 100000);
        let hatconv : Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>
            = Convolution(BallIndicator { r : 0.5, exponent : Linfinity },
                          HatConv{ radius : 1.0 } );
        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
        let first = vals.next().unwrap();
        let monotone = vals.fold((first, true), |(prev, ok), t| (t, ok && prev >= t)).1;
        assert!(monotone);
    }
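
    /// A sanity check: when the cube half-width β covers the support $[-σ, σ]$ of the
    /// hat convolution, the value at the origin integrates the whole kernel, i.e.,
    /// it equals $∫ u_σ = 1$.
    #[test]
    fn convolution_cubeind_hatconv_mass() {
        let conv : Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>
            = Convolution(BallIndicator { r : 2.0, exponent : Linfinity },
                          HatConv{ radius : 1.0 } );
        assert!((conv.apply(Loc::from(0.0)) - 1.0).abs() < 1e-12);
    }

    /// A minimal sketch of the [`BoundedBy`] comparison above; it assumes that
    /// `AutoConvolution` and `BoundedBy` are re-exported from `crate::kernels`
    /// like the kernels imported above. An autoconvolution compared against its
    /// own kernel yields the factor 1; kernels of different widths cannot be compared.
    #[test]
    fn autoconvolution_bounding_factor() {
        use crate::kernels::{AutoConvolution, BoundedBy};
        let u : HatConv<f64, 1> = HatConv{ radius : 1.0 };
        assert_eq!(AutoConvolution(u).bounding_factor(&u), Some(1.0));
        assert_eq!(AutoConvolution(u).bounding_factor(&HatConv{ radius : 2.0 }), None);
    }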
}
