src/kernels/hat_convolution.rs

//! Implementation of the convolution of two hat functions,
//! and its convolution with a [`CubeIndicator`].
use numeric_literals::replace_float_literals;
use serde::Serialize;
use alg_tools::types::*;
use alg_tools::norms::*;
use alg_tools::loc::Loc;
use alg_tools::sets::Cube;
use alg_tools::bisection_tree::{
    Support,
    Constant,
    Bounds,
    LocalAnalysis,
    GlobalAnalysis,
    Bounded,
};
use alg_tools::mapping::{
    Mapping,
    Instance,
    DifferentiableImpl,
    Differential,
};
use alg_tools::maputil::array_init;

use crate::types::Lipschitz;
use super::base::*;
use super::ball_indicator::CubeIndicator;

/// Hat convolution kernel.
///
/// This struct represents the function
/// $$
///     f(x\_1, …, x\_n) = \prod\_{i=1}^n \frac{4}{σ} (h\*h)(x\_i/σ)
/// $$
/// where $h(y) = \max(0, 1 - |2y|)$ is the “hat function”.
/// The factor $4/σ$ normalises the kernel to unit mass, $∫ f d x = 1$.
/// We have
/// $$
///     (h*h)(y) =
///     \begin{cases}
///         \frac{2}{3} (y+1)^3 & -1<y\leq -\frac{1}{2}, \\\\
///         -2 y^3-2 y^2+\frac{1}{3} & -\frac{1}{2}<y\leq 0, \\\\
///         2 y^3-2 y^2+\frac{1}{3} & 0<y<\frac{1}{2}, \\\\
///         -\frac{2}{3} (y-1)^3 & \frac{1}{2}\leq y<1. \\\\
///     \end{cases}
/// $$
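///
/// For example, each one-dimensional factor attains its maximum $\frac{4}{σ}(h\*h)(0) = \frac{4}{3σ}$
/// at the origin, so that $f(0, …, 0) = (4/(3σ))^n$.
///
/// A minimal usage sketch (marked `ignore`, as the surrounding crate paths are assumed):
/// ```ignore
/// use alg_tools::loc::Loc;
/// use alg_tools::mapping::Mapping;
///
/// // A one-dimensional kernel with σ = 1; its maximum at the origin is 4/3.
/// let f : HatConv<f64, 1> = HatConv { radius : 1.0 };
/// assert!((f.apply(Loc::from(0.0)) - 4.0/3.0).abs() < 1e-12);
/// ```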
// Hence
// $$
//     (h\*h)'(y) =
//     \begin{cases}
//         2 (y+1)^2 & -1<y\leq -\frac{1}{2}, \\\\
//         -6 y^2-4 y & -\frac{1}{2}<y\leq 0, \\\\
//         6 y^2-4 y & 0<y<\frac{1}{2}, \\\\
//         -2 (y-1)^2 & \frac{1}{2}\leq y<1. \\\\
//     \end{cases}
// $$
// as well as
// $$
//     (h\*h)''(y) =
//     \begin{cases}
//         4 (y+1) & -1<y\leq -\frac{1}{2}, \\\\
//         -12 y-4 & -\frac{1}{2}<y\leq 0, \\\\
//         12 y-4 & 0<y<\frac{1}{2}, \\\\
//         -4 (y-1) & \frac{1}{2}\leq y<1. \\\\
//     \end{cases}
// $$
// This is maximised at y=±1/2 with value 2, and minimised at y=0 with value -4.
// Now observe that
// $$
//     [∇f(x\_1, …, x\_n)]_j = \frac{4}{σ^2} (h\*h)'(x\_j/σ) \prod\_{i ≠ j} \frac{4}{σ} (h\*h)(x\_i/σ)
// $$
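// As a quick check, the pieces of $(h\*h)'$ match at the breakpoints: at $y = -\frac{1}{2}$,
// both $2(y+1)^2$ and $-6y^2-4y$ evaluate to $\frac{1}{2}$, and at $y = \frac{1}{2}$ both
// $6y^2-4y$ and $-2(y-1)^2$ evaluate to $-\frac{1}{2}$.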
#[derive(Copy,Clone,Debug,Serialize,Eq)]
pub struct HatConv<S : Constant, const N : usize> {
    /// The parameter $σ$ of the kernel.
    pub radius : S,
}

impl<S1, S2, const N : usize> PartialEq<HatConv<S2, N>> for HatConv<S1, N>
where S1 : Constant,
      S2 : Constant<Type=S1::Type> {
    fn eq(&self, other : &HatConv<S2, N>) -> bool {
        self.radius.value() == other.radius.value()
    }
}

impl<'a, S, const N : usize> HatConv<S, N> where S : Constant {
    /// Returns the $σ$ parameter of the kernel.
    #[inline]
    pub fn radius(&self) -> S::Type {
        self.radius.value()
    }
}

impl<'a, S, const N : usize> Mapping<Loc<S::Type, N>> for HatConv<S, N>
where S : Constant {
    type Codomain = S::Type;

    #[inline]
    fn apply<I : Instance<Loc<S::Type, N>>>(&self, y : I) -> Self::Codomain {
        let σ = self.radius();
        y.cow().product_map(|x| {
            self.value_1d_σ1(x  / σ) / σ
        })
    }
}

#[replace_float_literals(S::Type::cast_from(literal))]
impl<S, const N : usize> Lipschitz<L1> for HatConv<S, N>
where S : Constant {
    type FloatType = S::Type;
    #[inline]
    fn lipschitz_factor(&self, L1 : L1) -> Option<Self::FloatType> {
        // For any ψ_i, we have
        // ∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i)
        // = [ψ_1(x_1)-ψ_1(y_1)] ∏_{i=2}^N ψ_i(x_i)
        //   + ψ_1(y_1)[ ∏_{i=2}^N ψ_i(x_i) - ∏_{i=2}^N ψ_i(y_i)]
        // = ∑_{j=1}^N [ψ_j(x_j)-ψ_j(y_j)]∏_{i > j} ψ_i(x_i) ∏_{i < j} ψ_i(y_i)
        // Thus
        // |∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i)|
        // ≤ ∑_{j=1}^N |ψ_j(x_j)-ψ_j(y_j)| ∏_{i ≠ j} \max_i |ψ_i|
        let σ = self.radius();
        let l1d = self.lipschitz_1d_σ1() / (σ*σ);
        let m1d = self.value_1d_σ1(0.0) / σ;
        Some(l1d * m1d.powi(N as i32 - 1))
    }
}

impl<S, const N : usize> Lipschitz<L2> for HatConv<S, N>
where S : Constant {
    type FloatType = S::Type;
    #[inline]
    fn lipschitz_factor(&self, L2 : L2) -> Option<Self::FloatType> {
        // An ℓ¹ Lipschitz factor also works for the ℓ² norm via ‖z‖_1 ≤ √N ‖z‖_2.
        self.lipschitz_factor(L1).map(|l1| l1 * <S::Type>::cast_from(N).sqrt())
    }
}


impl<'a, S, const N : usize> DifferentiableImpl<Loc<S::Type, N>> for HatConv<S, N>
where S : Constant {
    type Derivative = Loc<S::Type, N>;

    #[inline]
    fn differential_impl<I : Instance<Loc<S::Type, N>>>(&self, y0 : I) -> Self::Derivative {
        let y = y0.cow();
        let σ = self.radius();
        let σ2 = σ * σ;
        let vs = y.map(|x| {
            self.value_1d_σ1(x  / σ) / σ
        });
        product_differential(&*y, &vs, |x| {
            self.diff_1d_σ1(x  / σ) / σ2
        })
    }
}


#[replace_float_literals(S::Type::cast_from(literal))]
impl<'a, F : Float, S, const N : usize> Lipschitz<L2>
for Differential<'a, Loc<F, N>, HatConv<S, N>>
where S : Constant<Type=F> {
    type FloatType = F;

    #[inline]
    fn lipschitz_factor(&self, _l2 : L2) -> Option<F> {
        let h = self.base_fn();
        let σ = h.radius();
        Some(product_differential_lipschitz_factor::<F, N>(
            h.value_1d_σ1(0.0) / σ,
            h.lipschitz_1d_σ1() / (σ*σ),
            h.maxabsdiff_1d_σ1() / (σ*σ),
            h.lipschitz_diff_1d_σ1() / (σ*σ),
        ))
    }
}


#[replace_float_literals(S::Type::cast_from(literal))]
impl<'a, F : Float, S, const N : usize> HatConv<S, N>
where S : Constant<Type=F> {
    /// Computes the value of the kernel for $n=1$ with $σ=1$.
    #[inline]
    fn value_1d_σ1(&self, x : F) -> F {
        let y = x.abs();
        if y >= 1.0 {
            0.0
        } else if y > 0.5 {
            - (8.0/3.0) * (y - 1.0).powi(3)
        } else /* 0 ≤ y ≤ 0.5 */ {
            (4.0/3.0) + 8.0 * y * y * (y - 1.0)
        }
    }

    /// Computes the differential of the kernel for $n=1$ with $σ=1$.
    #[inline]
    fn diff_1d_σ1(&self, x : F) -> F {
        let y = x.abs();
        // The kernel is an even function of x, so its derivative is odd; hence the signum factor.
        x.signum() * if y >= 1.0 {
            0.0
        } else if y > 0.5 {
            - 8.0 * (y - 1.0).powi(2)
        } else /* 0 ≤ y ≤ 0.5 */ {
            (24.0 * y - 16.0) * y
        }
    }

    /// Computes the Lipschitz factor of the kernel for $n=1$ with $σ=1$.
    #[inline]
    fn lipschitz_1d_σ1(&self) -> F {
        // The maximal absolute differential 8/3 is achieved at y = ±1/3, the interior critical
        // point of (24y - 16)y; the outer branch -8(y-1)² only reaches absolute value 2 at y = ±1/2.
        8.0 / 3.0
    }

    /// Computes the maximum absolute differential of the kernel for $n=1$ with $σ=1$.
    #[inline]
    fn maxabsdiff_1d_σ1(&self) -> F {
        // As for lipschitz_1d_σ1: the maximal absolute differential 8/3 is achieved at y = ±1/3.
        8.0 / 3.0
    }

    /// Computes the second differential of the kernel for $n=1$ with $σ=1$.
    #[inline]
    #[allow(dead_code)]
    fn diff2_1d_σ1(&self, x : F) -> F {
        let y = x.abs();
        if y >= 1.0 {
            0.0
        } else if y > 0.5 {
            - 16.0 * (y - 1.0)
        } else /* 0 ≤ y ≤ 0.5 */ {
            48.0 * y - 16.0
        }
    }

    /// Computes the Lipschitz factor of the differential of the kernel for $n=1$ with $σ=1$.
    #[inline]
    fn lipschitz_diff_1d_σ1(&self) -> F {
        // Maximal absolute second differential achieved at 0 by diff2_1d_σ1 analysis
        16.0
    }
}

impl<'a, S, const N : usize> Support<S::Type, N> for HatConv<S, N>
where S : Constant {
    #[inline]
    fn support_hint(&self) -> Cube<S::Type,N> {
        let σ = self.radius();
        array_init(|| [-σ, σ]).into()
    }

    #[inline]
    fn in_support(&self, y : &Loc<S::Type,N>) -> bool {
        let σ = self.radius();
        y.iter().all(|x| x.abs() <= σ)
    }

    #[inline]
    fn bisection_hint(&self, cube : &Cube<S::Type, N>) -> [Option<S::Type>; N] {
        let σ = self.radius();
        cube.map(|c, d| symmetric_peak_hint(σ, c, d))
    }
}

#[replace_float_literals(S::Type::cast_from(literal))]
impl<S, const N : usize> GlobalAnalysis<S::Type, Bounds<S::Type>>  for HatConv<S, N>
where S : Constant {
    #[inline]
    fn global_analysis(&self) -> Bounds<S::Type> {
        Bounds(0.0, self.apply(Loc::ORIGIN))
    }
}

impl<S, const N : usize> LocalAnalysis<S::Type, Bounds<S::Type>, N>  for HatConv<S, N>
where S : Constant {
    #[inline]
    fn local_analysis(&self, cube : &Cube<S::Type, N>) -> Bounds<S::Type> {
        // The function is maximised/minimised where the 2-norm is minimised/maximised.
        let lower = self.apply(cube.maxnorm_point());
        let upper = self.apply(cube.minnorm_point());
        Bounds(lower, upper)
    }
}

#[replace_float_literals(C::Type::cast_from(literal))]
impl<'a, C : Constant, const N : usize> Norm<C::Type, L1>
for HatConv<C, N> {
    #[inline]
    fn norm(&self, _ : L1) -> C::Type {
        1.0
    }
}

#[replace_float_literals(C::Type::cast_from(literal))]
impl<'a, C : Constant, const N : usize> Norm<C::Type, Linfinity>
for HatConv<C, N> {
    #[inline]
    fn norm(&self, _ : Linfinity) -> C::Type {
        self.bounds().upper()
    }
}

#[replace_float_literals(F::cast_from(literal))]
impl<'a, F : Float, R, C, const N : usize> Mapping<Loc<F, N>>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    type Codomain = F;

    #[inline]
    fn apply<I : Instance<Loc<F, N>>>(&self, y : I) -> F {
        let Convolution(ref ind, ref hatconv) = self;
        let β = ind.r.value();
        let σ = hatconv.radius();

        // This is just a product of one-dimensional versions
        y.cow().product_map(|x| {
            // With $u_σ(x) = u_1(x/σ)/σ$ the normalised hat convolution
            // we have
            // $$
            //      [χ_{-β,β} * u_σ](x)
            //      = ∫_{x-β}^{x+β} u_σ(z) d z
            //      = (1/σ)∫_{x-β}^{x+β} u_1(z/σ) d z
            //      = ∫_{(x-β)/σ}^{(x+β)/σ} u_1(z) d z
            //      = [χ_{-β/σ, β/σ} * u_1](x/σ)
            // $$
            self.value_1d_σ1(x / σ, β / σ)
        })
    }
}

#[replace_float_literals(F::cast_from(literal))]
impl<'a, F : Float, R, C, const N : usize> DifferentiableImpl<Loc<F, N>>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    type Derivative = Loc<F, N>;

    #[inline]
    fn differential_impl<I : Instance<Loc<F, N>>>(&self, y0 : I) -> Loc<F, N> {
        let y = y0.cow();
        let Convolution(ref ind, ref hatconv) = self;
        let β = ind.r.value();
        let σ = hatconv.radius();
        let σ2 = σ * σ;

        let vs = y.map(|x| {
            self.value_1d_σ1(x / σ, β / σ)
        });
        product_differential(&*y, &vs, |x| {
            self.diff_1d_σ1(x  / σ, β / σ) / σ2
        })
    }
}


/// Integrate a function with support $[c, d]$ and antiderivative $f$ over $[a, b] ∩ [c, d]$.
/// If $b > d$, also add $g()$, which is expected to handle the part of $[a, b]$ to the right of $d$.
#[inline]
#[replace_float_literals(F::cast_from(literal))]
fn i<F: Float>(a : F, b : F, c : F, d : F, f : impl Fn(F) -> F,
                g : impl Fn() -> F) -> F {
    if b < c {
        0.0
    } else if b <= d {
        if a <= c {
            f(b) - f(c)
        } else {
            f(b) - f(a)
        }
    } else /* b > d */ {
        g() + if a <= c {
            f(d) - f(c)
        } else if a < d {
            f(d) - f(a)
        } else {
            0.0
        }
    }
}
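// For example, `value_1d_σ1` below chains four calls to `i`, one per polynomial piece of the
// integrand: the outermost call covers $[-1, -\frac{1}{2}]$, and each `g` continues with the next
// piece to the right. With $a = -2$ and $b = 2$ the whole support is covered, and the chain
// (after the $4/6$ normalisation in `value_1d_σ1`) returns the total mass $1$ of the kernel.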

#[replace_float_literals(F::cast_from(literal))]
impl<F : Float, C, R, const N : usize> Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {
      
    /// Calculates the value of the 1D hat convolution further convolved by an interval indicator.
    /// As both functions are piecewise polynomial, this is implemented by explicit integration
    /// over the subintervals on which the hat convolution is polynomial, using easily formed
    /// antiderivatives.
    #[inline]
    pub fn value_1d_σ1(&self, x : F, β : F) -> F {
        // The integration interval
        let a = x - β;
        let b = x + β;

        #[inline]
        fn pow4<F : Float>(x : F) -> F {
            let y = x * x;
            y * y
        }
        
        // Observe the factor 1/6 at the front from the antiderivatives below.
        // The factor 4 is from normalisation of the original function.
        (4.0/6.0) * i(a, b, -1.0, -0.5,
                // (2/3) (y+1)^3  on  -1 < y ≤ -1/2
                // The antiderivative is  (2/12)(y+1)^4 = (1/6)(y+1)^4
                |y| pow4(y+1.0),
                || i(a, b, -0.5, 0.0,
                    // -2 y^3 - 2 y^2 + 1/3  on  -1/2 < y ≤ 0
                    // The antiderivative is -1/2 y^4 - 2/3 y^3 + 1/3 y
                    |y| y*(-y*y*(y*3.0 + 4.0) + 2.0),
                    || i(a, b, 0.0, 0.5,
                            // 2 y^3 - 2 y^2 + 1/3 on 0 < y < 1/2
                            // The antiderivative is 1/2 y^4 - 2/3 y^3 + 1/3 y
                            |y| y*(y*y*(y*3.0 - 4.0) + 2.0),
                            || i(a, b, 0.5, 1.0,
                                // -(2/3) (y-1)^3  on  1/2 < y ≤ 1
                                // The antiderivative is  -(2/12)(y-1)^4 = -(1/6)(y-1)^4
                                |y| -pow4(y-1.0),
                                || 0.0
                            )
                    )
                )
        )
    }

    /// Calculates the derivative of the 1D hat convolution further convolved by an interval
    /// indicator. The implementation is similar to [`Self::value_1d_σ1`], using the fact that
    /// $(θ * ψ)' = θ * ψ'$.
    #[inline]
    pub fn diff_1d_σ1(&self, x : F, β : F) -> F {
        // The integration interval
        let a = x - β;
        let b = x + β;

        // The factor 4 is from normalisation of the original function.
        // The pieces of (h*h) listed below serve directly as antiderivatives of (h*h)'.
        4.0 * i(a, b, -1.0, -0.5,
                // (2/3) (y+1)^3  on  -1 < y ≤ -1/2
                |y| (2.0/3.0) * (y + 1.0).powi(3),
                || i(a, b, -0.5, 0.0,
                    // -2 y^3 - 2 y^2 + 1/3  on  -1/2 < y ≤ 0
                    |y| -2.0*(y + 1.0) * y * y + (1.0/3.0),
                    || i(a, b, 0.0, 0.5,
                            // 2 y^3 - 2 y^2 + 1/3 on 0 < y < 1/2
                            |y| 2.0*(y - 1.0) * y * y + (1.0/3.0),
                            || i(a, b, 0.5, 1.0,
                                // -(2/3) (y-1)^3  on  1/2 < y ≤ 1
                                |y| -(2.0/3.0) * (y - 1.0).powi(3),
                                || 0.0
                            )
                    )
                )
        )
    }
}

/*
impl<'a, F : Float, R, C, const N : usize> Lipschitz<L2>
for Differential<Loc<F, N>, Convolution<CubeIndicator<R, N>, HatConv<C, N>>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    type FloatType = F;

    #[inline]
    fn lipschitz_factor(&self, _l2 : L2) -> Option<F> {
        dbg!("unimplemented");
        None
    }
}
*/

impl<F : Float, R, C, const N : usize>
Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    /// Returns the radius $β + σ$ of the support: the support of a convolution is the
    /// Minkowski sum of the supports of its factors.
    #[inline]
    fn get_r(&self) -> F {
        let Convolution(ref ind, ref hatconv) = self;
        ind.r.value() + hatconv.radius()
    }
}

impl<F : Float, R, C, const N : usize> Support<F, N>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {
    
    #[inline]
    fn support_hint(&self) -> Cube<F, N> {
        let r = self.get_r();
        array_init(|| [-r, r]).into()
    }

    #[inline]
    fn in_support(&self, y : &Loc<F, N>) -> bool {
        let r = self.get_r();
        y.iter().all(|x| x.abs() <= r)
    }

    #[inline]
    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
        // It is not difficult to verify that [`HatConv`] is C^2.
        // Therefore, so is [`Convolution<CubeIndicator<R, N>, HatConv<C, N>>`], so a finer
        // subdivision hint than this is not particularly useful.
        let r = self.get_r();
        cube.map(|c, d| symmetric_peak_hint(r, c, d))
    }
}

impl<F : Float, R, C, const N : usize> GlobalAnalysis<F, Bounds<F>>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {
    #[inline]
    fn global_analysis(&self) -> Bounds<F> {
        Bounds(F::ZERO, self.apply(Loc::ORIGIN))
    }
}

impl<F : Float, R, C, const N : usize> LocalAnalysis<F, Bounds<F>, N>
for Convolution<CubeIndicator<R, N>, HatConv<C,  N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {
    #[inline]
    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
        // The function is maximised/minimised where the absolute value is minimised/maximised.
        let lower = self.apply(cube.maxnorm_point());
        let upper = self.apply(cube.minnorm_point());
        //assert!(upper >= lower);
        if upper < lower {
            let Convolution(ref ind, ref hatconv) = self;
            let β = ind.r.value();
            let σ = hatconv.radius();
            eprintln!("WARNING: Hat convolution {β} {σ} upper bound {upper} < lower bound {lower} on {cube:?} with min-norm point {:?} and max-norm point {:?}", cube.minnorm_point(), cube.maxnorm_point());
            Bounds(upper, lower)
        } else {
            Bounds(lower, upper)
        }
    }
}


/// This [`BoundedBy`] implementation bounds $u * u$ by $(ψ * ψ) u$ for $u$ a hat convolution and
/// $ψ = χ_{[-a,a]^N}$ for some $a>0$.
///
/// This is based on the general formula for bounding $(uχ) * (uχ)$ by $(ψ * ψ) u$,
/// where we take $ψ = χ_{[-a,a]^N}$ and $χ = χ_{[-σ,σ]^N}$ for $σ$ the width of the hat
/// convolution.
#[replace_float_literals(F::cast_from(literal))]
impl<F, C, S, const N : usize>
BoundedBy<F, SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>>
for AutoConvolution<HatConv<C, N>>
where F : Float,
      C : Constant<Type=F>,
      S : Constant<Type=F> {

    fn bounding_factor(
        &self,
        kernel : &SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>
    ) -> Option<F> {
        // We use the comparison $ℱ[𝒜(ψ v)] ≤ L_1 ℱ[𝒜(ψ)u] ⟺ I_{v̂} v̂ ≤ L_1 û$ with
        // $ψ = χ_{[-w, w]}$ satisfying $supp v ⊂ [-w, w]$, i.e. $w ≥ σ$. Here $v̂ = ℱ[v]$ and
        // $I_{v̂} = ∫ v̂ d ξ$. For this relationship to be valid, we need $v̂ ≥ 0$, which is guaranteed
        // by $v = u_σ$ being an autoconvolution. With $u = v$, therefore $L_1 = I_{v̂} = u_σ(0) = 4/(3σ)$
        // in each dimension.
        let SupportProductFirst(AutoConvolution(ref ind), hatconv2) = kernel;
        let σ = self.0.radius();
        let a = ind.r.value();
        let bounding_1d = 4.0 / (3.0 * σ);

        // Check that the cutting indicator of the comparison
        // `SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>`
        // is wide enough, and that the hat convolution has the same radius as ours.
        if σ <= a && hatconv2 == &self.0 {
            Some(bounding_1d.powi(N as i32))
        } else {
            // We cannot compare
            None
        }
    }
}

/// This [`BoundedBy`] implementation bounds $u * u$ by $u$ for $u$ a hat convolution.
///
/// This is based on Example 3.3 in the manuscript.
#[replace_float_literals(F::cast_from(literal))]
impl<F, C, const N : usize>
BoundedBy<F, HatConv<C, N>>
for AutoConvolution<HatConv<C, N>>
where F : Float,
      C : Constant<Type=F> {

    /// Returns an estimate of the factor $L_1$.
    ///
    /// Returns `None` if `kernel` does not have the same width as the hat convolution that
    /// `self` is based on.
    fn bounding_factor(
        &self,
        kernel : &HatConv<C, N>
    ) -> Option<F> {
        if kernel == &self.0 {
            Some(1.0)
        } else {
            // We cannot compare
            None
        }
    }
}

#[cfg(test)]
mod tests {
    use alg_tools::lingrid::linspace;
    use alg_tools::mapping::Mapping;
    use alg_tools::norms::Linfinity;
    use alg_tools::loc::Loc;
    use crate::kernels::{BallIndicator, CubeIndicator, Convolution};
    use super::HatConv;

    /// Tests numerically that [`HatConv<f64, 1>`] is monotone.
    #[test]
    fn hatconv_monotonicity() {
        let grid = linspace(0.0, 1.0, 100000);
        let hatconv : HatConv<f64, 1> = HatConv{ radius : 1.0 };
        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
        let first = vals.next().unwrap();
        let monotone = vals.fold((first, true), |(prev, ok), t| (t, ok && prev >= t)).1;
        assert!(monotone);
    }
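
    /// Numerically checks continuity of the private 1D helpers `value_1d_σ1` and `diff_1d_σ1`
    /// of [`HatConv`] at the piece boundaries $y = \frac{1}{2}$ and $y = 1$; a basic sanity
    /// check of the piecewise formulas.
    #[test]
    fn hatconv_1d_piecewise_continuity() {
        let hatconv : HatConv<f64, 1> = HatConv{ radius : 1.0 };
        let ε = 1e-9;
        for &y in [0.5, 1.0].iter() {
            assert!((hatconv.value_1d_σ1(y - ε) - hatconv.value_1d_σ1(y + ε)).abs() < 1e-6);
            assert!((hatconv.diff_1d_σ1(y - ε) - hatconv.diff_1d_σ1(y + ε)).abs() < 1e-6);
        }
    }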

    /// Tests numerically that [`Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>`] is monotone.
    #[test]
    fn convolution_cubeind_hatconv_monotonicity() {
        let grid = linspace(-2.0, 0.0, 100000);
        let hatconv : Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>
            = Convolution(BallIndicator { r : 0.5, exponent : Linfinity },
                          HatConv{ radius : 1.0 } );
        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
        let first = vals.next().unwrap();
        let monotone = vals.fold((first, true), |(prev, ok), t| (t, ok && prev <= t)).1;
        assert!(monotone);

        let grid = linspace(0.0, 2.0, 100000);
        let hatconv : Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>
            = Convolution(BallIndicator { r : 0.5, exponent : Linfinity },
                          HatConv{ radius : 1.0 } );
        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
        let first = vals.next().unwrap();
        let monotone = vals.fold((first, true), |(prev, ok), t| (t, ok && prev >= t)).1;
        assert!(monotone);
    }
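
    /// Numerically checks the normalisation $∫ f d x = 1$ stated in the [`HatConv`]
    /// documentation, for the one-dimensional kernel with $σ = 1$; a plain Riemann sum over
    /// the support $[-1, 1]$ is assumed to be accurate enough here.
    #[test]
    fn hatconv_normalisation() {
        let hatconv : HatConv<f64, 1> = HatConv{ radius : 1.0 };
        // Riemann sum over [-1, 1] with 100000 grid points (spacing ≈ 2/99999).
        let h = 2.0 / 99999.0;
        let integral : f64 = linspace(-1.0, 1.0, 100000)
            .into_iter()
            .map(|t| hatconv.apply(Loc::from(t)) * h)
            .sum();
        assert!((integral - 1.0).abs() < 1e-3);
    }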
}
