src/kernels/base.rs

//! Things for constructing new kernels from component kernels and traits for analysing them
use numeric_literals::replace_float_literals;
use serde::Serialize;

use alg_tools::bisection_tree::Support;
use alg_tools::bounds::{Bounded, Bounds, GlobalAnalysis, LocalAnalysis};
use alg_tools::instance::{Instance, Space};
use alg_tools::loc::Loc;
use alg_tools::mapping::{
    DifferentiableImpl, DifferentiableMapping, LipschitzDifferentiableImpl, Mapping,
};
use alg_tools::maputil::{array_init, map1_indexed, map2};
use alg_tools::norms::*;
use alg_tools::sets::Cube;
use alg_tools::sets::SetOrd;
use alg_tools::types::*;
use anyhow::anyhow;

use crate::fourier::Fourier;
use crate::types::*;

/// Representation of the product of two kernels.
///
/// The kernels typically implement [`Support`] and [`Mapping`].
///
/// The implementation of [`Support`] only uses the support information of the first parameter!
#[derive(Copy, Clone, Serialize, Debug)]
pub struct SupportProductFirst<A, B>(
    /// First kernel
    pub A,
    /// Second kernel
    pub B,
);

impl<A, B, F: Float, const N: usize> Mapping<Loc<N, F>> for SupportProductFirst<A, B>
where
    A: Mapping<Loc<N, F>, Codomain = F>,
    B: Mapping<Loc<N, F>, Codomain = F>,
{
    type Codomain = F;

    #[inline]
    fn apply<I: Instance<Loc<N, F>>>(&self, x: I) -> Self::Codomain {
        x.eval_ref(|r| self.0.apply(r)) * self.1.apply(x)
    }
}

impl<A, B, F: Float, const N: usize> DifferentiableImpl<Loc<N, F>> for SupportProductFirst<A, B>
where
    A: DifferentiableMapping<Loc<N, F>, DerivativeDomain = Loc<N, F>, Codomain = F>,
    B: DifferentiableMapping<Loc<N, F>, DerivativeDomain = Loc<N, F>, Codomain = F>,
{
    type Derivative = Loc<N, F>;

    #[inline]
    fn differential_impl<I: Instance<Loc<N, F>>>(&self, x: I) -> Self::Derivative {
        x.eval_ref(|xr| {
            self.0.differential(xr) * self.1.apply(xr) + self.1.differential(xr) * self.0.apply(xr)
        })
    }
}

impl<A, B, M: Copy, F: Float> Lipschitz<M> for SupportProductFirst<A, B>
where
    A: Lipschitz<M, FloatType = F> + Bounded<F>,
    B: Lipschitz<M, FloatType = F> + Bounded<F>,
{
    type FloatType = F;
    #[inline]
    fn lipschitz_factor(&self, m: M) -> DynResult<F> {
        // f(x)g(x) - f(y)g(y) = f(x)[g(x)-g(y)] - [f(y)-f(x)]g(y)
        let &SupportProductFirst(ref f, ref g) = self;
        Ok(f.lipschitz_factor(m)? * g.bounds().uniform()
            + g.lipschitz_factor(m)? * f.bounds().uniform())
    }
}

impl<'a, A, B, M: Copy, Domain, F: Float> LipschitzDifferentiableImpl<Domain, M>
    for SupportProductFirst<A, B>
where
    Domain: Space,
    Self: DifferentiableImpl<Domain>,
    A: DifferentiableMapping<Domain> + Lipschitz<M, FloatType = F> + Bounded<F>,
    B: DifferentiableMapping<Domain> + Lipschitz<M, FloatType = F> + Bounded<F>,
    SupportProductFirst<A, B>: DifferentiableMapping<Domain>,
    for<'b> A::Differential<'b>: Lipschitz<M, FloatType = F> + NormBounded<L2, FloatType = F>,
    for<'b> B::Differential<'b>: Lipschitz<M, FloatType = F> + NormBounded<L2, FloatType = F>,
{
    type FloatType = F;
    #[inline]
    fn diff_lipschitz_factor(&self, m: M) -> DynResult<F> {
        // ∇[fg] = f∇g + g∇f
        // ⟹ ∇[fg](x) - ∇[fg](y) = f(x)∇g(x) + g(x)∇f(x) - f(y)∇g(y) - g(y)∇f(y)
        //                        = f(x)[∇g(x)-∇g(y)] + g(x)[∇f(x)-∇f(y)]
        //                          + [f(x)-f(y)]∇g(y) + [g(x)-g(y)]∇f(y)
        let &SupportProductFirst(ref f, ref g) = self;
        let (df, dg) = (f.diff_ref(), g.diff_ref());
        Ok([
            df.lipschitz_factor(m)? * g.bounds().uniform(),
            dg.lipschitz_factor(m)? * f.bounds().uniform(),
            f.lipschitz_factor(m)? * dg.norm_bound(L2),
            g.lipschitz_factor(m)? * df.norm_bound(L2),
        ]
        .into_iter()
        .sum())
    }
}

impl<'a, A, B, F: Float, const N: usize> Support<N, F> for SupportProductFirst<A, B>
where
    A: Support<N, F>,
    B: Support<N, F>,
{
    #[inline]
    fn support_hint(&self) -> Cube<N, F> {
        self.0.support_hint()
    }

    #[inline]
    fn in_support(&self, x: &Loc<N, F>) -> bool {
        self.0.in_support(x)
    }

    #[inline]
    fn bisection_hint(&self, cube: &Cube<N, F>) -> [Option<F>; N] {
        self.0.bisection_hint(cube)
    }
}

impl<'a, A, B, F: Float> GlobalAnalysis<F, Bounds<F>> for SupportProductFirst<A, B>
where
    A: GlobalAnalysis<F, Bounds<F>>,
    B: GlobalAnalysis<F, Bounds<F>>,
{
    #[inline]
    fn global_analysis(&self) -> Bounds<F> {
        self.0.global_analysis() * self.1.global_analysis()
    }
}

impl<'a, A, B, F: Float, const N: usize> LocalAnalysis<F, Bounds<F>, N>
    for SupportProductFirst<A, B>
where
    A: LocalAnalysis<F, Bounds<F>, N>,
    B: LocalAnalysis<F, Bounds<F>, N>,
{
    #[inline]
    fn local_analysis(&self, cube: &Cube<N, F>) -> Bounds<F> {
        self.0.local_analysis(cube) * self.1.local_analysis(cube)
    }
}

/// Representation of the sum of two kernels
///
/// The kernels typically implement [`Support`] and [`Mapping`].
///
/// The implementation of [`Support`] combines the supports of both kernels and requires
/// `Cube<N, F>` to implement [`SetOrd`].
#[derive(Copy, Clone, Serialize, Debug)]
pub struct SupportSum<A, B>(
    /// First kernel
    pub A,
    /// Second kernel
    pub B,
);

impl<'a, A, B, F: Float, const N: usize> Mapping<Loc<N, F>> for SupportSum<A, B>
where
    A: Mapping<Loc<N, F>, Codomain = F>,
    B: Mapping<Loc<N, F>, Codomain = F>,
{
    type Codomain = F;

    #[inline]
    fn apply<I: Instance<Loc<N, F>>>(&self, x: I) -> Self::Codomain {
        x.eval_ref(|r| self.0.apply(r)) + self.1.apply(x)
    }
}

impl<'a, A, B, F: Float, const N: usize> DifferentiableImpl<Loc<N, F>> for SupportSum<A, B>
where
    A: DifferentiableMapping<Loc<N, F>, DerivativeDomain = Loc<N, F>>,
    B: DifferentiableMapping<Loc<N, F>, DerivativeDomain = Loc<N, F>>,
{
    type Derivative = Loc<N, F>;

    #[inline]
    fn differential_impl<I: Instance<Loc<N, F>>>(&self, x: I) -> Self::Derivative {
        x.eval_ref(|r| self.0.differential(r)) + self.1.differential(x)
    }
}

impl<'a, A, B, F: Float, const N: usize> Support<N, F> for SupportSum<A, B>
where
    A: Support<N, F>,
    B: Support<N, F>,
    Cube<N, F>: SetOrd,
{
    #[inline]
    fn support_hint(&self) -> Cube<N, F> {
        self.0.support_hint().common(&self.1.support_hint())
    }

    #[inline]
    fn in_support(&self, x: &Loc<N, F>) -> bool {
        self.0.in_support(x) || self.1.in_support(x)
    }

    #[inline]
    fn bisection_hint(&self, cube: &Cube<N, F>) -> [Option<F>; N] {
        map2(
            self.0.bisection_hint(cube),
            self.1.bisection_hint(cube),
            |a, b| a.or(b),
        )
    }
}

impl<'a, A, B, F: Float> GlobalAnalysis<F, Bounds<F>> for SupportSum<A, B>
where
    A: GlobalAnalysis<F, Bounds<F>>,
    B: GlobalAnalysis<F, Bounds<F>>,
{
    #[inline]
    fn global_analysis(&self) -> Bounds<F> {
        self.0.global_analysis() + self.1.global_analysis()
    }
}

impl<'a, A, B, F: Float, const N: usize> LocalAnalysis<F, Bounds<F>, N> for SupportSum<A, B>
where
    A: LocalAnalysis<F, Bounds<F>, N>,
    B: LocalAnalysis<F, Bounds<F>, N>,
    Cube<N, F>: SetOrd,
{
    #[inline]
    fn local_analysis(&self, cube: &Cube<N, F>) -> Bounds<F> {
        self.0.local_analysis(cube) + self.1.local_analysis(cube)
    }
}

impl<F: Float, M: Copy, A, B> Lipschitz<M> for SupportSum<A, B>
where
    A: Lipschitz<M, FloatType = F>,
    B: Lipschitz<M, FloatType = F>,
{
    type FloatType = F;

    fn lipschitz_factor(&self, m: M) -> DynResult<F> {
        // |[f+g](x) - [f+g](y)| ≤ |f(x)-f(y)| + |g(x)-g(y)| ≤ (L_f + L_g)|x-y|.
        Ok(self.0.lipschitz_factor(m)? + self.1.lipschitz_factor(m)?)
    }
}

impl<'b, F: Float, M: Copy, A, B, Domain> LipschitzDifferentiableImpl<Domain, M>
    for SupportSum<A, B>
where
    Domain: Space,
    Self: DifferentiableImpl<Domain>,
    A: DifferentiableMapping<Domain, Codomain = F>,
    B: DifferentiableMapping<Domain, Codomain = F>,
    SupportSum<A, B>: DifferentiableMapping<Domain, Codomain = F>,
    for<'a> A::Differential<'a>: Lipschitz<M, FloatType = F>,
    for<'a> B::Differential<'a>: Lipschitz<M, FloatType = F>,
{
    type FloatType = F;

    fn diff_lipschitz_factor(&self, m: M) -> DynResult<F> {
        Ok(self.0.diff_ref().lipschitz_factor(m)? + self.1.diff_ref().lipschitz_factor(m)?)
    }
}

/// Representation of the convolution of two kernels.
///
/// The kernels typically implement [`Support`] and [`Mapping`].
///
/// Trait implementations have to be on a case-by-case basis.
#[derive(Copy, Clone, Serialize, Debug, Eq, PartialEq)]
pub struct Convolution<A, B>(
    /// First kernel
    pub A,
    /// Second kernel
    pub B,
);

impl<F: Float, M, A, B> Lipschitz<M> for Convolution<A, B>
where
    A: Norm<L1, F>,
    B: Lipschitz<M, FloatType = F>,
{
    type FloatType = F;

    fn lipschitz_factor(&self, m: M) -> DynResult<F> {
        // For [f * g](x) = ∫ f(x-y)g(y) dy we have
        // [f * g](x) - [f * g](z) = ∫ [f(x-y)-f(z-y)]g(y) dy.
        // Hence |[f * g](x) - [f * g](z)| ≤ ∫ |f(x-y)-f(z-y)| |g(y)| dy
        //                                 ≤ L|x-z| ∫ |g(y)| dy,
        // where L is the Lipschitz factor of f.
        Ok(self.1.lipschitz_factor(m)? * self.0.norm(L1))
    }
}

impl<'b, F: Float, M, A, B, Domain> LipschitzDifferentiableImpl<Domain, M> for Convolution<A, B>
where
    Domain: Space,
    Self: DifferentiableImpl<Domain>,
    A: Norm<L1, F>,
    Convolution<A, B>: DifferentiableMapping<Domain, Codomain = F>,
    B: DifferentiableMapping<Domain, Codomain = F>,
    for<'a> B::Differential<'a>: Lipschitz<M, FloatType = F>,
{
    type FloatType = F;

    fn diff_lipschitz_factor(&self, m: M) -> DynResult<F> {
        // For [f * g](x) = ∫ f(x-y)g(y) dy we have
        // ∇[f * g](x) - ∇[f * g](z) = ∫ [∇f(x-y)-∇f(z-y)]g(y) dy.
        // Hence |∇[f * g](x) - ∇[f * g](z)| ≤ ∫ |∇f(x-y)-∇f(z-y)| |g(y)| dy
        //                                   ≤ L|x-z| ∫ |g(y)| dy,
        // where L is the Lipschitz factor of ∇f.
        self.1
            .diff_ref()
            .lipschitz_factor(m)
            .map(|l| l * self.0.norm(L1))
    }
}

/// Representation of the autoconvolution of a kernel.
///
/// The kernel typically implements [`Support`] and [`Mapping`].
///
/// Trait implementations have to be on a case-by-case basis.
#[derive(Copy, Clone, Serialize, Debug, Eq, PartialEq)]
pub struct AutoConvolution<A>(
    /// The kernel to be autoconvolved
    pub A,
);

impl<F: Float, M, C> Lipschitz<M> for AutoConvolution<C>
where
    C: Lipschitz<M, FloatType = F> + Norm<L1, F>,
{
    type FloatType = F;

    fn lipschitz_factor(&self, m: M) -> DynResult<F> {
        self.0.lipschitz_factor(m).map(|l| l * self.0.norm(L1))
    }
}

impl<'b, F: Float, M, C, Domain> LipschitzDifferentiableImpl<Domain, M> for AutoConvolution<C>
where
    Domain: Space,
    Self: DifferentiableImpl<Domain>,
    C: Norm<L1, F> + DifferentiableMapping<Domain, Codomain = F>,
    AutoConvolution<C>: DifferentiableMapping<Domain, Codomain = F>,
    for<'a> C::Differential<'a>: Lipschitz<M, FloatType = F>,
{
    type FloatType = F;

    fn diff_lipschitz_factor(&self, m: M) -> DynResult<F> {
        self.0
            .diff_ref()
            .lipschitz_factor(m)
            .map(|l| l * self.0.norm(L1))
    }
}

/// Representation of a multi-dimensional product of a one-dimensional kernel.
///
/// For $G: ℝ → ℝ$, this is the function $F(x\_1, …, x\_n) := \prod_{i=1}^n G(x\_i)$.
/// The kernel $G$ typically implements [`Support`] and [`Mapping`]
/// on [`Loc<1, F>`]. Then the product implements them on [`Loc<N, F>`].
#[derive(Copy, Clone, Serialize, Debug, Eq, PartialEq)]
#[allow(dead_code)]
struct UniformProduct<G, const N: usize>(
    /// The one-dimensional kernel
    G,
);

impl<'a, G, F: Float, const N: usize> Mapping<Loc<N, F>> for UniformProduct<G, N>
where
    G: Mapping<Loc<1, F>, Codomain = F>,
{
    type Codomain = F;

    #[inline]
    fn apply<I: Instance<Loc<N, F>>>(&self, x: I) -> F {
        x.decompose()
            .iter()
            .map(|&y| self.0.apply(Loc([y])))
            .product()
    }
}

impl<'a, G, F: Float, const N: usize> DifferentiableImpl<Loc<N, F>> for UniformProduct<G, N>
where
    G: DifferentiableMapping<Loc<1, F>, DerivativeDomain = F, Codomain = F>,
{
    type Derivative = Loc<N, F>;

    #[inline]
    fn differential_impl<I: Instance<Loc<N, F>>>(&self, x0: I) -> Loc<N, F> {
        x0.eval(|x| {
            let vs = x.map(|y| self.0.apply(Loc([y])));
            product_differential(x, &vs, |y| self.0.differential(Loc([y])))
        })
    }
}

/// Helper function to calculate the differential of $f(x)=∏_{i=1}^N g(x_i)$.
///
/// The vector `x` is the location, `vs` consists of the values `g(x_i)`, and
/// `gd` calculates the derivative `g'`.
#[inline]
pub(crate) fn product_differential<F: Float, G: Fn(F) -> F, const N: usize>(
    x: &Loc<N, F>,
    vs: &Loc<N, F>,
    gd: G,
) -> Loc<N, F> {
    map1_indexed(x, |i, &y| {
        gd(y)
            * vs.iter()
                .zip(0..)
                .filter_map(|(v, j)| (j != i).then_some(*v))
                .product()
    })
    .into()
}
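
// A minimal sanity check of `product_differential` (a sketch, not part of the original test
// suite): with g(y) = y² in two dimensions, f(x₁, x₂) = x₁²x₂² has gradient (2x₁x₂², 2x₁²x₂).
#[cfg(test)]
mod product_differential_test {
    use super::*;

    #[test]
    fn gradient_of_coordinate_squares() {
        let x = Loc([2.0_f64, 3.0]);
        // The values g(xᵢ) = xᵢ² at the evaluation point.
        let vs = Loc([4.0, 9.0]);
        // g′(y) = 2y, so the expected gradient is (2·2·9, 2·3·4) = (36, 24).
        let d = product_differential(&x, &vs, |y| 2.0 * y);
        assert!(d
            .iter()
            .zip([36.0, 24.0])
            .all(|(&di, ei)| (di - ei).abs() < 1e-12));
    }
}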

/// Helper function to calculate the Lipschitz factor of $∇f$ for $f(x)=∏_{i=1}^N g(x_i)$.
///
/// The parameter `bound` is a bound on $|g|_∞$, `lip` is a Lipschitz factor for $g$,
/// `dbound` is a bound on $|∇g|_∞$, and `dlip` a Lipschitz factor for $∇g$.
#[inline]
pub(crate) fn product_differential_lipschitz_factor<F: Float, const N: usize>(
    bound: F,
    lip: F,
    dbound: F,
    dlip: F,
) -> DynResult<F> {
    // For arbitrary ψ(x) = ∏_{i=1}^n ψ_i(x_i), we have the telescoping identity
    // ψ(x) - ψ(y) = ∑_i [ψ_i(x_i)-ψ_i(y_i)] ∏_{j < i} ψ_j(x_j) ∏_{j > i} ψ_j(y_j).
    // In particular, if ψ_i = g for all i, we have
    // |ψ(x) - ψ(y)| ≤ ∑_i L_g M_g^{n-1} |x_i-y_i|, where L_g is the Lipschitz factor of g,
    // and M_g a bound on |g|.
    //
    // For the gradient, [∇ψ(x)]_i = ψ_i'(x_i) ∏_{j ≠ i} ψ_j(x_j), whence
    // [∇ψ(x)]_i - [∇ψ(y)]_i = [ψ_i'(x_i) - ψ_i'(y_i)] ∏_{j ≠ i} ψ_j(x_j)
    //                         + ψ_i'(y_i) [∏_{j ≠ i} ψ_j(x_j) - ∏_{j ≠ i} ψ_j(y_j)],
    // where the last difference of products telescopes as above over the n-1 factors j ≠ i.
    // With ψ_i = g for all i, it follows that
    // |∇ψ(x) - ∇ψ(y)| ≤ ∑_i [L_{∇g} M_g^{n-1} + ∑_{k ≠ i} L_g M_g^{n-2} M_{∇g}] |x-y|
    //                 = n [L_{∇g} M_g^{n-1} + (n-1) L_g M_g^{n-2} M_{∇g}] |x-y|
    //                 = n M_g^{n-2} [L_{∇g} M_g + (n-1) L_g M_{∇g}] |x-y|.
    if N >= 2 {
        Ok(F::cast_from(N)
            * bound.powi((N - 2) as i32)
            * (dlip * bound + F::cast_from(N - 1) * lip * dbound))
    } else if N == 1 {
        Ok(dlip)
    } else {
        Err(anyhow!("Invalid dimension"))
    }
}
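
// A minimal numerical check of `product_differential_lipschitz_factor` (a sketch, not part of
// the original test suite). For N = 3 and (M_g, L_g, M_{∇g}, L_{∇g}) = (2, 3, 5, 7), the formula
// n M_g^{n-2} [L_{∇g} M_g + (n-1) L_g M_{∇g}] gives 3·2·(7·2 + 2·3·5) = 264; for N = 1 it
// reduces to L_{∇g}.
#[cfg(test)]
mod product_differential_lipschitz_test {
    use super::*;

    #[test]
    fn product_gradient_lipschitz_factor() {
        let l3 = product_differential_lipschitz_factor::<f64, 3>(2.0, 3.0, 5.0, 7.0).unwrap();
        assert_eq!(l3, 264.0);
        let l1 = product_differential_lipschitz_factor::<f64, 1>(2.0, 3.0, 5.0, 7.0).unwrap();
        assert_eq!(l1, 7.0);
    }
}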

impl<G, F: Float, const N: usize> Support<N, F> for UniformProduct<G, N>
where
    G: Support<1, F>,
{
    #[inline]
    fn support_hint(&self) -> Cube<N, F> {
        let [a]: [[F; 2]; 1] = self.0.support_hint().into();
        array_init(|| a.clone()).into()
    }

    #[inline]
    fn in_support(&self, x: &Loc<N, F>) -> bool {
        x.iter().all(|&y| self.0.in_support(&Loc([y])))
    }

    #[inline]
    fn bisection_hint(&self, cube: &Cube<N, F>) -> [Option<F>; N] {
        cube.map(|a, b| {
            let [h] = self.0.bisection_hint(&[[a, b]].into());
            h
        })
    }
}

impl<G, F: Float, const N: usize> GlobalAnalysis<F, Bounds<F>> for UniformProduct<G, N>
where
    G: GlobalAnalysis<F, Bounds<F>>,
{
    #[inline]
    fn global_analysis(&self) -> Bounds<F> {
        let g = self.0.global_analysis();
        (0..N).map(|_| g).product()
    }
}

impl<G, F: Float, const N: usize> LocalAnalysis<F, Bounds<F>, N> for UniformProduct<G, N>
where
    G: LocalAnalysis<F, Bounds<F>, 1>,
{
    #[inline]
    fn local_analysis(&self, cube: &Cube<N, F>) -> Bounds<F> {
        cube.iter_coords()
            .map(|&[a, b]| self.0.local_analysis(&([[a, b]].into())))
            .product()
    }
}

macro_rules! product_lpnorm {
    ($lp:ident) => {
        impl<G, F: Float, const N: usize> Norm<$lp, F> for UniformProduct<G, N>
        where
            G: Norm<$lp, F>,
        {
            #[inline]
            fn norm(&self, lp: $lp) -> F {
                self.0.norm(lp).powi(N as i32)
            }
        }
    };
}

product_lpnorm!(L1);
product_lpnorm!(L2);
product_lpnorm!(Linfinity);

/// Trait for bounding one kernel with respect to another.
///
/// The type `F` is the scalar field, and `T` another kernel to which `Self` is compared.
pub trait BoundedBy<F: Num, T> {
    /// Calculate a bounding factor $c$ such that the Fourier transforms satisfy
    /// $ℱ\[v\] ≤ c ℱ\[u\]$ for $v$ `self` and $u$ `other`.
    ///
    /// If no such factor exists, an error is returned.
    fn bounding_factor(&self, other: &T) -> DynResult<F>;
}

/// This [`BoundedBy`] implementation bounds $(uv) * (uv)$ by $(u * u) v$, where $u$ is the
/// cut-off and $v$ the base spread.
#[replace_float_literals(F::cast_from(literal))]
impl<F, C, BaseP> BoundedBy<F, SupportProductFirst<AutoConvolution<C>, BaseP>>
    for AutoConvolution<SupportProductFirst<C, BaseP>>
where
    F: Float,
    C: PartialEq,
    BaseP: Fourier<F> + PartialOrd, // TODO: replace by BoundedBy,
    <BaseP as Fourier<F>>::Transformed: Bounded<F> + Norm<L1, F>,
{
    fn bounding_factor(
        &self,
        kernel: &SupportProductFirst<AutoConvolution<C>, BaseP>,
    ) -> DynResult<F> {
        let SupportProductFirst(AutoConvolution(ref cutoff2), base_spread2) = kernel;
        let AutoConvolution(SupportProductFirst(ref cutoff, ref base_spread)) = self;
        let v̂ = base_spread.fourier();

        // Verify that the cut-off and ideal physical model (base spread) are the same.
        if cutoff == cutoff2 && base_spread <= base_spread2 && v̂.bounds().lower() >= 0.0 {
            // Calculate the factor between the convolution approximation
            // `AutoConvolution<SupportProductFirst<C, BaseP>>` of $A_*A$ and the
            // kernel of the seminorm. This depends on the physical model P being
            // `SupportProductFirst<C, BaseP>` with the kernel `K` being
            // a `SupportSum` involving `SupportProductFirst<AutoConvolution<C>, BaseP>`.
            Ok(v̂.norm(L1))
        } else {
            // We cannot compare
            Err(anyhow!("Incomparable kernels"))
        }
    }
}

impl<F: Float, A, B, C> BoundedBy<F, SupportSum<B, C>> for A
where
    A: BoundedBy<F, B>,
    C: Bounded<F>,
{
    #[replace_float_literals(F::cast_from(literal))]
    fn bounding_factor(&self, SupportSum(ref kernel1, kernel2): &SupportSum<B, C>) -> DynResult<F> {
        if kernel2.bounds().lower() >= 0.0 {
            self.bounding_factor(kernel1)
        } else {
            Err(anyhow!("Component kernel not lower-bounded by zero"))
        }
    }
}

/// Generates on $[a, b]$ a [`Support::bisection_hint`] for a function supported on the
/// symmetric interval $[-r, r]$.
///
/// It will attempt to place the subdivision point at $-r$ or $r$.
/// If neither of these points lies within $[a, b]$, `None` is returned.
#[inline]
pub(super) fn symmetric_interval_hint<F: Float>(r: F, a: F, b: F) -> Option<F> {
    if a < -r && -r < b {
        Some(-r)
    } else if a < r && r < b {
        Some(r)
    } else {
        None
    }
}
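
// A behavioural sketch of `symmetric_interval_hint` (assumed example values, not from the
// original test suite): with r = 1, the hint is ±1 whenever that endpoint lies strictly inside
// [a, b], and `None` otherwise.
#[cfg(test)]
mod symmetric_interval_hint_test {
    use super::*;

    #[test]
    fn endpoint_hints() {
        assert_eq!(symmetric_interval_hint(1.0, -2.0, 0.0), Some(-1.0));
        assert_eq!(symmetric_interval_hint(1.0, 0.5, 2.0), Some(1.0));
        assert_eq!(symmetric_interval_hint(1.0, 2.0, 3.0), None);
    }
}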

/// Generates on $[a, b]$ a [`Support::bisection_hint`] for a function with monotone derivative,
/// support on $[-r, r]$, and peak at $0$.
///
/// It will attempt to place the subdivision point at $-r$, $r$, or $0$, depending on which
/// maximises the length of the shorter of the two resulting subintervals. If none of these
/// points lies within $[a, b]$, or either resulting subinterval would be shorter than $0.3r$,
/// `None` is returned.
#[replace_float_literals(F::cast_from(literal))]
#[inline]
pub(super) fn symmetric_peak_hint<F: Float>(r: F, a: F, b: F) -> Option<F> {
    let stage1 = if a < -r {
        if b <= -r {
            None
        } else if a + r < -b {
            Some(-r)
        } else {
            Some(0.0)
        }
    } else if a < 0.0 {
        if b <= 0.0 {
            None
        } else if a < r - b {
            Some(0.0)
        } else {
            Some(r)
        }
    } else if a < r && b > r {
        Some(r)
    } else {
        None
    };

    // Ignore stage1 hint if either side of subdivision would be just a small fraction of the
    // interval
    match stage1 {
        Some(h) if (h - a).min(b - h) >= 0.3 * r => Some(h),
        _ => None,
    }
}
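
// A behavioural sketch of `symmetric_peak_hint` (assumed example values): with r = 1 the peak
// at 0 is preferred on a symmetric interval, an off-centre interval is split at the support
// endpoint, and a split whose shorter subinterval would be below 0.3r is rejected.
#[cfg(test)]
mod symmetric_peak_hint_test {
    use super::*;

    #[test]
    fn peak_and_endpoint_hints() {
        assert_eq!(symmetric_peak_hint(1.0, -1.0, 1.0), Some(0.0));
        assert_eq!(symmetric_peak_hint(1.0, 0.5, 2.0), Some(1.0));
        assert_eq!(symmetric_peak_hint(1.0, 0.95, 2.0), None);
    }
}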
