--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/seminorms.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,378 @@
+/*! This module implements the convolution operators $𝒟$.
+
+The principal data type of the module is [`ConvolutionOp`] and the main abstraction
+is the trait [`DiscreteMeasureOp`].
+*/
+
+use std::iter::Zip;
+use std::ops::RangeFrom;
+use alg_tools::types::*;
+use alg_tools::loc::Loc;
+use alg_tools::sets::Cube;
+use alg_tools::bisection_tree::*;
+use alg_tools::mapping::RealMapping;
+use alg_tools::iter::{Mappable, FilterMapX};
+use alg_tools::linops::{Apply, Linear, BoundedLinear};
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use crate::measures::{DiscreteMeasure, DeltaMeasure, SpikeIter};
+use nalgebra::DMatrix;
+use std::marker::PhantomData;
+use itertools::Itertools;
+
+/// Abstraction for operators $𝒟 ∈ 𝕃(𝒵(Ω); C_c(Ω))$.
+///
+/// Here $𝒵(Ω) ⊂ ℳ(Ω)$ is the space of sums of delta measures, represented by [`DiscreteMeasure`].
+pub trait DiscreteMeasureOp<Domain, F> : BoundedLinear<DiscreteMeasure<Domain, F>, FloatType=F>
+where F : Float + ToNalgebraRealField,
+      Domain : 'static {
+    /// The output type of [`Self::preapply`].
+    type PreCodomain;
+
+    /// Creates a finite-dimensional presentation of the operator restricted to a fixed support.
+    ///
+    /// <p>
+    /// This returns the matrix $C_*𝒟C$, where $C ∈ 𝕃(ℝ^n; 𝒵(Ω))$, $Ca = ∑_{i=1}^n α_i δ_{x_i}$
+    /// for the coordinates $x_1, …, x_n$ given by the iterator `I`, and $a=(α_1,…,α_n)$.
+    /// Here $C_* ∈ 𝕃(C_c(Ω); ℝ^n)$ stands for the preadjoint.
+    /// </p>
+    fn findim_matrix<'a, I>(&self, points : I) -> DMatrix<F::MixedType>
+    where I : ExactSizeIterator<Item=&'a Domain> + Clone;
+
+    /// A variant of [`Apply::apply`] that typically returns an uninitialised [`PreBTFN`]
+    /// instead of a full [`BTFN`].
+    fn preapply(&self, μ : DiscreteMeasure<Domain, F>) -> Self::PreCodomain;
+}
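+
+// The function below is an illustrative sketch only, not used elsewhere in this
+// module: it shows the calling convention of `findim_matrix` (a plain slice
+// iterator satisfies the `ExactSizeIterator + Clone` bound) and how the returned
+// matrix acts on a weight vector $a = (α_1,…,α_n)$. The explicit `nalgebra`
+// bounds on `F::MixedType` are stated here as assumptions; the concrete float
+// types used in practice satisfy them.
+#[allow(dead_code)]
+fn findim_apply_to_weights<Domain, F, D>(
+    op : &D,
+    locations : &[Domain],
+    a : &nalgebra::DVector<F::MixedType>,
+) -> nalgebra::DVector<F::MixedType>
+where F : Float + ToNalgebraRealField,
+      F::MixedType : nalgebra::Scalar + nalgebra::RealField,
+      Domain : 'static,
+      D : DiscreteMeasureOp<Domain, F> {
+    // The finite-dimensional presentation C_*𝒟C on spikes at `locations`.
+    let c = op.findim_matrix(locations.iter());
+    // Its action a ↦ (C_*𝒟C)a on the weight vector.
+    c * a
+}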
+
+// Blanket implementation of a measure as a linear functional over a predual
+// (that by assumption is a linear functional over a measure).
+/*impl<F, Domain, Predual> Linear<Predual>
+for DiscreteMeasure<Domain, F>
+where F : Float + ToNalgebraRealField,
+      Predual : Linear<DiscreteMeasure<Domain, F>, Codomain=F> {
+    type Codomain = F;
+
+    #[inline]
+    fn apply(&self, ω : &Predual) -> F {
+        ω.apply(self)
+    }
+}*/
+
+//
+// Convolutions for discrete measures
+//
+
+/// A trait alias for simple convolution kernels.
+pub trait SimpleConvolutionKernel<F : Float, const N : usize>
+: RealMapping<F, N> + Support<F, N> + Bounded<F> + Clone + 'static {}
+
+impl<T, F : Float, const N : usize> SimpleConvolutionKernel<F, N> for T
+where T : RealMapping<F, N> + Support<F, N> + Bounded<F> + Clone + 'static {}
+
+/// [`SupportGenerator`] for [`ConvolutionOp`].
+#[derive(Clone,Debug)]
+pub struct ConvolutionSupportGenerator<F : Float, K, const N : usize>
+where K : SimpleConvolutionKernel<F, N> {
+    kernel : K,
+    centres : DiscreteMeasure<Loc<F, N>, F>,
+}
+
+impl<F : Float, K, const N : usize> ConvolutionSupportGenerator<F, K, N>
+where K : SimpleConvolutionKernel<F, N> {
+
+    /// Constructs the convolution kernel corresponding to `δ`, i.e., one centered at `δ.x` and
+    /// weighted by `δ.α`.
+    #[inline]
+    fn construct_kernel<'a>(&'a self, δ : &'a DeltaMeasure<Loc<F, N>, F>)
+    -> Weighted<Shift<K, F, N>, F> {
+        self.kernel.clone().shift(δ.x).weigh(δ.α)
+    }
+
+    /// This is a helper method for the implementation of [`ConvolutionSupportGenerator::all_data`].
+    /// It filters out `δ` with zero weight, and otherwise returns the corresponding convolution
+    /// kernel. The `id` is passed through as-is.
+    #[inline]
+    fn construct_kernel_and_id_filtered<'a>(
+        &'a self,
+        (id, δ) : (usize, &'a DeltaMeasure<Loc<F, N>, F>)
+    ) -> Option<(usize, Weighted<Shift<K, F, N>, F>)> {
+        (δ.α != F::ZERO).then(|| (id.into(), self.construct_kernel(δ)))
+    }
+}
+
+impl<F : Float, K, const N : usize> SupportGenerator<F, N>
+for ConvolutionSupportGenerator<F, K, N>
+where K : SimpleConvolutionKernel<F, N> {
+    type Id = usize;
+    type SupportType = Weighted<Shift<K, F, N>, F>;
+    type AllDataIter<'a> = FilterMapX<'a, Zip<RangeFrom<usize>, SpikeIter<'a, Loc<F, N>, F>>,
+                                      Self, (Self::Id, Self::SupportType)>;
+
+    #[inline]
+    fn support_for(&self, d : Self::Id) -> Self::SupportType {
+        self.construct_kernel(&self.centres[d])
+    }
+
+    #[inline]
+    fn support_count(&self) -> usize {
+        self.centres.len()
+    }
+
+    #[inline]
+    fn all_data(&self) -> Self::AllDataIter<'_> {
+        (0..).zip(self.centres.iter_spikes())
+             .filter_mapX(self, Self::construct_kernel_and_id_filtered)
+    }
+}
+
+/// Representation of a convolution operator $𝒟$.
+#[derive(Clone,Debug)]
+pub struct ConvolutionOp<F, K, BT, const N : usize>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N> {
+    /// Depth of the [`BT`] bisection tree for the output of [`Apply::apply`].
+    depth : BT::Depth,
+    /// Domain of the [`BT`] bisection tree for the output of [`Apply::apply`].
+    domain : Cube<F, N>,
+    /// The convolution kernel
+    kernel : K,
+    _phantoms : PhantomData<(F,BT)>,
+}
+
+impl<F, K, BT, const N : usize> ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N> {
+
+    /// Creates a new convolution operator $𝒟$ with `kernel` on `domain`.
+    ///
+    /// The output of [`Apply::apply`] is a [`BTFN`] backed by a [`BT`] bisection tree
+    /// of the given `depth`.
+    pub fn new(depth : BT::Depth, domain : Cube<F, N>, kernel : K) -> Self {
+        ConvolutionOp {
+            depth : depth,
+            domain : domain,
+            kernel : kernel,
+            _phantoms : PhantomData
+        }
+    }
+
+    /// Returns the support generator for this convolution operator.
+    fn support_generator(&self, μ : DiscreteMeasure<Loc<F, N>, F>)
+    -> ConvolutionSupportGenerator<F, K, N> {
+
+        // TODO: can we avoid cloning μ?
+        ConvolutionSupportGenerator {
+            kernel : self.kernel.clone(),
+            centres : μ
+        }
+    }
+
+    /// Returns a reference to the kernel of this convolution operator.
+    pub fn kernel(&self) -> &K {
+        &self.kernel
+    }
+}
+
+impl<F, K, BT, const N : usize> Apply<DiscreteMeasure<Loc<F, N>, F>>
+for ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N>,
+      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+
+    type Output = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;
+
+    fn apply(&self, μ : DiscreteMeasure<Loc<F, N>, F>) -> Self::Output {
+        let g = self.support_generator(μ);
+        BTFN::construct(self.domain.clone(), self.depth, g)
+    }
+}
+
+impl<'a, F, K, BT, const N : usize> Apply<&'a DiscreteMeasure<Loc<F, N>, F>>
+for ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N>,
+      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+
+    type Output = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;
+
+    fn apply(&self, μ : &'a DiscreteMeasure<Loc<F, N>, F>) -> Self::Output {
+        self.apply(μ.clone())
+    }
+}
+
+/// [`ConvolutionOp`]s as linear operators over [`DiscreteMeasure`]s.
+impl<F, K, BT, const N : usize> Linear<DiscreteMeasure<Loc<F, N>, F>>
+for ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N>,
+      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+    type Codomain = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;
+}
+
+impl<F, K, BT, const N : usize> Apply<DeltaMeasure<Loc<F, N>, F>>
+for ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N> {
+
+    type Output = Weighted<Shift<K, F, N>, F>;
+
+    #[inline]
+    fn apply(&self, δ : DeltaMeasure<Loc<F, N>, F>) -> Self::Output {
+        self.kernel.clone().shift(δ.x).weigh(δ.α)
+    }
+}
+
+impl<'a, F, K, BT, const N : usize> Apply<&'a DeltaMeasure<Loc<F, N>, F>>
+for ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N> {
+
+    type Output = Weighted<Shift<K, F, N>, F>;
+
+    #[inline]
+    fn apply(&self, δ : &'a DeltaMeasure<Loc<F, N>, F>) -> Self::Output {
+        self.kernel.clone().shift(δ.x).weigh(δ.α)
+    }
+}
+
+/// [`ConvolutionOp`]s as linear operators over [`DeltaMeasure`]s.
+///
+/// The codomain is different from the implementation for [`DiscreteMeasure`].
+impl<F, K, BT, const N : usize> Linear<DeltaMeasure<Loc<F, N>, F>>
+for ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N> {
+    type Codomain = Weighted<Shift<K, F, N>, F>;
+}
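+
+// A hedged sanity-check sketch, not used by the rest of the module: applying 𝒟
+// to a single spike α δ_x and evaluating the result at a point z should give
+// α φ(z - x), where φ is the kernel. The `RealMapping` bound on the shifted,
+// weighted kernel is stated explicitly as an assumption (it holds for the
+// kernels used in practice), and the helper name is illustrative only.
+#[allow(dead_code)]
+fn convolution_of_spike_at<F, K, BT, const N : usize>(
+    op : &ConvolutionOp<F, K, BT, N>,
+    δ : &DeltaMeasure<Loc<F, N>, F>,
+    z : Loc<F, N>,
+) -> F
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N>,
+      Weighted<Shift<K, F, N>, F> : RealMapping<F, N> {
+    // 𝒟δ is the kernel shifted to δ.x and weighted by δ.α; evaluate it at z.
+    op.apply(δ).apply(z)
+}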
+
+impl<F, K, BT, const N : usize> BoundedLinear<DiscreteMeasure<Loc<F, N>, F>>
+for ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N>,
+      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+
+    type FloatType = F;
+
+    fn opnorm_bound(&self) -> F {
+        // With μ = ∑_i α_i δ_{x_i}, we have
+        //   |𝒟μ|_∞ = sup_z |∑_i α_i φ(z - x_i)|
+        //          ≤ sup_z ∑_i |α_i| |φ(z - x_i)|
+        //          ≤ ∑_i |α_i| |φ|_∞
+        //          = |μ|_ℳ |φ|_∞,
+        // so |φ|_∞ is a bound on the operator norm.
+        self.kernel.bounds().uniform()
+    }
+}
+
+
+impl<F, K, BT, const N : usize> DiscreteMeasureOp<Loc<F, N>, F>
+for ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N>,
+      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+    type PreCodomain = PreBTFN<F, ConvolutionSupportGenerator<F, K, N>, N>;
+
+    fn findim_matrix<'a, I>(&self, points : I) -> DMatrix<F::MixedType>
+    where I : ExactSizeIterator<Item=&'a Loc<F,N>> + Clone {
+        // TODO: preliminary implementation. It would be best to use sparse matrices
+        // or possibly explicit operators without matrices.
+        let n = points.len();
+        let points_clone = points.clone();
+        let pairs = points.cartesian_product(points_clone);
+        let kernel = &self.kernel;
+        let values = pairs.map(|(x, y)| kernel.apply(y-x).to_nalgebra_mixed());
+        DMatrix::from_iterator(n, n, values)
+    }
+
+    /// A version of [`Apply::apply`] that does not instantiate the [`BTFN`] codomain with
+    /// a bisection tree, instead returning a [`PreBTFN`]. This can improve performance when
+    /// the output is to be added as the right-hand side operand to a proper BTFN.
+    fn preapply(&self, μ : DiscreteMeasure<Loc<F, N>, F>) -> Self::PreCodomain {
+        BTFN::new_pre(self.support_generator(μ))
+    }
+}
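+
+// Hedged sketch of the intended use of `preapply` (see its documentation):
+// combine the cheap `PreBTFN` with an existing `BTFN` as the right-hand side
+// operand of an addition. The `Add` bound below is an explicitly stated
+// assumption rather than something this module guarantees, and the helper
+// itself is illustrative, not part of the crate's API.
+#[allow(dead_code)]
+fn add_preapplied<F, K, BT, Out, const N : usize>(
+    lhs : BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>,
+    op : &ConvolutionOp<F, K, BT, N>,
+    μ : DiscreteMeasure<Loc<F, N>, F>,
+) -> Out
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N>,
+      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N>,
+      BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>
+          : std::ops::Add<PreBTFN<F, ConvolutionSupportGenerator<F, K, N>, N>, Output=Out> {
+    // The PreBTFN avoids building a bisection tree for the right-hand side.
+    lhs + op.preapply(μ)
+}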
+
+/// Generates a scalar operation (e.g. [`std::ops::Mul`], [`std::ops::Div`])
+/// for [`ConvolutionSupportGenerator`].
+macro_rules! make_convolutionsupportgenerator_scalarop_rhs {
+    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
+        impl<F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
+        std::ops::$trait_assign<F>
+        for ConvolutionSupportGenerator<F, K, N> {
+            fn $fn_assign(&mut self, t : F) {
+                self.centres.$fn_assign(t);
+            }
+        }
+
+        impl<F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
+        std::ops::$trait<F>
+        for ConvolutionSupportGenerator<F, K, N> {
+            type Output = ConvolutionSupportGenerator<F, K, N>;
+            fn $fn(mut self, t : F) -> Self::Output {
+                std::ops::$trait_assign::$fn_assign(&mut self.centres, t);
+                self
+            }
+        }
+        impl<'a, F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
+        std::ops::$trait<F>
+        for &'a ConvolutionSupportGenerator<F, K, N> {
+            type Output = ConvolutionSupportGenerator<F, K, N>;
+            fn $fn(self, t : F) -> Self::Output {
+                ConvolutionSupportGenerator{
+                    kernel : self.kernel.clone(),
+                    centres : (&self.centres).$fn(t),
+                }
+            }
+        }
+    }
+}
+
+make_convolutionsupportgenerator_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
+make_convolutionsupportgenerator_scalarop_rhs!(Div, div, DivAssign, div_assign);
+
+
+/// Generates a unary operation (e.g. [`std::ops::Neg`]) for [`ConvolutionSupportGenerator`].
+macro_rules! make_convolutionsupportgenerator_unaryop {
+    ($trait:ident, $fn:ident) => {
+        impl<F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
+        std::ops::$trait
+        for ConvolutionSupportGenerator<F, K, N> {
+            type Output = ConvolutionSupportGenerator<F, K, N>;
+            fn $fn(mut self) -> Self::Output {
+                self.centres = self.centres.$fn();
+                self
+            }
+        }
+
+        impl<'a, F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
+        std::ops::$trait
+        for &'a ConvolutionSupportGenerator<F, K, N> {
+            type Output = ConvolutionSupportGenerator<F, K, N>;
+            fn $fn(self) -> Self::Output {
+                ConvolutionSupportGenerator{
+                    kernel : self.kernel.clone(),
+                    centres : (&self.centres).$fn(),
+                }
+            }
+        }
+    }
+}
+
+make_convolutionsupportgenerator_unaryop!(Neg, neg);
+
+/// Trait for indicating that `Self` is Lipschitz with respect to the seminorm `D`.
+pub trait Lipschitz<D> {
+    /// The type of floats
+    type FloatType : Float;
+
+    /// Returns the Lipschitz factor of `self` with respect to the seminorm `D`.
+    fn lipschitz_factor(&self, seminorm : &D) -> Option<Self::FloatType>;
+}
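+
+// Hedged sketch of how `Lipschitz` is meant to be consumed: an algorithm that
+// knows a Lipschitz factor L of some quantity with respect to the seminorm can
+// pick a step length proportional to 1/L. The helper and its `safety` parameter
+// are illustrative only, not part of the crate's API.
+#[allow(dead_code)]
+fn step_length_from_lipschitz<T, D>(x : &T, seminorm : &D, safety : T::FloatType)
+-> Option<T::FloatType>
+where T : Lipschitz<D> {
+    // `None` indicates that no Lipschitz factor is available.
+    x.lipschitz_factor(seminorm).map(|l| safety / l)
+}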