Thu, 01 Dec 2022 23:07:35 +0200
Initial version
0 | 1 | /*! This module implements the convolution operators $𝒟$. |
2 | ||
3 | The principal data type of the module is [`ConvolutionOp`] and the main abstraction | |
4 | the trait [`DiscreteMeasureOp`]. | |
5 | */ | |
6 | ||
7 | use std::iter::Zip; | |
8 | use std::ops::RangeFrom; | |
9 | use alg_tools::types::*; | |
10 | use alg_tools::loc::Loc; | |
11 | use alg_tools::sets::Cube; | |
12 | use alg_tools::bisection_tree::*; | |
13 | use alg_tools::mapping::RealMapping; | |
14 | use alg_tools::iter::{Mappable, FilterMapX}; | |
15 | use alg_tools::linops::{Apply, Linear, BoundedLinear}; | |
16 | use alg_tools::nalgebra_support::ToNalgebraRealField; | |
17 | use crate::measures::{DiscreteMeasure, DeltaMeasure, SpikeIter}; | |
18 | use nalgebra::DMatrix; | |
19 | use std::marker::PhantomData; | |
20 | use itertools::Itertools; | |
21 | ||
/// Abstraction for operators $𝒟 ∈ 𝕃(𝒵(Ω); C_c(Ω))$.
///
/// Here $𝒵(Ω) ⊂ ℳ(Ω)$ is the space of sums of delta measures, presented by [`DiscreteMeasure`].
pub trait DiscreteMeasureOp<Domain, F> : BoundedLinear<DiscreteMeasure<Domain, F>, FloatType=F>
where F : Float + ToNalgebraRealField,
      Domain : 'static {
    /// The output type of [`Self::preapply`].
    type PreCodomain;

    /// Creates a finite-dimensional presentation of the operator restricted to a fixed support.
    ///
    /// <p>
    /// This returns the matrix $C_*𝒟C$, where $C ∈ 𝕃(ℝ^n; 𝒵(Ω))$, $Ca = ∑_{i=1}^n α_i δ_{x_i}$
    /// for a $x_1, …, x_n$ the coordinates given by the iterator `I`, and $a=(α_1,…,α_n)$.
    /// Here $C_* ∈ 𝕃(C_c(Ω); ℝ^n) $ stands for the preadjoint.
    /// </p>
    fn findim_matrix<'a, I>(&self, points : I) -> DMatrix<F::MixedType>
    where I : ExactSizeIterator<Item=&'a Domain> + Clone;

    /// [`Apply::apply`] that typically returns an uninitialised [`PreBTFN`]
    /// instead of a full [`BTFN`].
    fn preapply(&self, μ : DiscreteMeasure<Domain, F>) -> Self::PreCodomain;
}
45 | ||
46 | // Blanket implementation of a measure as a linear functional over a predual | |
47 | // (that by assumption is a linear functional over a measure). | |
48 | /*impl<F, Domain, Predual> Linear<Predual> | |
49 | for DiscreteMeasure<Domain, F> | |
50 | where F : Float + ToNalgebraRealField, | |
51 | Predual : Linear<DiscreteMeasure<Domain, F>, Codomain=F> { | |
52 | type Codomain = F; | |
53 | ||
54 | #[inline] | |
55 | fn apply(&self, ω : &Predual) -> F { | |
56 | ω.apply(self) | |
57 | } | |
58 | }*/ | |
59 | ||
60 | // | |
61 | // Convolutions for discrete measures | |
62 | // | |
63 | ||
/// A trait alias for simple convolution kernels.
///
/// A kernel must be a [`RealMapping`] with a [`Support`], be [`Bounded`], and be
/// cheaply [`Clone`]able, since it is cloned once per spike.
pub trait SimpleConvolutionKernel<F : Float, const N : usize>
: RealMapping<F, N> + Support<F, N> + Bounded<F> + Clone + 'static {}

/// Blanket implementation: any type satisfying the constituent bounds is a kernel.
impl<T, F : Float, const N : usize> SimpleConvolutionKernel<F, N> for T
where T : RealMapping<F, N> + Support<F, N> + Bounded<F> + Clone + 'static {}
70 | ||
/// [`SupportGenerator`] for [`ConvolutionOp`].
#[derive(Clone,Debug)]
pub struct ConvolutionSupportGenerator<F : Float, K, const N : usize>
where K : SimpleConvolutionKernel<F, N> {
    /// The base convolution kernel, shifted to each spike location on demand.
    kernel : K,
    /// The spikes (delta measures) whose shifted and weighted kernels this
    /// generator produces.
    centres : DiscreteMeasure<Loc<F, N>, F>,
}
78 | ||
79 | impl<F : Float, K, const N : usize> ConvolutionSupportGenerator<F, K, N> | |
80 | where K : SimpleConvolutionKernel<F, N> { | |
81 | ||
82 | /// Construct the convolution kernel corresponding to `δ`, i.e., one centered at `δ.x` and | |
83 | /// weighted by `δ.α`. | |
84 | #[inline] | |
85 | fn construct_kernel<'a>(&'a self, δ : &'a DeltaMeasure<Loc<F, N>, F>) | |
86 | -> Weighted<Shift<K, F, N>, F> { | |
87 | self.kernel.clone().shift(δ.x).weigh(δ.α) | |
88 | } | |
89 | ||
90 | /// This is a helper method for the implementation of [`ConvolutionSupportGenerator::all_data`]. | |
91 | /// It filters out `δ` with zero weight, and otherwise returns the corresponding convolution | |
92 | /// kernel. The `id` is passed through as-is. | |
93 | #[inline] | |
94 | fn construct_kernel_and_id_filtered<'a>( | |
95 | &'a self, | |
96 | (id, δ) : (usize, &'a DeltaMeasure<Loc<F, N>, F>) | |
97 | ) -> Option<(usize, Weighted<Shift<K, F, N>, F>)> { | |
98 | (δ.α != F::ZERO).then(|| (id.into(), self.construct_kernel(δ))) | |
99 | } | |
100 | } | |
101 | ||
impl<F : Float, K, const N : usize> SupportGenerator<F, N>
for ConvolutionSupportGenerator<F, K, N>
where K : SimpleConvolutionKernel<F, N> {
    // Spikes are identified by their index in `centres`.
    type Id = usize;
    // Each support is the kernel shifted to a spike location and weighted by its mass.
    type SupportType = Weighted<Shift<K, F, N>, F>;
    // Iterator pairing each spike index with its kernel, skipping zero-weight spikes
    // via `construct_kernel_and_id_filtered`.
    type AllDataIter<'a> = FilterMapX<'a, Zip<RangeFrom<usize>, SpikeIter<'a, Loc<F, N>, F>>,
                                      Self, (Self::Id, Self::SupportType)>;

    // Returns the weighted, shifted kernel for the spike with index `d`.
    // NOTE(review): indexes `centres` directly; presumably panics on an out-of-range
    // id — confirm against `DiscreteMeasure`'s `Index` impl.
    #[inline]
    fn support_for(&self, d : Self::Id) -> Self::SupportType {
        self.construct_kernel(&self.centres[d])
    }

    // One support per spike in `centres` (zero-weight spikes included in the count).
    #[inline]
    fn support_count(&self) -> usize {
        self.centres.len()
    }

    // Enumerates all spikes with their kernels, omitting those with zero weight.
    #[inline]
    fn all_data(&self) -> Self::AllDataIter<'_> {
        (0..).zip(self.centres.iter_spikes())
             .filter_mapX(self, Self::construct_kernel_and_id_filtered)
    }
}
126 | ||
/// Representation of a convolution operator $𝒟$.
#[derive(Clone,Debug)]
pub struct ConvolutionOp<F, K, BT, const N : usize>
where F : Float + ToNalgebraRealField,
      BT : BTImpl<F, N, Data=usize>,
      K : SimpleConvolutionKernel<F, N> {
    /// Depth of the [`BT`] bisection tree for the outputs [`Apply::apply`].
    depth : BT::Depth,
    /// Domain of the [`BT`] bisection tree for the outputs [`Apply::apply`].
    domain : Cube<F, N>,
    /// The convolution kernel
    kernel : K,
    /// Marker for the type parameters `F` and `BT`, which appear only in
    /// associated types and bounds.
    _phantoms : PhantomData<(F,BT)>,
}
141 | ||
142 | impl<F, K, BT, const N : usize> ConvolutionOp<F, K, BT, N> | |
143 | where F : Float + ToNalgebraRealField, | |
144 | BT : BTImpl<F, N, Data=usize>, | |
145 | K : SimpleConvolutionKernel<F, N> { | |
146 | ||
147 | /// Creates a new convolution operator $𝒟$ with `kernel` on `domain`. | |
148 | /// | |
149 | /// The output of [`Apply::apply`] is a [`BT`] of given `depth`. | |
150 | pub fn new(depth : BT::Depth, domain : Cube<F, N>, kernel : K) -> Self { | |
151 | ConvolutionOp { | |
152 | depth : depth, | |
153 | domain : domain, | |
154 | kernel : kernel, | |
155 | _phantoms : PhantomData | |
156 | } | |
157 | } | |
158 | ||
159 | /// Returns the support generator for this convolution operator. | |
160 | fn support_generator(&self, μ : DiscreteMeasure<Loc<F, N>, F>) | |
161 | -> ConvolutionSupportGenerator<F, K, N> { | |
162 | ||
163 | // TODO: can we avoid cloning μ? | |
164 | ConvolutionSupportGenerator { | |
165 | kernel : self.kernel.clone(), | |
166 | centres : μ | |
167 | } | |
168 | } | |
169 | ||
170 | /// Returns a reference to the kernel of this convolution operator. | |
171 | pub fn kernel(&self) -> &K { | |
172 | &self.kernel | |
173 | } | |
174 | } | |
175 | ||
impl<F, K, BT, const N : usize> Apply<DiscreteMeasure<Loc<F, N>, F>>
for ConvolutionOp<F, K, BT, N>
where F : Float + ToNalgebraRealField,
      BT : BTImpl<F, N, Data=usize>,
      K : SimpleConvolutionKernel<F, N>,
      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {

    type Output = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;

    // Applies 𝒟 to μ by constructing a BTFN (bisection-tree-backed function)
    // over `domain` at `depth` from the per-spike kernel supports.
    fn apply(&self, μ : DiscreteMeasure<Loc<F, N>, F>) -> Self::Output {
        let g = self.support_generator(μ);
        BTFN::construct(self.domain.clone(), self.depth, g)
    }
}
190 | ||
impl<'a, F, K, BT, const N : usize> Apply<&'a DiscreteMeasure<Loc<F, N>, F>>
for ConvolutionOp<F, K, BT, N>
where F : Float + ToNalgebraRealField,
      BT : BTImpl<F, N, Data=usize>,
      K : SimpleConvolutionKernel<F, N>,
      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {

    type Output = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;

    // By-reference variant: clones μ and delegates to the by-value `apply`,
    // which in turn moves the clone into the support generator.
    fn apply(&self, μ : &'a DiscreteMeasure<Loc<F, N>, F>) -> Self::Output {
        self.apply(μ.clone())
    }
}
204 | ||
/// [`ConvolutionOp`]s as linear operators over [`DiscreteMeasure`]s.
impl<F, K, BT, const N : usize> Linear<DiscreteMeasure<Loc<F, N>, F>>
for ConvolutionOp<F, K, BT, N>
where F : Float + ToNalgebraRealField,
      BT : BTImpl<F, N, Data=usize>,
      K : SimpleConvolutionKernel<F, N>,
      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
    // The codomain matches the `Apply` implementations above.
    type Codomain = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;
}
214 | ||
impl<F, K, BT, const N : usize> Apply<DeltaMeasure<Loc<F, N>, F>>
for ConvolutionOp<F, K, BT, N>
where F : Float + ToNalgebraRealField,
      BT : BTImpl<F, N, Data=usize>,
      K : SimpleConvolutionKernel<F, N> {

    type Output = Weighted<Shift<K, F, N>, F>;

    // For a single delta measure the result is simply the kernel shifted to δ.x
    // and weighted by δ.α — no bisection tree is needed.
    #[inline]
    fn apply(&self, δ : DeltaMeasure<Loc<F, N>, F>) -> Self::Output {
        self.kernel.clone().shift(δ.x).weigh(δ.α)
    }
}
228 | ||
impl<'a, F, K, BT, const N : usize> Apply<&'a DeltaMeasure<Loc<F, N>, F>>
for ConvolutionOp<F, K, BT, N>
where F : Float + ToNalgebraRealField,
      BT : BTImpl<F, N, Data=usize>,
      K : SimpleConvolutionKernel<F, N> {

    type Output = Weighted<Shift<K, F, N>, F>;

    // By-reference variant; only δ.x and δ.α are read, so no clone of δ is needed.
    #[inline]
    fn apply(&self, δ : &'a DeltaMeasure<Loc<F, N>, F>) -> Self::Output {
        self.kernel.clone().shift(δ.x).weigh(δ.α)
    }
}
242 | ||
/// [`ConvolutionOp`]s as linear operators over [`DeltaMeasure`]s.
///
/// The codomain is different from the implementation for [`DiscreteMeasure`].
impl<F, K, BT, const N : usize> Linear<DeltaMeasure<Loc<F, N>, F>>
for ConvolutionOp<F, K, BT, N>
where F : Float + ToNalgebraRealField,
      BT : BTImpl<F, N, Data=usize>,
      K : SimpleConvolutionKernel<F, N> {
    // A single shifted, weighted kernel instead of a full BTFN.
    type Codomain = Weighted<Shift<K, F, N>, F>;
}
253 | ||
impl<F, K, BT, const N : usize> BoundedLinear<DiscreteMeasure<Loc<F, N>, F>>
for ConvolutionOp<F, K, BT, N>
where F : Float + ToNalgebraRealField,
      BT : BTImpl<F, N, Data=usize>,
      K : SimpleConvolutionKernel<F, N>,
      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {

    type FloatType = F;

    // Returns a bound on the operator norm of 𝒟 (Radon norm → uniform norm):
    // the uniform bound |φ|_∞ of the kernel φ, per the derivation below.
    fn opnorm_bound(&self) -> F {
        // With μ = ∑_i α_i δ_{x_i}, we have
        // |𝒟μ|_∞
        //  = sup_z |∑_i α_i φ(z - x_i)|
        //  ≤ sup_z ∑_i |α_i| |φ(z - x_i)|
        //  ≤ ∑_i |α_i| |φ|_∞
        //  = |μ|_ℳ |φ|_∞
        self.kernel.bounds().uniform()
    }
}
273 | ||
274 | ||
impl<F, K, BT, const N : usize> DiscreteMeasureOp<Loc<F, N>, F>
for ConvolutionOp<F, K, BT, N>
where F : Float + ToNalgebraRealField,
      BT : BTImpl<F, N, Data=usize>,
      K : SimpleConvolutionKernel<F, N>,
      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
    type PreCodomain = PreBTFN<F, ConvolutionSupportGenerator<F, K, N>, N>;

    // Assembles the dense n×n matrix of kernel evaluations φ(y - x) over all
    // ordered pairs (x, y) of the given `points`.
    fn findim_matrix<'a, I>(&self, points : I) -> DMatrix<F::MixedType>
    where I : ExactSizeIterator<Item=&'a Loc<F,N>> + Clone {
        // TODO: Preliminary implementation. It would be best to use sparse matrices or
        // possibly explicit operators without matrices.
        let n = points.len();
        let points_clone = points.clone();
        let pairs = points.cartesian_product(points_clone);
        let kernel = &self.kernel;
        // NOTE(review): `DMatrix::from_iterator` fills in column-major order; the
        // row/column orientation only washes out if the kernel is symmetric
        // (φ(y-x) = φ(x-y)) — confirm against the kernels used.
        let values = pairs.map(|(x, y)| kernel.apply(y-x).to_nalgebra_mixed());
        DMatrix::from_iterator(n, n, values)
    }

    /// A version of [`Apply::apply`] that does not instantiate the [`BTFN`] codomain with
    /// a bisection tree, instead returning a [`PreBTFN`]. This can improve performance when
    /// the output is to be added as the right-hand-side operand to a proper BTFN.
    fn preapply(&self, μ : DiscreteMeasure<Loc<F, N>, F>) -> Self::PreCodomain {
        BTFN::new_pre(self.support_generator(μ))
    }
}
302 | ||
/// Generates a scalar operation (e.g. [`std::ops::Mul`], [`std::ops::Div`])
/// for [`ConvolutionSupportGenerator`].
///
/// Each operation is forwarded to the `centres` measure; the kernel itself is
/// untouched, as the scaling is carried by the spike weights.
macro_rules! make_convolutionsupportgenerator_scalarop_rhs {
    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
        // In-place variant (e.g. `MulAssign`): delegates to the measure.
        impl<F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
        std::ops::$trait_assign<F>
        for ConvolutionSupportGenerator<F, K, N> {
            fn $fn_assign(&mut self, t : F) {
                self.centres.$fn_assign(t);
            }
        }

        // Consuming variant: mutates `centres` in place and returns `self`.
        impl<F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
        std::ops::$trait<F>
        for ConvolutionSupportGenerator<F, K, N> {
            type Output = ConvolutionSupportGenerator<F, K, N>;
            fn $fn(mut self, t : F) -> Self::Output {
                std::ops::$trait_assign::$fn_assign(&mut self.centres, t);
                self
            }
        }
        // By-reference variant: clones the kernel and builds a new generator
        // with the scaled measure.
        impl<'a, F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
        std::ops::$trait<F>
        for &'a ConvolutionSupportGenerator<F, K, N> {
            type Output = ConvolutionSupportGenerator<F, K, N>;
            fn $fn(self, t : F) -> Self::Output {
                ConvolutionSupportGenerator{
                    kernel : self.kernel.clone(),
                    centres : (&self.centres).$fn(t),
                }
            }
        }
    }
}

make_convolutionsupportgenerator_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
make_convolutionsupportgenerator_scalarop_rhs!(Div, div, DivAssign, div_assign);
340 | ||
341 | ||
/// Generates a unary operation (e.g. [`std::ops::Neg`]) for [`ConvolutionSupportGenerator`].
///
/// The operation is forwarded to the `centres` measure, mirroring the scalar-op
/// macro above.
macro_rules! make_convolutionsupportgenerator_unaryop {
    ($trait:ident, $fn:ident) => {
        // Consuming variant: replaces `centres` with the transformed measure.
        impl<F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
        std::ops::$trait
        for ConvolutionSupportGenerator<F, K, N> {
            type Output = ConvolutionSupportGenerator<F, K, N>;
            fn $fn(mut self) -> Self::Output {
                self.centres = self.centres.$fn();
                self
            }
        }

        // By-reference variant: clones the kernel and builds a new generator
        // with the transformed measure.
        impl<'a, F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
        std::ops::$trait
        for &'a ConvolutionSupportGenerator<F, K, N> {
            type Output = ConvolutionSupportGenerator<F, K, N>;
            fn $fn(self) -> Self::Output {
                ConvolutionSupportGenerator{
                    kernel : self.kernel.clone(),
                    centres : (&self.centres).$fn(),
                }
            }
        }
    }
}

make_convolutionsupportgenerator_unaryop!(Neg, neg);
370 | ||
/// Trait for indicating that `Self` is Lipschitz with respect to the seminorm `D`.
pub trait Lipschitz<D> {
    /// The type of floats
    type FloatType : Float;

    /// Returns the Lipschitz factor of `self` with respect to the seminorm `D`,
    /// or `None` if no factor is known.
    fn lipschitz_factor(&self, seminorm : &D) -> Option<Self::FloatType>;
}