Mon, 17 Feb 2025 13:51:50 -0500
Update documentation references
Also allow Zed to reindent.
/*! This module implements the convolution operators $𝒟$.

The principal data type of the module is [`ConvolutionOp`] and the main abstraction
is the trait [`DiscreteMeasureOp`].
*/
6 | ||
7 | use std::iter::Zip; | |
8 | use std::ops::RangeFrom; | |
9 | use alg_tools::types::*; | |
10 | use alg_tools::loc::Loc; | |
11 | use alg_tools::sets::Cube; | |
12 | use alg_tools::bisection_tree::*; | |
13 | use alg_tools::mapping::RealMapping; | |
14 | use alg_tools::iter::{Mappable, FilterMapX}; | |
35 | 15 | use alg_tools::linops::{Mapping, Linear, BoundedLinear}; |
16 | use alg_tools::instance::Instance; | |
0 | 17 | use alg_tools::nalgebra_support::ToNalgebraRealField; |
35 | 18 | use alg_tools::norms::Linfinity; |
19 | use crate::measures::{DiscreteMeasure, DeltaMeasure, SpikeIter, Radon, RNDM}; | |
0 | 20 | use nalgebra::DMatrix; |
21 | use std::marker::PhantomData; | |
22 | use itertools::Itertools; | |
23 | ||
/// Abstraction for operators $𝒟 ∈ 𝕃(𝒵(Ω); C_c(Ω))$.
///
/// Here $𝒵(Ω) ⊂ ℳ(Ω)$ is the space of sums of delta measures, presented by [`DiscreteMeasure`].
pub trait DiscreteMeasureOp<Domain, F>
    : BoundedLinear<DiscreteMeasure<Domain, F>, Radon, Linfinity, F>
where
    F : Float + ToNalgebraRealField,
    Domain : 'static + Clone + PartialEq,
{
    /// The output type of [`Self::preapply`].
    type PreCodomain;

    /// Creates a finite-dimensional presentation of the operator restricted to a fixed support.
    ///
    /// <p>
    /// This returns the matrix $C_*𝒟C$, where $C ∈ 𝕃(ℝ^n; 𝒵(Ω))$, $Ca = ∑_{i=1}^n α_i δ_{x_i}$
    /// for $x_1, …, x_n$ the coordinates given by the iterator `I`, and $a=(α_1,…,α_n)$.
    /// Here $C_* ∈ 𝕃(C_c(Ω); ℝ^n)$ stands for the preadjoint.
    /// </p>
    fn findim_matrix<'a, I>(&self, points : I) -> DMatrix<F::MixedType>
    where I : ExactSizeIterator<Item=&'a Domain> + Clone;

    /// [`Mapping`] that typically returns an uninitialised [`PreBTFN`]
    /// instead of a full [`BTFN`].
    fn preapply(&self, μ : DiscreteMeasure<Domain, F>) -> Self::PreCodomain;
}
50 | ||
// Blanket implementation of a measure as a linear functional over a predual
// (that by assumption is a linear functional over a measure).
/*impl<F, Domain, Predual> Linear<Predual>
for DiscreteMeasure<Domain, F>
where F : Float + ToNalgebraRealField,
      Predual : Linear<DiscreteMeasure<Domain, F>, Codomain=F> {
    type Codomain = F;

    #[inline]
    fn apply(&self, ω : &Predual) -> F {
        ω.apply(self)
    }
}*/
64 | ||
65 | // | |
66 | // Convolutions for discrete measures | |
67 | // | |
68 | ||
/// A trait alias for simple convolution kernels.
///
/// A kernel is a real-valued, compactly supported, bounded mapping on $ℝ^N$.
pub trait SimpleConvolutionKernel<F : Float, const N : usize>
: RealMapping<F, N> + Support<F, N> + Bounded<F> + Clone + 'static {}

// Blanket implementation: anything satisfying the bounds is a simple convolution kernel.
impl<T, F : Float, const N : usize> SimpleConvolutionKernel<F, N> for T
where T : RealMapping<F, N> + Support<F, N> + Bounded<F> + Clone + 'static {}
75 | ||
/// [`SupportGenerator`] for [`ConvolutionOp`].
#[derive(Clone,Debug)]
pub struct ConvolutionSupportGenerator<F : Float, K, const N : usize>
where K : SimpleConvolutionKernel<F, N> {
    /// The base convolution kernel, shifted and weighted per spike of `centres`.
    kernel : K,
    /// The discrete measure whose spikes provide the kernel centres and weights.
    centres : RNDM<F, N>,
}
83 | ||
84 | impl<F : Float, K, const N : usize> ConvolutionSupportGenerator<F, K, N> | |
85 | where K : SimpleConvolutionKernel<F, N> { | |
86 | ||
87 | /// Construct the convolution kernel corresponding to `δ`, i.e., one centered at `δ.x` and | |
88 | /// weighted by `δ.α`. | |
89 | #[inline] | |
90 | fn construct_kernel<'a>(&'a self, δ : &'a DeltaMeasure<Loc<F, N>, F>) | |
91 | -> Weighted<Shift<K, F, N>, F> { | |
92 | self.kernel.clone().shift(δ.x).weigh(δ.α) | |
93 | } | |
94 | ||
95 | /// This is a helper method for the implementation of [`ConvolutionSupportGenerator::all_data`]. | |
96 | /// It filters out `δ` with zero weight, and otherwise returns the corresponding convolution | |
97 | /// kernel. The `id` is passed through as-is. | |
98 | #[inline] | |
99 | fn construct_kernel_and_id_filtered<'a>( | |
100 | &'a self, | |
101 | (id, δ) : (usize, &'a DeltaMeasure<Loc<F, N>, F>) | |
102 | ) -> Option<(usize, Weighted<Shift<K, F, N>, F>)> { | |
103 | (δ.α != F::ZERO).then(|| (id.into(), self.construct_kernel(δ))) | |
104 | } | |
105 | } | |
106 | ||
107 | impl<F : Float, K, const N : usize> SupportGenerator<F, N> | |
108 | for ConvolutionSupportGenerator<F, K, N> | |
109 | where K : SimpleConvolutionKernel<F, N> { | |
110 | type Id = usize; | |
111 | type SupportType = Weighted<Shift<K, F, N>, F>; | |
112 | type AllDataIter<'a> = FilterMapX<'a, Zip<RangeFrom<usize>, SpikeIter<'a, Loc<F, N>, F>>, | |
113 | Self, (Self::Id, Self::SupportType)>; | |
114 | ||
115 | #[inline] | |
116 | fn support_for(&self, d : Self::Id) -> Self::SupportType { | |
117 | self.construct_kernel(&self.centres[d]) | |
118 | } | |
119 | ||
120 | #[inline] | |
121 | fn support_count(&self) -> usize { | |
122 | self.centres.len() | |
123 | } | |
124 | ||
125 | #[inline] | |
126 | fn all_data(&self) -> Self::AllDataIter<'_> { | |
127 | (0..).zip(self.centres.iter_spikes()) | |
128 | .filter_mapX(self, Self::construct_kernel_and_id_filtered) | |
129 | } | |
130 | } | |
131 | ||
/// Representation of a convolution operator $𝒟$.
#[derive(Clone,Debug)]
pub struct ConvolutionOp<F, K, BT, const N : usize>
where F : Float + ToNalgebraRealField,
      BT : BTImpl<F, N, Data=usize>,
      K : SimpleConvolutionKernel<F, N> {
    /// Depth of the `BT` bisection tree for the outputs of [`Mapping::apply`].
    depth : BT::Depth,
    /// Domain of the `BT` bisection tree for the outputs of [`Mapping::apply`].
    domain : Cube<F, N>,
    /// The convolution kernel.
    kernel : K,
    // Marks `F` and `BT` as used; neither is stored directly.
    _phantoms : PhantomData<(F,BT)>,
}
146 | ||
147 | impl<F, K, BT, const N : usize> ConvolutionOp<F, K, BT, N> | |
148 | where F : Float + ToNalgebraRealField, | |
149 | BT : BTImpl<F, N, Data=usize>, | |
150 | K : SimpleConvolutionKernel<F, N> { | |
151 | ||
152 | /// Creates a new convolution operator $𝒟$ with `kernel` on `domain`. | |
153 | /// | |
35 | 154 | /// The output of [`Mapping::apply`] is a [`BT`] of given `depth`. |
0 | 155 | pub fn new(depth : BT::Depth, domain : Cube<F, N>, kernel : K) -> Self { |
156 | ConvolutionOp { | |
157 | depth : depth, | |
158 | domain : domain, | |
159 | kernel : kernel, | |
160 | _phantoms : PhantomData | |
161 | } | |
162 | } | |
163 | ||
164 | /// Returns the support generator for this convolution operator. | |
35 | 165 | fn support_generator(&self, μ : RNDM<F, N>) |
0 | 166 | -> ConvolutionSupportGenerator<F, K, N> { |
167 | ||
168 | // TODO: can we avoid cloning μ? | |
169 | ConvolutionSupportGenerator { | |
170 | kernel : self.kernel.clone(), | |
171 | centres : μ | |
172 | } | |
173 | } | |
174 | ||
175 | /// Returns a reference to the kernel of this convolution operator. | |
176 | pub fn kernel(&self) -> &K { | |
177 | &self.kernel | |
178 | } | |
179 | } | |
180 | ||
35 | 181 | impl<F, K, BT, const N : usize> Mapping<RNDM<F, N>> |
0 | 182 | for ConvolutionOp<F, K, BT, N> |
35 | 183 | where |
184 | F : Float + ToNalgebraRealField, | |
185 | BT : BTImpl<F, N, Data=usize>, | |
186 | K : SimpleConvolutionKernel<F, N>, | |
187 | Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> | |
188 | { | |
0 | 189 | |
35 | 190 | type Codomain = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>; |
0 | 191 | |
35 | 192 | fn apply<I>(&self, μ : I) -> Self::Codomain |
193 | where I : Instance<RNDM<F, N>> { | |
194 | let g = self.support_generator(μ.own()); | |
0 | 195 | BTFN::construct(self.domain.clone(), self.depth, g) |
196 | } | |
197 | } | |
198 | ||
/// [`ConvolutionOp`]s as linear operators over [`DiscreteMeasure`]s.
///
/// This is a marker impl; the behaviour comes from the [`Mapping`] implementation,
/// whose bounds are repeated here.
impl<F, K, BT, const N : usize> Linear<RNDM<F, N>>
for ConvolutionOp<F, K, BT, N>
where
    F : Float + ToNalgebraRealField,
    BT : BTImpl<F, N, Data=usize>,
    K : SimpleConvolutionKernel<F, N>,
    Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N>
{ }
208 | ||
impl<F, K, BT, const N : usize>
BoundedLinear<RNDM<F, N>, Radon, Linfinity, F>
for ConvolutionOp<F, K, BT, N>
where F : Float + ToNalgebraRealField,
      BT : BTImpl<F, N, Data=usize>,
      K : SimpleConvolutionKernel<F, N>,
      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {

    /// Returns the uniform bound $|φ|_∞$ of the kernel as a bound on the
    /// Radon-norm-to-sup-norm operator norm of $𝒟$; see the derivation below.
    fn opnorm_bound(&self, _ : Radon, _ : Linfinity) -> F {
        // With μ = ∑_i α_i δ_{x_i}, we have
        // |𝒟μ|_∞
        //  = sup_z |∑_i α_i φ(z - x_i)|
        //  ≤ sup_z ∑_i |α_i| |φ(z - x_i)|
        //  ≤ ∑_i |α_i| |φ|_∞
        //  = |μ|_ℳ |φ|_∞
        // so |φ|_∞ bounds the operator norm.
        self.kernel.bounds().uniform()
    }
}
227 | ||
228 | ||
229 | impl<F, K, BT, const N : usize> DiscreteMeasureOp<Loc<F, N>, F> | |
230 | for ConvolutionOp<F, K, BT, N> | |
231 | where F : Float + ToNalgebraRealField, | |
232 | BT : BTImpl<F, N, Data=usize>, | |
233 | K : SimpleConvolutionKernel<F, N>, | |
234 | Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> { | |
235 | type PreCodomain = PreBTFN<F, ConvolutionSupportGenerator<F, K, N>, N>; | |
236 | ||
237 | fn findim_matrix<'a, I>(&self, points : I) -> DMatrix<F::MixedType> | |
238 | where I : ExactSizeIterator<Item=&'a Loc<F,N>> + Clone { | |
239 | // TODO: Preliminary implementation. It be best to use sparse matrices or | |
240 | // possibly explicit operators without matrices | |
241 | let n = points.len(); | |
242 | let points_clone = points.clone(); | |
243 | let pairs = points.cartesian_product(points_clone); | |
244 | let kernel = &self.kernel; | |
245 | let values = pairs.map(|(x, y)| kernel.apply(y-x).to_nalgebra_mixed()); | |
246 | DMatrix::from_iterator(n, n, values) | |
247 | } | |
248 | ||
35 | 249 | /// A version of [`Mapping::apply`] that does not instantiate the [`BTFN`] codomain with |
0 | 250 | /// a bisection tree, instead returning a [`PreBTFN`]. This can improve performance when |
251 | /// the output is to be added as the right-hand-side operand to a proper BTFN. | |
35 | 252 | fn preapply(&self, μ : RNDM<F, N>) -> Self::PreCodomain { |
0 | 253 | BTFN::new_pre(self.support_generator(μ)) |
254 | } | |
255 | } | |
256 | ||
/// Generates a scalar operation (e.g. [`std::ops::Mul`], [`std::ops::Div`])
/// for [`ConvolutionSupportGenerator`].
///
/// Each operation acts on the scalar weights of the centre measure; the kernel is
/// left untouched. Three impls are generated: assigning, by-value, and by-reference.
macro_rules! make_convolutionsupportgenerator_scalarop_rhs {
    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
        // In-place variant: scale the centre weights.
        impl<F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
        std::ops::$trait_assign<F>
        for ConvolutionSupportGenerator<F, K, N> {
            fn $fn_assign(&mut self, t : F) {
                self.centres.$fn_assign(t);
            }
        }

        // By-value variant: delegates to the assigning operation on the centres.
        impl<F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
        std::ops::$trait<F>
        for ConvolutionSupportGenerator<F, K, N> {
            type Output = ConvolutionSupportGenerator<F, K, N>;
            fn $fn(mut self, t : F) -> Self::Output {
                std::ops::$trait_assign::$fn_assign(&mut self.centres, t);
                self
            }
        }
        // By-reference variant: clones the kernel and produces scaled centres.
        impl<'a, F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
        std::ops::$trait<F>
        for &'a ConvolutionSupportGenerator<F, K, N> {
            type Output = ConvolutionSupportGenerator<F, K, N>;
            fn $fn(self, t : F) -> Self::Output {
                ConvolutionSupportGenerator{
                    kernel : self.kernel.clone(),
                    centres : (&self.centres).$fn(t),
                }
            }
        }
    }
}

make_convolutionsupportgenerator_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
make_convolutionsupportgenerator_scalarop_rhs!(Div, div, DivAssign, div_assign);
294 | ||
295 | ||
/// Generates a unary operation (e.g. [`std::ops::Neg`]) for [`ConvolutionSupportGenerator`].
///
/// The operation acts on the centre measure; the kernel is left untouched.
/// Two impls are generated: by-value and by-reference.
macro_rules! make_convolutionsupportgenerator_unaryop {
    ($trait:ident, $fn:ident) => {
        // By-value variant: replaces the centres with the transformed centres.
        impl<F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
        std::ops::$trait
        for ConvolutionSupportGenerator<F, K, N> {
            type Output = ConvolutionSupportGenerator<F, K, N>;
            fn $fn(mut self) -> Self::Output {
                self.centres = self.centres.$fn();
                self
            }
        }

        // By-reference variant: clones the kernel, applies the operation to the centres.
        impl<'a, F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
        std::ops::$trait
        for &'a ConvolutionSupportGenerator<F, K, N> {
            type Output = ConvolutionSupportGenerator<F, K, N>;
            fn $fn(self) -> Self::Output {
                ConvolutionSupportGenerator{
                    kernel : self.kernel.clone(),
                    centres : (&self.centres).$fn(),
                }
            }
        }
    }
}

make_convolutionsupportgenerator_unaryop!(Neg, neg);
324 |