src/kernels/base.rs

//! Tools for constructing new kernels from component kernels, and traits for analysing them.
use serde::Serialize;
use numeric_literals::replace_float_literals;

use alg_tools::types::*;
use alg_tools::norms::*;
use alg_tools::loc::Loc;
use alg_tools::sets::Cube;
use alg_tools::bisection_tree::{
    Support,
    Bounds,
    LocalAnalysis,
    GlobalAnalysis,
    Bounded,
};
use alg_tools::mapping::Apply;
use alg_tools::maputil::{array_init, map2};
use alg_tools::sets::SetOrd;

use crate::fourier::Fourier;

/// Representation of the product of two kernels.
///
/// The kernels typically implement [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
///
/// The implementation of [`Support`] only uses the [`Support::support_hint`] of the first parameter!
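///
/// # Example
///
/// A minimal sketch of the intended use; `a` and `b` are hypothetical kernels
/// (not defined in this module) implementing [`Support`] and [`Apply`] on `Loc<f64, 1>`:
///
/// ```ignore
/// let w = SupportProductFirst(a, b);
/// let y = w.apply(&Loc([0.1])); // pointwise product a(x) * b(x) at x = 0.1
/// ```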
#[derive(Copy,Clone,Serialize,Debug)]
pub struct SupportProductFirst<A, B>(
    /// First kernel
    pub A,
    /// Second kernel
    pub B
);

impl<A, B, F : Float, const N : usize> Apply<Loc<F, N>>
for SupportProductFirst<A, B>
where A : for<'a> Apply<&'a Loc<F, N>, Output=F>,
      B : for<'a> Apply<&'a Loc<F, N>, Output=F> {
    type Output = F;
    #[inline]
    fn apply(&self, x : Loc<F, N>) -> Self::Output {
        self.0.apply(&x) * self.1.apply(&x)
    }
}

impl<'a, A, B, F : Float, const N : usize> Apply<&'a Loc<F, N>>
for SupportProductFirst<A, B>
where A : Apply<&'a Loc<F, N>, Output=F>,
      B : Apply<&'a Loc<F, N>, Output=F> {
    type Output = F;
    #[inline]
    fn apply(&self, x : &'a Loc<F, N>) -> Self::Output {
        self.0.apply(x) * self.1.apply(x)
    }
}

impl<A, B, F : Float, const N : usize> Support<F, N>
for SupportProductFirst<A, B>
where A : Support<F, N>,
      B : Support<F, N> {
    #[inline]
    fn support_hint(&self) -> Cube<F, N> {
        self.0.support_hint()
    }

    #[inline]
    fn in_support(&self, x : &Loc<F, N>) -> bool {
        self.0.in_support(x)
    }

    #[inline]
    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
        self.0.bisection_hint(cube)
    }
}

impl<A, B, F : Float> GlobalAnalysis<F, Bounds<F>>
for SupportProductFirst<A, B>
where A : GlobalAnalysis<F, Bounds<F>>,
      B : GlobalAnalysis<F, Bounds<F>> {
    #[inline]
    fn global_analysis(&self) -> Bounds<F> {
        self.0.global_analysis() * self.1.global_analysis()
    }
}

impl<A, B, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N>
for SupportProductFirst<A, B>
where A : LocalAnalysis<F, Bounds<F>, N>,
      B : LocalAnalysis<F, Bounds<F>, N> {
    #[inline]
    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
        self.0.local_analysis(cube) * self.1.local_analysis(cube)
    }
}

/// Representation of the sum of two kernels.
///
/// The kernels typically implement [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
///
/// The implementation of [`Support`] uses the support of both parameters: the support hint
/// is a common cube containing the support hints of both kernels.
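///
/// # Example
///
/// A minimal sketch; `a` and `b` are hypothetical kernels implementing [`Support`]
/// and [`Apply`]:
///
/// ```ignore
/// let s = SupportSum(a, b);
/// let y = s.apply(&Loc([0.1])); // pointwise sum a(x) + b(x) at x = 0.1
/// ```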
#[derive(Copy,Clone,Serialize,Debug)]
pub struct SupportSum<A, B>(
    /// First kernel
    pub A,
    /// Second kernel
    pub B
);

impl<'a, A, B, F : Float, const N : usize> Apply<&'a Loc<F, N>>
for SupportSum<A, B>
where A : Apply<&'a Loc<F, N>, Output=F>,
      B : Apply<&'a Loc<F, N>, Output=F> {
    type Output = F;
    #[inline]
    fn apply(&self, x : &'a Loc<F, N>) -> Self::Output {
        self.0.apply(x) + self.1.apply(x)
    }
}

impl<A, B, F : Float, const N : usize> Apply<Loc<F, N>>
for SupportSum<A, B>
where A : for<'a> Apply<&'a Loc<F, N>, Output=F>,
      B : for<'a> Apply<&'a Loc<F, N>, Output=F> {
    type Output = F;
    #[inline]
    fn apply(&self, x : Loc<F, N>) -> Self::Output {
        self.0.apply(&x) + self.1.apply(&x)
    }
}

impl<A, B, F : Float, const N : usize> Support<F, N>
for SupportSum<A, B>
where A : Support<F, N>,
      B : Support<F, N>,
      Cube<F, N> : SetOrd {
    #[inline]
    fn support_hint(&self) -> Cube<F, N> {
        self.0.support_hint().common(&self.1.support_hint())
    }

    #[inline]
    fn in_support(&self, x : &Loc<F, N>) -> bool {
        self.0.in_support(x) || self.1.in_support(x)
    }

    #[inline]
    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
        map2(self.0.bisection_hint(cube),
             self.1.bisection_hint(cube),
             |a, b| a.or(b))
    }
}

impl<A, B, F : Float> GlobalAnalysis<F, Bounds<F>>
for SupportSum<A, B>
where A : GlobalAnalysis<F, Bounds<F>>,
      B : GlobalAnalysis<F, Bounds<F>> {
    #[inline]
    fn global_analysis(&self) -> Bounds<F> {
        self.0.global_analysis() + self.1.global_analysis()
    }
}

impl<A, B, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N>
for SupportSum<A, B>
where A : LocalAnalysis<F, Bounds<F>, N>,
      B : LocalAnalysis<F, Bounds<F>, N>,
      Cube<F, N> : SetOrd {
    #[inline]
    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
        self.0.local_analysis(cube) + self.1.local_analysis(cube)
    }
}

/// Representation of the convolution of two kernels.
///
/// For kernels $a$ and $b$, this represents the function $(a * b)(x) = ∫ a(y) b(x - y) \, dy$.
/// The kernels typically implement [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
///
/// Trait implementations have to be on a case-by-case basis.
#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
pub struct Convolution<A, B>(
    /// First kernel
    pub A,
    /// Second kernel
    pub B
);

/// Representation of the autoconvolution of a kernel.
///
/// For a kernel $ψ$, this represents the function $ψ * ψ$.
/// The kernel typically implements [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
///
/// Trait implementations have to be on a case-by-case basis.
#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
pub struct AutoConvolution<A>(
    /// The kernel to be autoconvolved
    pub A
);

/// Representation of a multi-dimensional product of a one-dimensional kernel.
///
/// For $G: ℝ → ℝ$, this is the function $F(x\_1, …, x\_n) := \prod_{i=1}^n G(x\_i)$.
/// The kernel $G$ typically implements [`Support`] and [`Mapping`][alg_tools::mapping::Mapping]
/// on [`Loc<F, 1>`]. Then the product implements them on [`Loc<F, N>`].
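///
/// # Example
///
/// A minimal sketch; `g` is a hypothetical one-dimensional kernel implementing
/// `Apply<Loc<f64, 1>>`:
///
/// ```ignore
/// let p = UniformProduct::<_, 3>(g);
/// // p.apply(&Loc([x, y, z])) computes g(x) * g(y) * g(z).
/// ```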
#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
struct UniformProduct<G, const N : usize>(
    /// The one-dimensional kernel
    G
);

impl<'a, G, F : Float, const N : usize> Apply<&'a Loc<F, N>>
for UniformProduct<G, N>
where G : Apply<Loc<F, 1>, Output=F> {
    type Output = F;
    #[inline]
    fn apply(&self, x : &'a Loc<F, N>) -> F {
        x.iter().map(|&y| self.0.apply(Loc([y]))).product()
    }
}

impl<G, F : Float, const N : usize> Apply<Loc<F, N>>
for UniformProduct<G, N>
where G : Apply<Loc<F, 1>, Output=F> {
    type Output = F;
    #[inline]
    fn apply(&self, x : Loc<F, N>) -> F {
        x.into_iter().map(|y| self.0.apply(Loc([y]))).product()
    }
}

impl<G, F : Float, const N : usize> Support<F, N>
for UniformProduct<G, N>
where G : Support<F, 1> {
    #[inline]
    fn support_hint(&self) -> Cube<F, N> {
        let [a] : [[F; 2]; 1] = self.0.support_hint().into();
        array_init(|| a.clone()).into()
    }

    #[inline]
    fn in_support(&self, x : &Loc<F, N>) -> bool {
        x.iter().all(|&y| self.0.in_support(&Loc([y])))
    }

    #[inline]
    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
        cube.map(|a, b| {
            let [h] = self.0.bisection_hint(&[[a, b]].into());
            h
        })
    }
}

impl<G, F : Float, const N : usize> GlobalAnalysis<F, Bounds<F>>
for UniformProduct<G, N>
where G : GlobalAnalysis<F, Bounds<F>> {
    #[inline]
    fn global_analysis(&self) -> Bounds<F> {
        let g = self.0.global_analysis();
        (0..N).map(|_| g).product()
    }
}

impl<G, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N>
for UniformProduct<G, N>
where G : LocalAnalysis<F, Bounds<F>, 1> {
    #[inline]
    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
        cube.iter_coords().map(
            |&[a, b]| self.0.local_analysis(&([[a, b]].into()))
        ).product()
    }
}

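// For the separable product F(x_1, …, x_N) = ∏_i G(x_i), the L¹, L², and L^∞ norms
// factorise over the coordinates, so ‖F‖ = ‖G‖^N. The macro below generates the
// corresponding `Norm` implementations for each of these exponents.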
macro_rules! product_lpnorm {
    ($lp:ident) => {
        impl<G, F : Float, const N : usize> Norm<F, $lp>
        for UniformProduct<G, N>
        where G : Norm<F, $lp> {
            #[inline]
            fn norm(&self, lp : $lp) -> F {
                self.0.norm(lp).powi(N as i32)
            }
        }
    }
}

product_lpnorm!(L1);
product_lpnorm!(L2);
product_lpnorm!(Linfinity);


/// Trait for bounding one kernel with respect to another.
///
/// The type `F` is the scalar field, and `T` another kernel to which `Self` is compared.
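///
/// # Example
///
/// A sketch of the intended use, for hypothetical kernels `v` and `u` with a
/// `BoundedBy` implementation:
///
/// ```ignore
/// if let Some(c) = v.bounding_factor(&u) {
///     // Here ℱ[v] ≤ c ℱ[u], so c·u dominates v in the Fourier domain.
/// }
/// ```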
pub trait BoundedBy<F : Num, T> {
    /// Calculate a bounding factor $c$ such that the Fourier transforms satisfy
    /// $ℱ\[v\] ≤ c ℱ\[u\]$, where $v$ is `self` and $u$ is `other`.
    ///
    /// If no such factor exists, `None` is returned.
    fn bounding_factor(&self, other : &T) -> Option<F>;
}

/// This [`BoundedBy`] implementation bounds $(uv) * (uv)$ by $(u * u) v$, where $u$ is the
/// cut-off kernel and $v$ the base spread.
#[replace_float_literals(F::cast_from(literal))]
impl<F, C, BaseP>
BoundedBy<F, SupportProductFirst<AutoConvolution<C>, BaseP>>
for AutoConvolution<SupportProductFirst<C, BaseP>>
where F : Float,
      C : Clone + PartialEq,
      BaseP : Fourier<F> + PartialOrd, // TODO: replace by BoundedBy.
      <BaseP as Fourier<F>>::Transformed : Bounded<F> + Norm<F, L1> {

    fn bounding_factor(&self, kernel : &SupportProductFirst<AutoConvolution<C>, BaseP>) -> Option<F> {
        let SupportProductFirst(AutoConvolution(ref cutoff2), base_spread2) = kernel;
        let AutoConvolution(SupportProductFirst(ref cutoff, ref base_spread)) = self;
        let v̂ = base_spread.fourier();

        // Verify that the cut-offs match, that the base spreads are comparable, and that
        // the Fourier transform of the base spread is nonnegative.
        if cutoff == cutoff2
           && base_spread <= base_spread2
           && v̂.bounds().lower() >= 0.0 {
            // Calculate the factor between the convolution approximation
            // `AutoConvolution<SupportProductFirst<C, BaseP>>` of $A_*A$ and the
            // kernel of the seminorm. This depends on the physical model P being
            // `SupportProductFirst<C, BaseP>` with the kernel `K` being
            // a `SupportSum` involving `SupportProductFirst<AutoConvolution<C>, BaseP>`.
            Some(v̂.norm(L1))
        } else {
            // We cannot compare the kernels.
            None
        }
    }
}

impl<F : Float, A, B, C> BoundedBy<F, SupportSum<B, C>> for A
where A : BoundedBy<F, B>,
      C : Bounded<F> {

    #[replace_float_literals(F::cast_from(literal))]
    fn bounding_factor(&self, SupportSum(ref kernel1, kernel2) : &SupportSum<B, C>) -> Option<F> {
        if kernel2.bounds().lower() >= 0.0 {
            self.bounding_factor(kernel1)
        } else {
            None
        }
    }
}

/// Generates a subdivision point hint on $[a, b]$ (see [`Support::bisection_hint`]) for a
/// function supported on the symmetric interval $[-r, r]$.
///
/// It will attempt to place the subdivision point at $-r$ or $r$.
/// If neither of these points lies within $[a, b]$, `None` is returned.
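///
/// For instance, with $r = 1$ the interval $[-2, 0]$ contains $-r$:
///
/// ```ignore
/// assert_eq!(symmetric_interval_hint(1.0, -2.0, 0.0), Some(-1.0));
/// ```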
#[inline]
pub(super) fn symmetric_interval_hint<F : Float>(r : F, a : F, b : F) -> Option<F> {
    if a < -r && -r < b {
        Some(-r)
    } else if a < r && r < b {
        Some(r)
    } else {
        None
    }
}

/// Generates a subdivision point hint on $[a, b]$ (see [`Support::bisection_hint`]) for a
/// function with monotone derivative, support on $[-r, r]$, and peak at $0$.
///
/// It will attempt to place the subdivision point at $-r$, $r$, or $0$, depending on which
/// choice maximises the length of the shorter of the two resulting subintervals. If none of
/// these points lies within $[a, b]$, or the shorter subinterval would have length less
/// than $0.3r$, `None` is returned.
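///
/// For instance, with $r = 1$:
///
/// ```ignore
/// assert_eq!(symmetric_peak_hint(1.0, -2.0, 0.5), Some(-1.0)); // subdivide at -r
/// assert_eq!(symmetric_peak_hint(1.0, -0.1, 0.05), None);      // subintervals too short
/// ```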
#[replace_float_literals(F::cast_from(literal))]
#[inline]
pub(super) fn symmetric_peak_hint<F : Float>(r : F, a : F, b : F) -> Option<F> {
    let stage1 = if a < -r {
        if b <= -r {
            None
        } else if a + r < -b {
            Some(-r)
        } else {
            Some(0.0)
        }
    } else if a < 0.0 {
        if b <= 0.0 {
            None
        } else if a < r - b {
            Some(0.0)
        } else {
            Some(r)
        }
    } else if a < r && b > r {
        Some(r)
    } else {
        None
    };

    // Ignore the stage1 hint if either side of the subdivision would be just a small
    // fraction of the interval.
    match stage1 {
        Some(h) if (h - a).min(b - h) >= 0.3 * r => Some(h),
        _ => None
    }
}
