Fri, 07 Oct 2022 13:11:08 +0300
Taylor 2 model attempts
0 | 1 | |
2 | //! Things for constructing new kernels from component kernels and traits for analysing them | |
3 | use serde::Serialize; | |
4 | use numeric_literals::replace_float_literals; | |
5 | ||
6 | use alg_tools::types::*; | |
7 | use alg_tools::norms::*; | |
8 | use alg_tools::loc::Loc; | |
9 | use alg_tools::sets::Cube; | |
10 | use alg_tools::bisection_tree::{ | |
11 | Support, | |
12 | Bounds, | |
13 | LocalAnalysis, | |
14 | GlobalAnalysis, | |
15 | Bounded, | |
3 | 16 | Taylor2ModelParams |
0 | 17 | }; |
18 | use alg_tools::mapping::Apply; | |
19 | use alg_tools::maputil::{array_init, map2}; | |
20 | use alg_tools::sets::SetOrd; | |
21 | ||
22 | use crate::fourier::Fourier; | |
23 | ||
/// Representation of the pointwise product of two kernels.
///
/// The kernels typically implement [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
///
/// The implementation of [`Support`] only uses the [`Support::support_hint`] of the first parameter!
#[derive(Copy,Clone,Serialize,Debug)]
pub struct SupportProductFirst<A, B>(
    /// First kernel
    pub A,
    /// Second kernel
    pub B
);
36 | ||
37 | impl<A, B, F : Float, const N : usize> Apply<Loc<F, N>> | |
38 | for SupportProductFirst<A, B> | |
39 | where A : for<'a> Apply<&'a Loc<F, N>, Output=F>, | |
40 | B : for<'a> Apply<&'a Loc<F, N>, Output=F> { | |
41 | type Output = F; | |
42 | #[inline] | |
43 | fn apply(&self, x : Loc<F, N>) -> Self::Output { | |
44 | self.0.apply(&x) * self.1.apply(&x) | |
45 | } | |
46 | } | |
47 | ||
48 | impl<'a, A, B, F : Float, const N : usize> Apply<&'a Loc<F, N>> | |
49 | for SupportProductFirst<A, B> | |
50 | where A : Apply<&'a Loc<F, N>, Output=F>, | |
51 | B : Apply<&'a Loc<F, N>, Output=F> { | |
52 | type Output = F; | |
53 | #[inline] | |
54 | fn apply(&self, x : &'a Loc<F, N>) -> Self::Output { | |
55 | self.0.apply(x) * self.1.apply(x) | |
56 | } | |
57 | } | |
58 | ||
59 | impl<'a, A, B, F : Float, const N : usize> Support<F, N> | |
60 | for SupportProductFirst<A, B> | |
61 | where A : Support<F, N>, | |
62 | B : Support<F, N> { | |
63 | #[inline] | |
64 | fn support_hint(&self) -> Cube<F, N> { | |
65 | self.0.support_hint() | |
66 | } | |
67 | ||
68 | #[inline] | |
69 | fn in_support(&self, x : &Loc<F, N>) -> bool { | |
70 | self.0.in_support(x) | |
71 | } | |
72 | ||
73 | #[inline] | |
74 | fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] { | |
75 | self.0.bisection_hint(cube) | |
76 | } | |
77 | } | |
78 | ||
79 | impl<'a, A, B, F : Float> GlobalAnalysis<F, Bounds<F>> | |
80 | for SupportProductFirst<A, B> | |
81 | where A : GlobalAnalysis<F, Bounds<F>>, | |
82 | B : GlobalAnalysis<F, Bounds<F>> { | |
83 | #[inline] | |
84 | fn global_analysis(&self) -> Bounds<F> { | |
85 | self.0.global_analysis() * self.1.global_analysis() | |
86 | } | |
87 | } | |
88 | ||
89 | impl<'a, A, B, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N> | |
90 | for SupportProductFirst<A, B> | |
91 | where A : LocalAnalysis<F, Bounds<F>, N>, | |
92 | B : LocalAnalysis<F, Bounds<F>, N> { | |
93 | #[inline] | |
94 | fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> { | |
95 | self.0.local_analysis(cube) * self.1.local_analysis(cube) | |
96 | } | |
97 | } | |
98 | ||
/// Representation of the sum of two kernels.
///
/// The kernels typically implement [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
///
/// The implementation of [`Support`] combines the [`Support::support_hint`]s
/// of both summands via [`SetOrd::common`].
#[derive(Copy,Clone,Serialize,Debug)]
pub struct SupportSum<A, B>(
    /// First kernel
    pub A,
    /// Second kernel
    pub B
);
111 | ||
112 | impl<'a, A, B, F : Float, const N : usize> Apply<&'a Loc<F, N>> | |
113 | for SupportSum<A, B> | |
114 | where A : Apply<&'a Loc<F, N>, Output=F>, | |
115 | B : Apply<&'a Loc<F, N>, Output=F> { | |
116 | type Output = F; | |
117 | #[inline] | |
118 | fn apply(&self, x : &'a Loc<F, N>) -> Self::Output { | |
119 | self.0.apply(x) + self.1.apply(x) | |
120 | } | |
121 | } | |
122 | ||
123 | impl<A, B, F : Float, const N : usize> Apply<Loc<F, N>> | |
124 | for SupportSum<A, B> | |
125 | where A : for<'a> Apply<&'a Loc<F, N>, Output=F>, | |
126 | B : for<'a> Apply<&'a Loc<F, N>, Output=F> { | |
127 | type Output = F; | |
128 | #[inline] | |
129 | fn apply(&self, x : Loc<F, N>) -> Self::Output { | |
130 | self.0.apply(&x) + self.1.apply(&x) | |
131 | } | |
132 | } | |
133 | ||
134 | impl<'a, A, B, F : Float, const N : usize> Support<F, N> | |
135 | for SupportSum<A, B> | |
136 | where A : Support<F, N>, | |
137 | B : Support<F, N>, | |
138 | Cube<F, N> : SetOrd { | |
139 | #[inline] | |
140 | fn support_hint(&self) -> Cube<F, N> { | |
141 | self.0.support_hint().common(&self.1.support_hint()) | |
142 | } | |
143 | ||
144 | #[inline] | |
145 | fn in_support(&self, x : &Loc<F, N>) -> bool { | |
146 | self.0.in_support(x) || self.1.in_support(x) | |
147 | } | |
148 | ||
149 | #[inline] | |
150 | fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] { | |
151 | map2(self.0.bisection_hint(cube), | |
152 | self.1.bisection_hint(cube), | |
153 | |a, b| a.or(b)) | |
154 | } | |
155 | } | |
156 | ||
157 | impl<'a, A, B, F : Float> GlobalAnalysis<F, Bounds<F>> | |
158 | for SupportSum<A, B> | |
159 | where A : GlobalAnalysis<F, Bounds<F>>, | |
160 | B : GlobalAnalysis<F, Bounds<F>> { | |
161 | #[inline] | |
162 | fn global_analysis(&self) -> Bounds<F> { | |
163 | self.0.global_analysis() + self.1.global_analysis() | |
164 | } | |
165 | } | |
166 | ||
167 | impl<'a, A, B, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N> | |
168 | for SupportSum<A, B> | |
169 | where A : LocalAnalysis<F, Bounds<F>, N>, | |
170 | B : LocalAnalysis<F, Bounds<F>, N>, | |
171 | Cube<F, N> : SetOrd { | |
172 | #[inline] | |
173 | fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> { | |
174 | self.0.local_analysis(cube) + self.1.local_analysis(cube) | |
175 | } | |
176 | } | |
177 | ||
/// Representation of the convolution of two kernels.
///
/// The kernels typically implement [`Support`]s and [`Mapping`][alg_tools::mapping::Mapping].
///
/// Trait implementations have to be on a case-by-case basis.
#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
pub struct Convolution<A, B>(
    /// First kernel
    pub A,
    /// Second kernel
    pub B
);
190 | ||
/// Representation of the autoconvolution of a kernel, i.e. the convolution of
/// the kernel with itself.
///
/// The kernel typically implements [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
///
/// Trait implementations have to be on a case-by-case basis.
#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
pub struct AutoConvolution<A>(
    /// The kernel to be autoconvolved
    pub A
);
201 | ||
/// Representation of a multi-dimensional product of a one-dimensional kernel.
///
/// For $G: ℝ → ℝ$, this is the function $F(x\_1, …, x\_n) := \prod_{i=1}^n G(x\_i)$.
/// The kernel $G$ typically implements [`Support`] and [`Mapping`][alg_tools::mapping::Mapping]
/// on [`Loc<F, 1>`]. Then the product implements them on [`Loc<F, N>`].
#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
struct UniformProduct<G, const N : usize>(
    /// The one-dimensional kernel
    G
);
212 | ||
213 | impl<'a, G, F : Float, const N : usize> Apply<&'a Loc<F, N>> | |
214 | for UniformProduct<G, N> | |
215 | where G : Apply<Loc<F, 1>, Output=F> { | |
216 | type Output = F; | |
217 | #[inline] | |
218 | fn apply(&self, x : &'a Loc<F, N>) -> F { | |
219 | x.iter().map(|&y| self.0.apply(Loc([y]))).product() | |
220 | } | |
221 | } | |
222 | ||
223 | impl<G, F : Float, const N : usize> Apply<Loc<F, N>> | |
224 | for UniformProduct<G, N> | |
225 | where G : Apply<Loc<F, 1>, Output=F> { | |
226 | type Output = F; | |
227 | #[inline] | |
228 | fn apply(&self, x : Loc<F, N>) -> F { | |
229 | x.into_iter().map(|y| self.0.apply(Loc([y]))).product() | |
230 | } | |
231 | } | |
232 | ||
233 | impl<G, F : Float, const N : usize> Support<F, N> | |
234 | for UniformProduct<G, N> | |
235 | where G : Support<F, 1> { | |
236 | #[inline] | |
237 | fn support_hint(&self) -> Cube<F, N> { | |
238 | let [a] : [[F; 2]; 1] = self.0.support_hint().into(); | |
239 | array_init(|| a.clone()).into() | |
240 | } | |
241 | ||
242 | #[inline] | |
243 | fn in_support(&self, x : &Loc<F, N>) -> bool { | |
244 | x.iter().all(|&y| self.0.in_support(&Loc([y]))) | |
245 | } | |
246 | ||
247 | #[inline] | |
248 | fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] { | |
249 | cube.map(|a, b| { | |
250 | let [h] = self.0.bisection_hint(&[[a, b]].into()); | |
251 | h | |
252 | }) | |
253 | } | |
254 | } | |
255 | ||
256 | impl<G, F : Float, const N : usize> GlobalAnalysis<F, Bounds<F>> | |
257 | for UniformProduct<G, N> | |
258 | where G : GlobalAnalysis<F, Bounds<F>> { | |
259 | #[inline] | |
260 | fn global_analysis(&self) -> Bounds<F> { | |
261 | let g = self.0.global_analysis(); | |
262 | (0..N).map(|_| g).product() | |
263 | } | |
264 | } | |
265 | ||
266 | impl<G, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N> | |
267 | for UniformProduct<G, N> | |
268 | where G : LocalAnalysis<F, Bounds<F>, 1> { | |
269 | #[inline] | |
270 | fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> { | |
271 | cube.iter_coords().map( | |
272 | |&[a, b]| self.0.local_analysis(&([[a, b]].into())) | |
273 | ).product() | |
274 | } | |
275 | } | |
276 | ||
/// Implements `Norm<F, $lp>` for [`UniformProduct`] whenever the 1D kernel
/// has that norm: the separable structure makes the N-dimensional norm the
/// N-th power of the one-dimensional norm.
macro_rules! product_lpnorm {
    ($lp:ident) => {
        impl<G, F : Float, const N : usize> Norm<F, $lp>
        for UniformProduct<G, N>
        where G : Norm<F, $lp> {
            #[inline]
            fn norm(&self, lp : $lp) -> F {
                // Separability: the norm factorises over the N axes.
                self.0.norm(lp).powi(N as i32)
            }
        }
    }
}

product_lpnorm!(L1);
product_lpnorm!(L2);
product_lpnorm!(Linfinity);
293 | ||
294 | ||
/// Trait for bounding one kernel with respect to another.
///
/// The type `F` is the scalar field, and `T` another kernel to which `Self` is compared.
pub trait BoundedBy<F : Num, T> {
    /// Calculate a bounding factor $c$ such that the Fourier transforms $ℱ\[v\] ≤ c ℱ\[u\]$ for
    /// $v$ `self` and $u$ `other`.
    ///
    /// If no such factor exists, `None` is returned.
    fn bounding_factor(&self, other : &T) -> Option<F>;
}
305 | ||
/// This [`BoundedBy`] implementation bounds $(uv) * (uv)$ by $(ψ * ψ) u$.
#[replace_float_literals(F::cast_from(literal))]
impl<F, C, BaseP>
BoundedBy<F, SupportProductFirst<AutoConvolution<C>, BaseP>>
for AutoConvolution<SupportProductFirst<C, BaseP>>
where F : Float,
      C : Clone + PartialEq,
      BaseP : Fourier<F> + PartialOrd, // TODO: replace by BoundedBy,
      <BaseP as Fourier<F>>::Transformed : Bounded<F> + Norm<F, L1> {

    /// Returns the L¹ norm of the Fourier transform of `self`'s base spread
    /// as the bounding factor, provided the cut-offs agree and the base
    /// spreads are comparable (see checks below).
    fn bounding_factor(&self, kernel : &SupportProductFirst<AutoConvolution<C>, BaseP>) -> Option<F> {
        // Destructure both sides into cut-off and base-spread components.
        let SupportProductFirst(AutoConvolution(ref cutoff2), base_spread2) = kernel;
        let AutoConvolution(SupportProductFirst(ref cutoff, ref base_spread)) = self;
        let v̂ = base_spread.fourier();

        // Verify that the cut-off and ideal physical model (base spread) are the same.
        // The Fourier transform of the base spread must additionally be
        // non-negative for the bound to be valid.
        if cutoff == cutoff2
           && base_spread <= base_spread2
           && v̂.bounds().lower() >= 0.0 {
            // Calculate the factor between the convolution approximation
            // `AutoConvolution<SupportProductFirst<C, BaseP>>` of $A_*A$ and the
            // kernel of the seminorm. This depends on the physical model P being
            // `SupportProductFirst<C, BaseP>` with the kernel `K` being
            // a `SupportSum` involving `SupportProductFirst<AutoConvolution<C>, BaseP>`.
            Some(v̂.norm(L1))
        } else {
            // We cannot compare
            None
        }
    }
}
337 | ||
/// Lifts [`BoundedBy`] over a [`SupportSum`]: if `A` is bounded by `B` with
/// factor $c$, and the extra summand `C` is non-negative, then `A` is also
/// bounded by `B + C` with the same factor.
impl<F : Float, A, B, C> BoundedBy<F, SupportSum<B, C>> for A
where A : BoundedBy<F, B>,
      C : Bounded<F> {

    #[replace_float_literals(F::cast_from(literal))]
    fn bounding_factor(&self, SupportSum(ref kernel1, kernel2) : &SupportSum<B, C>) -> Option<F> {
        // A non-negative second summand can only increase the bound, so the
        // factor valid for `kernel1` alone carries over; otherwise we cannot
        // say anything.
        if kernel2.bounds().lower() >= 0.0 {
            self.bounding_factor(kernel1)
        } else {
            None
        }
    }
}
351 | ||
352 | /// Generates on $[a, b]$ [`Support::support_hint`] for a symmetric interval $[-r, r]$. | |
353 | /// | |
354 | /// It will attempt to place the subdivision point at $-r$ or $r$. | |
355 | /// If neither of these points lies within $[a, b]$, `None` is returned. | |
356 | #[inline] | |
357 | pub(super) fn symmetric_interval_hint<F : Float>(r : F, a : F, b : F) -> Option<F> { | |
358 | if a < -r && -r < b { | |
359 | Some(-r) | |
360 | } else if a < r && r < b { | |
361 | Some(r) | |
362 | } else { | |
363 | None | |
364 | } | |
365 | } | |
366 | ||
/// Generates on $[a, b]$ [`Support::support_hint`] for a function with monotone derivative,
/// support on $[-r, r]$ and peak at $0$.
///
/// It will attempt to place the subdivision point at $-r$, $r$, or $0$, depending on which
/// gives the longer length for the shorter of the two subintervals. If none of these points
/// lies within $[a, b]$, or the resulting interval would be shorter than $0.3r$, `None` is
/// returned.
#[replace_float_literals(F::cast_from(literal))]
#[inline]
pub(super) fn symmetric_peak_hint<F : Float>(r : F, a : F, b : F) -> Option<F> {
    // Stage 1: pick a candidate split point among {-r, 0, r}, depending on
    // where [a, b] lies relative to the support [-r, r] and the peak at 0.
    let stage1 = if a < -r {
        // [a, b] begins to the left of the support.
        if b <= -r {
            // Entirely left of the support: no useful split point.
            None
        } else if a + r < -b {
            // Splitting at the support boundary -r balances better here.
            Some(-r)
        } else {
            // Otherwise split at the peak.
            Some(0.0)
        }
    } else if a < 0.0 {
        // [a, b] begins inside the left half of the support.
        if b <= 0.0 {
            None
        } else if a < r - b {
            // The peak gives the better-balanced split.
            Some(0.0)
        } else {
            Some(r)
        }
    } else if a < r && b > r {
        // [a, b] begins in the right half and crosses the support boundary r.
        Some(r)
    } else {
        None
    };

    // Ignore stage1 hint if either side of subdivision would be just a small fraction of the
    // interval
    match stage1 {
        Some(h) if (h - a).min(b-h) >= 0.3 * r => Some(h),
        _ => None
    }
}
3 | 406 | |
/// Conversion of per-axis coefficient data into second-order (Taylor 2) model
/// parameters.
///
/// `Self` is typically a [`Loc`] of per-axis coefficient triples; see the
/// implementations below.
pub trait Product2Taylor<F : Float, const N : usize> : Sized {
    /// Convert `self` into [`Taylor2ModelParams`]. The tuple appears to be
    /// (constant, first-order, second-order, remainder) — confirm against the
    /// `Taylor2ModelParams` definition in `alg_tools`.
    fn product2taylor(self) -> Taylor2ModelParams<F, N>;

    /// As [`Self::product2taylor`], but with every component divided by the
    /// scaling factor `sc`.
    fn product2taylor_scaled(self, sc : F) -> Taylor2ModelParams<F, N> {
        let (a, b, c, d) = self.product2taylor();
        (a / sc, b / sc, c.map(|x| x / sc), d / sc)
    }
}
415 | ||
416 | impl<F: Float> Product2Taylor<F, 1> for Loc<Loc<F, 3>, 1> { | |
417 | fn product2taylor(self) -> Taylor2ModelParams<F, 1> { | |
418 | let Loc([Loc([a, b, c])]) = self.into(); | |
419 | (a, Loc([b]), Loc([Loc([c])]), F::ZERO) | |
420 | } | |
421 | } | |
422 | ||
impl<F: Float> Product2Taylor<F, 2> for Loc<Loc<F, 3>, 2> {
    /// Two-dimensional product rule for per-axis triples $(a_i, b_i, c_i)$:
    /// the zeroth-order term is $a_1 a_2$; the first-order terms are
    /// $(b_1 a_2, a_1 b_2)$; the second-order matrix has $c_1 a_2$ and
    /// $a_1 c_2$ on the diagonal and $b_1 b_2$ on both off-diagonals.
    fn product2taylor(self) -> Taylor2ModelParams<F, 2> {
        let Loc([Loc([a1, b1, c1]), Loc([a2, b2, c2])]) = self;
        (a1 * a2,
         Loc([b1 * a2, a1 * b2]),
         // Symmetric second-order matrix: both off-diagonal entries are b1*b2.
         Loc([Loc([c1 * a2, b1 * b2]), Loc([b1 * b2, a1 * c2])]),
         // No remainder term is tracked here.
         F::ZERO)
    }
}
432 | ||
433 | ||
434 | // // Zeroeth order term | |
435 | // let a = factors.iter().map(|&(p0, _p1, _p2)| p0).product() / sc; | |
436 | // // First order term | |
437 | // let b = factors.iter().zip(0..).map(|(&(_p0, p1, _p2), i)| { | |
438 | // (p1 / sc) * factors.iter() | |
439 | // .zip(0..) | |
440 | // .filter(|&(_, j)| j != i) | |
441 | // .map(|(&(q0, _q1, _q2), _)| q0) | |
442 | // .product() | |
443 | // }).into(); | |
444 | ||
445 | // // Second order term | |
446 | // let c = factors.iter().zip(0..).map(|(&(_p0, p1, p2), i)| { | |
447 | // factors.iter().zip(0..).map(|(&(_q0, q1, _q2), j)| { | |
448 | // if i == j { | |
449 | // (p2 /sc) * factors.iter() | |
450 | // .zip(0..) | |
451 | // .filter(|&(_, k)| k != i) | |
452 | // .map(|(&(w0, _w1, _w2), _)| w0) | |
453 | // .product() | |
454 | // } else { | |
455 | // (p1 * q1 / sc) * factors.iter() | |
456 | // .zip(0..) | |
457 | // .filter(|&(_, k)| k != i && k != j) | |
458 | // .map(|(&(w0, _w1, _w2), _)| w0) | |
459 | // .product() | |
460 | // } | |
461 | // }).into() | |
462 | // }).into(); | |
463 | // | |
464 | // (a, b, c, 0.0) |