Thu, 29 Aug 2024 00:00:00 -0500
Radon FB + sliding improvements
0 | 1 | |
2 | //! Things for constructing new kernels from component kernels and traits for analysing them | |
3 | use serde::Serialize; | |
4 | use numeric_literals::replace_float_literals; | |
5 | ||
6 | use alg_tools::types::*; | |
7 | use alg_tools::norms::*; | |
8 | use alg_tools::loc::Loc; | |
9 | use alg_tools::sets::Cube; | |
10 | use alg_tools::bisection_tree::{ | |
11 | Support, | |
12 | Bounds, | |
13 | LocalAnalysis, | |
14 | GlobalAnalysis, | |
15 | Bounded, | |
16 | }; | |
32 | 17 | use alg_tools::mapping::{Apply, Differentiable}; |
18 | use alg_tools::maputil::{array_init, map2, map1_indexed}; | |
0 | 19 | use alg_tools::sets::SetOrd; |
20 | ||
21 | use crate::fourier::Fourier; | |
32 | 22 | use crate::types::Lipschitz; |
0 | 23 | |
/// Representation of the product of two kernels.
///
/// The kernels typically implement [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
///
/// The implementation of [`Support`] only uses the hints ([`Support::support_hint`],
/// [`Support::in_support`], [`Support::bisection_hint`]) of the first parameter!
#[derive(Copy,Clone,Serialize,Debug)]
pub struct SupportProductFirst<A, B>(
    /// First kernel
    pub A,
    /// Second kernel
    pub B
);
36 | ||
37 | impl<A, B, F : Float, const N : usize> Apply<Loc<F, N>> | |
38 | for SupportProductFirst<A, B> | |
39 | where A : for<'a> Apply<&'a Loc<F, N>, Output=F>, | |
40 | B : for<'a> Apply<&'a Loc<F, N>, Output=F> { | |
41 | type Output = F; | |
42 | #[inline] | |
43 | fn apply(&self, x : Loc<F, N>) -> Self::Output { | |
44 | self.0.apply(&x) * self.1.apply(&x) | |
45 | } | |
46 | } | |
47 | ||
48 | impl<'a, A, B, F : Float, const N : usize> Apply<&'a Loc<F, N>> | |
49 | for SupportProductFirst<A, B> | |
50 | where A : Apply<&'a Loc<F, N>, Output=F>, | |
51 | B : Apply<&'a Loc<F, N>, Output=F> { | |
52 | type Output = F; | |
53 | #[inline] | |
54 | fn apply(&self, x : &'a Loc<F, N>) -> Self::Output { | |
55 | self.0.apply(x) * self.1.apply(x) | |
56 | } | |
57 | } | |
58 | ||
32 | 59 | impl<A, B, F : Float, const N : usize> Differentiable<Loc<F, N>> |
60 | for SupportProductFirst<A, B> | |
61 | where A : for<'a> Apply<&'a Loc<F, N>, Output=F> | |
62 | + for<'a> Differentiable<&'a Loc<F, N>, Output=Loc<F, N>>, | |
63 | B : for<'a> Apply<&'a Loc<F, N>, Output=F> | |
64 | + for<'a> Differentiable<&'a Loc<F, N>, Output=Loc<F, N>> { | |
65 | type Output = Loc<F, N>; | |
66 | #[inline] | |
67 | fn differential(&self, x : Loc<F, N>) -> Self::Output { | |
68 | self.0.differential(&x) * self.1.apply(&x) + self.1.differential(&x) * self.0.apply(&x) | |
69 | } | |
70 | } | |
71 | ||
72 | impl<'a, A, B, F : Float, const N : usize> Differentiable<&'a Loc<F, N>> | |
73 | for SupportProductFirst<A, B> | |
74 | where A : Apply<&'a Loc<F, N>, Output=F> | |
75 | + Differentiable<&'a Loc<F, N>, Output=Loc<F, N>>, | |
76 | B : Apply<&'a Loc<F, N>, Output=F> | |
77 | + Differentiable<&'a Loc<F, N>, Output=Loc<F, N>> { | |
78 | type Output = Loc<F, N>; | |
79 | #[inline] | |
80 | fn differential(&self, x : &'a Loc<F, N>) -> Self::Output { | |
81 | self.0.differential(&x) * self.1.apply(&x) + self.1.differential(&x) * self.0.apply(&x) | |
82 | } | |
83 | } | |
84 | ||
85 | ||
0 | 86 | impl<'a, A, B, F : Float, const N : usize> Support<F, N> |
87 | for SupportProductFirst<A, B> | |
88 | where A : Support<F, N>, | |
89 | B : Support<F, N> { | |
90 | #[inline] | |
91 | fn support_hint(&self) -> Cube<F, N> { | |
92 | self.0.support_hint() | |
93 | } | |
94 | ||
95 | #[inline] | |
96 | fn in_support(&self, x : &Loc<F, N>) -> bool { | |
97 | self.0.in_support(x) | |
98 | } | |
99 | ||
100 | #[inline] | |
101 | fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] { | |
102 | self.0.bisection_hint(cube) | |
103 | } | |
104 | } | |
105 | ||
106 | impl<'a, A, B, F : Float> GlobalAnalysis<F, Bounds<F>> | |
107 | for SupportProductFirst<A, B> | |
108 | where A : GlobalAnalysis<F, Bounds<F>>, | |
109 | B : GlobalAnalysis<F, Bounds<F>> { | |
110 | #[inline] | |
111 | fn global_analysis(&self) -> Bounds<F> { | |
112 | self.0.global_analysis() * self.1.global_analysis() | |
113 | } | |
114 | } | |
115 | ||
116 | impl<'a, A, B, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N> | |
117 | for SupportProductFirst<A, B> | |
118 | where A : LocalAnalysis<F, Bounds<F>, N>, | |
119 | B : LocalAnalysis<F, Bounds<F>, N> { | |
120 | #[inline] | |
121 | fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> { | |
122 | self.0.local_analysis(cube) * self.1.local_analysis(cube) | |
123 | } | |
124 | } | |
125 | ||
/// Representation of the sum of two kernels.
///
/// The kernels typically implement [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
///
/// The implementation of [`Support`] combines the hints of *both* parameters:
/// the support hints are merged via `common`, support tests are or-ed, and
/// bisection hints fall back from the first kernel to the second.
#[derive(Copy,Clone,Serialize,Debug)]
pub struct SupportSum<A, B>(
    /// First kernel
    pub A,
    /// Second kernel
    pub B
);
138 | ||
139 | impl<'a, A, B, F : Float, const N : usize> Apply<&'a Loc<F, N>> | |
140 | for SupportSum<A, B> | |
141 | where A : Apply<&'a Loc<F, N>, Output=F>, | |
142 | B : Apply<&'a Loc<F, N>, Output=F> { | |
143 | type Output = F; | |
144 | #[inline] | |
145 | fn apply(&self, x : &'a Loc<F, N>) -> Self::Output { | |
146 | self.0.apply(x) + self.1.apply(x) | |
147 | } | |
148 | } | |
149 | ||
150 | impl<A, B, F : Float, const N : usize> Apply<Loc<F, N>> | |
151 | for SupportSum<A, B> | |
152 | where A : for<'a> Apply<&'a Loc<F, N>, Output=F>, | |
153 | B : for<'a> Apply<&'a Loc<F, N>, Output=F> { | |
154 | type Output = F; | |
155 | #[inline] | |
156 | fn apply(&self, x : Loc<F, N>) -> Self::Output { | |
157 | self.0.apply(&x) + self.1.apply(&x) | |
158 | } | |
159 | } | |
160 | ||
32 | 161 | impl<'a, A, B, F : Float, const N : usize> Differentiable<&'a Loc<F, N>> |
162 | for SupportSum<A, B> | |
163 | where A : Differentiable<&'a Loc<F, N>, Output=Loc<F, N>>, | |
164 | B : Differentiable<&'a Loc<F, N>, Output=Loc<F, N>> { | |
165 | type Output = Loc<F, N>; | |
166 | #[inline] | |
167 | fn differential(&self, x : &'a Loc<F, N>) -> Self::Output { | |
168 | self.0.differential(x) + self.1.differential(x) | |
169 | } | |
170 | } | |
171 | ||
172 | impl<A, B, F : Float, const N : usize> Differentiable<Loc<F, N>> | |
173 | for SupportSum<A, B> | |
174 | where A : for<'a> Differentiable<&'a Loc<F, N>, Output=Loc<F, N>>, | |
175 | B : for<'a> Differentiable<&'a Loc<F, N>, Output=Loc<F, N>> { | |
176 | type Output = Loc<F, N>; | |
177 | #[inline] | |
178 | fn differential(&self, x : Loc<F, N>) -> Self::Output { | |
179 | self.0.differential(&x) + self.1.differential(&x) | |
180 | } | |
181 | } | |
182 | ||
0 | 183 | impl<'a, A, B, F : Float, const N : usize> Support<F, N> |
184 | for SupportSum<A, B> | |
185 | where A : Support<F, N>, | |
186 | B : Support<F, N>, | |
187 | Cube<F, N> : SetOrd { | |
188 | #[inline] | |
189 | fn support_hint(&self) -> Cube<F, N> { | |
190 | self.0.support_hint().common(&self.1.support_hint()) | |
191 | } | |
192 | ||
193 | #[inline] | |
194 | fn in_support(&self, x : &Loc<F, N>) -> bool { | |
195 | self.0.in_support(x) || self.1.in_support(x) | |
196 | } | |
197 | ||
198 | #[inline] | |
199 | fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] { | |
200 | map2(self.0.bisection_hint(cube), | |
201 | self.1.bisection_hint(cube), | |
202 | |a, b| a.or(b)) | |
203 | } | |
204 | } | |
205 | ||
206 | impl<'a, A, B, F : Float> GlobalAnalysis<F, Bounds<F>> | |
207 | for SupportSum<A, B> | |
208 | where A : GlobalAnalysis<F, Bounds<F>>, | |
209 | B : GlobalAnalysis<F, Bounds<F>> { | |
210 | #[inline] | |
211 | fn global_analysis(&self) -> Bounds<F> { | |
212 | self.0.global_analysis() + self.1.global_analysis() | |
213 | } | |
214 | } | |
215 | ||
216 | impl<'a, A, B, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N> | |
217 | for SupportSum<A, B> | |
218 | where A : LocalAnalysis<F, Bounds<F>, N>, | |
219 | B : LocalAnalysis<F, Bounds<F>, N>, | |
220 | Cube<F, N> : SetOrd { | |
221 | #[inline] | |
222 | fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> { | |
223 | self.0.local_analysis(cube) + self.1.local_analysis(cube) | |
224 | } | |
225 | } | |
226 | ||
32 | 227 | impl<F : Float, M : Copy, A, B> Lipschitz<M> for SupportSum<A, B> |
228 | where A : Lipschitz<M, FloatType = F>, | |
229 | B : Lipschitz<M, FloatType = F> { | |
230 | type FloatType = F; | |
231 | ||
232 | fn lipschitz_factor(&self, m : M) -> Option<F> { | |
233 | match (self.0.lipschitz_factor(m), self.1.lipschitz_factor(m)) { | |
234 | (Some(l0), Some(l1)) => Some(l0 + l1), | |
235 | _ => None | |
236 | } | |
237 | } | |
238 | } | |
239 | ||
240 | ||
/// Representation of the convolution of two kernels.
///
/// The kernels typically implement [`Support`]s and [`Mapping`][alg_tools::mapping::Mapping].
///
/// Trait implementations have to be on a case-by-case basis.
#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
pub struct Convolution<A, B>(
    /// First kernel
    pub A,
    /// Second kernel
    pub B
);
253 | ||
impl<F : Float, M, A, B> Lipschitz<M> for Convolution<A, B>
where A : Bounded<F> ,
      B : Lipschitz<M, FloatType = F> {
    type FloatType = F;

    /// Bounds the Lipschitz factor of the convolution by the factor of the
    /// second kernel scaled by a uniform bound on the first.
    // NOTE(review): the sharp estimate is Lip(a ∗ b) ≤ Lip(b)·‖a‖_{L¹}; this
    // uses the uniform (sup-norm) bound of `a` instead — confirm this is the
    // intended estimate for the kernels this is applied to.
    fn lipschitz_factor(&self, m : M) -> Option<F> {
        self.1.lipschitz_factor(m).map(|l| l * self.0.bounds().uniform())
    }
}
263 | ||
/// Representation of the autoconvolution of a kernel.
///
/// The kernel typically implements [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
///
/// Trait implementations have to be on a case-by-case basis.
#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
pub struct AutoConvolution<A>(
    /// The kernel to be autoconvolved
    pub A
);
274 | ||
impl<F : Float, M, C> Lipschitz<M> for AutoConvolution<C>
where C : Lipschitz<M, FloatType = F> + Bounded<F> {
    type FloatType = F;

    /// Bounds the Lipschitz factor of `c ∗ c` by `c`'s own Lipschitz factor
    /// scaled by a uniform bound on `c`.
    // NOTE(review): uses the sup-norm bound of `c` rather than ‖c‖_{L¹};
    // see the matching note on `Convolution`'s `Lipschitz` implementation.
    fn lipschitz_factor(&self, m : M) -> Option<F> {
        self.0.lipschitz_factor(m).map(|l| l * self.0.bounds().uniform())
    }
}
283 | ||
284 | ||
/// Representation of a multi-dimensional product of a one-dimensional kernel.
///
/// For $G: ℝ → ℝ$, this is the function $F(x\_1, …, x\_n) := \prod_{i=1}^n G(x\_i)$.
/// The kernel $G$ typically implements [`Support`] and [`Mapping`][alg_tools::mapping::Mapping]
/// on [`Loc<F, 1>`]. Then the product implements them on [`Loc<F, N>`].
#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
struct UniformProduct<G, const N : usize>(
    /// The one-dimensional kernel
    G
);
295 | ||
296 | impl<'a, G, F : Float, const N : usize> Apply<&'a Loc<F, N>> | |
297 | for UniformProduct<G, N> | |
298 | where G : Apply<Loc<F, 1>, Output=F> { | |
299 | type Output = F; | |
300 | #[inline] | |
301 | fn apply(&self, x : &'a Loc<F, N>) -> F { | |
302 | x.iter().map(|&y| self.0.apply(Loc([y]))).product() | |
303 | } | |
304 | } | |
305 | ||
306 | impl<G, F : Float, const N : usize> Apply<Loc<F, N>> | |
307 | for UniformProduct<G, N> | |
308 | where G : Apply<Loc<F, 1>, Output=F> { | |
309 | type Output = F; | |
310 | #[inline] | |
311 | fn apply(&self, x : Loc<F, N>) -> F { | |
312 | x.into_iter().map(|y| self.0.apply(Loc([y]))).product() | |
313 | } | |
314 | } | |
315 | ||
impl<'a, G, F : Float, const N : usize> Differentiable<&'a Loc<F, N>>
for UniformProduct<G, N>
where G : Apply<Loc<F, 1>, Output=F> + Differentiable<Loc<F, 1>, Output=F> {
    type Output = Loc<F, N>;
    /// Gradient of $∏_i g(x_i)$ via the product rule.
    #[inline]
    fn differential(&self, x : &'a Loc<F, N>) -> Loc<F, N> {
        // Precompute the factor values g(x_i), then let the shared helper
        // assemble the product-rule gradient from them and g'.
        let vs = x.map(|y| self.0.apply(Loc([y])));
        product_differential(x, &vs, |y| self.0.differential(Loc([y])))
    }
}
326 | ||
/// Helper function to calculate the differential of $f(x)=∏_{i=1}^N g(x_i)$.
///
/// The vector `x` is the location, `vs` consists of the values `g(x_i)`, and
/// `gd` calculates the derivative `g'`.
#[inline]
pub(crate) fn product_differential<F : Float, G : Fn(F) -> F, const N : usize>(
    x : &Loc<F, N>,
    vs : &Loc<F, N>,
    gd : G
) -> Loc<F, N> {
    // Component i of the gradient is g'(x_i) · ∏_{j≠i} g(x_j).
    // The inner product skips index i; this is O(N²) overall, which is fine
    // for the small compile-time dimensions N this is used with.
    map1_indexed(x, |i, &y| {
        gd(y) * vs.iter()
                  .zip(0..)
                  .filter_map(|(v, j)| (j != i).then_some(*v))
                  .product()
    }).into()
}
344 | ||
345 | impl<G, F : Float, const N : usize> Differentiable<Loc<F, N>> | |
346 | for UniformProduct<G, N> | |
347 | where G : Apply<Loc<F, 1>, Output=F> + Differentiable<Loc<F, 1>, Output=F> { | |
348 | type Output = Loc<F, N>; | |
349 | #[inline] | |
350 | fn differential(&self, x : Loc<F, N>) -> Loc<F, N> { | |
351 | self.differential(&x) | |
352 | } | |
353 | } | |
354 | ||
impl<G, F : Float, const N : usize> Support<F, N>
for UniformProduct<G, N>
where G : Support<F, 1> {
    /// The support is the N-fold product of the 1D kernel's support interval.
    #[inline]
    fn support_hint(&self) -> Cube<F, N> {
        // Extract the single [lo, hi] pair from the 1D hint and replicate it
        // on every axis.
        let [a] : [[F; 2]; 1] = self.0.support_hint().into();
        array_init(|| a.clone()).into()
    }

    /// A point is in the support iff every coordinate is in the 1D support.
    #[inline]
    fn in_support(&self, x : &Loc<F, N>) -> bool {
        x.iter().all(|&y| self.0.in_support(&Loc([y])))
    }

    /// Per-axis bisection hints obtained from the 1D kernel on each axis.
    #[inline]
    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
        cube.map(|a, b| {
            let [h] = self.0.bisection_hint(&[[a, b]].into());
            h
        })
    }
}
377 | ||
378 | impl<G, F : Float, const N : usize> GlobalAnalysis<F, Bounds<F>> | |
379 | for UniformProduct<G, N> | |
380 | where G : GlobalAnalysis<F, Bounds<F>> { | |
381 | #[inline] | |
382 | fn global_analysis(&self) -> Bounds<F> { | |
383 | let g = self.0.global_analysis(); | |
384 | (0..N).map(|_| g).product() | |
385 | } | |
386 | } | |
387 | ||
impl<G, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N>
for UniformProduct<G, N>
where G : LocalAnalysis<F, Bounds<F>, 1> {
    /// Local bounds on a cube: product of the 1D bounds over each axis interval.
    #[inline]
    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
        cube.iter_coords().map(
            |&[a, b]| self.0.local_analysis(&([[a, b]].into()))
        ).product()
    }
}
398 | ||
// Implements `Norm<F, $lp>` for [`UniformProduct`] in terms of the component
// norm: the implementation raises the 1D norm to the Nth power, which is the
// factorisation valid for separable products.
macro_rules! product_lpnorm {
    ($lp:ident) => {
        impl<G, F : Float, const N : usize> Norm<F, $lp>
        for UniformProduct<G, N>
        where G : Norm<F, $lp> {
            #[inline]
            fn norm(&self, lp : $lp) -> F {
                self.0.norm(lp).powi(N as i32)
            }
        }
    }
}

product_lpnorm!(L1);
product_lpnorm!(L2);
product_lpnorm!(Linfinity);
415 | ||
416 | ||
/// Trait for bounding one kernel with respect to another.
///
/// The type `F` is the scalar field, and `T` another kernel to which `Self` is compared.
pub trait BoundedBy<F : Num, T> {
    /// Calculate a bounding factor $c$ such that the Fourier transforms $ℱ\[v\] ≤ c ℱ\[u\]$ for
    /// $v$ `self` and $u$ `other`.
    ///
    /// If no such factor exists, `None` is returned.
    fn bounding_factor(&self, other : &T) -> Option<F>;
}
427 | ||
/// This [`BoundedBy`] implementation bounds $(uv) * (uv)$ by $(ψ * ψ) u$.
#[replace_float_literals(F::cast_from(literal))]
impl<F, C, BaseP>
BoundedBy<F, SupportProductFirst<AutoConvolution<C>, BaseP>>
for AutoConvolution<SupportProductFirst<C, BaseP>>
where F : Float,
      C : Clone + PartialEq,
      BaseP : Fourier<F> + PartialOrd, // TODO: replace by BoundedBy,
      <BaseP as Fourier<F>>::Transformed : Bounded<F> + Norm<F, L1> {

    fn bounding_factor(&self, kernel : &SupportProductFirst<AutoConvolution<C>, BaseP>) -> Option<F> {
        // In the doc-comment's notation: `self` is (uv)∗(uv) and `kernel` is (ψ∗ψ)u.
        let SupportProductFirst(AutoConvolution(ref cutoff2), base_spread2) = kernel;
        let AutoConvolution(SupportProductFirst(ref cutoff, ref base_spread)) = self;
        let v̂ = base_spread.fourier();

        // Verify that the cut-off and ideal physical model (base spread) are the same,
        // and that the Fourier transform of the base spread is nonnegative.
        if cutoff == cutoff2
           && base_spread <= base_spread2
           && v̂.bounds().lower() >= 0.0 {
            // Calculate the factor between the convolution approximation
            // `AutoConvolution<SupportProductFirst<C, BaseP>>` of $A_*A$ and the
            // kernel of the seminorm. This depends on the physical model P being
            // `SupportProductFirst<C, BaseP>` with the kernel `K` being
            // a `SupportSum` involving `SupportProductFirst<AutoConvolution<C>, BaseP>`.
            Some(v̂.norm(L1))
        } else {
            // We cannot compare
            None
        }
    }
}
459 | ||
/// If `A` is bounded by `B`, it is also bounded by the sum `B + C` whenever
/// the extra term `C` is nonnegative.
impl<F : Float, A, B, C> BoundedBy<F, SupportSum<B, C>> for A
where A : BoundedBy<F, B>,
      C : Bounded<F> {

    #[replace_float_literals(F::cast_from(literal))]
    fn bounding_factor(&self, SupportSum(ref kernel1, kernel2) : &SupportSum<B, C>) -> Option<F> {
        // The factor against `kernel1` stays valid because `kernel2` only
        // adds a nonnegative contribution; otherwise no bound can be given.
        if kernel2.bounds().lower() >= 0.0 {
            self.bounding_factor(kernel1)
        } else {
            None
        }
    }
}
473 | ||
474 | /// Generates on $[a, b]$ [`Support::support_hint`] for a symmetric interval $[-r, r]$. | |
475 | /// | |
476 | /// It will attempt to place the subdivision point at $-r$ or $r$. | |
477 | /// If neither of these points lies within $[a, b]$, `None` is returned. | |
478 | #[inline] | |
479 | pub(super) fn symmetric_interval_hint<F : Float>(r : F, a : F, b : F) -> Option<F> { | |
480 | if a < -r && -r < b { | |
481 | Some(-r) | |
482 | } else if a < r && r < b { | |
483 | Some(r) | |
484 | } else { | |
485 | None | |
486 | } | |
487 | } | |
488 | ||
/// Generates on $[a, b]$ [`Support::support_hint`] for a function with monotone derivative,
/// support on $[-r, r]$ and peak at $0$.
///
/// It will attempt to place the subdivision point at $-r$, $r$, or $0$, depending on which
/// gives the longer length for the shorter of the two subintervals. If none of these points
/// lies within $[a, b]$, or the resulting interval would be shorter than $0.3r$, `None` is
/// returned.
#[replace_float_literals(F::cast_from(literal))]
#[inline]
pub(super) fn symmetric_peak_hint<F : Float>(r : F, a : F, b : F) -> Option<F> {
    // Stage 1: pick a candidate among {−r, 0, r} according to where [a, b]
    // lies relative to the support [−r, r] and the peak at 0.
    let stage1 = if a < -r {
        if b <= -r {
            // [a, b] lies entirely to the left of the support.
            None
        } else if a + r < -b {
            Some(-r)
        } else {
            Some(0.0)
        }
    } else if a < 0.0 {
        if b <= 0.0 {
            None
        } else if a < r - b {
            Some(0.0)
        } else {
            Some(r)
        }
    } else if a < r && b > r {
        Some(r)
    } else {
        None
    };

    // Ignore stage1 hint if either side of subdivision would be just a small fraction of the
    // interval
    match stage1 {
        Some(h) if (h - a).min(b-h) >= 0.3 * r => Some(h),
        _ => None
    }
}