//! Implementation of the gaussian kernel.

use float_extras::f64::erf;
use numeric_literals::replace_float_literals;
use serde::Serialize;
use alg_tools::types::*;
use alg_tools::euclidean::Euclidean;
use alg_tools::norms::*;
use alg_tools::loc::Loc;
use alg_tools::sets::Cube;
use alg_tools::bisection_tree::{
    Support,
    Constant,
    Bounds,
    LocalAnalysis,
    GlobalAnalysis,
    Weighted,
    Bounded,
};
use alg_tools::mapping::Apply;
use alg_tools::maputil::array_init;

use crate::fourier::Fourier;
use super::base::*;
use super::ball_indicator::CubeIndicator;

/// Storage presentation of the isotropic gaussian kernel of `variance` $σ^2$.
///
/// This is the function $f(x) = C e^{-\\|x\\|\_2^2/(2σ^2)}$ for $x ∈ ℝ^N$
/// with $C=1/(2πσ^2)^{N/2}$.
#[derive(Copy,Clone,Debug,Serialize,Eq)]
pub struct Gaussian<S : Constant, const N : usize> {
    /// The variance $σ^2$.
    pub variance : S,
}
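
// A small usage sketch of the formula above (assuming plain floats implement
// [`Constant`], as `fourier()` below already relies on): at the origin the kernel
// attains its maximum $C = 1/(2πσ^2)^{N/2}$, i.e. `1.0/self.scale()`.
#[cfg(test)]
#[test]
fn gaussian_peak_is_scaling_constant() {
    let g : Gaussian<f64, 2> = Gaussian { variance : 0.25 };
    // For N = 2 and σ² = 1/4 we have (2πσ²)^{N/2} = π/2, so C = 2/π.
    let c = 2.0 / std::f64::consts::PI;
    assert!((g.apply(Loc::ORIGIN) - c).abs() < 1e-12);
}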

impl<S1, S2, const N : usize> PartialEq<Gaussian<S2, N>> for Gaussian<S1, N>
where S1 : Constant,
      S2 : Constant<Type=S1::Type> {
    fn eq(&self, other : &Gaussian<S2, N>) -> bool {
        self.variance.value() == other.variance.value()
    }
}

impl<S1, S2, const N : usize> PartialOrd<Gaussian<S2, N>> for Gaussian<S1, N>
where S1 : Constant,
      S2 : Constant<Type=S1::Type> {

    fn partial_cmp(&self, other : &Gaussian<S2, N>) -> Option<std::cmp::Ordering> {
        // A gaussian is ≤ another gaussian if the Fourier transforms satisfy the
        // corresponding inequality. That in turn holds if and only if the variances
        // satisfy the opposite inequality.
        let σ1sq = self.variance.value();
        let σ2sq = other.variance.value();
        σ2sq.partial_cmp(&σ1sq)
    }
}
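
// A sanity-check sketch of the ordering above (again assuming plain floats
// implement [`Constant`]): by `fourier()` below, the transform of a gaussian of
// variance $σ^2$ is $ξ ↦ e^{-2π^2σ^2‖ξ‖^2}$, pointwise nonincreasing in $σ^2$,
// so the gaussian with the smaller variance is the larger one in this order.
#[cfg(test)]
#[test]
fn gaussian_order_reverses_variance_order() {
    let narrow : Gaussian<f64, 1> = Gaussian { variance : 1.0 };
    let wide : Gaussian<f64, 1> = Gaussian { variance : 2.0 };
    assert!(narrow >= wide);
    assert!(wide <= narrow);
}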


#[replace_float_literals(S::Type::cast_from(literal))]
impl<'a, S, const N : usize> Apply<&'a Loc<S::Type, N>> for Gaussian<S, N>
where S : Constant {
    type Output = S::Type;
    // Note: this is not normalised to have value 1 at zero; the scaling $1/C$
    // makes it have unit integral instead.
    #[inline]
    fn apply(&self, x : &'a Loc<S::Type, N>) -> Self::Output {
        let d_squared = x.norm2_squared();
        let σ2 = self.variance.value();
        let scale = self.scale();
        (-d_squared / (2.0 * σ2)).exp() / scale
    }
}

impl<S, const N : usize> Apply<Loc<S::Type, N>> for Gaussian<S, N>
where S : Constant {
    type Output = S::Type;
    // Note: this is not normalised to have value 1 at zero; the scaling $1/C$
    // makes it have unit integral instead.
    #[inline]
    fn apply(&self, x : Loc<S::Type, N>) -> Self::Output {
        self.apply(&x)
    }
}


#[replace_float_literals(S::Type::cast_from(literal))]
impl<'a, S, const N : usize> Gaussian<S, N>
where S : Constant {

    /// Returns the (reciprocal) scaling constant $1/C=(2πσ^2)^{N/2}$.
    #[inline]
    pub fn scale(&self) -> S::Type {
        let π = S::Type::PI;
        let σ2 = self.variance.value();
        (2.0*π*σ2).powi(N as i32).sqrt()
    }
}
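
// Note on the constant: per coordinate $∫_ℝ e^{-x^2/(2σ^2)} dx = (2πσ^2)^{1/2}$,
// so the unnormalised kernel on $ℝ^N$ has total mass $(2πσ^2)^{N/2}$. Dividing by
// `scale()` in `apply` above therefore yields a kernel of unit integral.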

impl<'a, S, const N : usize> Support<S::Type, N> for Gaussian<S, N>
where S : Constant {
    #[inline]
    fn support_hint(&self) -> Cube<S::Type,N> {
        array_init(|| [S::Type::NEG_INFINITY, S::Type::INFINITY]).into()
    }

    #[inline]
    fn in_support(&self, _x : &Loc<S::Type,N>) -> bool {
        true
    }
}

#[replace_float_literals(S::Type::cast_from(literal))]
impl<S, const N : usize> GlobalAnalysis<S::Type, Bounds<S::Type>> for Gaussian<S, N>
where S : Constant {
    #[inline]
    fn global_analysis(&self) -> Bounds<S::Type> {
        Bounds(0.0, 1.0/self.scale())
    }
}

impl<S, const N : usize> LocalAnalysis<S::Type, Bounds<S::Type>, N> for Gaussian<S, N>
where S : Constant {
    #[inline]
    fn local_analysis(&self, cube : &Cube<S::Type, N>) -> Bounds<S::Type> {
        // The function is maximised/minimised where the 2-norm is minimised/maximised.
        let lower = self.apply(cube.maxnorm_point());
        let upper = self.apply(cube.minnorm_point());
        Bounds(lower, upper)
    }
}

#[replace_float_literals(C::Type::cast_from(literal))]
impl<'a, C : Constant, const N : usize> Norm<C::Type, L1>
for Gaussian<C, N> {
    #[inline]
    fn norm(&self, _ : L1) -> C::Type {
        1.0
    }
}

#[replace_float_literals(C::Type::cast_from(literal))]
impl<'a, C : Constant, const N : usize> Norm<C::Type, Linfinity>
for Gaussian<C, N> {
    #[inline]
    fn norm(&self, _ : Linfinity) -> C::Type {
        self.bounds().upper()
    }
}

#[replace_float_literals(C::Type::cast_from(literal))]
impl<'a, C : Constant, const N : usize> Fourier<C::Type>
for Gaussian<C, N> {
    type Domain = Loc<C::Type, N>;
    type Transformed = Weighted<Gaussian<C::Type, N>, C::Type>;

    #[inline]
    fn fourier(&self) -> Self::Transformed {
        let π = C::Type::PI;
        let σ2 = self.variance.value();
        let g = Gaussian { variance : 1.0 / (4.0*π*π*σ2) };
        g.weigh(g.scale())
    }
}
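
// Sketch of the transform pair behind `fourier()`, consistent with the unitary
// convention $\hat f(ξ) = ∫ f(x) e^{-2πi x·ξ} dx$: the unit-mass gaussian of
// variance $σ^2$ has transform $e^{-2π^2σ^2‖ξ‖^2}$. Writing this as a gaussian of
// variance $τ^2 = 1/(4π^2σ^2)$ requires undoing that gaussian's own normalisation,
// that is, weighting it by $(2πτ^2)^{N/2}$, which is the `g.weigh(g.scale())` above.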

/// Representation of the “cut” gaussian $f χ\_{[-a, a]^n}$
/// where $a>0$ and $f$ is a gaussian kernel on $ℝ^n$.
pub type BasicCutGaussian<C, S, const N : usize> = SupportProductFirst<CubeIndicator<C, N>,
                                                                       Gaussian<S, N>>;


/// This implements $χ\_{[-b, b]^n} \* (f χ\_{[-a, a]^n})$
/// where $a,b>0$ and $f$ is a gaussian kernel on $ℝ^n$.
#[replace_float_literals(F::cast_from(literal))]
impl<'a, F : Float, R, C, S, const N : usize> Apply<&'a Loc<F, N>>
for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F>,
      S : Constant<Type=F> {

    type Output = F;

    #[inline]
    fn apply(&self, y : &'a Loc<F, N>) -> F {
        let Convolution(ref ind,
                        SupportProductFirst(ref cut,
                                            ref gaussian)) = self;
        let a = cut.r.value();
        let b = ind.r.value();
        let σ = gaussian.variance.value().sqrt();
        let π = F::PI;
        let t = F::SQRT_2 * σ;
        // Scaling of the one-dimensional integral ∫ e^{-x²/(2σ²)} dx in terms of
        // erf; see the note after this impl.
        let c = σ * (π/2.0).sqrt();

        // This is just a product of one-dimensional versions
        let unscaled = y.product_map(|x| {
            let c1 = -(a.min(b + x)); //(-a).max(-x-b);
            let c2 = a.min(b - x);
            if c1 >= c2 {
                0.0
            } else {
                let e1 = F::cast_from(erf((c1 / t).as_()));
                let e2 = F::cast_from(erf((c2 / t).as_()));
                debug_assert!(e2 >= e1);
                c * (e2 - e1)
            }
        });

        unscaled / gaussian.scale()
    }
}
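
// Derivation sketch for the one-dimensional factors above: per coordinate,
//   (χ_{[-b,b]} * e^{-(·)²/(2σ²)} χ_{[-a,a]})(x) = ∫_{c1}^{c2} e^{-z²/(2σ²)} dz
// with c1 = -(a.min(b + x)) and c2 = a.min(b - x), using that both factors are even.
// Substituting z = σ√2·u with erf(s) = (2/√π) ∫_0^s e^{-u²} du gives
//   ∫_{c1}^{c2} e^{-z²/(2σ²)} dz = σ√(π/2) · (erf(c2/(σ√2)) - erf(c1/(σ√2))),
// which explains the constants `t = σ√2` and `c = σ√(π/2)` in `apply`. The final
// division by `gaussian.scale()` = (2πσ²)^{N/2} normalises the gaussian to unit
// mass, so each one-dimensional factor lies in [0, 1].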

impl<F : Float, R, C, S, const N : usize> Apply<Loc<F, N>>
for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F>,
      S : Constant<Type=F> {

    type Output = F;

    #[inline]
    fn apply(&self, y : Loc<F, N>) -> F {
        self.apply(&y)
    }
}

impl<F : Float, R, C, S, const N : usize>
Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F>,
      S : Constant<Type=F> {

    #[inline]
    fn get_r(&self) -> F {
        let Convolution(ref ind,
                        SupportProductFirst(ref cut, ..)) = self;
        ind.r.value() + cut.r.value()
    }
}

impl<F : Float, R, C, S, const N : usize> Support<F, N>
for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F>,
      S : Constant<Type=F> {
    #[inline]
    fn support_hint(&self) -> Cube<F, N> {
        let r = self.get_r();
        array_init(|| [-r, r]).into()
    }

    #[inline]
    fn in_support(&self, y : &Loc<F, N>) -> bool {
        let r = self.get_r();
        y.iter().all(|x| x.abs() <= r)
    }

    #[inline]
    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
        let r = self.get_r();
        // From c1 = -(a.min(b + x)) and c2 = a.min(b - x) with c1 < c2 on the support,
        // solve bounds for x; that is, require 0 ≤ a.min(b + x) + a.min(b - x).
        // If b + x ≤ a and b - x ≤ a, the sum is 2b ≥ 0.
        // If b + x ≥ a and b - x ≥ a, the sum is 2a ≥ 0.
        // If b + x ≤ a and b - x ≥ a, the sum is b + x + a ⟹ need x ≥ -a - b = -r.
        // If b + x ≥ a and b - x ≤ a, the sum is a + b - x ⟹ need x ≤ a + b = r.
        cube.map(|c, d| symmetric_peak_hint(r, c, d))
    }
}

impl<F : Float, R, C, S, const N : usize> GlobalAnalysis<F, Bounds<F>>
for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F>,
      S : Constant<Type=F> {
    #[inline]
    fn global_analysis(&self) -> Bounds<F> {
        // Each one-dimensional factor is a convolution of even functions that are
        // nonincreasing in |x|, hence even and maximised at the origin.
        Bounds(F::ZERO, self.apply(Loc::ORIGIN))
    }
}

impl<F : Float, R, C, S, const N : usize> LocalAnalysis<F, Bounds<F>, N>
for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F>,
      S : Constant<Type=F> {
    #[inline]
    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
        // The function is maximised/minimised where the coordinatewise absolute
        // values are minimised/maximised.
        let lower = self.apply(cube.maxnorm_point());
        let upper = self.apply(cube.minnorm_point());
        Bounds(lower, upper)
    }
}