src/kernels/base.rs

changeset 52:f0e8704d3f0e
parent    35:b087e3eab191
child     38:0f59c0d02e13

comparison of 31:6105b5cd8d89 and 52:f0e8704d3f0e
    Bounds,
    LocalAnalysis,
    GlobalAnalysis,
    Bounded,
};
use alg_tools::mapping::{
    Mapping,
    DifferentiableImpl,
    DifferentiableMapping,
    Differential,
};
use alg_tools::instance::{Instance, Space};
use alg_tools::maputil::{array_init, map2, map1_indexed};
use alg_tools::sets::SetOrd;

use crate::fourier::Fourier;
use crate::types::*;

/// Representation of the product of two kernels.
///
/// The kernels typically implement [`Support`] and [`Mapping`].
///
/// The implementation of [`Support`] only uses the [`Support::support_hint`] of the first parameter!
#[derive(Copy,Clone,Serialize,Debug)]
pub struct SupportProductFirst<A, B>(
    /// First kernel
    pub A,
    /// Second kernel
    pub B
);

impl<A, B, F : Float, const N : usize> Mapping<Loc<F, N>>
for SupportProductFirst<A, B>
where
    A : Mapping<Loc<F, N>, Codomain = F>,
    B : Mapping<Loc<F, N>, Codomain = F>,
{
    type Codomain = F;

    #[inline]
    fn apply<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Codomain {
        self.0.apply(x.ref_instance()) * self.1.apply(x)
    }
}

impl<A, B, F : Float, const N : usize> DifferentiableImpl<Loc<F, N>>
for SupportProductFirst<A, B>
where
    A : DifferentiableMapping<
        Loc<F, N>,
        DerivativeDomain=Loc<F, N>,
        Codomain = F
    >,
    B : DifferentiableMapping<
        Loc<F, N>,
        DerivativeDomain=Loc<F, N>,
        Codomain = F,
    >
{
    type Derivative = Loc<F, N>;

    #[inline]
    fn differential_impl<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Derivative {
        let xr = x.ref_instance();
        self.0.differential(xr) * self.1.apply(xr) + self.1.differential(xr) * self.0.apply(x)
    }
}

impl<A, B, M : Copy, F : Float> Lipschitz<M>
for SupportProductFirst<A, B>
where A : Lipschitz<M, FloatType = F> + Bounded<F>,
      B : Lipschitz<M, FloatType = F> + Bounded<F> {
    type FloatType = F;
    #[inline]
    fn lipschitz_factor(&self, m : M) -> Option<F> {
        // f(x)g(x) - f(y)g(y) = f(x)[g(x)-g(y)] - [f(y)-f(x)]g(y)
        let &SupportProductFirst(ref f, ref g) = self;
        f.lipschitz_factor(m).map(|l| l * g.bounds().uniform())
            .zip(g.lipschitz_factor(m).map(|l| l * f.bounds().uniform()))
            .map(|(a, b)| a + b)
    }
}
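
// In other words, if L_f and L_g are Lipschitz factors of f and g, and M_f and M_g are
// uniform bounds on |f| and |g| (which is what `bounds().uniform()` is taken to provide),
// then the identity above gives
//    |f(x)g(x) - f(y)g(y)| ≤ M_f L_g |x-y| + L_f M_g |x-y|,
// so the sum L_f M_g + L_g M_f computed above is a Lipschitz factor of the product fg.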

impl<'a, A, B, M : Copy, Domain, F : Float> Lipschitz<M>
for Differential<'a, Domain, SupportProductFirst<A, B>>
where
    Domain : Space,
    A : Clone + DifferentiableMapping<Domain> + Lipschitz<M, FloatType = F> + Bounded<F>,
    B : Clone + DifferentiableMapping<Domain> + Lipschitz<M, FloatType = F> + Bounded<F>,
    SupportProductFirst<A, B> : DifferentiableMapping<Domain>,
    for<'b> A::Differential<'b> : Lipschitz<M, FloatType = F> + NormBounded<L2, FloatType=F>,
    for<'b> B::Differential<'b> : Lipschitz<M, FloatType = F> + NormBounded<L2, FloatType=F>
{
    type FloatType = F;
    #[inline]
    fn lipschitz_factor(&self, m : M) -> Option<F> {
        // ∇[gf] = f∇g + g∇f
        // ⟹ ∇[gf](x) - ∇[gf](y) = f(x)∇g(x) + g(x)∇f(x) - f(y)∇g(y) - g(y)∇f(y)
        //                        = f(x)[∇g(x)-∇g(y)] + g(x)∇f(x) - [f(y)-f(x)]∇g(y) - g(y)∇f(y)
        //                        = f(x)[∇g(x)-∇g(y)] + g(x)[∇f(x)-∇f(y)]
        //                          - [f(y)-f(x)]∇g(y) - [g(y)-g(x)]∇f(y)
        let &SupportProductFirst(ref f, ref g) = self.base_fn();
        let (df, dg) = (f.diff_ref(), g.diff_ref());
        [
            df.lipschitz_factor(m).map(|l| l * g.bounds().uniform()),
            dg.lipschitz_factor(m).map(|l| l * f.bounds().uniform()),
            f.lipschitz_factor(m).map(|l| l * dg.norm_bound(L2)),
            g.lipschitz_factor(m).map(|l| l * df.norm_bound(L2))
        ].into_iter().sum()
    }
}
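
// Taking norms in the last expansion above: if L_f, L_g, L_{∇f}, L_{∇g} are Lipschitz
// factors of f, g, ∇f, ∇g, and M_f, M_g, M_{∇f}, M_{∇g} are uniform bounds on them
// (with `norm_bound(L2)` taken to bound the differentials), then
//    |∇[fg](x) - ∇[fg](y)| ≤ (L_{∇f} M_g + L_{∇g} M_f + L_f M_{∇g} + L_g M_{∇f}) |x-y|,
// which is exactly the sum of the four terms collected in the array above.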

impl<'a, A, B, F : Float, const N : usize> Support<F, N>
for SupportProductFirst<A, B>
where
    A : Support<F, N>,
    B : Support<F, N>
{
    #[inline]
    fn support_hint(&self) -> Cube<F, N> {
        self.0.support_hint()
    }

    // ... unchanged lines omitted ...

    }
}

/// Representation of the sum of two kernels.
///
/// The kernels typically implement [`Support`] and [`Mapping`].
///
/// The implementation of [`Support`] uses the [`Support::support_hint`] of both parameters.
#[derive(Copy,Clone,Serialize,Debug)]
pub struct SupportSum<A, B>(
    /// First kernel
    pub A,
    /// Second kernel
    pub B
);

impl<'a, A, B, F : Float, const N : usize> Mapping<Loc<F, N>>
for SupportSum<A, B>
where
    A : Mapping<Loc<F, N>, Codomain = F>,
    B : Mapping<Loc<F, N>, Codomain = F>,
{
    type Codomain = F;

    #[inline]
    fn apply<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Codomain {
        self.0.apply(x.ref_instance()) + self.1.apply(x)
    }
}

impl<'a, A, B, F : Float, const N : usize> DifferentiableImpl<Loc<F, N>>
for SupportSum<A, B>
where
    A : DifferentiableMapping<
        Loc<F, N>,
        DerivativeDomain = Loc<F, N>
    >,
    B : DifferentiableMapping<
        Loc<F, N>,
        DerivativeDomain = Loc<F, N>,
    >
{
    type Derivative = Loc<F, N>;

    #[inline]
    fn differential_impl<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Derivative {
        self.0.differential(x.ref_instance()) + self.1.differential(x)
    }
}

impl<'a, A, B, F : Float, const N : usize> Support<F, N>
for SupportSum<A, B>
where A : Support<F, N>,
      B : Support<F, N>,
      Cube<F, N> : SetOrd {

    #[inline]
    fn support_hint(&self) -> Cube<F, N> {
        self.0.support_hint().common(&self.1.support_hint())
    }

    // ... unchanged lines omitted ...

    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
        self.0.local_analysis(cube) + self.1.local_analysis(cube)
    }
}

impl<F : Float, M : Copy, A, B> Lipschitz<M> for SupportSum<A, B>
where A : Lipschitz<M, FloatType = F>,
      B : Lipschitz<M, FloatType = F> {
    type FloatType = F;

    fn lipschitz_factor(&self, m : M) -> Option<F> {
        // A sum of Lipschitz functions is Lipschitz with the sum of the factors.
        match (self.0.lipschitz_factor(m), self.1.lipschitz_factor(m)) {
            (Some(l0), Some(l1)) => Some(l0 + l1),
            _ => None
        }
    }
}

impl<'b, F : Float, M : Copy, A, B, Domain> Lipschitz<M>
for Differential<'b, Domain, SupportSum<A, B>>
where
    Domain : Space,
    A : Clone + DifferentiableMapping<Domain, Codomain=F>,
    B : Clone + DifferentiableMapping<Domain, Codomain=F>,
    SupportSum<A, B> : DifferentiableMapping<Domain, Codomain=F>,
    for<'a> A::Differential<'a> : Lipschitz<M, FloatType = F>,
    for<'a> B::Differential<'a> : Lipschitz<M, FloatType = F>
{
    type FloatType = F;

    fn lipschitz_factor(&self, m : M) -> Option<F> {
        // ∇[f+g] = ∇f + ∇g, so the sum of the factors of the differentials works.
        let base = self.base_fn();
        base.0.diff_ref().lipschitz_factor(m)
            .zip(base.1.diff_ref().lipschitz_factor(m))
            .map(|(a, b)| a + b)
    }
}

/// Representation of the convolution of two kernels.
///
/// The kernels typically implement [`Support`] and [`Mapping`].
///
/// Trait implementations have to be on a case-by-case basis.
#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
pub struct Convolution<A, B>(
    /// First kernel
    pub A,
    /// Second kernel
    pub B
);

impl<F : Float, M, A, B> Lipschitz<M> for Convolution<A, B>
where A : Norm<F, L1>,
      B : Lipschitz<M, FloatType = F> {
    type FloatType = F;

    fn lipschitz_factor(&self, m : M) -> Option<F> {
        // For [f * g](x) = ∫ f(y)g(x-y) dy we have
        // [f * g](x) - [f * g](z) = ∫ f(y)[g(x-y)-g(z-y)] dy.
        // Hence |[f * g](x) - [f * g](z)| ≤ ∫ |f(y)| |g(x-y)-g(z-y)| dy
        //                                 ≤ L_g |x-z| ∫ |f(y)| dy,
        // where L_g is the Lipschitz factor of g = `self.1`.
        self.1.lipschitz_factor(m).map(|l| l * self.0.norm(L1))
    }
}
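
// For instance, if f is a nonnegative mollifier with ∫ f = 1 (so ‖f‖_{L¹} = 1), the bound
// above says that f * g inherits the Lipschitz factor of g: convolution with a unit-mass
// kernel does not increase the Lipschitz constant.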

impl<'b, F : Float, M, A, B, Domain> Lipschitz<M>
for Differential<'b, Domain, Convolution<A, B>>
where
    Domain : Space,
    A : Clone + Norm<F, L1>,
    Convolution<A, B> : DifferentiableMapping<Domain, Codomain=F>,
    B : Clone + DifferentiableMapping<Domain, Codomain=F>,
    for<'a> B::Differential<'a> : Lipschitz<M, FloatType = F>
{
    type FloatType = F;

    fn lipschitz_factor(&self, m : M) -> Option<F> {
        // For [f * g](x) = ∫ f(y)g(x-y) dy we have
        // ∇[f * g](x) - ∇[f * g](z) = ∫ f(y)[∇g(x-y)-∇g(z-y)] dy.
        // Hence |∇[f * g](x) - ∇[f * g](z)| ≤ ∫ |f(y)| |∇g(x-y)-∇g(z-y)| dy
        //                                   ≤ L_{∇g} |x-z| ∫ |f(y)| dy,
        // where L_{∇g} is the Lipschitz factor of ∇g.
        let base = self.base_fn();
        base.1.diff_ref().lipschitz_factor(m).map(|l| l * base.0.norm(L1))
    }
}

/// Representation of the autoconvolution of a kernel.
///
/// The kernel typically implements [`Support`] and [`Mapping`].
///
/// Trait implementations have to be on a case-by-case basis.
#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
pub struct AutoConvolution<A>(
    /// The kernel to be autoconvolved
    pub A
);

impl<F : Float, M, C> Lipschitz<M> for AutoConvolution<C>
where C : Lipschitz<M, FloatType = F> + Norm<F, L1> {
    type FloatType = F;

    fn lipschitz_factor(&self, m : M) -> Option<F> {
        // `AutoConvolution(g)` represents g * g, so the [`Convolution`] bound applies
        // with f = g: a Lipschitz factor is L_g ‖g‖_{L¹}.
        self.0.lipschitz_factor(m).map(|l| l * self.0.norm(L1))
    }
}

impl<'b, F : Float, M, C, Domain> Lipschitz<M>
for Differential<'b, Domain, AutoConvolution<C>>
where
    Domain : Space,
    C : Clone + Norm<F, L1> + DifferentiableMapping<Domain, Codomain=F>,
    AutoConvolution<C> : DifferentiableMapping<Domain, Codomain=F>,
    for<'a> C::Differential<'a> : Lipschitz<M, FloatType = F>
{
    type FloatType = F;

    fn lipschitz_factor(&self, m : M) -> Option<F> {
        // As above, with f = g = `self.0`: the factor is L_{∇g} ‖g‖_{L¹}.
        let base = self.base_fn();
        base.0.diff_ref().lipschitz_factor(m).map(|l| l * base.0.norm(L1))
    }
}

/// Representation of a multi-dimensional product of a one-dimensional kernel.
///
/// For $G: ℝ → ℝ$, this is the function $F(x\_1, …, x\_n) := \prod_{i=1}^n G(x\_i)$.
/// The kernel $G$ typically implements [`Support`] and [`Mapping`]
/// on [`Loc<F, 1>`]. Then the product implements them on [`Loc<F, N>`].
#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
#[allow(dead_code)]
struct UniformProduct<G, const N : usize>(
    /// The one-dimensional kernel
    G
);

impl<'a, G, F : Float, const N : usize> Mapping<Loc<F, N>>
for UniformProduct<G, N>
where
    G : Mapping<Loc<F, 1>, Codomain = F>
{
    type Codomain = F;

    #[inline]
    fn apply<I : Instance<Loc<F, N>>>(&self, x : I) -> F {
        x.cow().iter().map(|&y| self.0.apply(Loc([y]))).product()
    }
}

impl<'a, G, F : Float, const N : usize> DifferentiableImpl<Loc<F, N>>
for UniformProduct<G, N>
where
    G : DifferentiableMapping<
        Loc<F, 1>,
        DerivativeDomain = F,
        Codomain = F,
    >
{
    type Derivative = Loc<F, N>;

    #[inline]
    fn differential_impl<I : Instance<Loc<F, N>>>(&self, x0 : I) -> Loc<F, N> {
        x0.eval(|x| {
            let vs = x.map(|y| self.0.apply(Loc([y])));
            product_differential(x, &vs, |y| self.0.differential(Loc([y])))
        })
    }
}

/// Helper function to calculate the differential of $f(x)=∏_{i=1}^N g(x_i)$.
///
/// The vector `x` is the location, `vs` consists of the values `g(x_i)`, and
/// `gd` calculates the derivative `g'`.
#[inline]
pub(crate) fn product_differential<F : Float, G : Fn(F) -> F, const N : usize>(
    x : &Loc<F, N>,
    vs : &Loc<F, N>,
    gd : G
) -> Loc<F, N> {
    map1_indexed(x, |i, &y| {
        gd(y) * vs.iter()
                  .zip(0..)
                  .filter_map(|(v, j)| (j != i).then_some(*v))
                  .product()
    }).into()
}
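
// For example, with N = 3 the product rule gives
//    ∇f(x) = ( g'(x_1) g(x_2) g(x_3),
//              g(x_1) g'(x_2) g(x_3),
//              g(x_1) g(x_2) g'(x_3) ),
// i.e., component i equals gd(x_i) times the product of the vs[j] with j ≠ i, which is
// what the `map1_indexed` closure above computes.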

/// Helper function to calculate the Lipschitz factor of $∇f$ for $f(x)=∏_{i=1}^N g(x_i)$.
///
/// The parameter `bound` is a bound on $|g|_∞$, `lip` is a Lipschitz factor for $g$,
/// `dbound` is a bound on $|∇g|_∞$, and `dlip` a Lipschitz factor for $∇g$.
#[inline]
pub(crate) fn product_differential_lipschitz_factor<F : Float, const N : usize>(
    bound : F,
    lip : F,
    dbound : F,
    dlip : F
) -> F {
    // For arbitrary ψ(x) = ∏_{i=1}^n ψ_i(x_i), we have
    //    ψ(x) - ψ(y) = ∑_i [ψ_i(x_i)-ψ_i(y_i)] ∏_{j ≠ i} ψ_j(x_j)
    // by a simple telescoping argument (here and below each remaining factor ψ_j may be
    // evaluated at x_j or y_j depending on the telescoping order; only a uniform bound
    // on |ψ_j| is used). In particular, if ψ_i = g for all i, we have
    //    |ψ(x) - ψ(y)| ≤ ∑_i L_g M_g^{n-1}|x-y|,
    // where L_g is the Lipschitz factor of g, and M_g a uniform bound on |g|.
    //
    // We also have in the general case ∇ψ(x) = ∑_i ∇ψ_i(x_i) ∏_{j ≠ i} ψ_j(x_j), whence
    // using the previous formula for each i with f_i=∇ψ_i and f_j=ψ_j for j ≠ i, we get
    //    ∇ψ(x) - ∇ψ(y) = ∑_i[ ∇ψ_i(x_i)∏_{j ≠ i} ψ_j(x_j) - ∇ψ_i(y_i)∏_{j ≠ i} ψ_j(y_j)]
    //                  = ∑_i[ [∇ψ_i(x_i) - ∇ψ_i(y_i)] ∏_{j ≠ i}ψ_j(x_j)
    //                         + [∑_{k ≠ i} [ψ_k(x_k) - ψ_k(y_k)] ∏_{j ≠ i, k}ψ_j(x_j)]∇ψ_i(y_i)].
    // With ψ_i = g for all i, it follows that
    //    |∇ψ(x) - ∇ψ(y)| ≤ ∑_i [L_{∇g} M_g^{n-1} + ∑_{k ≠ i} L_g M_g^{n-2} M_{∇g}] |x-y|
    //                    = n [L_{∇g} M_g^{n-1} + (n-1) L_g M_g^{n-2} M_{∇g}] |x-y|
    //                    = n M_g^{n-2} [L_{∇g} M_g + (n-1) L_g M_{∇g}] |x-y|.
    if N >= 2 {
        F::cast_from(N) * bound.powi((N-2) as i32)
                        * (dlip * bound + F::cast_from(N-1) * lip * dbound)
    } else if N==1 {
        dlip
    } else {
        panic!("Invalid dimension")
    }
}
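
// A minimal sanity-check sketch for the two helpers above (hypothetical test module, names
// chosen here for illustration). It assumes that `f64` implements this crate's `Float`
// trait, and that `Loc` can be built from an array and iterated, as done elsewhere in
// this file.
#[cfg(test)]
mod product_helper_tests {
    use super::*;

    #[test]
    fn product_differential_of_squares() {
        // g(y) = y², so f(x_1, x_2) = x_1² x_2² and ∇f(x) = (2 x_1 x_2², 2 x_2 x_1²).
        let x = Loc([1.0, 2.0]);
        let vs = Loc([1.0, 4.0]); // the values g(x_i)
        let d = product_differential(&x, &vs, |y| 2.0 * y);
        for (di, ei) in d.iter().zip([8.0, 4.0]) {
            assert!((di - ei).abs() < 1e-12);
        }
    }

    #[test]
    fn product_differential_lipschitz_factor_n2() {
        // For N = 2 the formula reduces to 2 [L_{∇g} M_g + (2-1) L_g M_{∇g}];
        // with all four parameters equal to one, that is 4.
        let l = product_differential_lipschitz_factor::<f64, 2>(1.0, 1.0, 1.0, 1.0);
        assert!((l - 4.0).abs() < 1e-12);
    }
}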

impl<G, F : Float, const N : usize> Support<F, N>
for UniformProduct<G, N>

// ... unchanged lines omitted ...
