src/kernels/hat_convolution.rs

branch
dev
changeset 61
4f468d35fa29
parent 35
b087e3eab191
equal deleted inserted replaced
60:9738b51d90d7 61:4f468d35fa29
1 //! Implementation of the convolution of two hat functions, 1 //! Implementation of the convolution of two hat functions,
2 //! and its convolution with a [`CubeIndicator`]. 2 //! and its convolution with a [`CubeIndicator`].
3
4 use alg_tools::bisection_tree::{Constant, Support};
5 use alg_tools::bounds::{Bounded, Bounds, GlobalAnalysis, LocalAnalysis};
6 use alg_tools::error::DynResult;
7 use alg_tools::loc::Loc;
8 use alg_tools::mapping::{DifferentiableImpl, Instance, LipschitzDifferentiableImpl, Mapping};
9 use alg_tools::maputil::array_init;
10 use alg_tools::norms::*;
11 use alg_tools::sets::Cube;
12 use alg_tools::types::*;
13 use anyhow::anyhow;
3 use numeric_literals::replace_float_literals; 14 use numeric_literals::replace_float_literals;
4 use serde::Serialize; 15 use serde::Serialize;
5 use alg_tools::types::*; 16
6 use alg_tools::norms::*; 17 use super::ball_indicator::CubeIndicator;
7 use alg_tools::loc::Loc; 18 use super::base::*;
8 use alg_tools::sets::Cube;
9 use alg_tools::bisection_tree::{
10 Support,
11 Constant,
12 Bounds,
13 LocalAnalysis,
14 GlobalAnalysis,
15 Bounded,
16 };
17 use alg_tools::mapping::{
18 Mapping,
19 Instance,
20 DifferentiableImpl,
21 Differential,
22 };
23 use alg_tools::maputil::array_init;
24
25 use crate::types::Lipschitz; 19 use crate::types::Lipschitz;
26 use super::base::*;
27 use super::ball_indicator::CubeIndicator;
28 20
29 /// Hat convolution kernel. 21 /// Hat convolution kernel.
30 /// 22 ///
31 /// This struct represents the function 23 /// This struct represents the function
32 /// $$ 24 /// $$
67 // This is maximised at y=±1/2 with value 2, and minimised at y=0 with value -4. 59 // This is maximised at y=±1/2 with value 2, and minimised at y=0 with value -4.
68 // Now observe that 60 // Now observe that
69 // $$ 61 // $$
70 // [∇f(x\_1, …, x\_n)]_j = \frac{4}{σ} (h\*h)'(x\_j/σ) \prod\_{j ≠ i} \frac{4}{σ} (h\*h)(x\_i/σ) 62 // [∇f(x\_1, …, x\_n)]_j = \frac{4}{σ} (h\*h)'(x\_j/σ) \prod\_{j ≠ i} \frac{4}{σ} (h\*h)(x\_i/σ)
71 // $$ 63 // $$
72 #[derive(Copy,Clone,Debug,Serialize,Eq)] 64 #[derive(Copy, Clone, Debug, Serialize, Eq)]
73 pub struct HatConv<S : Constant, const N : usize> { 65 pub struct HatConv<S: Constant, const N: usize> {
74 /// The parameter $σ$ of the kernel. 66 /// The parameter $σ$ of the kernel.
75 pub radius : S, 67 pub radius: S,
76 } 68 }
77 69
78 impl<S1, S2, const N : usize> PartialEq<HatConv<S2, N>> for HatConv<S1, N> 70 impl<S1, S2, const N: usize> PartialEq<HatConv<S2, N>> for HatConv<S1, N>
79 where S1 : Constant, 71 where
80 S2 : Constant<Type=S1::Type> { 72 S1: Constant,
81 fn eq(&self, other : &HatConv<S2, N>) -> bool { 73 S2: Constant<Type = S1::Type>,
74 {
75 fn eq(&self, other: &HatConv<S2, N>) -> bool {
82 self.radius.value() == other.radius.value() 76 self.radius.value() == other.radius.value()
83 } 77 }
84 } 78 }
85 79
86 impl<'a, S, const N : usize> HatConv<S, N> where S : Constant { 80 impl<'a, S, const N: usize> HatConv<S, N>
81 where
82 S: Constant,
83 {
87 /// Returns the $σ$ parameter of the kernel. 84 /// Returns the $σ$ parameter of the kernel.
88 #[inline] 85 #[inline]
89 pub fn radius(&self) -> S::Type { 86 pub fn radius(&self) -> S::Type {
90 self.radius.value() 87 self.radius.value()
91 } 88 }
92 } 89 }
93 90
94 impl<'a, S, const N : usize> Mapping<Loc<S::Type, N>> for HatConv<S, N> 91 impl<'a, S, const N: usize> Mapping<Loc<N, S::Type>> for HatConv<S, N>
95 where S : Constant { 92 where
93 S: Constant,
94 {
96 type Codomain = S::Type; 95 type Codomain = S::Type;
97 96
98 #[inline] 97 #[inline]
99 fn apply<I : Instance<Loc<S::Type, N>>>(&self, y : I) -> Self::Codomain { 98 fn apply<I: Instance<Loc<N, S::Type>>>(&self, y: I) -> Self::Codomain {
100 let σ = self.radius(); 99 let σ = self.radius();
101 y.cow().product_map(|x| { 100 y.decompose().product_map(|x| self.value_1d_σ1(x / σ) / σ)
102 self.value_1d_σ1(x / σ) / σ
103 })
104 } 101 }
105 } 102 }
106 103
107 #[replace_float_literals(S::Type::cast_from(literal))] 104 #[replace_float_literals(S::Type::cast_from(literal))]
108 impl<S, const N : usize> Lipschitz<L1> for HatConv<S, N> 105 impl<S, const N: usize> Lipschitz<L1> for HatConv<S, N>
109 where S : Constant { 106 where
107 S: Constant,
108 {
110 type FloatType = S::Type; 109 type FloatType = S::Type;
111 #[inline] 110 #[inline]
112 fn lipschitz_factor(&self, L1 : L1) -> Option<Self::FloatType> { 111 fn lipschitz_factor(&self, L1: L1) -> DynResult<Self::FloatType> {
113 // For any ψ_i, we have 112 // For any ψ_i, we have
114 // ∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i) 113 // ∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i)
115 // = [ψ_1(x_1)-ψ_1(y_1)] ∏_{i=2}^N ψ_i(x_i) 114 // = [ψ_1(x_1)-ψ_1(y_1)] ∏_{i=2}^N ψ_i(x_i)
116 // + ψ_1(y_1)[ ∏_{i=2}^N ψ_i(x_i) - ∏_{i=2}^N ψ_i(y_i)] 115 // + ψ_1(y_1)[ ∏_{i=2}^N ψ_i(x_i) - ∏_{i=2}^N ψ_i(y_i)]
117 // = ∑_{j=1}^N [ψ_j(x_j)-ψ_j(y_j)]∏_{i > j} ψ_i(x_i) ∏_{i < j} ψ_i(y_i) 116 // = ∑_{j=1}^N [ψ_j(x_j)-ψ_j(y_j)]∏_{i > j} ψ_i(x_i) ∏_{i < j} ψ_i(y_i)
118 // Thus 117 // Thus
119 // |∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i)| 118 // |∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i)|
120 // ≤ ∑_{j=1}^N |ψ_j(x_j)-ψ_j(y_j)| ∏_{j ≠ i} \max_j |ψ_j| 119 // ≤ ∑_{j=1}^N |ψ_j(x_j)-ψ_j(y_j)| ∏_{j ≠ i} \max_j |ψ_j|
121 let σ = self.radius(); 120 let σ = self.radius();
122 let l1d = self.lipschitz_1d_σ1() / (σ*σ); 121 let l1d = self.lipschitz_1d_σ1() / (σ * σ);
123 let m1d = self.value_1d_σ1(0.0) / σ; 122 let m1d = self.value_1d_σ1(0.0) / σ;
124 Some(l1d * m1d.powi(N as i32 - 1)) 123 Ok(l1d * m1d.powi(N as i32 - 1))
125 } 124 }
126 } 125 }
127 126
128 impl<S, const N : usize> Lipschitz<L2> for HatConv<S, N> 127 impl<S, const N: usize> Lipschitz<L2> for HatConv<S, N>
129 where S : Constant { 128 where
129 S: Constant,
130 {
130 type FloatType = S::Type; 131 type FloatType = S::Type;
131 #[inline] 132 #[inline]
132 fn lipschitz_factor(&self, L2 : L2) -> Option<Self::FloatType> { 133 fn lipschitz_factor(&self, L2: L2) -> DynResult<Self::FloatType> {
133 self.lipschitz_factor(L1).map(|l1| l1 * <S::Type>::cast_from(N).sqrt()) 134 self.lipschitz_factor(L1)
134 } 135 .map(|l1| l1 * <S::Type>::cast_from(N).sqrt())
135 } 136 }
136 137 }
137 138
138 impl<'a, S, const N : usize> DifferentiableImpl<Loc<S::Type, N>> for HatConv<S, N> 139 impl<'a, S, const N: usize> DifferentiableImpl<Loc<N, S::Type>> for HatConv<S, N>
139 where S : Constant { 140 where
140 type Derivative = Loc<S::Type, N>; 141 S: Constant,
141 142 {
142 #[inline] 143 type Derivative = Loc<N, S::Type>;
143 fn differential_impl<I : Instance<Loc<S::Type, N>>>(&self, y0 : I) -> Self::Derivative { 144
144 let y = y0.cow(); 145 #[inline]
146 fn differential_impl<I: Instance<Loc<N, S::Type>>>(&self, y0: I) -> Self::Derivative {
147 let y = y0.decompose();
145 let σ = self.radius(); 148 let σ = self.radius();
146 let σ2 = σ * σ; 149 let σ2 = σ * σ;
147 let vs = y.map(|x| { 150 let vs = y.map(|x| self.value_1d_σ1(x / σ) / σ);
148 self.value_1d_σ1(x / σ) / σ 151 product_differential(&*y, &vs, |x| self.diff_1d_σ1(x / σ) / σ2)
149 }); 152 }
150 product_differential(&*y, &vs, |x| { 153 }
151 self.diff_1d_σ1(x / σ) / σ2
152 })
153 }
154 }
155
156 154
157 #[replace_float_literals(S::Type::cast_from(literal))] 155 #[replace_float_literals(S::Type::cast_from(literal))]
158 impl<'a, F : Float, S, const N : usize> Lipschitz<L2> 156 impl<'a, F: Float, S, const N: usize> LipschitzDifferentiableImpl<Loc<N, S::Type>, L2>
159 for Differential<'a, Loc<F, N>, HatConv<S, N>> 157 for HatConv<S, N>
160 where S : Constant<Type=F> { 158 where
159 S: Constant<Type = F>,
160 {
161 type FloatType = F; 161 type FloatType = F;
162 162
163 #[inline] 163 #[inline]
164 fn lipschitz_factor(&self, _l2 : L2) -> Option<F> { 164 fn diff_lipschitz_factor(&self, _l2: L2) -> DynResult<F> {
165 let h = self.base_fn(); 165 let σ = self.radius();
166 let σ = h.radius(); 166 product_differential_lipschitz_factor::<F, N>(
167 Some(product_differential_lipschitz_factor::<F, N>( 167 self.value_1d_σ1(0.0) / σ,
168 h.value_1d_σ1(0.0) / σ, 168 self.lipschitz_1d_σ1() / (σ * σ),
169 h.lipschitz_1d_σ1() / (σ*σ), 169 self.maxabsdiff_1d_σ1() / (σ * σ),
170 h.maxabsdiff_1d_σ1() / (σ*σ), 170 self.lipschitz_diff_1d_σ1() / (σ * σ),
171 h.lipschitz_diff_1d_σ1() / (σ*σ), 171 )
172 )) 172 }
173 } 173 }
174 }
175
176 174
177 #[replace_float_literals(S::Type::cast_from(literal))] 175 #[replace_float_literals(S::Type::cast_from(literal))]
178 impl<'a, F : Float, S, const N : usize> HatConv<S, N> 176 impl<'a, F: Float, S, const N: usize> HatConv<S, N>
179 where S : Constant<Type=F> { 177 where
178 S: Constant<Type = F>,
179 {
180 /// Computes the value of the kernel for $n=1$ with $σ=1$. 180 /// Computes the value of the kernel for $n=1$ with $σ=1$.
181 #[inline] 181 #[inline]
182 fn value_1d_σ1(&self, x : F) -> F { 182 fn value_1d_σ1(&self, x: F) -> F {
183 let y = x.abs(); 183 let y = x.abs();
184 if y >= 1.0 { 184 if y >= 1.0 {
185 0.0 185 0.0
186 } else if y > 0.5 { 186 } else if y > 0.5 {
187 - (8.0/3.0) * (y - 1.0).powi(3) 187 -(8.0 / 3.0) * (y - 1.0).powi(3)
188 } else /* 0 ≤ y ≤ 0.5 */ { 188 } else
189 (4.0/3.0) + 8.0 * y * y * (y - 1.0) 189 /* 0 ≤ y ≤ 0.5 */
190 {
191 (4.0 / 3.0) + 8.0 * y * y * (y - 1.0)
190 } 192 }
191 } 193 }
192 194
193 /// Computes the differential of the kernel for $n=1$ with $σ=1$. 195 /// Computes the differential of the kernel for $n=1$ with $σ=1$.
194 #[inline] 196 #[inline]
195 fn diff_1d_σ1(&self, x : F) -> F { 197 fn diff_1d_σ1(&self, x: F) -> F {
196 let y = x.abs(); 198 let y = x.abs();
197 if y >= 1.0 { 199 if y >= 1.0 {
198 0.0 200 0.0
199 } else if y > 0.5 { 201 } else if y > 0.5 {
200 - 8.0 * (y - 1.0).powi(2) 202 -8.0 * (y - 1.0).powi(2)
201 } else /* 0 ≤ y ≤ 0.5 */ { 203 } else
204 /* 0 ≤ y ≤ 0.5 */
205 {
202 (24.0 * y - 16.0) * y 206 (24.0 * y - 16.0) * y
203 } 207 }
204 } 208 }
205 209
206 /// Computes the Lipschitz factor of the kernel for $n=1$ with $σ=1$. 210 /// Computes the Lipschitz factor of the kernel for $n=1$ with $σ=1$.
218 } 222 }
219 223
220 /// Computes the second differential of the kernel for $n=1$ with $σ=1$. 224 /// Computes the second differential of the kernel for $n=1$ with $σ=1$.
221 #[inline] 225 #[inline]
222 #[allow(dead_code)] 226 #[allow(dead_code)]
223 fn diff2_1d_σ1(&self, x : F) -> F { 227 fn diff2_1d_σ1(&self, x: F) -> F {
224 let y = x.abs(); 228 let y = x.abs();
225 if y >= 1.0 { 229 if y >= 1.0 {
226 0.0 230 0.0
227 } else if y > 0.5 { 231 } else if y > 0.5 {
228 - 16.0 * (y - 1.0) 232 -16.0 * (y - 1.0)
229 } else /* 0 ≤ y ≤ 0.5 */ { 233 } else
234 /* 0 ≤ y ≤ 0.5 */
235 {
230 48.0 * y - 16.0 236 48.0 * y - 16.0
231 } 237 }
232 } 238 }
233 239
234 /// Computes the differential of the kernel for $n=1$ with $σ=1$. 240 /// Computes the differential of the kernel for $n=1$ with $σ=1$.
237 // Maximal absolute second differential achieved at 0 by diff2_1d_σ1 analysis 243 // Maximal absolute second differential achieved at 0 by diff2_1d_σ1 analysis
238 16.0 244 16.0
239 } 245 }
240 } 246 }
241 247
242 impl<'a, S, const N : usize> Support<S::Type, N> for HatConv<S, N> 248 impl<'a, S, const N: usize> Support<N, S::Type> for HatConv<S, N>
243 where S : Constant { 249 where
244 #[inline] 250 S: Constant,
245 fn support_hint(&self) -> Cube<S::Type,N> { 251 {
252 #[inline]
253 fn support_hint(&self) -> Cube<N, S::Type> {
246 let σ = self.radius(); 254 let σ = self.radius();
247 array_init(|| [-σ, σ]).into() 255 array_init(|| [-σ, σ]).into()
248 } 256 }
249 257
250 #[inline] 258 #[inline]
251 fn in_support(&self, y : &Loc<S::Type,N>) -> bool { 259 fn in_support(&self, y: &Loc<N, S::Type>) -> bool {
252 let σ = self.radius(); 260 let σ = self.radius();
253 y.iter().all(|x| x.abs() <= σ) 261 y.iter().all(|x| x.abs() <= σ)
254 } 262 }
255 263
256 #[inline] 264 #[inline]
257 fn bisection_hint(&self, cube : &Cube<S::Type, N>) -> [Option<S::Type>; N] { 265 fn bisection_hint(&self, cube: &Cube<N, S::Type>) -> [Option<S::Type>; N] {
258 let σ = self.radius(); 266 let σ = self.radius();
259 cube.map(|c, d| symmetric_peak_hint(σ, c, d)) 267 cube.map(|c, d| symmetric_peak_hint(σ, c, d))
260 } 268 }
261 } 269 }
262 270
263 #[replace_float_literals(S::Type::cast_from(literal))] 271 #[replace_float_literals(S::Type::cast_from(literal))]
264 impl<S, const N : usize> GlobalAnalysis<S::Type, Bounds<S::Type>> for HatConv<S, N> 272 impl<S, const N: usize> GlobalAnalysis<S::Type, Bounds<S::Type>> for HatConv<S, N>
265 where S : Constant { 273 where
274 S: Constant,
275 {
266 #[inline] 276 #[inline]
267 fn global_analysis(&self) -> Bounds<S::Type> { 277 fn global_analysis(&self) -> Bounds<S::Type> {
268 Bounds(0.0, self.apply(Loc::ORIGIN)) 278 Bounds(0.0, self.apply(Loc::ORIGIN))
269 } 279 }
270 } 280 }
271 281
272 impl<S, const N : usize> LocalAnalysis<S::Type, Bounds<S::Type>, N> for HatConv<S, N> 282 impl<S, const N: usize> LocalAnalysis<S::Type, Bounds<S::Type>, N> for HatConv<S, N>
273 where S : Constant { 283 where
274 #[inline] 284 S: Constant,
275 fn local_analysis(&self, cube : &Cube<S::Type, N>) -> Bounds<S::Type> { 285 {
286 #[inline]
287 fn local_analysis(&self, cube: &Cube<N, S::Type>) -> Bounds<S::Type> {
276 // The function is maximised/minimised where the 2-norm is minimised/maximised. 288 // The function is maximised/minimised where the 2-norm is minimised/maximised.
277 let lower = self.apply(cube.maxnorm_point()); 289 let lower = self.apply(cube.maxnorm_point());
278 let upper = self.apply(cube.minnorm_point()); 290 let upper = self.apply(cube.minnorm_point());
279 Bounds(lower, upper) 291 Bounds(lower, upper)
280 } 292 }
281 } 293 }
282 294
283 #[replace_float_literals(C::Type::cast_from(literal))] 295 #[replace_float_literals(C::Type::cast_from(literal))]
284 impl<'a, C : Constant, const N : usize> Norm<C::Type, L1> 296 impl<'a, C: Constant, const N: usize> Norm<L1, C::Type> for HatConv<C, N> {
285 for HatConv<C, N> { 297 #[inline]
286 #[inline] 298 fn norm(&self, _: L1) -> C::Type {
287 fn norm(&self, _ : L1) -> C::Type {
288 1.0 299 1.0
289 } 300 }
290 } 301 }
291 302
292 #[replace_float_literals(C::Type::cast_from(literal))] 303 #[replace_float_literals(C::Type::cast_from(literal))]
293 impl<'a, C : Constant, const N : usize> Norm<C::Type, Linfinity> 304 impl<'a, C: Constant, const N: usize> Norm<Linfinity, C::Type> for HatConv<C, N> {
294 for HatConv<C, N> { 305 #[inline]
295 #[inline] 306 fn norm(&self, _: Linfinity) -> C::Type {
296 fn norm(&self, _ : Linfinity) -> C::Type {
297 self.bounds().upper() 307 self.bounds().upper()
298 } 308 }
299 } 309 }
300 310
301 #[replace_float_literals(F::cast_from(literal))] 311 #[replace_float_literals(F::cast_from(literal))]
302 impl<'a, F : Float, R, C, const N : usize> Mapping<Loc<F, N>> 312 impl<'a, F: Float, R, C, const N: usize> Mapping<Loc<N, F>>
303 for Convolution<CubeIndicator<R, N>, HatConv<C, N>> 313 for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
304 where R : Constant<Type=F>, 314 where
305 C : Constant<Type=F> { 315 R: Constant<Type = F>,
306 316 C: Constant<Type = F>,
317 {
307 type Codomain = F; 318 type Codomain = F;
308 319
309 #[inline] 320 #[inline]
310 fn apply<I : Instance<Loc<F, N>>>(&self, y : I) -> F { 321 fn apply<I: Instance<Loc<N, F>>>(&self, y: I) -> F {
311 let Convolution(ref ind, ref hatconv) = self; 322 let Convolution(ref ind, ref hatconv) = self;
312 let β = ind.r.value(); 323 let β = ind.r.value();
313 let σ = hatconv.radius(); 324 let σ = hatconv.radius();
314 325
315 // This is just a product of one-dimensional versions 326 // This is just a product of one-dimensional versions
316 y.cow().product_map(|x| { 327 y.decompose().product_map(|x| {
317 // With $u_σ(x) = u_1(x/σ)/σ$ the normalised hat convolution 328 // With $u_σ(x) = u_1(x/σ)/σ$ the normalised hat convolution
318 // we have 329 // we have
319 // $$ 330 // $$
320 // [χ_{-β,β} * u_σ](x) 331 // [χ_{-β,β} * u_σ](x)
321 // = ∫_{x-β}^{x+β} u_σ(z) d z 332 // = ∫_{x-β}^{x+β} u_σ(z) d z
327 }) 338 })
328 } 339 }
329 } 340 }
330 341
331 #[replace_float_literals(F::cast_from(literal))] 342 #[replace_float_literals(F::cast_from(literal))]
332 impl<'a, F : Float, R, C, const N : usize> DifferentiableImpl<Loc<F, N>> 343 impl<'a, F: Float, R, C, const N: usize> DifferentiableImpl<Loc<N, F>>
333 for Convolution<CubeIndicator<R, N>, HatConv<C, N>> 344 for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
334 where R : Constant<Type=F>, 345 where
335 C : Constant<Type=F> { 346 R: Constant<Type = F>,
336 347 C: Constant<Type = F>,
337 type Derivative = Loc<F, N>; 348 {
338 349 type Derivative = Loc<N, F>;
339 #[inline] 350
340 fn differential_impl<I : Instance<Loc<F, N>>>(&self, y0 : I) -> Loc<F, N> { 351 #[inline]
341 let y = y0.cow(); 352 fn differential_impl<I: Instance<Loc<N, F>>>(&self, y0: I) -> Loc<N, F> {
353 let y = y0.decompose();
342 let Convolution(ref ind, ref hatconv) = self; 354 let Convolution(ref ind, ref hatconv) = self;
343 let β = ind.r.value(); 355 let β = ind.r.value();
344 let σ = hatconv.radius(); 356 let σ = hatconv.radius();
345 let σ2 = σ * σ; 357 let σ2 = σ * σ;
346 358
347 let vs = y.map(|x| { 359 let vs = y.map(|x| self.value_1d_σ1(x / σ, β / σ));
348 self.value_1d_σ1(x / σ, β / σ) 360 product_differential(&*y, &vs, |x| self.diff_1d_σ1(x / σ, β / σ) / σ2)
349 }); 361 }
350 product_differential(&*y, &vs, |x| { 362 }
351 self.diff_1d_σ1(x / σ, β / σ) / σ2
352 })
353 }
354 }
355
356 363
357 /// Integrate $f$, whose support is $[c, d]$, on $[a, b]$. 364 /// Integrate $f$, whose support is $[c, d]$, on $[a, b]$.
358 /// If $b > d$, add $g()$ to the result. 365 /// If $b > d$, add $g()$ to the result.
359 #[inline] 366 #[inline]
360 #[replace_float_literals(F::cast_from(literal))] 367 #[replace_float_literals(F::cast_from(literal))]
361 fn i<F: Float>(a : F, b : F, c : F, d : F, f : impl Fn(F) -> F, 368 fn i<F: Float>(a: F, b: F, c: F, d: F, f: impl Fn(F) -> F, g: impl Fn() -> F) -> F {
362 g : impl Fn() -> F) -> F {
363 if b < c { 369 if b < c {
364 0.0 370 0.0
365 } else if b <= d { 371 } else if b <= d {
366 if a <= c { 372 if a <= c {
367 f(b) - f(c) 373 f(b) - f(c)
368 } else { 374 } else {
369 f(b) - f(a) 375 f(b) - f(a)
370 } 376 }
371 } else /* b > d */ { 377 } else
378 /* b > d */
379 {
372 g() + if a <= c { 380 g() + if a <= c {
373 f(d) - f(c) 381 f(d) - f(c)
374 } else if a < d { 382 } else if a < d {
375 f(d) - f(a) 383 f(d) - f(a)
376 } else { 384 } else {
378 } 386 }
379 } 387 }
380 } 388 }
381 389
382 #[replace_float_literals(F::cast_from(literal))] 390 #[replace_float_literals(F::cast_from(literal))]
383 impl<F : Float, C, R, const N : usize> Convolution<CubeIndicator<R, N>, HatConv<C, N>> 391 impl<F: Float, C, R, const N: usize> Convolution<CubeIndicator<R, N>, HatConv<C, N>>
384 where R : Constant<Type=F>, 392 where
385 C : Constant<Type=F> { 393 R: Constant<Type = F>,
386 394 C: Constant<Type = F>,
395 {
387 /// Calculates the value of the 1D hat convolution further convolved by a interval indicator. 396 /// Calculates the value of the 1D hat convolution further convolved by a interval indicator.
388 /// As both functions are piecewise polynomials, this is implemented by explicit integral over 397 /// As both functions are piecewise polynomials, this is implemented by explicit integral over
389 /// all subintervals of polynomiality of the cube indicator, using easily formed 398 /// all subintervals of polynomiality of the cube indicator, using easily formed
390 /// antiderivatives. 399 /// antiderivatives.
391 #[inline] 400 #[inline]
392 pub fn value_1d_σ1(&self, x : F, β : F) -> F { 401 pub fn value_1d_σ1(&self, x: F, β: F) -> F {
393 // The integration interval 402 // The integration interval
394 let a = x - β; 403 let a = x - β;
395 let b = x + β; 404 let b = x + β;
396 405
397 #[inline] 406 #[inline]
398 fn pow4<F : Float>(x : F) -> F { 407 fn pow4<F: Float>(x: F) -> F {
399 let y = x * x; 408 let y = x * x;
400 y * y 409 y * y
401 } 410 }
402 411
403 // Observe the factor 1/6 at the front from the antiderivatives below. 412 // Observe the factor 1/6 at the front from the antiderivatives below.
404 // The factor 4 is from normalisation of the original function. 413 // The factor 4 is from normalisation of the original function.
405 (4.0/6.0) * i(a, b, -1.0, -0.5, 414 (4.0 / 6.0)
415 * i(
416 a,
417 b,
418 -1.0,
419 -0.5,
406 // (2/3) (y+1)^3 on -1 < y ≤ -1/2 420 // (2/3) (y+1)^3 on -1 < y ≤ -1/2
407 // The antiderivative is (2/12)(y+1)^4 = (1/6)(y+1)^4 421 // The antiderivative is (2/12)(y+1)^4 = (1/6)(y+1)^4
408 |y| pow4(y+1.0), 422 |y| pow4(y + 1.0),
409 || i(a, b, -0.5, 0.0, 423 || {
410 // -2 y^3 - 2 y^2 + 1/3 on -1/2 < y ≤ 0 424 i(
411 // The antiderivative is -1/2 y^4 - 2/3 y^3 + 1/3 y 425 a,
412 |y| y*(-y*y*(y*3.0 + 4.0) + 2.0), 426 b,
413 || i(a, b, 0.0, 0.5, 427 -0.5,
414 // 2 y^3 - 2 y^2 + 1/3 on 0 < y < 1/2 428 0.0,
415 // The antiderivative is 1/2 y^4 - 2/3 y^3 + 1/3 y 429 // -2 y^3 - 2 y^2 + 1/3 on -1/2 < y ≤ 0
416 |y| y*(y*y*(y*3.0 - 4.0) + 2.0), 430 // The antiderivative is -1/2 y^4 - 2/3 y^3 + 1/3 y
417 || i(a, b, 0.5, 1.0, 431 |y| y * (-y * y * (y * 3.0 + 4.0) + 2.0),
418 // -(2/3) (y-1)^3 on 1/2 < y ≤ 1 432 || {
419 // The antiderivative is -(2/12)(y-1)^4 = -(1/6)(y-1)^4 433 i(
420 |y| -pow4(y-1.0), 434 a,
421 || 0.0 435 b,
436 0.0,
437 0.5,
438 // 2 y^3 - 2 y^2 + 1/3 on 0 < y < 1/2
439 // The antiderivative is 1/2 y^4 - 2/3 y^3 + 1/3 y
440 |y| y * (y * y * (y * 3.0 - 4.0) + 2.0),
441 || {
442 i(
443 a,
444 b,
445 0.5,
446 1.0,
447 // -(2/3) (y-1)^3 on 1/2 < y ≤ 1
448 // The antiderivative is -(2/12)(y-1)^4 = -(1/6)(y-1)^4
449 |y| -pow4(y - 1.0),
450 || 0.0,
451 )
452 },
422 ) 453 )
454 },
423 ) 455 )
424 ) 456 },
425 ) 457 )
426 } 458 }
427 459
428 /// Calculates the derivative of the 1D hat convolution further convolved by a interval 460 /// Calculates the derivative of the 1D hat convolution further convolved by a interval
429 /// indicator. The implementation is similar to [`Self::value_1d_σ1`], using the fact that 461 /// indicator. The implementation is similar to [`Self::value_1d_σ1`], using the fact that
430 /// $(θ * ψ)' = θ * ψ'$. 462 /// $(θ * ψ)' = θ * ψ'$.
431 #[inline] 463 #[inline]
432 pub fn diff_1d_σ1(&self, x : F, β : F) -> F { 464 pub fn diff_1d_σ1(&self, x: F, β: F) -> F {
433 // The integration interval 465 // The integration interval
434 let a = x - β; 466 let a = x - β;
435 let b = x + β; 467 let b = x + β;
436 468
437 // The factor 4 is from normalisation of the original function. 469 // The factor 4 is from normalisation of the original function.
438 4.0 * i(a, b, -1.0, -0.5, 470 4.0 * i(
439 // (2/3) (y+1)^3 on -1 < y ≤ -1/2 471 a,
440 |y| (2.0/3.0) * (y + 1.0).powi(3), 472 b,
441 || i(a, b, -0.5, 0.0, 473 -1.0,
474 -0.5,
475 // (2/3) (y+1)^3 on -1 < y ≤ -1/2
476 |y| (2.0 / 3.0) * (y + 1.0).powi(3),
477 || {
478 i(
479 a,
480 b,
481 -0.5,
482 0.0,
442 // -2 y^3 - 2 y^2 + 1/3 on -1/2 < y ≤ 0 483 // -2 y^3 - 2 y^2 + 1/3 on -1/2 < y ≤ 0
443 |y| -2.0*(y + 1.0) * y * y + (1.0/3.0), 484 |y| -2.0 * (y + 1.0) * y * y + (1.0 / 3.0),
444 || i(a, b, 0.0, 0.5, 485 || {
486 i(
487 a,
488 b,
489 0.0,
490 0.5,
445 // 2 y^3 - 2 y^2 + 1/3 on 0 < y < 1/2 491 // 2 y^3 - 2 y^2 + 1/3 on 0 < y < 1/2
446 |y| 2.0*(y - 1.0) * y * y + (1.0/3.0), 492 |y| 2.0 * (y - 1.0) * y * y + (1.0 / 3.0),
447 || i(a, b, 0.5, 1.0, 493 || {
448 // -(2/3) (y-1)^3 on 1/2 < y ≤ 1 494 i(
449 |y| -(2.0/3.0) * (y - 1.0).powi(3), 495 a,
450 || 0.0 496 b,
451 ) 497 0.5,
452 ) 498 1.0,
499 // -(2/3) (y-1)^3 on 1/2 < y ≤ 1
500 |y| -(2.0 / 3.0) * (y - 1.0).powi(3),
501 || 0.0,
502 )
503 },
504 )
505 },
453 ) 506 )
507 },
454 ) 508 )
455 } 509 }
456 } 510 }
457 511
458 /* 512 /*
459 impl<'a, F : Float, R, C, const N : usize> Lipschitz<L2> 513 impl<'a, F : Float, R, C, const N : usize> Lipschitz<L2>
460 for Differential<Loc<F, N>, Convolution<CubeIndicator<R, N>, HatConv<C, N>>> 514 for Differential<Loc<N, F>, Convolution<CubeIndicator<R, N>, HatConv<C, N>>>
461 where R : Constant<Type=F>, 515 where R : Constant<Type=F>,
462 C : Constant<Type=F> { 516 C : Constant<Type=F> {
463 517
464 type FloatType = F; 518 type FloatType = F;
465 519
469 None 523 None
470 } 524 }
471 } 525 }
472 */ 526 */
473 527
474 impl<F : Float, R, C, const N : usize> 528 impl<F: Float, R, C, const N: usize> Convolution<CubeIndicator<R, N>, HatConv<C, N>>
475 Convolution<CubeIndicator<R, N>, HatConv<C, N>> 529 where
476 where R : Constant<Type=F>, 530 R: Constant<Type = F>,
477 C : Constant<Type=F> { 531 C: Constant<Type = F>,
478 532 {
479 #[inline] 533 #[inline]
480 fn get_r(&self) -> F { 534 fn get_r(&self) -> F {
481 let Convolution(ref ind, ref hatconv) = self; 535 let Convolution(ref ind, ref hatconv) = self;
482 ind.r.value() + hatconv.radius() 536 ind.r.value() + hatconv.radius()
483 } 537 }
484 } 538 }
485 539
486 impl<F : Float, R, C, const N : usize> Support<F, N> 540 impl<F: Float, R, C, const N: usize> Support<N, F>
487 for Convolution<CubeIndicator<R, N>, HatConv<C, N>> 541 for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
488 where R : Constant<Type=F>, 542 where
489 C : Constant<Type=F> { 543 R: Constant<Type = F>,
490 544 C: Constant<Type = F>,
491 #[inline] 545 {
492 fn support_hint(&self) -> Cube<F, N> { 546 #[inline]
547 fn support_hint(&self) -> Cube<N, F> {
493 let r = self.get_r(); 548 let r = self.get_r();
494 array_init(|| [-r, r]).into() 549 array_init(|| [-r, r]).into()
495 } 550 }
496 551
497 #[inline] 552 #[inline]
498 fn in_support(&self, y : &Loc<F, N>) -> bool { 553 fn in_support(&self, y: &Loc<N, F>) -> bool {
499 let r = self.get_r(); 554 let r = self.get_r();
500 y.iter().all(|x| x.abs() <= r) 555 y.iter().all(|x| x.abs() <= r)
501 } 556 }
502 557
503 #[inline] 558 #[inline]
504 fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] { 559 fn bisection_hint(&self, cube: &Cube<N, F>) -> [Option<F>; N] {
505 // It is not difficult to verify that [`HatConv`] is C^2. 560 // It is not difficult to verify that [`HatConv`] is C^2.
506 // Therefore, so is [`Convolution<CubeIndicator<R, N>, HatConv<C, N>>`] so that a finer 561 // Therefore, so is [`Convolution<CubeIndicator<R, N>, HatConv<C, N>>`] so that a finer
507 // subdivision for the hint than this is not particularly useful. 562 // subdivision for the hint than this is not particularly useful.
508 let r = self.get_r(); 563 let r = self.get_r();
509 cube.map(|c, d| symmetric_peak_hint(r, c, d)) 564 cube.map(|c, d| symmetric_peak_hint(r, c, d))
510 } 565 }
511 } 566 }
512 567
513 impl<F : Float, R, C, const N : usize> GlobalAnalysis<F, Bounds<F>> 568 impl<F: Float, R, C, const N: usize> GlobalAnalysis<F, Bounds<F>>
514 for Convolution<CubeIndicator<R, N>, HatConv<C, N>> 569 for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
515 where R : Constant<Type=F>, 570 where
516 C : Constant<Type=F> { 571 R: Constant<Type = F>,
572 C: Constant<Type = F>,
573 {
517 #[inline] 574 #[inline]
518 fn global_analysis(&self) -> Bounds<F> { 575 fn global_analysis(&self) -> Bounds<F> {
519 Bounds(F::ZERO, self.apply(Loc::ORIGIN)) 576 Bounds(F::ZERO, self.apply(Loc::ORIGIN))
520 } 577 }
521 } 578 }
522 579
523 impl<F : Float, R, C, const N : usize> LocalAnalysis<F, Bounds<F>, N> 580 impl<F: Float, R, C, const N: usize> LocalAnalysis<F, Bounds<F>, N>
524 for Convolution<CubeIndicator<R, N>, HatConv<C, N>> 581 for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
525 where R : Constant<Type=F>, 582 where
526 C : Constant<Type=F> { 583 R: Constant<Type = F>,
527 #[inline] 584 C: Constant<Type = F>,
528 fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> { 585 {
586 #[inline]
587 fn local_analysis(&self, cube: &Cube<N, F>) -> Bounds<F> {
529 // The function is maximised/minimised where the absolute value is minimised/maximised. 588 // The function is maximised/minimised where the absolute value is minimised/maximised.
530 let lower = self.apply(cube.maxnorm_point()); 589 let lower = self.apply(cube.maxnorm_point());
531 let upper = self.apply(cube.minnorm_point()); 590 let upper = self.apply(cube.minnorm_point());
532 //assert!(upper >= lower); 591 //assert!(upper >= lower);
533 if upper < lower { 592 if upper < lower {
540 Bounds(lower, upper) 599 Bounds(lower, upper)
541 } 600 }
542 } 601 }
543 } 602 }
544 603
545
546 /// This [`BoundedBy`] implementation bounds $u * u$ by $(ψ * ψ) u$ for $u$ a hat convolution and 604 /// This [`BoundedBy`] implementation bounds $u * u$ by $(ψ * ψ) u$ for $u$ a hat convolution and
547 /// $ψ = χ_{[-a,a]^N}$ for some $a>0$. 605 /// $ψ = χ_{[-a,a]^N}$ for some $a>0$.
548 /// 606 ///
549 /// This is based on the general formula for bounding $(uχ) * (uχ)$ by $(ψ * ψ) u$, 607 /// This is based on the general formula for bounding $(uχ) * (uχ)$ by $(ψ * ψ) u$,
550 /// where we take $ψ = χ_{[-a,a]^N}$ and $χ = χ_{[-σ,σ]^N}$ for $σ$ the width of the hat 608 /// where we take $ψ = χ_{[-a,a]^N}$ and $χ = χ_{[-σ,σ]^N}$ for $σ$ the width of the hat
551 /// convolution. 609 /// convolution.
552 #[replace_float_literals(F::cast_from(literal))] 610 #[replace_float_literals(F::cast_from(literal))]
553 impl<F, C, S, const N : usize> 611 impl<F, C, S, const N: usize>
554 BoundedBy<F, SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>> 612 BoundedBy<F, SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>>
555 for AutoConvolution<HatConv<C, N>> 613 for AutoConvolution<HatConv<C, N>>
556 where F : Float, 614 where
557 C : Constant<Type=F>, 615 F: Float,
558 S : Constant<Type=F> { 616 C: Constant<Type = F>,
559 617 S: Constant<Type = F>,
618 {
560 fn bounding_factor( 619 fn bounding_factor(
561 &self, 620 &self,
562 kernel : &SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>> 621 kernel: &SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>,
563 ) -> Option<F> { 622 ) -> DynResult<F> {
564 // We use the comparison $ℱ[𝒜(ψ v)] ≤ L_1 ℱ[𝒜(ψ)u] ⟺ I_{v̂} v̂ ≤ L_1 û$ with 623 // We use the comparison $ℱ[𝒜(ψ v)] ≤ L_1 ℱ[𝒜(ψ)u] ⟺ I_{v̂} v̂ ≤ L_1 û$ with
565 // $ψ = χ_{[-w, w]}$ satisfying $supp v ⊂ [-w, w]$, i.e. $w ≥ σ$. Here $v̂ = ℱ[v]$ and 624 // $ψ = χ_{[-w, w]}$ satisfying $supp v ⊂ [-w, w]$, i.e. $w ≥ σ$. Here $v̂ = ℱ[v]$ and
566 // $I_{v̂} = ∫ v̂ d ξ. For this relationship to be valid, we need $v̂ ≥ 0$, which is guaranteed 625 // $I_{v̂} = ∫ v̂ d ξ. For this relationship to be valid, we need $v̂ ≥ 0$, which is guaranteed
567 // by $v̂ = u_σ$ being an autoconvolution. With $u = v$, therefore $L_1 = I_v̂ = ∫ u_σ(ξ) d ξ$. 626 // by $v̂ = u_σ$ being an autoconvolution. With $u = v$, therefore $L_1 = I_v̂ = ∫ u_σ(ξ) d ξ$.
568 let SupportProductFirst(AutoConvolution(ref ind), hatconv2) = kernel; 627 let SupportProductFirst(AutoConvolution(ref ind), hatconv2) = kernel;
572 631
573 // Check that the cutting indicator of the comparison 632 // Check that the cutting indicator of the comparison
574 // `SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>` 633 // `SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>`
575 // is wide enough, and that the hat convolution has the same radius as ours. 634 // is wide enough, and that the hat convolution has the same radius as ours.
576 if σ <= a && hatconv2 == &self.0 { 635 if σ <= a && hatconv2 == &self.0 {
577 Some(bounding_1d.powi(N as i32)) 636 Ok(bounding_1d.powi(N as i32))
578 } else { 637 } else {
579 // We cannot compare 638 // We cannot compare
580 None 639 Err(anyhow!("Incomparable factors"))
581 } 640 }
582 } 641 }
583 } 642 }
584 643
585 /// This [`BoundedBy`] implementation bounds $u * u$ by $u$ for $u$ a hat convolution. 644 /// This [`BoundedBy`] implementation bounds $u * u$ by $u$ for $u$ a hat convolution.
586 /// 645 ///
587 /// This is based on Example 3.3 in the manuscript. 646 /// This is based on Example 3.3 in the manuscript.
588 #[replace_float_literals(F::cast_from(literal))] 647 #[replace_float_literals(F::cast_from(literal))]
589 impl<F, C, const N : usize> 648 impl<F, C, const N: usize> BoundedBy<F, HatConv<C, N>> for AutoConvolution<HatConv<C, N>>
590 BoundedBy<F, HatConv<C, N>> 649 where
591 for AutoConvolution<HatConv<C, N>> 650 F: Float,
592 where F : Float, 651 C: Constant<Type = F>,
593 C : Constant<Type=F> { 652 {
594
595 /// Returns an estimate of the factor $L_1$. 653 /// Returns an estimate of the factor $L_1$.
596 /// 654 ///
597 /// Returns `None` if `kernel` does not have the same width as hat convolution that `self` 655 /// Returns `None` if `kernel` does not have the same width as hat convolution that `self`
598 /// is based on. 656 /// is based on.
599 fn bounding_factor( 657 fn bounding_factor(&self, kernel: &HatConv<C, N>) -> DynResult<F> {
600 &self,
601 kernel : &HatConv<C, N>
602 ) -> Option<F> {
603 if kernel == &self.0 { 658 if kernel == &self.0 {
604 Some(1.0) 659 Ok(1.0)
605 } else { 660 } else {
606 // We cannot compare 661 // We cannot compare
607 None 662 Err(anyhow!("Incomparable kernels"))
608 } 663 }
609 } 664 }
610 } 665 }
611 666
#[cfg(test)]
mod tests {
    use super::HatConv;
    use crate::kernels::{BallIndicator, Convolution, CubeIndicator};
    use alg_tools::lingrid::linspace;
    use alg_tools::loc::Loc;
    use alg_tools::mapping::Mapping;
    use alg_tools::norms::Linfinity;

    /// Returns `true` if `vals` is monotone non-increasing.
    ///
    /// NOTE(review): the previous fold-based check never advanced its
    /// accumulator, so it compared every sample against the *first* value,
    /// verifying only that the first value was maximal — not monotonicity.
    fn nonincreasing(mut vals: impl Iterator<Item = f64>) -> bool {
        let mut prev = match vals.next() {
            Some(v) => v,
            None => return true,
        };
        vals.all(|t| {
            let ok = prev >= t;
            prev = t;
            ok
        })
    }

    /// Returns `true` if `vals` is monotone non-decreasing.
    fn nondecreasing(mut vals: impl Iterator<Item = f64>) -> bool {
        let mut prev = match vals.next() {
            Some(v) => v,
            None => return true,
        };
        vals.all(|t| {
            let ok = prev <= t;
            prev = t;
            ok
        })
    }

    /// Tests numerically that [`HatConv<f64, 1>`] is monotone (non-increasing) on [0, 1].
    #[test]
    fn hatconv_monotonicity() {
        let grid = linspace(0.0, 1.0, 100000);
        let hatconv: HatConv<f64, 1> = HatConv { radius: 1.0 };
        assert!(nonincreasing(
            grid.into_iter().map(|t| hatconv.apply(Loc::from(t)))
        ));
    }

    /// Tests numerically that [`Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>`]
    /// is monotone: non-decreasing on [-2, 0] and non-increasing on [0, 2].
    #[test]
    fn convolution_cubeind_hatconv_monotonicity() {
        let hatconv: Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>> =
            Convolution(BallIndicator { r: 0.5, exponent: Linfinity }, HatConv {
                radius: 1.0,
            });

        let grid = linspace(-2.0, 0.0, 100000);
        assert!(nondecreasing(
            grid.into_iter().map(|t| hatconv.apply(Loc::from(t)))
        ));

        let grid = linspace(0.0, 2.0, 100000);
        assert!(nonincreasing(
            grid.into_iter().map(|t| hatconv.apply(Loc::from(t)))
        ));
    }
}

mercurial