//! Implementation of the convolution of two hat functions,
//! and its convolution with a [`CubeIndicator`].
use numeric_literals::replace_float_literals;
use serde::Serialize;
use alg_tools::types::*;
use alg_tools::norms::*;
use alg_tools::loc::Loc;
use alg_tools::sets::Cube;
use alg_tools::bisection_tree::{
    Support,
    Constant,
    Bounds,
    LocalAnalysis,
    GlobalAnalysis,
    Bounded,
};
use alg_tools::mapping::{Apply, Differentiable};
use alg_tools::maputil::array_init;

use crate::types::Lipschitz;
use super::base::*;
use super::ball_indicator::CubeIndicator;

/// Hat convolution kernel.
///
/// This struct represents the function
/// $$
/// f(x\_1, …, x\_n) = \prod\_{i=1}^n \frac{4}{σ} (h\*h)(x\_i/σ),
/// $$
/// where $h$ is the “hat function” $h(y) = \max(0, 1 - |2y|)$.
/// The factor $4/σ$ normalises each one-dimensional factor: $∫ f\,d x = 1$.
/// We have
/// $$
/// (h*h)(y) =
/// \begin{cases}
/// \frac{2}{3} (y+1)^3 & -1<y\leq -\frac{1}{2}, \\\\
/// -2 y^3-2 y^2+\frac{1}{3} & -\frac{1}{2}<y\leq 0, \\\\
/// 2 y^3-2 y^2+\frac{1}{3} & 0<y<\frac{1}{2}, \\\\
/// -\frac{2}{3} (y-1)^3 & \frac{1}{2}\leq y<1. \\\\
/// \end{cases}
/// $$
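///
/// A minimal usage sketch (not compiled as a doctest, since construction goes
/// through the [`Constant`] trait):
/// ```ignore
/// let u : HatConv<f64, 1> = HatConv { radius : 1.0 };
/// // The peak value at the origin is 4/(3σ) in each dimension:
/// assert_eq!(u.apply(Loc::from(0.0)), 4.0/3.0);
/// ```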
#[derive(Copy,Clone,Debug,Serialize,Eq)]
pub struct HatConv<S : Constant, const N : usize> {
    /// The parameter $σ$ of the kernel.
    pub radius : S,
}

impl<S1, S2, const N : usize> PartialEq<HatConv<S2, N>> for HatConv<S1, N>
where S1 : Constant,
      S2 : Constant<Type=S1::Type> {
    fn eq(&self, other : &HatConv<S2, N>) -> bool {
        self.radius.value() == other.radius.value()
    }
}

impl<'a, S, const N : usize> HatConv<S, N> where S : Constant {
    /// Returns the $σ$ parameter of the kernel.
    #[inline]
    pub fn radius(&self) -> S::Type {
        self.radius.value()
    }
}

impl<'a, S, const N : usize> Apply<&'a Loc<S::Type, N>> for HatConv<S, N>
where S : Constant {
    type Output = S::Type;
    #[inline]
    fn apply(&self, y : &'a Loc<S::Type, N>) -> Self::Output {
        let σ = self.radius();
        y.product_map(|x| {
            self.value_1d_σ1(x / σ) / σ
        })
    }
}

impl<'a, S, const N : usize> Apply<Loc<S::Type, N>> for HatConv<S, N>
where S : Constant {
    type Output = S::Type;
    #[inline]
    fn apply(&self, y : Loc<S::Type, N>) -> Self::Output {
        self.apply(&y)
    }
}

#[replace_float_literals(S::Type::cast_from(literal))]
impl<S, const N : usize> Lipschitz<L1> for HatConv<S, N>
where S : Constant {
    type FloatType = S::Type;
    #[inline]
    fn lipschitz_factor(&self, L1 : L1) -> Option<Self::FloatType> {
        // For any ψ_i, we have
        // ∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i)
        //  = [ψ_1(x_1)-ψ_1(y_1)] ∏_{i=2}^N ψ_i(x_i)
        //    + ψ_1(y_1)[∏_{i=2}^N ψ_i(x_i) - ∏_{i=2}^N ψ_i(y_i)]
        //  = ∑_{j=1}^N [ψ_j(x_j)-ψ_j(y_j)] ∏_{i > j} ψ_i(x_i) ∏_{i < j} ψ_i(y_i)
        // Thus
        // |∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i)|
        //  ≤ ∑_{j=1}^N |ψ_j(x_j)-ψ_j(y_j)| ∏_{i ≠ j} max |ψ_i|
        let σ = self.radius();
        let l1d = self.lipschitz_1d_σ1() / (σ*σ);
        let m1d = self.value_1d_σ1(0.0) / σ;
        Some(l1d * m1d.powi(N as i32 - 1))
    }
}
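
// A numeric spot-check (a sketch, not part of the original API) of the L¹
// Lipschitz bound derived above: in one dimension with σ = 1 the factor
// reduces to l1d = 2, so |f(x)-f(y)| ≤ 2|x-y| should hold on a fine grid.
#[cfg(test)]
#[test]
fn hatconv_lipschitz_l1_spot_check() {
    let u : HatConv<f64, 1> = HatConv { radius : 1.0 };
    let l = u.lipschitz_factor(L1).unwrap();
    let n = 1000;
    let pts : Vec<f64> = (0..=n).map(|k| -1.0 + 2.0 * (k as f64) / (n as f64)).collect();
    for w in pts.windows(2) {
        let dv = (u.apply(Loc::from(w[0])) - u.apply(Loc::from(w[1]))).abs();
        assert!(dv <= l * (w[1] - w[0]).abs() * (1.0 + 1e-10));
    }
}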

impl<S, const N : usize> Lipschitz<L2> for HatConv<S, N>
where S : Constant {
    type FloatType = S::Type;
    #[inline]
    fn lipschitz_factor(&self, L2 : L2) -> Option<Self::FloatType> {
        // Since ‖x‖_1 ≤ √N ‖x‖_2, an L¹ Lipschitz factor ℓ yields the L² factor ℓ√N.
        self.lipschitz_factor(L1).map(|l1| l1 * <S::Type>::cast_from(N).sqrt())
    }
}


impl<'a, S, const N : usize> Differentiable<&'a Loc<S::Type, N>> for HatConv<S, N>
where S : Constant {
    type Output = Loc<S::Type, N>;
    #[inline]
    fn differential(&self, y : &'a Loc<S::Type, N>) -> Self::Output {
        let σ = self.radius();
        let σ2 = σ * σ;
        let vs = y.map(|x| {
            self.value_1d_σ1(x / σ) / σ
        });
        product_differential(y, &vs, |x| {
            self.diff_1d_σ1(x / σ) / σ2
        })
    }
}

impl<'a, S, const N : usize> Differentiable<Loc<S::Type, N>> for HatConv<S, N>
where S : Constant {
    type Output = Loc<S::Type, N>;
    #[inline]
    fn differential(&self, y : Loc<S::Type, N>) -> Self::Output {
        self.differential(&y)
    }
}
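
// A sketch (hypothetical test, assuming `Loc` converts from arrays as in
// `alg_tools`) checking that `product_differential` factors the 2D differential
// into 1D values and derivatives; with σ = 1 the σ-scalings drop out.
#[cfg(test)]
#[test]
fn hatconv_product_differential_sketch() {
    let u : HatConv<f64, 2> = HatConv { radius : 1.0 };
    let (a, b) = (0.3, -0.6);
    let d = u.differential(&Loc::from([a, b]));
    let expect = [
        u.diff_1d_σ1(a) * u.value_1d_σ1(b),   // ∂/∂x_1 by the product rule
        u.value_1d_σ1(a) * u.diff_1d_σ1(b),   // ∂/∂x_2
    ];
    for (di, ei) in d.iter().zip(expect.iter()) {
        assert!((di - ei).abs() < 1e-12);
    }
}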

#[replace_float_literals(S::Type::cast_from(literal))]
impl<'a, F : Float, S, const N : usize> HatConv<S, N>
where S : Constant<Type=F> {
    /// Computes the value of the kernel for $n=1$ with $σ=1$.
    #[inline]
    fn value_1d_σ1(&self, x : F) -> F {
        let y = x.abs();
        if y >= 1.0 {
            0.0
        } else if y > 0.5 {
            - (8.0/3.0) * (y - 1.0).powi(3)
        } else /* 0 ≤ y ≤ 0.5 */ {
            (4.0/3.0) + 8.0 * y * y * (y - 1.0)
        }
    }

    /// Computes the derivative of the kernel for $n=1$ with $σ=1$.
    #[inline]
    fn diff_1d_σ1(&self, x : F) -> F {
        let y = x.abs();
        if y >= 1.0 {
            0.0
        } else if y > 0.5 {
            // The derivative of an even function is odd; the sign factor
            // extends the y = |x| formula to x < 0.
            - 8.0 * x.signum() * (y - 1.0).powi(2)
        } else /* 0 ≤ y ≤ 0.5 */ {
            // (24y - 16)y for x ≥ 0; since x = y·sign(x), this is the odd continuation.
            (24.0 * y - 16.0) * x
        }
    }

    /// Computes the Lipschitz factor of the kernel for $n=1$ with $σ=1$.
    #[inline]
    fn lipschitz_1d_σ1(&self) -> F {
        // The maximal absolute value of `diff_1d_σ1` is 2, attained at x = ±1/2.
        2.0
    }
}
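
// A central-difference sketch (hypothetical test) verifying that `diff_1d_σ1`
// is the derivative of `value_1d_σ1`, including at negative arguments; the
// sample points avoid the breakpoints 0, ±1/2 and ±1.
#[cfg(test)]
#[test]
fn hatconv_diff_1d_fd_sketch() {
    let u : HatConv<f64, 1> = HatConv { radius : 1.0 };
    let h = 1e-6;
    for &x in [-0.75, -0.3, 0.2, 0.6].iter() {
        let fd = (u.value_1d_σ1(x + h) - u.value_1d_σ1(x - h)) / (2.0 * h);
        assert!((fd - u.diff_1d_σ1(x)).abs() < 1e-6);
    }
}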

impl<'a, S, const N : usize> Support<S::Type, N> for HatConv<S, N>
where S : Constant {
    #[inline]
    fn support_hint(&self) -> Cube<S::Type,N> {
        let σ = self.radius();
        array_init(|| [-σ, σ]).into()
    }

    #[inline]
    fn in_support(&self, y : &Loc<S::Type,N>) -> bool {
        let σ = self.radius();
        y.iter().all(|x| x.abs() <= σ)
    }

    #[inline]
    fn bisection_hint(&self, cube : &Cube<S::Type, N>) -> [Option<S::Type>; N] {
        let σ = self.radius();
        cube.map(|c, d| symmetric_peak_hint(σ, c, d))
    }
}

#[replace_float_literals(S::Type::cast_from(literal))]
impl<S, const N : usize> GlobalAnalysis<S::Type, Bounds<S::Type>> for HatConv<S, N>
where S : Constant {
    #[inline]
    fn global_analysis(&self) -> Bounds<S::Type> {
        Bounds(0.0, self.apply(Loc::ORIGIN))
    }
}

impl<S, const N : usize> LocalAnalysis<S::Type, Bounds<S::Type>, N> for HatConv<S, N>
where S : Constant {
    #[inline]
    fn local_analysis(&self, cube : &Cube<S::Type, N>) -> Bounds<S::Type> {
        // The function is maximised/minimised where the 2-norm is minimised/maximised.
        let lower = self.apply(cube.maxnorm_point());
        let upper = self.apply(cube.minnorm_point());
        Bounds(lower, upper)
    }
}
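
// A sketch (hypothetical test, assuming `Cube` converts from nested arrays as
// in `support_hint` above): on a cube away from the origin, the upper bound is
// attained at the min-norm corner and the lower bound at the max-norm corner.
#[cfg(test)]
#[test]
fn hatconv_local_analysis_sketch() {
    let u : HatConv<f64, 1> = HatConv { radius : 1.0 };
    let cube : Cube<f64, 1> = [[0.25, 0.75]].into();
    let Bounds(lower, upper) = u.local_analysis(&cube);
    assert_eq!(lower, u.apply(Loc::from(0.75)));
    assert_eq!(upper, u.apply(Loc::from(0.25)));
}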

#[replace_float_literals(C::Type::cast_from(literal))]
impl<'a, C : Constant, const N : usize> Norm<C::Type, L1>
for HatConv<C, N> {
    #[inline]
    fn norm(&self, _ : L1) -> C::Type {
        1.0
    }
}

#[replace_float_literals(C::Type::cast_from(literal))]
impl<'a, C : Constant, const N : usize> Norm<C::Type, Linfinity>
for HatConv<C, N> {
    #[inline]
    fn norm(&self, _ : Linfinity) -> C::Type {
        self.bounds().upper()
    }
}

#[replace_float_literals(F::cast_from(literal))]
impl<'a, F : Float, R, C, const N : usize> Apply<&'a Loc<F, N>>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    type Output = F;

    #[inline]
    fn apply(&self, y : &'a Loc<F, N>) -> F {
        let Convolution(ref ind, ref hatconv) = self;
        let β = ind.r.value();
        let σ = hatconv.radius();

        // This is just a product of one-dimensional versions.
        y.product_map(|x| {
            // With $u_σ(x) = u_1(x/σ)/σ$ the normalised hat convolution
            // we have
            // $$
            //  [χ_{-β,β} * u_σ](x)
            //  = ∫_{x-β}^{x+β} u_σ(z) d z
            //  = (1/σ)∫_{x-β}^{x+β} u_1(z/σ) d z
            //  = ∫_{(x-β)/σ}^{(x+β)/σ} u_1(z) d z
            //  = [χ_{-β/σ, β/σ} * u_1](x/σ)
            // $$
            self.value_1d_σ1(x / σ, β / σ)
        })
    }
}

impl<'a, F : Float, R, C, const N : usize> Apply<Loc<F, N>>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    type Output = F;

    #[inline]
    fn apply(&self, y : Loc<F, N>) -> F {
        self.apply(&y)
    }
}

#[replace_float_literals(F::cast_from(literal))]
impl<'a, F : Float, R, C, const N : usize> Differentiable<&'a Loc<F, N>>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    type Output = Loc<F, N>;

    #[inline]
    fn differential(&self, y : &'a Loc<F, N>) -> Loc<F, N> {
        let Convolution(ref ind, ref hatconv) = self;
        let β = ind.r.value();
        let σ = hatconv.radius();

        let vs = y.map(|x| {
            self.value_1d_σ1(x / σ, β / σ)
        });
        // Each 1D factor is x ↦ value_1d_σ1(x/σ, β/σ), so by the chain rule its
        // derivative is diff_1d_σ1(x/σ, β/σ)/σ: only one factor 1/σ, unlike for
        // [`HatConv`] itself, as value_1d_σ1 here carries no 1/σ normalisation.
        product_differential(y, &vs, |x| {
            self.diff_1d_σ1(x / σ, β / σ) / σ
        })
    }
}

impl<'a, F : Float, R, C, const N : usize> Differentiable<Loc<F, N>>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    type Output = Loc<F, N>;

    #[inline]
    fn differential(&self, y : Loc<F, N>) -> Loc<F, N> {
        self.differential(&y)
    }
}

/// Compute $∫_a^b f'$ for a function $f'$ supported on $[c, d]$, given an
/// antiderivative $f$ of it. If $b > d$, the tail contribution $g()$ (the
/// integral over subsequent pieces beyond $d$) is added to the result.
#[inline]
#[replace_float_literals(F::cast_from(literal))]
fn i<F: Float>(a : F, b : F, c : F, d : F, f : impl Fn(F) -> F,
               g : impl Fn() -> F) -> F {
    if b < c {
        0.0
    } else if b <= d {
        if a <= c {
            f(b) - f(c)
        } else {
            f(b) - f(a)
        }
    } else /* b > d */ {
        g() + if a <= c {
            f(d) - f(c)
        } else if a < d {
            f(d) - f(a)
        } else {
            0.0
        }
    }
}
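
// A sketch of how `i` chains subintervals (hypothetical test): integrating the
// indicator of [0, 2] over [a, b] = [-1, 3], split into the pieces [0, 1] and
// [1, 2] with piecewise antiderivatives, yields 2.
#[cfg(test)]
#[test]
fn interval_integral_chaining() {
    let piece2 = || i(-1.0, 3.0, 1.0, 2.0, |t : f64| t - 1.0, || 0.0);
    let total = i(-1.0, 3.0, 0.0, 1.0, |t : f64| t, piece2);
    assert_eq!(total, 2.0);
}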

#[replace_float_literals(F::cast_from(literal))]
impl<F : Float, C, R, const N : usize> Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    /// Calculates the value of the 1D hat convolution further convolved by an interval
    /// indicator. As both functions are piecewise polynomials, this is implemented by an
    /// explicit integral over all subintervals of polynomiality of the cube indicator,
    /// using easily formed antiderivatives.
    #[inline]
    pub fn value_1d_σ1(&self, x : F, β : F) -> F {
        // The integration interval
        let a = x - β;
        let b = x + β;

        #[inline]
        fn pow4<F : Float>(x : F) -> F {
            let y = x * x;
            y * y
        }

        // Observe the factor 1/6 at the front from the antiderivatives below.
        // The factor 4 is from the normalisation of the original function.
        (4.0/6.0) * i(a, b, -1.0, -0.5,
                      // (2/3) (y+1)^3 on -1 < y ≤ -1/2
                      // The antiderivative is (2/12)(y+1)^4 = (1/6)(y+1)^4
                      |y| pow4(y+1.0),
                      || i(a, b, -0.5, 0.0,
                           // -2 y^3 - 2 y^2 + 1/3 on -1/2 < y ≤ 0
                           // The antiderivative is -1/2 y^4 - 2/3 y^3 + 1/3 y
                           |y| y*(-y*y*(y*3.0 + 4.0) + 2.0),
                           || i(a, b, 0.0, 0.5,
                                // 2 y^3 - 2 y^2 + 1/3 on 0 < y < 1/2
                                // The antiderivative is 1/2 y^4 - 2/3 y^3 + 1/3 y
                                |y| y*(y*y*(y*3.0 - 4.0) + 2.0),
                                || i(a, b, 0.5, 1.0,
                                     // -(2/3) (y-1)^3 on 1/2 < y ≤ 1
                                     // The antiderivative is -(2/12)(y-1)^4 = -(1/6)(y-1)^4
                                     |y| -pow4(y-1.0),
                                     || 0.0
                                )
                           )
                      )
        )
    }

    /// Calculates the derivative of the 1D hat convolution further convolved by an interval
    /// indicator. The implementation is similar to [`Self::value_1d_σ1`], using the fact that
    /// $(θ * ψ)' = θ * ψ'$.
    #[inline]
    pub fn diff_1d_σ1(&self, x : F, β : F) -> F {
        // The integration interval
        let a = x - β;
        let b = x + β;

        // The factor 4 is from the normalisation of the original function.
        4.0 * i(a, b, -1.0, -0.5,
                // (2/3) (y+1)^3 on -1 < y ≤ -1/2
                |y| (2.0/3.0) * (y + 1.0).powi(3),
                || i(a, b, -0.5, 0.0,
                     // -2 y^3 - 2 y^2 + 1/3 on -1/2 < y ≤ 0
                     |y| -2.0*(y + 1.0) * y * y + (1.0/3.0),
                     || i(a, b, 0.0, 0.5,
                          // 2 y^3 - 2 y^2 + 1/3 on 0 < y < 1/2
                          |y| 2.0*(y - 1.0) * y * y + (1.0/3.0),
                          || i(a, b, 0.5, 1.0,
                               // -(2/3) (y-1)^3 on 1/2 < y ≤ 1
                               |y| -(2.0/3.0) * (y - 1.0).powi(3),
                               || 0.0
                          )
                     )
                )
        )
    }
}
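
// A central-difference sketch (hypothetical test) that `diff_1d_σ1` matches the
// derivative of `value_1d_σ1`, as claimed by $(θ * ψ)' = θ * ψ'$ above. The
// `BallIndicator` construction mirrors the tests at the end of this file.
#[cfg(test)]
#[test]
fn convolution_diff_1d_fd_sketch() {
    use crate::kernels::BallIndicator;
    let conv : Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>
        = Convolution(BallIndicator { r : 0.5, exponent : Linfinity },
                      HatConv { radius : 1.0 });
    let h = 1e-6;
    for &x in [-1.2, -0.4, 0.1, 0.7, 1.3].iter() {
        let fd = (conv.value_1d_σ1(x + h, 0.5) - conv.value_1d_σ1(x - h, 0.5)) / (2.0 * h);
        assert!((fd - conv.diff_1d_σ1(x, 0.5)).abs() < 1e-6);
    }
}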

impl<F : Float, R, C, const N : usize>
Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    #[inline]
    fn get_r(&self) -> F {
        let Convolution(ref ind, ref hatconv) = self;
        ind.r.value() + hatconv.radius()
    }
}

impl<F : Float, R, C, const N : usize> Support<F, N>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    #[inline]
    fn support_hint(&self) -> Cube<F, N> {
        let r = self.get_r();
        array_init(|| [-r, r]).into()
    }

    #[inline]
    fn in_support(&self, y : &Loc<F, N>) -> bool {
        let r = self.get_r();
        y.iter().all(|x| x.abs() <= r)
    }

    #[inline]
    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
        // It is not difficult to verify that [`HatConv`] is C^2. Therefore, so is
        // [`Convolution<CubeIndicator<R, N>, HatConv<C, N>>`], so a finer subdivision
        // hint than this would not be particularly useful.
        let r = self.get_r();
        cube.map(|c, d| symmetric_peak_hint(r, c, d))
    }
}

impl<F : Float, R, C, const N : usize> GlobalAnalysis<F, Bounds<F>>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {
    #[inline]
    fn global_analysis(&self) -> Bounds<F> {
        Bounds(F::ZERO, self.apply(Loc::ORIGIN))
    }
}

impl<F : Float, R, C, const N : usize> LocalAnalysis<F, Bounds<F>, N>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {
    #[inline]
    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
        // The function is maximised/minimised where the norm is minimised/maximised.
        let lower = self.apply(cube.maxnorm_point());
        let upper = self.apply(cube.minnorm_point());
        //assert!(upper >= lower);
        if upper < lower {
            let Convolution(ref ind, ref hatconv) = self;
            let β = ind.r.value();
            let σ = hatconv.radius();
            eprintln!("WARNING: Hat convolution {β} {σ} upper bound {upper} < lower bound {lower} on {cube:?} with min-norm point {:?} and max-norm point {:?}", cube.minnorm_point(), cube.maxnorm_point());
            Bounds(upper, lower)
        } else {
            Bounds(lower, upper)
        }
    }
}


/// This [`BoundedBy`] implementation bounds $u * u$ by $(ψ * ψ) u$ for $u$ a hat convolution and
/// $ψ = χ_{[-a,a]^N}$ for some $a>0$.
///
/// This is based on the general formula for bounding $(uχ) * (uχ)$ by $(ψ * ψ) u$,
/// where we take $ψ = χ_{[-a,a]^N}$ and $χ = χ_{[-σ,σ]^N}$ for $σ$ the width of the hat
/// convolution.
#[replace_float_literals(F::cast_from(literal))]
impl<F, C, S, const N : usize>
BoundedBy<F, SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>>
for AutoConvolution<HatConv<C, N>>
where F : Float,
      C : Constant<Type=F>,
      S : Constant<Type=F> {

    fn bounding_factor(
        &self,
        kernel : &SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>
    ) -> Option<F> {
        // We use the comparison $ℱ[𝒜(ψ v)] ≤ L_1 ℱ[𝒜(ψ)u] ⟺ I_{v̂} v̂ ≤ L_1 û$ with
        // $ψ = χ_{[-w, w]}$ satisfying $supp v ⊂ [-w, w]$, i.e. $w ≥ σ$. Here $v̂ = ℱ[v]$
        // and $I_{v̂} = ∫ v̂ d ξ$. For this relationship to be valid, we need $v̂ ≥ 0$,
        // which is guaranteed by $v = u_σ$ being an autoconvolution. With $u = v$,
        // therefore $L_1 = I_{v̂} = ∫ ℱ[u_σ](ξ) d ξ = u_σ(0) = 4/(3σ)$ in each dimension.
        let SupportProductFirst(AutoConvolution(ref ind), hatconv2) = kernel;
        let σ = self.0.radius();
        let a = ind.r.value();
        let bounding_1d = 4.0 / (3.0 * σ);

        // Check that the cutting indicator of the comparison
        // `SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>`
        // is wide enough, and that the hat convolution has the same radius as ours.
        if σ <= a && hatconv2 == &self.0 {
            Some(bounding_1d.powi(N as i32))
        } else {
            // We cannot compare
            None
        }
    }
}

/// This [`BoundedBy`] implementation bounds $u * u$ by $u$ for $u$ a hat convolution.
///
/// This is based on Example 3.3 in the manuscript.
#[replace_float_literals(F::cast_from(literal))]
impl<F, C, const N : usize>
BoundedBy<F, HatConv<C, N>>
for AutoConvolution<HatConv<C, N>>
where F : Float,
      C : Constant<Type=F> {

    /// Returns an estimate of the factor $L_1$.
    ///
    /// Returns `None` if `kernel` does not have the same width as the hat convolution
    /// that `self` is based on.
    fn bounding_factor(
        &self,
        kernel : &HatConv<C, N>
    ) -> Option<F> {
        if kernel == &self.0 {
            Some(1.0)
        } else {
            // We cannot compare
            None
        }
    }
}
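
// A sketch (hypothetical test) of the comparison logic above: the factor is
// available only when the kernels match exactly. Assumes `AutoConvolution` is a
// public tuple struct, as suggested by the pattern matches above.
#[cfg(test)]
#[test]
fn autoconvolution_bounding_factor_sketch() {
    let u : HatConv<f64, 1> = HatConv { radius : 0.5 };
    let ac = AutoConvolution(u);
    assert_eq!(ac.bounding_factor(&u), Some(1.0));
    assert_eq!(ac.bounding_factor(&HatConv { radius : 0.25 }), None);
}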

#[cfg(test)]
mod tests {
    use alg_tools::lingrid::linspace;
    use alg_tools::mapping::Apply;
    use alg_tools::norms::Linfinity;
    use alg_tools::loc::Loc;
    use crate::kernels::{BallIndicator, CubeIndicator, Convolution};
    use super::HatConv;

    /// Tests numerically that [`HatConv<f64, 1>`] is monotone (non-increasing) on $[0, 1]$.
    #[test]
    fn hatconv_monotonicity() {
        let grid = linspace(0.0, 1.0, 100000);
        let hatconv : HatConv<f64, 1> = HatConv{ radius : 1.0 };
        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
        let first = vals.next().unwrap();
        // Carry the previous value along in the fold so that consecutive values
        // are compared, not every value against the first one.
        let monotone = vals.fold((first, true), |(prev, ok), t| (t, ok && prev >= t)).1;
        assert!(monotone);
    }

    /// Tests numerically that [`Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>`]
    /// is monotone: non-decreasing on $[-2, 0]$ and non-increasing on $[0, 2]$.
    #[test]
    fn convolution_cubeind_hatconv_monotonicity() {
        let grid = linspace(-2.0, 0.0, 100000);
        let hatconv : Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>
            = Convolution(BallIndicator { r : 0.5, exponent : Linfinity },
                          HatConv{ radius : 1.0 } );
        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
        let first = vals.next().unwrap();
        let monotone = vals.fold((first, true), |(prev, ok), t| (t, ok && prev <= t)).1;
        assert!(monotone);

        let grid = linspace(0.0, 2.0, 100000);
        let hatconv : Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>
            = Convolution(BallIndicator { r : 0.5, exponent : Linfinity },
                          HatConv{ radius : 1.0 } );
        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
        let first = vals.next().unwrap();
        let monotone = vals.fold((first, true), |(prev, ok), t| (t, ok && prev >= t)).1;
        assert!(monotone);
    }
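
    /// Numerically sanity-checks `value_1d_σ1` against midpoint-rule quadrature
    /// of the 1D hat kernel (a sketch; the tolerance is loose).
    #[test]
    fn convolution_cubeind_hatconv_quadrature() {
        let β = 0.5;
        let conv : Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>
            = Convolution(BallIndicator { r : β, exponent : Linfinity },
                          HatConv{ radius : 1.0 } );
        let u : HatConv<f64, 1> = HatConv{ radius : 1.0 };
        for &x in [-0.75, -0.25, 0.0, 0.3, 0.9].iter() {
            // [χ_{-β,β} * u](x) = ∫_{x-β}^{x+β} u(z) dz, approximated on n midpoints.
            let n = 10000;
            let h = 2.0 * β / (n as f64);
            let quad : f64 = (0..n).map(|k| {
                let z = x - β + ((k as f64) + 0.5) * h;
                u.apply(Loc::from(z)) * h
            }).sum();
            assert!((quad - conv.value_1d_σ1(x, β)).abs() < 1e-6);
        }
    }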
}