Sun, 27 Apr 2025 15:03:20 -0500
Start work on 3.0.0
| 0 | 1 | //! Implementation of the convolution of two hat functions, |
| 2 | //! and its convolution with a [`CubeIndicator`]. | |
| 3 | use numeric_literals::replace_float_literals; | |
| 4 | use serde::Serialize; | |
| 5 | use alg_tools::types::*; | |
| 6 | use alg_tools::norms::*; | |
| 7 | use alg_tools::loc::Loc; | |
| 8 | use alg_tools::sets::Cube; | |
| 9 | use alg_tools::bisection_tree::{ | |
| 10 | Support, | |
| 11 | Constant, | |
| 12 | Bounds, | |
| 13 | LocalAnalysis, | |
| 14 | GlobalAnalysis, | |
| 15 | Bounded, | |
| 16 | }; | |
| 35 | 17 | use alg_tools::mapping::{ |
| 18 | Mapping, | |
| 19 | Instance, | |
| 20 | DifferentiableImpl, | |
| 21 | Differential, | |
| 22 | }; | |
| 0 | 23 | use alg_tools::maputil::array_init; |
| 24 | ||
| 32 | 25 | use crate::types::Lipschitz; |
| 0 | 26 | use super::base::*; |
| 27 | use super::ball_indicator::CubeIndicator; | |
| 28 | ||
/// Hat convolution kernel.
///
/// This struct represents the function
/// $$
///     f(x\_1, …, x\_n) = \prod\_{i=1}^n \frac{4}{σ} (h\*h)(x\_i/σ)
/// $$
/// where the “hat function” $h(y)= \max(0, 1 - |2y|)$.
/// The factor $4/σ$ normalises $∫ f d x = 1$.
/// We have
/// $$
///     (h*h)(y) =
///     \begin{cases}
///         \frac{2}{3} (y+1)^3 & -1<y\leq -\frac{1}{2}, \\\\
///         -2 y^3-2 y^2+\frac{1}{3} & -\frac{1}{2}<y\leq 0, \\\\
///         2 y^3-2 y^2+\frac{1}{3} & 0<y<\frac{1}{2}, \\\\
///         -\frac{2}{3} (y-1)^3 & \frac{1}{2}\leq y<1. \\\\
///     \end{cases}
/// $$
// Internal derivation notes (deliberately `//`, not rendered into rustdoc):
// Hence
// $$
//     (h\*h)'(y) =
//     \begin{cases}
//         2 (y+1)^2 & -1<y\leq -\frac{1}{2}, \\\\
//         -6 y^2-4 y & -\frac{1}{2}<y\leq 0, \\\\
//         6 y^2-4 y & 0<y<\frac{1}{2}, \\\\
//         -2 (y-1)^2 & \frac{1}{2}\leq y<1. \\\\
//     \end{cases}
// $$
// as well as
// $$
//     (h\*h)''(y) =
//     \begin{cases}
//         4 (y+1) & -1<y\leq -\frac{1}{2}, \\\\
//         -12 y-4 & -\frac{1}{2}<y\leq 0, \\\\
//         12 y-4 & 0<y<\frac{1}{2}, \\\\
//         -4 (y-1) & \frac{1}{2}\leq y<1. \\\\
//     \end{cases}
// $$
// This is maximised at y=±1/2 with value 2, and minimised at y=0 with value -4.
// Now observe that
// $$
//     [∇f(x\_1, …, x\_n)]_j = \frac{4}{σ} (h\*h)'(x\_j/σ) \prod\_{j ≠ i} \frac{4}{σ} (h\*h)(x\_i/σ)
// $$
// `Eq` is sound here: equality reduces to equality of the radii through the
// manual `PartialEq` implementation below.
#[derive(Copy,Clone,Debug,Serialize,Eq)]
pub struct HatConv<S : Constant, const N : usize> {
    /// The parameter $σ$ of the kernel.
    pub radius : S,
}
| 77 | ||
| 78 | impl<S1, S2, const N : usize> PartialEq<HatConv<S2, N>> for HatConv<S1, N> | |
| 79 | where S1 : Constant, | |
| 80 | S2 : Constant<Type=S1::Type> { | |
| 81 | fn eq(&self, other : &HatConv<S2, N>) -> bool { | |
| 82 | self.radius.value() == other.radius.value() | |
| 83 | } | |
| 84 | } | |
| 85 | ||
impl<'a, S, const N : usize> HatConv<S, N> where S : Constant {
    /// Returns the $σ$ parameter of the kernel.
    #[inline]
    pub fn radius(&self) -> S::Type {
        self.radius.value()
    }
}
| 93 | ||
impl<'a, S, const N : usize> Mapping<Loc<S::Type, N>> for HatConv<S, N>
where S : Constant {
    type Codomain = S::Type;

    /// Evaluates the kernel at `y` as a product over coordinates of the
    /// normalised one-dimensional kernel: $∏_i u_1(y_i/σ)/σ$.
    #[inline]
    fn apply<I : Instance<Loc<S::Type, N>>>(&self, y : I) -> Self::Codomain {
        let σ = self.radius();
        y.cow().product_map(|x| {
            // With $u_σ(x) = u_1(x/σ)/σ$, each one-dimensional factor stays normalised.
            self.value_1d_σ1(x / σ) / σ
        })
    }
}
| 106 | ||
| 32 | 107 | #[replace_float_literals(S::Type::cast_from(literal))] |
| 108 | impl<S, const N : usize> Lipschitz<L1> for HatConv<S, N> | |
| 109 | where S : Constant { | |
| 110 | type FloatType = S::Type; | |
| 111 | #[inline] | |
| 112 | fn lipschitz_factor(&self, L1 : L1) -> Option<Self::FloatType> { | |
| 113 | // For any ψ_i, we have | |
| 114 | // ∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i) | |
| 115 | // = [ψ_1(x_1)-ψ_1(y_1)] ∏_{i=2}^N ψ_i(x_i) | |
| 116 | // + ψ_1(y_1)[ ∏_{i=2}^N ψ_i(x_i) - ∏_{i=2}^N ψ_i(y_i)] | |
| 117 | // = ∑_{j=1}^N [ψ_j(x_j)-ψ_j(y_j)]∏_{i > j} ψ_i(x_i) ∏_{i < j} ψ_i(y_i) | |
| 118 | // Thus | |
| 119 | // |∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i)| | |
| 35 | 120 | // ≤ ∑_{j=1}^N |ψ_j(x_j)-ψ_j(y_j)| ∏_{j ≠ i} \max_j |ψ_j| |
| 32 | 121 | let σ = self.radius(); |
|
34
efa60bc4f743
Radon FB + sliding improvements
Tuomo Valkonen <tuomov@iki.fi>
parents:
32
diff
changeset
|
122 | let l1d = self.lipschitz_1d_σ1() / (σ*σ); |
|
efa60bc4f743
Radon FB + sliding improvements
Tuomo Valkonen <tuomov@iki.fi>
parents:
32
diff
changeset
|
123 | let m1d = self.value_1d_σ1(0.0) / σ; |
|
efa60bc4f743
Radon FB + sliding improvements
Tuomo Valkonen <tuomov@iki.fi>
parents:
32
diff
changeset
|
124 | Some(l1d * m1d.powi(N as i32 - 1)) |
| 32 | 125 | } |
| 126 | } | |
| 127 | ||
| 128 | impl<S, const N : usize> Lipschitz<L2> for HatConv<S, N> | |
| 129 | where S : Constant { | |
| 130 | type FloatType = S::Type; | |
| 131 | #[inline] | |
| 132 | fn lipschitz_factor(&self, L2 : L2) -> Option<Self::FloatType> { | |
| 133 | self.lipschitz_factor(L1).map(|l1| l1 * <S::Type>::cast_from(N).sqrt()) | |
| 134 | } | |
| 135 | } | |
| 136 | ||
| 137 | ||
impl<'a, S, const N : usize> DifferentiableImpl<Loc<S::Type, N>> for HatConv<S, N>
where S : Constant {
    type Derivative = Loc<S::Type, N>;

    /// Gradient of the tensor-product kernel via the product rule: each
    /// component combines the 1D derivative at that coordinate with the 1D
    /// values at the remaining coordinates (see `product_differential`).
    #[inline]
    fn differential_impl<I : Instance<Loc<S::Type, N>>>(&self, y0 : I) -> Self::Derivative {
        let y = y0.cow();
        let σ = self.radius();
        let σ2 = σ * σ;
        // Per-coordinate values of the scaled 1D kernel u_σ(x) = u_1(x/σ)/σ.
        let vs = y.map(|x| {
            self.value_1d_σ1(x / σ) / σ
        });
        // Chain rule: d/dx [u_1(x/σ)/σ] = u_1'(x/σ)/σ².
        product_differential(&*y, &vs, |x| {
            self.diff_1d_σ1(x / σ) / σ2
        })
    }
}
| 155 | ||
| 35 | 156 | |
#[replace_float_literals(S::Type::cast_from(literal))]
impl<'a, F : Float, S, const N : usize> Lipschitz<L2>
for Differential<'a, Loc<F, N>, HatConv<S, N>>
where S : Constant<Type=F> {
    type FloatType = F;

    /// Lipschitz factor of the differential, assembled from 1D constants of
    /// the scaled kernel: its maximum, Lipschitz factor, maximal derivative,
    /// and Lipschitz factor of the derivative.
    ///
    /// NOTE(review): the last argument scales the 1D derivative-Lipschitz
    /// constant by 1/σ² whereas the chain rule for x ↦ u_1'(x/σ)/σ² would
    /// suggest 1/σ³ — confirm against the contract of
    /// `product_differential_lipschitz_factor`.
    #[inline]
    fn lipschitz_factor(&self, _l2 : L2) -> Option<F> {
        let h = self.base_fn();
        let σ = h.radius();
        Some(product_differential_lipschitz_factor::<F, N>(
            h.value_1d_σ1(0.0) / σ,
            h.lipschitz_1d_σ1() / (σ*σ),
            h.maxabsdiff_1d_σ1() / (σ*σ),
            h.lipschitz_diff_1d_σ1() / (σ*σ),
        ))
    }
}
| 0 | 175 | |
| 35 | 176 | |
| 0 | 177 | #[replace_float_literals(S::Type::cast_from(literal))] |
| 178 | impl<'a, F : Float, S, const N : usize> HatConv<S, N> | |
| 179 | where S : Constant<Type=F> { | |
| 180 | /// Computes the value of the kernel for $n=1$ with $σ=1$. | |
| 181 | #[inline] | |
| 182 | fn value_1d_σ1(&self, x : F) -> F { | |
| 183 | let y = x.abs(); | |
| 184 | if y >= 1.0 { | |
| 185 | 0.0 | |
| 186 | } else if y > 0.5 { | |
| 187 | - (8.0/3.0) * (y - 1.0).powi(3) | |
| 188 | } else /* 0 ≤ y ≤ 0.5 */ { | |
| 189 | (4.0/3.0) + 8.0 * y * y * (y - 1.0) | |
| 190 | } | |
| 191 | } | |
| 32 | 192 | |
| 193 | /// Computes the differential of the kernel for $n=1$ with $σ=1$. | |
| 194 | #[inline] | |
| 195 | fn diff_1d_σ1(&self, x : F) -> F { | |
| 196 | let y = x.abs(); | |
| 197 | if y >= 1.0 { | |
| 198 | 0.0 | |
| 199 | } else if y > 0.5 { | |
| 200 | - 8.0 * (y - 1.0).powi(2) | |
| 201 | } else /* 0 ≤ y ≤ 0.5 */ { | |
| 202 | (24.0 * y - 16.0) * y | |
| 203 | } | |
| 204 | } | |
| 205 | ||
| 206 | /// Computes the Lipschitz factor of the kernel for $n=1$ with $σ=1$. | |
| 207 | #[inline] | |
| 208 | fn lipschitz_1d_σ1(&self) -> F { | |
| 209 | // Maximal absolute differential achieved at ±0.5 by diff_1d_σ1 analysis | |
| 210 | 2.0 | |
| 211 | } | |
| 35 | 212 | |
| 213 | /// Computes the maximum absolute differential of the kernel for $n=1$ with $σ=1$. | |
| 214 | #[inline] | |
| 215 | fn maxabsdiff_1d_σ1(&self) -> F { | |
| 216 | // Maximal absolute differential achieved at ±0.5 by diff_1d_σ1 analysis | |
| 217 | 2.0 | |
| 218 | } | |
| 219 | ||
| 220 | /// Computes the second differential of the kernel for $n=1$ with $σ=1$. | |
| 221 | #[inline] | |
| 222 | #[allow(dead_code)] | |
| 223 | fn diff2_1d_σ1(&self, x : F) -> F { | |
| 224 | let y = x.abs(); | |
| 225 | if y >= 1.0 { | |
| 226 | 0.0 | |
| 227 | } else if y > 0.5 { | |
| 228 | - 16.0 * (y - 1.0) | |
| 229 | } else /* 0 ≤ y ≤ 0.5 */ { | |
| 230 | 48.0 * y - 16.0 | |
| 231 | } | |
| 232 | } | |
| 233 | ||
| 234 | /// Computes the differential of the kernel for $n=1$ with $σ=1$. | |
| 235 | #[inline] | |
| 236 | fn lipschitz_diff_1d_σ1(&self) -> F { | |
| 237 | // Maximal absolute second differential achieved at 0 by diff2_1d_σ1 analysis | |
| 238 | 16.0 | |
| 239 | } | |
| 0 | 240 | } |
| 241 | ||
| 242 | impl<'a, S, const N : usize> Support<S::Type, N> for HatConv<S, N> | |
| 243 | where S : Constant { | |
| 244 | #[inline] | |
| 245 | fn support_hint(&self) -> Cube<S::Type,N> { | |
| 246 | let σ = self.radius(); | |
| 247 | array_init(|| [-σ, σ]).into() | |
| 248 | } | |
| 249 | ||
| 250 | #[inline] | |
| 251 | fn in_support(&self, y : &Loc<S::Type,N>) -> bool { | |
| 252 | let σ = self.radius(); | |
| 253 | y.iter().all(|x| x.abs() <= σ) | |
| 254 | } | |
| 255 | ||
| 256 | #[inline] | |
| 257 | fn bisection_hint(&self, cube : &Cube<S::Type, N>) -> [Option<S::Type>; N] { | |
| 258 | let σ = self.radius(); | |
| 259 | cube.map(|c, d| symmetric_peak_hint(σ, c, d)) | |
| 260 | } | |
| 261 | } | |
| 262 | ||
#[replace_float_literals(S::Type::cast_from(literal))]
impl<S, const N : usize> GlobalAnalysis<S::Type, Bounds<S::Type>> for HatConv<S, N>
where S : Constant {
    /// Global bounds: the kernel is non-negative and attains its maximum at the origin.
    #[inline]
    fn global_analysis(&self) -> Bounds<S::Type> {
        Bounds(0.0, self.apply(Loc::ORIGIN))
    }
}
| 271 | ||
impl<S, const N : usize> LocalAnalysis<S::Type, Bounds<S::Type>, N> for HatConv<S, N>
where S : Constant {
    /// Local bounds on `cube`, exploiting that the kernel is radially decreasing.
    #[inline]
    fn local_analysis(&self, cube : &Cube<S::Type, N>) -> Bounds<S::Type> {
        // The function is maximised/minimised where the 2-norm is minimised/maximised.
        let lower = self.apply(cube.maxnorm_point());
        let upper = self.apply(cube.minnorm_point());
        Bounds(lower, upper)
    }
}
| 282 | ||
#[replace_float_literals(C::Type::cast_from(literal))]
impl<'a, C : Constant, const N : usize> Norm<C::Type, L1>
for HatConv<C, N> {
    /// The kernel is non-negative and normalised ($∫ f\,dx = 1$), so $‖f‖_1 = 1$.
    #[inline]
    fn norm(&self, _ : L1) -> C::Type {
        1.0
    }
}
| 291 | ||
#[replace_float_literals(C::Type::cast_from(literal))]
impl<'a, C : Constant, const N : usize> Norm<C::Type, Linfinity>
for HatConv<C, N> {
    /// The supremum norm is the upper global bound, i.e. the value at the origin.
    #[inline]
    fn norm(&self, _ : Linfinity) -> C::Type {
        self.bounds().upper()
    }
}
| 300 | ||
#[replace_float_literals(F::cast_from(literal))]
impl<'a, F : Float, R, C, const N : usize> Mapping<Loc<F, N>>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    type Codomain = F;

    /// Evaluates the convolution of the cube indicator (half-width $β$) with
    /// the hat kernel (radius $σ$) as a product of one-dimensional factors.
    #[inline]
    fn apply<I : Instance<Loc<F, N>>>(&self, y : I) -> F {
        let Convolution(ref ind, ref hatconv) = self;
        let β = ind.r.value();
        let σ = hatconv.radius();

        // This is just a product of one-dimensional versions
        y.cow().product_map(|x| {
            // With $u_σ(x) = u_1(x/σ)/σ$ the normalised hat convolution
            // we have
            // $$
            //      [χ_{-β,β} * u_σ](x)
            //      = ∫_{x-β}^{x+β} u_σ(z) d z
            //      = (1/σ)∫_{x-β}^{x+β} u_1(z/σ) d z
            //      = ∫_{(x-β)/σ}^{(x+β)/σ} u_1(z) d z
            //      = [χ_{-β/σ, β/σ} * u_1](x/σ)
            // $$
            self.value_1d_σ1(x / σ, β / σ)
        })
    }
}
| 330 | ||
#[replace_float_literals(F::cast_from(literal))]
impl<'a, F : Float, R, C, const N : usize> DifferentiableImpl<Loc<F, N>>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    type Derivative = Loc<F, N>;

    /// Gradient via the product rule, analogously to the `HatConv`
    /// differential: per-coordinate 1D values and derivatives are combined
    /// by `product_differential`.
    #[inline]
    fn differential_impl<I : Instance<Loc<F, N>>>(&self, y0 : I) -> Loc<F, N> {
        let y = y0.cow();
        let Convolution(ref ind, ref hatconv) = self;
        let β = ind.r.value();
        let σ = hatconv.radius();
        let σ2 = σ * σ;

        // Values of the scaled 1D convolution at each coordinate.
        let vs = y.map(|x| {
            self.value_1d_σ1(x / σ, β / σ)
        });
        // Chain rule for x ↦ [χ * u_σ](x): the rescaling contributes 1/σ².
        product_differential(&*y, &vs, |x| {
            self.diff_1d_σ1(x / σ, β / σ) / σ2
        })
    }
}
| 355 | ||
| 356 | ||
| 357 | /// Integrate $f$, whose support is $[c, d]$, on $[a, b]$. | |
| 358 | /// If $b > d$, add $g()$ to the result. | |
| 359 | #[inline] | |
| 360 | #[replace_float_literals(F::cast_from(literal))] | |
| 361 | fn i<F: Float>(a : F, b : F, c : F, d : F, f : impl Fn(F) -> F, | |
| 362 | g : impl Fn() -> F) -> F { | |
| 363 | if b < c { | |
| 364 | 0.0 | |
| 365 | } else if b <= d { | |
| 366 | if a <= c { | |
| 367 | f(b) - f(c) | |
| 368 | } else { | |
| 369 | f(b) - f(a) | |
| 370 | } | |
| 371 | } else /* b > d */ { | |
| 372 | g() + if a <= c { | |
| 373 | f(d) - f(c) | |
| 374 | } else if a < d { | |
| 375 | f(d) - f(a) | |
| 376 | } else { | |
| 377 | 0.0 | |
| 378 | } | |
| 379 | } | |
| 380 | } | |
| 0 | 381 | |
#[replace_float_literals(F::cast_from(literal))]
impl<F : Float, C, R, const N : usize> Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    /// Calculates the value of the 1D hat convolution further convolved by a interval indicator.
    /// As both functions are piecewise polynomials, this is implemented by explicit integral over
    /// all subintervals of polynomiality of the cube indicator, using easily formed
    /// antiderivatives.
    #[inline]
    pub fn value_1d_σ1(&self, x : F, β : F) -> F {
        // The integration interval
        let a = x - β;
        let b = x + β;

        // Fourth power, used by the antiderivatives of the outer cubic pieces.
        #[inline]
        fn pow4<F : Float>(x : F) -> F {
            let y = x * x;
            y * y
        }

        // The nested `i(…)` calls below walk the four polynomiality pieces of
        // h*h from left to right; each `g` closure continues with the next piece.
        // Observe the factor 1/6 at the front from the antiderivatives below.
        // The factor 4 is from normalisation of the original function.
        (4.0/6.0) * i(a, b, -1.0, -0.5,
                      // (2/3) (y+1)^3 on -1 < y ≤ -1/2
                      // The antiderivative is (2/12)(y+1)^4 = (1/6)(y+1)^4
                      |y| pow4(y+1.0),
                      || i(a, b, -0.5, 0.0,
                           // -2 y^3 - 2 y^2 + 1/3 on -1/2 < y ≤ 0
                           // The antiderivative is -1/2 y^4 - 2/3 y^3 + 1/3 y
                           |y| y*(-y*y*(y*3.0 + 4.0) + 2.0),
                           || i(a, b, 0.0, 0.5,
                                // 2 y^3 - 2 y^2 + 1/3 on 0 < y < 1/2
                                // The antiderivative is 1/2 y^4 - 2/3 y^3 + 1/3 y
                                |y| y*(y*y*(y*3.0 - 4.0) + 2.0),
                                || i(a, b, 0.5, 1.0,
                                     // -(2/3) (y-1)^3 on 1/2 < y ≤ 1
                                     // The antiderivative is -(2/12)(y-1)^4 = -(1/6)(y-1)^4
                                     |y| -pow4(y-1.0),
                                     || 0.0
                                )
                           )
                      )
        )
    }

    /// Calculates the derivative of the 1D hat convolution further convolved by a interval
    /// indicator. The implementation is similar to [`Self::value_1d_σ1`], using the fact that
    /// $(θ * ψ)' = θ * ψ'$.
    #[inline]
    pub fn diff_1d_σ1(&self, x : F, β : F) -> F {
        // The integration interval
        let a = x - β;
        let b = x + β;

        // Since θ * ψ' is integrated, the antiderivatives here are the pieces
        // of h*h itself (evaluated at the clipped endpoints by `i`).
        // The factor 4 is from normalisation of the original function.
        4.0 * i(a, b, -1.0, -0.5,
                // (2/3) (y+1)^3 on -1 < y ≤ -1/2
                |y| (2.0/3.0) * (y + 1.0).powi(3),
                || i(a, b, -0.5, 0.0,
                     // -2 y^3 - 2 y^2 + 1/3 on -1/2 < y ≤ 0
                     |y| -2.0*(y + 1.0) * y * y + (1.0/3.0),
                     || i(a, b, 0.0, 0.5,
                          // 2 y^3 - 2 y^2 + 1/3 on 0 < y < 1/2
                          |y| 2.0*(y - 1.0) * y * y + (1.0/3.0),
                          || i(a, b, 0.5, 1.0,
                               // -(2/3) (y-1)^3 on 1/2 < y ≤ 1
                               |y| -(2.0/3.0) * (y - 1.0).powi(3),
                               || 0.0
                          )
                     )
                )
        )
    }
}
| 457 | ||
| 35 | 458 | /* |
| 459 | impl<'a, F : Float, R, C, const N : usize> Lipschitz<L2> | |
| 460 | for Differential<Loc<F, N>, Convolution<CubeIndicator<R, N>, HatConv<C, N>>> | |
| 461 | where R : Constant<Type=F>, | |
| 462 | C : Constant<Type=F> { | |
| 463 | ||
| 464 | type FloatType = F; | |
| 465 | ||
| 466 | #[inline] | |
| 467 | fn lipschitz_factor(&self, _l2 : L2) -> Option<F> { | |
| 468 | dbg!("unimplemented"); | |
| 469 | None | |
| 470 | } | |
| 471 | } | |
| 472 | */ | |
| 473 | ||
| 0 | 474 | impl<F : Float, R, C, const N : usize> |
| 475 | Convolution<CubeIndicator<R, N>, HatConv<C, N>> | |
| 476 | where R : Constant<Type=F>, | |
| 477 | C : Constant<Type=F> { | |
| 478 | ||
| 479 | #[inline] | |
| 480 | fn get_r(&self) -> F { | |
| 481 | let Convolution(ref ind, ref hatconv) = self; | |
| 482 | ind.r.value() + hatconv.radius() | |
| 483 | } | |
| 484 | } | |
| 485 | ||
impl<F : Float, R, C, const N : usize> Support<F, N>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {

    /// The support is the cube $[-r, r]^N$ with $r = β + σ$ (see [`Self::get_r`]).
    #[inline]
    fn support_hint(&self) -> Cube<F, N> {
        let r = self.get_r();
        array_init(|| [-r, r]).into()
    }

    /// A point is in the support iff every coordinate lies within $[-r, r]$.
    #[inline]
    fn in_support(&self, y : &Loc<F, N>) -> bool {
        let r = self.get_r();
        y.iter().all(|x| x.abs() <= r)
    }

    #[inline]
    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
        // It is not difficult to verify that [`HatConv`] is C^2.
        // Therefore, so is [`Convolution<CubeIndicator<R, N>, HatConv<C, N>>`] so that a finer
        // subdivision for the hint than this is not particularly useful.
        let r = self.get_r();
        cube.map(|c, d| symmetric_peak_hint(r, c, d))
    }
}
| 512 | ||
impl<F : Float, R, C, const N : usize> GlobalAnalysis<F, Bounds<F>>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {
    /// Global bounds: non-negative, maximised at the origin.
    #[inline]
    fn global_analysis(&self) -> Bounds<F> {
        Bounds(F::ZERO, self.apply(Loc::ORIGIN))
    }
}
| 522 | ||
impl<F : Float, R, C, const N : usize> LocalAnalysis<F, Bounds<F>, N>
for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
where R : Constant<Type=F>,
      C : Constant<Type=F> {
    /// Local bounds on `cube`, with a defensive swap (and warning) when the
    /// numerically evaluated bounds cross.
    #[inline]
    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
        // The function is maximised/minimised where the absolute value is minimised/maximised.
        let lower = self.apply(cube.maxnorm_point());
        let upper = self.apply(cube.minnorm_point());
        //assert!(upper >= lower);
        if upper < lower {
            // NOTE(review): presumably floating-point cancellation in the
            // piecewise antiderivatives can make the bounds cross slightly;
            // warn and swap rather than panic — confirm the cause.
            let Convolution(ref ind, ref hatconv) = self;
            let β = ind.r.value();
            let σ = hatconv.radius();
            eprintln!("WARNING: Hat convolution {β} {σ} upper bound {upper} < lower bound {lower} on {cube:?} with min-norm point {:?} and max-norm point {:?}", cube.minnorm_point(), cube.maxnorm_point());
            Bounds(upper, lower)
        } else {
            Bounds(lower, upper)
        }
    }
}
| 544 | ||
| 545 | ||
/// This [`BoundedBy`] implementation bounds $u * u$ by $(ψ * ψ) u$ for $u$ a hat convolution and
/// $ψ = χ_{[-a,a]^N}$ for some $a>0$.
///
/// This is based on the general formula for bounding $(uχ) * (uχ)$ by $(ψ * ψ) u$,
/// where we take $ψ = χ_{[-a,a]^N}$ and $χ = χ_{[-σ,σ]^N}$ for $σ$ the width of the hat
/// convolution.
#[replace_float_literals(F::cast_from(literal))]
impl<F, C, S, const N : usize>
BoundedBy<F, SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>>
for AutoConvolution<HatConv<C, N>>
where F : Float,
      C : Constant<Type=F>,
      S : Constant<Type=F> {

    /// Returns the factor $L_1$, or `None` if the comparison kernel is
    /// incompatible (indicator too narrow, or differing radius).
    fn bounding_factor(
        &self,
        kernel : &SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>
    ) -> Option<F> {
        // We use the comparison $ℱ[𝒜(ψ v)] ≤ L_1 ℱ[𝒜(ψ)u] ⟺ I_{v̂} v̂ ≤ L_1 û$ with
        // $ψ = χ_{[-w, w]}$ satisfying $supp v ⊂ [-w, w]$, i.e. $w ≥ σ$. Here $v̂ = ℱ[v]$ and
        // $I_{v̂} = ∫ v̂ d ξ. For this relationship to be valid, we need $v̂ ≥ 0$, which is guaranteed
        // by $v̂ = u_σ$ being an autoconvolution. With $u = v$, therefore $L_1 = I_v̂ = ∫ u_σ(ξ) d ξ$.
        let SupportProductFirst(AutoConvolution(ref ind), hatconv2) = kernel;
        let σ = self.0.radius();
        let a = ind.r.value();
        // One-dimensional factor of $L_1$.
        let bounding_1d = 4.0 / (3.0 * σ);

        // Check that the cutting indicator of the comparison
        // `SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>`
        // is wide enough, and that the hat convolution has the same radius as ours.
        if σ <= a && hatconv2 == &self.0 {
            Some(bounding_1d.powi(N as i32))
        } else {
            // We cannot compare
            None
        }
    }
}
| 584 | ||
/// This [`BoundedBy`] implementation bounds $u * u$ by $u$ for $u$ a hat convolution.
///
/// This is based on Example 3.3 in the manuscript.
#[replace_float_literals(F::cast_from(literal))]
impl<F, C, const N : usize>
BoundedBy<F, HatConv<C, N>>
for AutoConvolution<HatConv<C, N>>
where F : Float,
      C : Constant<Type=F> {

    /// Returns an estimate of the factor $L_1$.
    ///
    /// Returns `None` if `kernel` does not have the same width as hat convolution that `self`
    /// is based on.
    fn bounding_factor(
        &self,
        kernel : &HatConv<C, N>
    ) -> Option<F> {
        if kernel == &self.0 {
            Some(1.0)
        } else {
            // We cannot compare
            None
        }
    }
}
| 611 | ||
#[cfg(test)]
mod tests {
    use alg_tools::lingrid::linspace;
    use alg_tools::mapping::Mapping;
    use alg_tools::norms::Linfinity;
    use alg_tools::loc::Loc;
    use crate::kernels::{BallIndicator, CubeIndicator, Convolution};
    use super::HatConv;

    /// Tests numerically that [`HatConv<f64, 1>`] is monotone on [0, 1].
    #[test]
    fn hatconv_monotonicity() {
        let grid = linspace(0.0, 1.0, 100000);
        let hatconv : HatConv<f64, 1> = HatConv{ radius : 1.0 };
        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
        let first = vals.next().unwrap();
        // Fold carries the *previous* sample forward so that consecutive
        // samples are compared. (Previously the accumulator kept `first`
        // unchanged, which only verified that the first sample was maximal,
        // not monotonicity.)
        let monotone = vals.fold((first, true), |(prev, ok), t| (t, ok && prev >= t)).1;
        assert!(monotone);
    }

    /// Tests numerically that [`Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>`] is monotone:
    /// non-decreasing on [-2, 0] and non-increasing on [0, 2].
    #[test]
    fn convolution_cubeind_hatconv_monotonicity() {
        let grid = linspace(-2.0, 0.0, 100000);
        let hatconv : Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>
            = Convolution(BallIndicator { r : 0.5, exponent : Linfinity },
                          HatConv{ radius : 1.0 } );
        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
        let first = vals.next().unwrap();
        // As in `hatconv_monotonicity` above, advance the accumulator to `t`.
        let monotone = vals.fold((first, true), |(prev, ok), t| (t, ok && prev <= t)).1;
        assert!(monotone);

        let grid = linspace(0.0, 2.0, 100000);
        let hatconv : Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>
            = Convolution(BallIndicator { r : 0.5, exponent : Linfinity },
                          HatConv{ radius : 1.0 } );
        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
        let first = vals.next().unwrap();
        let monotone = vals.fold((first, true), |(prev, ok), t| (t, ok && prev >= t)).1;
        assert!(monotone);
    }
}