/*!
Solver for the point source localisation problem using a forward-backward splitting method.

This corresponds to the manuscript

 * Valkonen T. - _Proximal methods for point source localisation_. ARXIV TO INSERT.

The main routine is [`pointsource_fb`]. It is based on [`generic_pointsource_fb`], which is also
used by our [primal-dual proximal splitting][crate::pdps] implementation.

FISTA-type inertia can also be enabled through [`FBConfig::meta`].

## Problem

<p>
Our objective is to solve
$$
    \min_{μ ∈ ℳ(Ω)}~ F_0(Aμ-b) + α \|μ\|_{ℳ(Ω)} + δ_{≥ 0}(μ),
$$
where $F_0(y)=\frac{1}{2}\|y\|_2^2$ and the forward operator $A \in 𝕃(ℳ(Ω); ℝ^n)$.
</p>

## Approach

<p>
As documented in more detail in the paper, on each step we approximately solve
$$
    \min_{μ ∈ ℳ(Ω)}~ F(μ) + α \|μ\|_{ℳ(Ω)} + δ_{≥ 0}(μ) + \frac{1}{2}\|μ-μ^k\|_𝒟^2,
$$
where $𝒟 ∈ 𝕃(ℳ(Ω); C_c(Ω))$ is typically a convolution operator.
</p>

## Finite-dimensional subproblems.

With $C$ a projection from [`DiscreteMeasure`] to the weights, and $x^k$ such that $x^k=Cμ^k$, we
form the discretised linearised inner problem
<p>
$$
    \min_{x ∈ ℝ^n}~ τ\bigl(F(Cx^k) + [C^*∇F(Cx^k)]^⊤(x-x^k) + α {\vec 1}^⊤ x\bigr)
    + δ_{≥ 0}(x) + \frac{1}{2}\|x-x^k\|_{C^*𝒟C}^2,
$$
equivalently
$$
\begin{aligned}
    \min_x~ & τF(Cx^k) - τ[C^*∇F(Cx^k)]^⊤x^k + \frac{1}{2} (x^k)^⊤ C^*𝒟C x^k
    \\
    &
    - [C^*𝒟C x^k - τC^*∇F(Cx^k)]^⊤ x
    \\
    &
    + \frac{1}{2} x^⊤ C^*𝒟C x
    + τα {\vec 1}^⊤ x + δ_{≥ 0}(x).
\end{aligned}
$$
In other words, we obtain the quadratic non-negativity constrained problem
$$
    \min_{x ∈ ℝ^n}~ \frac{1}{2} x^⊤ Ã x - g̃^⊤ x + c + τα {\vec 1}^⊤ x + δ_{≥ 0}(x),
$$
where
$$
\begin{aligned}
    Ã & = C^*𝒟C,
    \\
    g̃ & = C^*𝒟C x^k - τ C^*∇F(Cx^k)
        = C^* 𝒟 μ^k - τ C^*A^*(Aμ^k - b),
    \\
    c & = τ F(Cx^k) - τ[C^*∇F(Cx^k)]^⊤x^k + \frac{1}{2} (x^k)^⊤ C^*𝒟C x^k
    \\
    &
    = \frac{τ}{2} \|Aμ^k-b\|^2 - τ[Aμ^k-b]^⊤Aμ^k + \frac{1}{2} \|μ^k\|_{𝒟}^2
    \\
    &
    = -\frac{τ}{2} \|Aμ^k-b\|^2 - τ[Aμ^k-b]^⊤ b + \frac{1}{2} \|μ^k\|_{𝒟}^2.
\end{aligned}
$$
</p>

We solve this with either SSN or FB via [`quadratic_nonneg`] as determined by
[`InnerSettings`] in [`FBGenericConfig::inner`].
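
To illustrate how the derivation above maps onto code, the inner loop of
[`generic_pointsource_fb`] hands the discretised data to [`quadratic_nonneg`] roughly as in the
following sketch; `Ã`, `g̃`, `x`, `τ`, `α`, and the tolerance variables are iteration-local values
of that routine, not items defined in this module:

```ignore
// Sketch of the inner subproblem call made inside generic_pointsource_fb.
let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
inner_iters += quadratic_nonneg(config.inner.method, &Ã, &g̃, τ*α, &mut x,
                                inner_τ, inner_it);
```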
*/

use numeric_literals::replace_float_literals;
use std::cmp::Ordering::*;
use serde::{Serialize, Deserialize};
use colored::Colorize;
use nalgebra::DVector;

use alg_tools::iterate::{
    AlgIteratorFactory,
    AlgIteratorState,
};
use alg_tools::euclidean::Euclidean;
use alg_tools::norms::Norm;
use alg_tools::linops::Apply;
use alg_tools::sets::Cube;
use alg_tools::loc::Loc;
use alg_tools::bisection_tree::{
    BTFN,
    PreBTFN,
    Bounds,
    BTNodeLookup,
    BTNode,
    BTSearch,
    P2Minimise,
    SupportGenerator,
    LocalAnalysis,
    Bounded,
};
use alg_tools::mapping::RealMapping;
use alg_tools::nalgebra_support::ToNalgebraRealField;

use crate::types::*;
use crate::measures::{
    DiscreteMeasure,
    DeltaMeasure,
    Radon
};
use crate::measures::merging::{
    SpikeMergingMethod,
    SpikeMerging,
};
use crate::forward_model::ForwardModel;
use crate::seminorms::{
    DiscreteMeasureOp, Lipschitz
};
use crate::subproblem::{
    quadratic_nonneg,
    InnerSettings,
    InnerMethod,
};
use crate::tolerance::Tolerance;
use crate::plot::{
    SeqPlotter,
    Plotting,
    PlotLookup
};

/// Method for constructing $μ$ on each iteration
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
#[allow(dead_code)]
pub enum InsertionStyle {
    /// Reuse $μ$ from the previous iteration, optimising weights
    /// before inserting new spikes.
    Reuse,
    /// Start each iteration with $μ=0$.
    Zero,
}

/// Meta-algorithm type
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
#[allow(dead_code)]
pub enum FBMetaAlgorithm {
    /// No meta-algorithm
    None,
    /// FISTA-style inertia
    InertiaFISTA,
}

/// Ergodic tolerance application style
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
#[allow(dead_code)]
pub enum ErgodicTolerance<F> {
    /// Non-ergodic iteration-wise tolerance
    NonErgodic,
    /// Bound after `n`th iteration to `factor` times value on that iteration.
    AfterNth{ n : usize, factor : F },
}

/// Settings for [`pointsource_fb`].
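///
/// A construction sketch (the field names below are the actual fields of this struct and of
/// [`FBGenericConfig`]; the values are illustrative only):
/// ```ignore
/// let config = FBConfig::<f64> {
///     meta : FBMetaAlgorithm::InertiaFISTA,
///     insertion : FBGenericConfig {
///         max_insertions : 50,
///         ..Default::default()
///     },
///     ..Default::default()
/// };
/// ```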
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct FBConfig<F : Float> {
    /// Step length scaling
    pub τ0 : F,
    /// Meta-algorithm to apply
    pub meta : FBMetaAlgorithm,
    /// Generic parameters
    pub insertion : FBGenericConfig<F>,
}

/// Settings for the solution of the stepwise optimality condition in algorithms based on
/// [`generic_pointsource_fb`].
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct FBGenericConfig<F : Float> {
    /// Method for constructing $μ$ on each iteration; see [`InsertionStyle`].
    pub insertion_style : InsertionStyle,
    /// Tolerance for point insertion.
    pub tolerance : Tolerance<F>,
    /// Stop looking for predual maximum (where to insert a new point) below
    /// `tolerance` multiplied by this factor.
    pub insertion_cutoff_factor : F,
    /// Apply tolerance ergodically
    pub ergodic_tolerance : ErgodicTolerance<F>,
    /// Settings for branch and bound refinement when looking for predual maxima
    pub refinement : RefinementSettings<F>,
    /// Maximum insertions within each outer iteration
    pub max_insertions : usize,
    /// Pair `(n, m)` for maximum insertions `m` on first `n` iterations.
    pub bootstrap_insertions : Option<(usize, usize)>,
    /// Inner method settings
    pub inner : InnerSettings<F>,
    /// Spike merging method
    pub merging : SpikeMergingMethod<F>,
    /// Tolerance multiplier for merges
    pub merge_tolerance_mult : F,
    /// Spike merging method after the last step
    pub final_merging : SpikeMergingMethod<F>,
    /// Iterations between merging heuristic tries
    pub merge_every : usize,
    /// Save $μ$ for postprocessing optimisation
    pub postprocessing : bool
}

#[replace_float_literals(F::cast_from(literal))]
impl<F : Float> Default for FBConfig<F> {
    fn default() -> Self {
        FBConfig {
            τ0 : 0.99,
            meta : FBMetaAlgorithm::None,
            insertion : Default::default()
        }
    }
}

#[replace_float_literals(F::cast_from(literal))]
impl<F : Float> Default for FBGenericConfig<F> {
    fn default() -> Self {
        FBGenericConfig {
            insertion_style : InsertionStyle::Reuse,
            tolerance : Default::default(),
            insertion_cutoff_factor : 1.0,
            ergodic_tolerance : ErgodicTolerance::NonErgodic,
            refinement : Default::default(),
            max_insertions : 100,
            //bootstrap_insertions : None,
            bootstrap_insertions : Some((10, 1)),
            inner : InnerSettings {
                method : InnerMethod::SSN,
                .. Default::default()
            },
            merging : SpikeMergingMethod::None,
            //merging : Default::default(),
            final_merging : Default::default(),
            merge_every : 10,
            merge_tolerance_mult : 2.0,
            postprocessing : false,
        }
    }
}

/// Trait for specialisation of [`generic_pointsource_fb`] to basic FB, FISTA.
///
/// The idea is that the residual $Aμ - b$ in the forward step can be replaced by an arbitrary
/// value. For example, to implement [primal-dual proximal splitting][crate::pdps] we replace it
/// with the dual variable $y$. We can then also implement alternative data terms, as the
/// (pre)differential of $F(μ)=F\_0(Aμ-b)$ is $F\'(μ) = A\_*F\_0\'(Aμ-b)$. In the case of the
/// quadratic fidelity $F_0(y)=\frac{1}{2}\\|y\\|_2^2$ in a Hilbert space, of course,
/// $F\_0\'(Aμ-b)=Aμ-b$ is the residual.
pub trait FBSpecialisation<F : Float, Observable : Euclidean<F>, const N : usize> : Sized {
    /// Updates the residual and does any necessary pruning of `μ`.
    ///
    /// Returns the new residual and possibly a new step length.
    ///
    /// The measure `μ` may also be modified to apply, e.g., inertia to it.
    /// The updated residual should correspond to the residual at `μ`.
    /// See the [trait documentation][FBSpecialisation] for the use and meaning of the residual.
    ///
    /// The parameter `μ_base` is the base point of the iteration, typically the previous iterate,
    /// but for, e.g., FISTA has inertia applied to it.
    fn update(
        &mut self,
        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
        μ_base : &DiscreteMeasure<Loc<F, N>, F>,
    ) -> (Observable, Option<F>);

    /// Calculates the data term value corresponding to iterate `μ` and available residual.
    ///
    /// Inertia and other modifications, as deemed necessary, should be applied to `μ`.
    ///
    /// The blanket implementation corresponds to the 2-norm-squared data fidelity
    /// $\\|\text{residual}\\|\_2^2/2$.
    fn calculate_fit(
        &self,
        _μ : &DiscreteMeasure<Loc<F, N>, F>,
        residual : &Observable
    ) -> F {
        residual.norm2_squared_div2()
    }

    /// Calculates the data term value at $μ$.
    ///
    /// Unlike [`Self::calculate_fit`], no inertia, etc., should be applied to `μ`.
    fn calculate_fit_simple(
        &self,
        μ : &DiscreteMeasure<Loc<F, N>, F>,
    ) -> F;

    /// Returns the final iterate after any necessary postprocess pruning, merging, etc.
    fn postprocess(self, mut μ : DiscreteMeasure<Loc<F, N>, F>, merging : SpikeMergingMethod<F>)
    -> DiscreteMeasure<Loc<F, N>, F>
    where DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {
        μ.merge_spikes_fitness(merging,
                               |μ̃| self.calculate_fit_simple(μ̃),
                               |&v| v);
        μ.prune();
        μ
    }

    /// Returns measure to be used for value calculations, which may differ from μ.
    fn value_μ<'c, 'b : 'c>(&'b self, μ : &'c DiscreteMeasure<Loc<F, N>, F>)
    -> &'c DiscreteMeasure<Loc<F, N>, F> {
        μ
    }
}

/// Specialisation of [`generic_pointsource_fb`] to basic μFB.
struct BasicFB<
    'a,
    F : Float + ToNalgebraRealField,
    A : ForwardModel<Loc<F, N>, F>,
    const N : usize
> {
    /// The data
    b : &'a A::Observable,
    /// The forward operator
    opA : &'a A,
}

/// Implementation of [`FBSpecialisation`] for basic μFB forward-backward splitting.
#[replace_float_literals(F::cast_from(literal))]
impl<'a, F : Float + ToNalgebraRealField, A : ForwardModel<Loc<F, N>, F>, const N : usize>
FBSpecialisation<F, A::Observable, N> for BasicFB<'a, F, A, N> {
    fn update(
        &mut self,
        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
        _μ_base : &DiscreteMeasure<Loc<F, N>, F>
    ) -> (A::Observable, Option<F>) {
        μ.prune();
        //*residual = self.opA.apply(μ) - self.b;
        let mut residual = self.b.clone();
        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
        (residual, None)
    }

    fn calculate_fit_simple(
        &self,
        μ : &DiscreteMeasure<Loc<F, N>, F>,
    ) -> F {
        let mut residual = self.b.clone();
        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
        residual.norm2_squared_div2()
    }
}

/// Specialisation of [`generic_pointsource_fb`] to FISTA.
struct FISTA<
    'a,
    F : Float + ToNalgebraRealField,
    A : ForwardModel<Loc<F, N>, F>,
    const N : usize
> {
    /// The data
    b : &'a A::Observable,
    /// The forward operator
    opA : &'a A,
    /// Current inertial parameter
    λ : F,
    /// Previous iterate without inertia applied.
    /// We need to store this here because `μ_base` passed to [`FBSpecialisation::update`] will
    /// have inertia applied to it, so it cannot be used for this purpose.
    μ_prev : DiscreteMeasure<Loc<F, N>, F>,
}

/// Implementation of [`FBSpecialisation`] for μFISTA inertial forward-backward splitting.
#[replace_float_literals(F::cast_from(literal))]
impl<'a, F : Float + ToNalgebraRealField, A : ForwardModel<Loc<F, N>, F>, const N : usize>
FBSpecialisation<F, A::Observable, N> for FISTA<'a, F, A, N> {
    fn update(
        &mut self,
        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
        _μ_base : &DiscreteMeasure<Loc<F, N>, F>
    ) -> (A::Observable, Option<F>) {
        // Update inertial parameters
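        // The two lines below compute λ_{k+1} = 2λ_k/(λ_k + √(4 + λ_k²)) and the
        // over-relaxation weight θ = λ_{k+1}/λ_k - λ_{k+1} used in the inertial step.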
        let λ_prev = self.λ;
        self.λ = 2.0 * λ_prev / ( λ_prev + (4.0 + λ_prev * λ_prev).sqrt() );
        let θ = self.λ / λ_prev - self.λ;
        // Perform inertial update on μ.
        // This computes μ ← (1 + θ) * μ - θ * μ_prev, pruning spikes where both μ
        // and μ_prev have zero weight. Since both have weights from the finite-dimensional
        // subproblem with a proximal projection step, this is likely to happen when the
        // spike is not needed. A copy of the pruned μ without arithmetic performed is
        // stored in μ_prev.
        μ.pruning_sub(1.0 + θ, θ, &mut self.μ_prev);

        //*residual = self.opA.apply(μ) - self.b;
        let mut residual = self.b.clone();
        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
        (residual, None)
    }

    fn calculate_fit_simple(
        &self,
        μ : &DiscreteMeasure<Loc<F, N>, F>,
    ) -> F {
        let mut residual = self.b.clone();
        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
        residual.norm2_squared_div2()
    }

    fn calculate_fit(
        &self,
        _μ : &DiscreteMeasure<Loc<F, N>, F>,
        _residual : &A::Observable
    ) -> F {
        self.calculate_fit_simple(&self.μ_prev)
    }

    // For FISTA we need to do a final pruning as well, due to the limited
    // pruning that can be done on each step.
    fn postprocess(mut self, μ_base : DiscreteMeasure<Loc<F, N>, F>, merging : SpikeMergingMethod<F>)
    -> DiscreteMeasure<Loc<F, N>, F>
    where DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {
        let mut μ = self.μ_prev;
        self.μ_prev = μ_base;
        μ.merge_spikes_fitness(merging,
                               |μ̃| self.calculate_fit_simple(μ̃),
                               |&v| v);
        μ.prune();
        μ
    }

    fn value_μ<'c, 'b : 'c>(&'c self, _μ : &'c DiscreteMeasure<Loc<F, N>, F>)
    -> &'c DiscreteMeasure<Loc<F, N>, F> {
        &self.μ_prev
    }
}

/// Iteratively solve the pointsource localisation problem using forward-backward splitting
///
/// The settings in `config` have their [respective documentation](FBConfig). `opA` is the
/// forward operator $A$, $b$ the observable, and $α$ the regularisation weight.
/// The operator `op𝒟` is used for forming the proximal term. Typically it is a convolution
/// operator. Finally, the `iterator` is an outer loop verbosity and iteration count control
/// as documented in [`alg_tools::iterate`].
///
/// For details on the mathematical formulation, see the [module level](self) documentation.
///
/// Returns the final iterate.
#[replace_float_literals(F::cast_from(literal))]
pub fn pointsource_fb<'a, F, I, A, GA, 𝒟, BTA, G𝒟, S, K, const N : usize>(
    opA : &'a A,
    b : &A::Observable,
    α : F,
    op𝒟 : &'a 𝒟,
    config : &FBConfig<F>,
    iterator : I,
    plotter : SeqPlotter<F, N>
) -> DiscreteMeasure<Loc<F, N>, F>
where F : Float + ToNalgebraRealField,
      I : AlgIteratorFactory<IterInfo<F, N>>,
      for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
                                  //+ std::ops::Mul<F, Output=A::Observable>, <-- FIXME: compiler overflow
      A::Observable : std::ops::MulAssign<F>,
      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
          + Lipschitz<𝒟, FloatType=F>,
      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
      G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
      𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
      𝒟::Codomain : RealMapping<F, N>,
      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
      K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
      Cube<F, N>: P2Minimise<Loc<F, N>, F>,
      PlotLookup : Plotting<N>,
      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {

    let initial_residual = -b;
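    // Step length τ = τ0/L, where L is the Lipschitz-type factor of A relative to op𝒟
    // provided by the `Lipschitz` bound; with the default τ0 = 0.99 < 1 this is intended
    // to satisfy the usual forward-backward step length condition.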
    let τ = config.τ0/opA.lipschitz_factor(&op𝒟).unwrap();

    match config.meta {
        FBMetaAlgorithm::None => generic_pointsource_fb(
            opA, α, op𝒟, τ, &config.insertion, iterator, plotter, initial_residual,
            BasicFB{ b, opA }
        ),
        FBMetaAlgorithm::InertiaFISTA => generic_pointsource_fb(
            opA, α, op𝒟, τ, &config.insertion, iterator, plotter, initial_residual,
            FISTA{ b, opA, λ : 1.0, μ_prev : DiscreteMeasure::new() }
        ),
    }
}

/// Generic implementation of [`pointsource_fb`].
///
/// The method can be specialised even to primal-dual proximal splitting through the
/// [`FBSpecialisation`] parameter `specialisation`.
/// The settings in `config` have their [respective documentation](FBGenericConfig). `opA` is the
/// forward operator $A$, $b$ the observable, and $α$ the regularisation weight.
/// The operator `op𝒟` is used for forming the proximal term. Typically it is a convolution
/// operator. Finally, the `iterator` is an outer loop verbosity and iteration count control
/// as documented in [`alg_tools::iterate`].
///
/// The implementation relies on [`alg_tools::bisection_tree::BTFN`] presentations of
/// sums of simple functions using bisection trees, and the related
/// [`alg_tools::bisection_tree::Aggregator`]s, to efficiently search for component functions
/// active at specific points, and to maximise their sums. Through the implementation of the
/// [`alg_tools::bisection_tree::BT`] bisection trees, it also relies on the copy-on-write features
/// of [`std::sync::Arc`] to only update relevant parts of the bisection tree when adding functions.
///
/// Returns the final iterate.
#[replace_float_literals(F::cast_from(literal))]
pub fn generic_pointsource_fb<'a, F, I, A, GA, 𝒟, BTA, G𝒟, S, K, Spec, const N : usize>(
    opA : &'a A,
    α : F,
    op𝒟 : &'a 𝒟,
    mut τ : F,
    config : &FBGenericConfig<F>,
    iterator : I,
    mut plotter : SeqPlotter<F, N>,
    mut residual : A::Observable,
    mut specialisation : Spec,
) -> DiscreteMeasure<Loc<F, N>, F>
where F : Float + ToNalgebraRealField,
      I : AlgIteratorFactory<IterInfo<F, N>>,
      Spec : FBSpecialisation<F, A::Observable, N>,
      A::Observable : std::ops::MulAssign<F>,
      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
          + Lipschitz<𝒟, FloatType=F>,
      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
      G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
      𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
      𝒟::Codomain : RealMapping<F, N>,
      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
      K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
      Cube<F, N>: P2Minimise<Loc<F, N>, F>,
      PlotLookup : Plotting<N>,
      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {

    // Set up parameters
    let quiet = iterator.is_quiet();
    let op𝒟norm = op𝒟.opnorm_bound();
    // We multiply tolerance by τ for FB since
    // our subproblems depending on tolerances are scaled by τ compared to the conditional
    // gradient approach.
    let mut tolerance = config.tolerance * τ * α;
    let mut ε = tolerance.initial();

    // Initialise operators
    let preadjA = opA.preadjoint();

    // Initialise iterates
    let mut μ = DiscreteMeasure::new();

    let mut after_nth_bound = F::INFINITY;
    // FIXME: Don't allocate if not needed.
    let mut after_nth_accum = opA.zero_observable();

    let mut inner_iters = 0;
    let mut this_iters = 0;
    let mut pruned = 0;
    let mut merged = 0;

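    // Helper returning the signed difference μ_base − μ_new as a pruned discrete measure;
    // it is used below to evaluate 𝒟(μ^k − μ) when forming the dual-space quantity d.
    // (The `Reuse` branch relies on μ_base's spikes forming a prefix of μ_new's.)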
    let μ_diff = |μ_new : &DiscreteMeasure<Loc<F, N>, F>,
                  μ_base : &DiscreteMeasure<Loc<F, N>, F>| {
        let mut ν : DiscreteMeasure<Loc<F, N>, F> = match config.insertion_style {
            InsertionStyle::Reuse => {
                μ_new.iter_spikes()
                     .zip(μ_base.iter_masses().chain(std::iter::repeat(0.0)))
                     .map(|(δ, α_base)| (δ.x, α_base - δ.α))
                     .collect()
            },
            InsertionStyle::Zero => {
                μ_new.iter_spikes()
                     .map(|δ| -δ)
                     .chain(μ_base.iter_spikes().copied())
                     .collect()
            }
        };
        ν.prune(); // Potential small performance improvement
        ν
    };

    // Run the algorithm
    iterator.iterate(|state| {
        // Calculate subproblem tolerances, and update main tolerance for next iteration
        let τα = τ * α;
        // if μ.len() == 0 /*state.iteration() == 1*/ {
        //     let t = minus_τv.bounds().upper() * 0.001;
        //     if t > 0.0 {
        //         let (ξ, v_ξ) = minus_τv.maximise(t, config.refinement.max_steps);
        //         if τα + ε > v_ξ && v_ξ > τα {
        //             // The zero measure is already within bounds, so improve them
        //             tolerance = config.tolerance * (v_ξ - τα);
        //             ε = tolerance.initial();
        //         }
        //         μ += DeltaMeasure { x : ξ, α : 0.0 };
        //     } else {
        //         // Zero is the solution.
        //         return Step::Terminated
        //     }
        // }
        let target_bounds = Bounds(τα - ε, τα + ε);
        let merge_tolerance = config.merge_tolerance_mult * ε;
        let merge_target_bounds = Bounds(τα - merge_tolerance, τα + merge_tolerance);
        let inner_tolerance = ε * config.inner.tolerance_mult;
        let refinement_tolerance = ε * config.refinement.tolerance_mult;
        let maximise_above = τα + ε * config.insertion_cutoff_factor;
        let mut ε1 = ε;
        let ε_prev = ε;
        ε = tolerance.update(ε, state.iteration());

        // Maximum insertion count and measure difference calculation depend on insertion style.
        let (m, warn_insertions) = match (state.iteration(), config.bootstrap_insertions) {
            (i, Some((l, k))) if i <= l => (k, false),
            _ => (config.max_insertions, !quiet),
        };
        let max_insertions = match config.insertion_style {
            InsertionStyle::Zero => {
                todo!("InsertionStyle::Zero does not currently work with FISTA, so disabled.");
                // let n = μ.len();
                // μ = DiscreteMeasure::new();
                // n + m
            },
            InsertionStyle::Reuse => m,
        };

        // Calculate smooth part of surrogate model.
        residual *= -τ;
        if let ErgodicTolerance::AfterNth{ .. } = config.ergodic_tolerance {
            // Negative residual times τ expected here, as set above.
            // TODO: is this the correct location?
            after_nth_accum += &residual;
        }
        // Using `std::mem::replace` here is not ideal, and expects that `empty_observable`
        // has no significant overhead. For some reason Rust doesn't allow us simply moving
        // the residual and replacing it below before the end of this closure.
        let r = std::mem::replace(&mut residual, opA.empty_observable());
        let minus_τv = preadjA.apply(r);   // minus_τv = -τA^*(Aμ^k-b)
        // TODO: should avoid a second copy of μ here; μ_base already stores a copy.
        let ω0 = op𝒟.apply(μ.clone());     // 𝒟μ^k
        //let g = &minus_τv + ω0;          // Linear term of surrogate model

        // Save current base point
        let μ_base = μ.clone();

        // Add points to support until within error tolerance or maximum insertion count reached.
        let mut count = 0;
        let (within_tolerances, d) = 'insertion: loop {
            if μ.len() > 0 {
                // Form finite-dimensional subproblem. The subproblem references to the original μ^k
                // from the beginning of the iteration are all contained in the immutable c and g.
                let à = op𝒟.findim_matrix(μ.iter_locations());
                let g̃ = DVector::from_iterator(μ.len(),
                                               μ.iter_locations()
                                                .map(|ζ| minus_τv.apply(ζ) + ω0.apply(ζ))
                                                .map(F::to_nalgebra_mixed));
                let mut x = μ.masses_dvector();

                // The gradient of the forward component of the inner objective is C^*𝒟Cx - g̃.
                // We have |C^*𝒟Cx|_2 = sup_{|z|_2 ≤ 1} ⟨z, C^*𝒟Cx⟩ = sup_{|z|_2 ≤ 1} ⟨Cz|𝒟Cx⟩
                // ≤ sup_{|z|_2 ≤ 1} |Cz|_ℳ |𝒟Cx|_∞ ≤ sup_{|z|_2 ≤ 1} |Cz|_ℳ |𝒟| |Cx|_ℳ
                // ≤ sup_{|z|_2 ≤ 1} |z|_1 |𝒟| |x|_1 ≤ sup_{|z|_2 ≤ 1} n |z|_2 |𝒟| |x|_2
                // = n |𝒟| |x|_2, where n is the number of points. Therefore
                let inner_τ = config.inner.τ0 / (op𝒟norm * F::cast_from(μ.len()));

                // Solve finite-dimensional subproblem.
                let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
                inner_iters += quadratic_nonneg(config.inner.method, &Ã, &g̃, τ*α, &mut x,
                                                inner_τ, inner_it);

                // Update masses of μ based on solution of finite-dimensional subproblem.
                μ.set_masses_dvector(&x);
            }

            // Form d = ω0 - τv - 𝒟μ = -𝒟(μ - μ^k) - τv for checking the proximate optimality
            // conditions in the predual space, and finding new points for insertion, if necessary.
            let mut d = &minus_τv + op𝒟.preapply(μ_diff(&μ, &μ_base));

            // If no merging heuristic is used, let's be more conservative about spike insertion,
            // and skip it after first round. If merging is done, being more greedy about spike
            // insertion also seems to improve performance.
            let may_break = if let SpikeMergingMethod::None = config.merging {
                false
            } else {
                count > 0
            };

            // First do a rough check whether we are within bounds and can stop.
            let in_bounds = match config.ergodic_tolerance {
                ErgodicTolerance::NonErgodic => {
                    target_bounds.superset(&d.bounds())
                },
                ErgodicTolerance::AfterNth{ n, factor } => {
                    // Bound -τ∑_{k=0}^{N-1}[A_*(Aμ^k-b)+α] from above.
                    match state.iteration().cmp(&n) {
                        Less => true,
                        Equal => {
                            let iter = F::cast_from(state.iteration());
                            let mut tmp = preadjA.apply(&after_nth_accum);
                            let (_, v0) = tmp.maximise(refinement_tolerance,
                                                       config.refinement.max_steps);
                            let v = v0 - iter * τ * α;
                            after_nth_bound = factor * v;
                            println!("{}", format!("Set ergodic tolerance to {}", after_nth_bound));
                            true
                        },
                        Greater => {
                            // TODO: can divide after_nth_accum by N, so use basic tolerance on that.
                            let iter = F::cast_from(state.iteration());
                            let mut tmp = preadjA.apply(&after_nth_accum);
                            tmp.has_upper_bound(after_nth_bound + iter * τ * α,
                                                refinement_tolerance,
                                                config.refinement.max_steps)
                        }
                    }
                }
            };

            // If the preliminary check indicates that we are in bounds, and if it otherwise matches
            // the insertion strategy, skip insertion.
            if may_break && in_bounds {
                break 'insertion (true, d)
            }

            // If the rough check didn't indicate stopping, find maximising point, maintaining for
            // the calculations in the beginning of the loop that v_ξ = (ω0-τv-𝒟μ)(ξ) = d(ξ),
            // where 𝒟μ is now distinct from μ0 after the insertions already performed.
            // We do not need to check lower bounds, as a solution of the finite-dimensional
            // subproblem should always satisfy them.

            // // Find the mimimum over the support of μ.
            // let d_min_supp = d_max;μ.iter_spikes().filter_map(|&DeltaMeasure{ α, ref x }| {
            //     (α != F::ZERO).then(|| d.value(x))
            // }).reduce(F::min).unwrap_or(0.0);

            let (ξ, v_ξ) = if false /* μ.len() == 0*/ /*count == 0 &&*/ {
                // If μ has no spikes, just find the maximum of d. Then adjust the tolerance, if
                // necessary, to adapt it to the problem.
                let (ξ, v_ξ) = d.maximise(refinement_tolerance, config.refinement.max_steps);
                //dbg!((τα, v_ξ, target_bounds.upper(), maximise_above));
                if τα < v_ξ && v_ξ < target_bounds.upper() {
                    ε1 = v_ξ - τα;
                    ε *= ε1 / ε_prev;
                    tolerance *= ε1 / ε_prev;
                }
                (ξ, v_ξ)
            } else {
                // If μ has some spikes, only find a maximum of d if it is above a threshold
                // defined by the refinement tolerance.
                match d.maximise_above(maximise_above, refinement_tolerance,
                                       config.refinement.max_steps) {
                    None => break 'insertion (true, d),
                    Some(res) => res,
                }
            };

            // // Do a one final check whether we can stop already without inserting more points
            // // because `d` actually in bounds based on a more refined estimate.
            // if may_break && target_bounds.upper() >= v_ξ {
            //     break (true, d)
            // }

            // Break if maximum insertion count reached
            if count >= max_insertions {
                let in_bounds2 = target_bounds.upper() >= v_ξ;
                break 'insertion (in_bounds2, d)
            }

            // No point in optimising the weight here; the finite-dimensional algorithm is fast.
            μ += DeltaMeasure { x : ξ, α : 0.0 };
            count += 1;
        };

        if !within_tolerances && warn_insertions {
            // Complain (but continue) if we failed to get within tolerances
            // by inserting more points.
            let err = format!("Maximum insertions reached without achieving \
                subproblem solution tolerance");
            println!("{}", err.red());
        }

        // Merge spikes
        if state.iteration() % config.merge_every == 0 {
            let n_before_merge = μ.len();
            μ.merge_spikes(config.merging, |μ_candidate| {
                //println!("Merge attempt!");
                let mut d = &minus_τv + op𝒟.preapply(μ_diff(&μ_candidate, &μ_base));

                if merge_target_bounds.superset(&d.bounds()) {
                    //println!("…Early Ok");
                    return Some(())
                }

                let d_min_supp = μ_candidate.iter_spikes().filter_map(|&DeltaMeasure{ α, ref x }| {
                    (α != 0.0).then(|| d.apply(x))
                }).reduce(F::min);

                if d_min_supp.map_or(true, |b| b >= merge_target_bounds.lower()) &&
                   d.has_upper_bound(merge_target_bounds.upper(), refinement_tolerance,
                                     config.refinement.max_steps) {
                    //println!("…Ok");
                    Some(())
                } else {
                    //println!("…Fail");
                    None
                }
            });
            debug_assert!(μ.len() >= n_before_merge);
            merged += μ.len() - n_before_merge;
        }

        let n_before_prune = μ.len();
        (residual, τ) = match specialisation.update(&mut μ, &μ_base) {
            (r, None) => (r, τ),
            (r, Some(new_τ)) => (r, new_τ)
        };
        debug_assert!(μ.len() <= n_before_prune);
        pruned += n_before_prune - μ.len();

        this_iters += 1;

        // Give function value if needed
        state.if_verbose(|| {
            let value_μ = specialisation.value_μ(&μ);
            // Plot if so requested
            plotter.plot_spikes(
                format!("iter {} end; {}", state.iteration(), within_tolerances), &d,
                "start".to_string(), Some(&minus_τv),
                Some(target_bounds), value_μ,
            );
            // Calculate mean inner iterations and reset relevant counters
            // Return the statistics
            let res = IterInfo {
                value : specialisation.calculate_fit(&μ, &residual) + α * value_μ.norm(Radon),
                n_spikes : value_μ.len(),
                inner_iters,
                this_iters,
                merged,
                pruned,
                ε : ε_prev,
                maybe_ε1 : Some(ε1),
                postprocessing: config.postprocessing.then(|| value_μ.clone()),
            };
            inner_iters = 0;
            this_iters = 0;
            merged = 0;
            pruned = 0;
            res
        })
    });

    specialisation.postprocess(μ, config.final_merging)
}