/*!
Solver for the point source localisation problem using a forward-backward splitting method.

This corresponds to the manuscript

 * Valkonen T. - _Proximal methods for point source localisation_. ARXIV TO INSERT.

The main routine is [`pointsource_fb`]. It is based on [`generic_pointsource_fb`], which is also
used by our [primal-dual proximal splitting][crate::pdps] implementation.

FISTA-type inertia can also be enabled through [`FBConfig::meta`].

## Problem

<p>
Our objective is to solve
$$
\min_{μ ∈ ℳ(Ω)}~ F_0(Aμ-b) + α \|μ\|_{ℳ(Ω)} + δ_{≥ 0}(μ),
$$
where $F_0(y)=\frac{1}{2}\|y\|_2^2$ and the forward operator $A \in 𝕃(ℳ(Ω); ℝ^n)$.
</p>

## Approach

<p>
As documented in more detail in the paper, on each step we approximately solve
$$
\min_{μ ∈ ℳ(Ω)}~ F(μ) + α \|μ\|_{ℳ(Ω)} + δ_{≥ 0}(μ) + \frac{1}{2}\|μ-μ^k\|_𝒟^2,
$$
where $F(μ)=F_0(Aμ-b)$ and $𝒟 ∈ 𝕃(ℳ(Ω); C_c(Ω))$ is typically a convolution operator.
</p>

## Finite-dimensional subproblems

With $C$ the embedding of weight vectors $x ∈ ℝ^n$ as [`DiscreteMeasure`]s on the current spike
locations, and $x^k$ the weight vector of $μ^k$, so that $μ^k=Cx^k$, we
form the discretised linearised inner problem
<p>
$$
\min_{x ∈ ℝ^n}~ τ\bigl(F(Cx^k) + [C^*∇F(Cx^k)]^⊤(x-x^k) + α {\vec 1}^⊤ x\bigr)
+ δ_{≥ 0}(x) + \frac{1}{2}\|x-x^k\|_{C^*𝒟C}^2,
$$
equivalently
$$
\begin{aligned}
\min_x~ & τF(Cx^k) - τ[C^*∇F(Cx^k)]^⊤x^k + \frac{1}{2} (x^k)^⊤ C^*𝒟C x^k
\\
&
- [C^*𝒟C x^k - τC^*∇F(Cx^k)]^⊤ x
\\
&
+ \frac{1}{2} x^⊤ C^*𝒟C x
+ τα {\vec 1}^⊤ x + δ_{≥ 0}(x).
\end{aligned}
$$
In other words, we obtain the quadratic non-negativity constrained problem
$$
\min_{x ∈ ℝ^n}~ \frac{1}{2} x^⊤ Ã x - g̃^⊤ x + c + τα {\vec 1}^⊤ x + δ_{≥ 0}(x),
$$
where
$$
\begin{aligned}
Ã & = C^*𝒟C,
\\
g̃ & = C^*𝒟C x^k - τ C^*∇F(Cx^k)
= C^* 𝒟 μ^k - τ C^*A^*(Aμ^k - b),
\\
c & = τ F(Cx^k) - τ[C^*∇F(Cx^k)]^⊤x^k + \frac{1}{2} (x^k)^⊤ C^*𝒟C x^k
\\
&
= \frac{τ}{2} \|Aμ^k-b\|^2 - τ[Aμ^k-b]^⊤Aμ^k + \frac{1}{2} \|μ^k\|_{𝒟}^2
\\
&
= -\frac{τ}{2} \|Aμ^k-b\|^2 - τ[Aμ^k-b]^⊤ b + \frac{1}{2} \|μ^k\|_{𝒟}^2.
\end{aligned}
$$
</p>

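In the implementation ([`generic_pointsource_fb`] below), $Ã$ and $g̃$ are assembled along
roughly the following lines, where `minus_τv` represents the function $-τA^*(Aμ^k-b)$ and
`ω0` represents $𝒟μ^k$ (a sketch extracted from the code, not a complete listing):
```ignore
let à = op𝒟.findim_matrix(μ.iter_locations());
let g̃ = DVector::from_iterator(μ.len(),
                               μ.iter_locations()
                                .map(|ζ| minus_τv.apply(ζ) + ω0.apply(ζ))
                                .map(F::to_nalgebra_mixed));
```
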
We solve this with either SSN or FB via [`quadratic_nonneg`] as determined by
[`InnerSettings`] in [`FBGenericConfig::inner`].
*/

use numeric_literals::replace_float_literals;
use serde::{Serialize, Deserialize};
use colored::Colorize;
use nalgebra::DVector;

use alg_tools::iterate::{
    AlgIteratorFactory,
    AlgIteratorState,
};
use alg_tools::euclidean::Euclidean;
use alg_tools::norms::Norm;
use alg_tools::linops::Apply;
use alg_tools::sets::Cube;
use alg_tools::loc::Loc;
use alg_tools::bisection_tree::{
    BTFN,
    PreBTFN,
    Bounds,
    BTNodeLookup,
    BTNode,
    BTSearch,
    P2Minimise,
    SupportGenerator,
    LocalAnalysis,
    Bounded,
};
use alg_tools::mapping::RealMapping;
use alg_tools::nalgebra_support::ToNalgebraRealField;

use crate::types::*;
use crate::measures::{
    DiscreteMeasure,
    DeltaMeasure,
    Radon
};
use crate::measures::merging::{
    SpikeMergingMethod,
    SpikeMerging,
};
use crate::forward_model::ForwardModel;
use crate::seminorms::{
    DiscreteMeasureOp, Lipschitz
};
use crate::subproblem::{
    quadratic_nonneg,
    InnerSettings,
    InnerMethod,
};
use crate::tolerance::Tolerance;
use crate::plot::{
    SeqPlotter,
    Plotting,
    PlotLookup
};

/// Method for constructing $μ$ on each iteration
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
#[allow(dead_code)]
pub enum InsertionStyle {
    /// Reuse the $μ$ from the previous iteration, optimising weights
    /// before inserting new spikes.
    Reuse,
    /// Start each iteration with $μ=0$.
    Zero,
}

/// Meta-algorithm type
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
#[allow(dead_code)]
pub enum FBMetaAlgorithm {
    /// No meta-algorithm
    None,
    /// FISTA-style inertia
    InertiaFISTA,
}

/// Settings for [`pointsource_fb`].
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct FBConfig<F : Float> {
    /// Step length scaling
    pub τ0 : F,
    /// Meta-algorithm to apply
    pub meta : FBMetaAlgorithm,
    /// Generic parameters
    pub insertion : FBGenericConfig<F>,
}

/// Settings for the solution of the stepwise optimality condition in algorithms based on
/// [`generic_pointsource_fb`].
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct FBGenericConfig<F : Float> {
    /// Method for constructing $μ$ on each iteration; see [`InsertionStyle`].
    pub insertion_style : InsertionStyle,
    /// Tolerance for point insertion.
    pub tolerance : Tolerance<F>,
    /// Stop looking for the predual maximum (where to insert a new point) below
    /// `tolerance` multiplied by this factor.
    pub insertion_cutoff_factor : F,
    /// Settings for branch and bound refinement when looking for predual maxima
    pub refinement : RefinementSettings<F>,
    /// Maximum insertions within each outer iteration
    pub max_insertions : usize,
    /// Pair `(n, m)` for maximum insertions `m` on the first `n` iterations.
    pub bootstrap_insertions : Option<(usize, usize)>,
    /// Inner method settings
    pub inner : InnerSettings<F>,
    /// Spike merging method
    pub merging : SpikeMergingMethod<F>,
    /// Tolerance multiplier for merges
    pub merge_tolerance_mult : F,
    /// Spike merging method after the last step
    pub final_merging : SpikeMergingMethod<F>,
    /// Iterations between merging heuristic tries
    pub merge_every : usize,
    /// Save $μ$ for postprocessing optimisation
    pub postprocessing : bool
}

#[replace_float_literals(F::cast_from(literal))]
impl<F : Float> Default for FBConfig<F> {
    fn default() -> Self {
        FBConfig {
            τ0 : 0.99,
            meta : FBMetaAlgorithm::None,
            insertion : Default::default()
        }
    }
}

#[replace_float_literals(F::cast_from(literal))]
impl<F : Float> Default for FBGenericConfig<F> {
    fn default() -> Self {
        FBGenericConfig {
            insertion_style : InsertionStyle::Reuse,
            tolerance : Default::default(),
            insertion_cutoff_factor : 1.0,
            refinement : Default::default(),
            max_insertions : 100,
            //bootstrap_insertions : None,
            bootstrap_insertions : Some((10, 1)),
            inner : InnerSettings {
                method : InnerMethod::SSN,
                .. Default::default()
            },
            merging : SpikeMergingMethod::None,
            //merging : Default::default(),
            final_merging : Default::default(),
            merge_every : 10,
            merge_tolerance_mult : 2.0,
            postprocessing : false,
        }
    }
}
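
// A minimal configuration sketch (illustration only, not additional API): FISTA-type
// inertia is requested through `FBConfig::meta`, and the maximum insertion count of the
// generic part is overridden. The float type `f64` and the value `20` are arbitrary
// choices for the example.
//
//     let config = FBConfig::<f64> {
//         meta : FBMetaAlgorithm::InertiaFISTA,
//         insertion : FBGenericConfig {
//             max_insertions : 20,
//             .. Default::default()
//         },
//         .. Default::default()
//     };
//
// The resulting `config` is then passed to `pointsource_fb`.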

/// Trait for specialisation of [`generic_pointsource_fb`] to basic FB, FISTA.
///
/// The idea is that the residual $Aμ - b$ in the forward step can be replaced by an arbitrary
/// value. For example, to implement [primal-dual proximal splitting][crate::pdps] we replace it
/// with the dual variable $y$. We can then also implement alternative data terms, as the
/// (pre)differential of $F(μ)=F\_0(Aμ-b)$ is $F\'(μ) = A^*F\_0\'(Aμ-b)$. In the case of the
/// quadratic fidelity $F_0(y)=\frac{1}{2}\\|y\\|_2^2$ in a Hilbert space, of course,
/// $F\_0\'(Aμ-b)=Aμ-b$ is the residual.
pub trait FBSpecialisation<F : Float, Observable : Euclidean<F>, const N : usize> : Sized {
    /// Updates the residual and does any necessary pruning of `μ`.
    ///
    /// Returns the new residual and possibly a new step length.
    ///
    /// The measure `μ` may also be modified to apply, e.g., inertia to it.
    /// The updated residual should correspond to the residual at `μ`.
    /// See the [trait documentation][FBSpecialisation] for the use and meaning of the residual.
    ///
    /// The parameter `μ_base` is the base point of the iteration, typically the previous iterate,
    /// but for, e.g., FISTA it has inertia applied to it.
    fn update(
        &mut self,
        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
        μ_base : &DiscreteMeasure<Loc<F, N>, F>,
    ) -> (Observable, Option<F>);

    /// Calculates the data term value corresponding to iterate `μ` and available residual.
    ///
    /// Inertia and other modifications, as deemed necessary, should be applied to `μ`.
    ///
    /// The blanket implementation corresponds to the 2-norm-squared data fidelity
    /// $\\|\text{residual}\\|\_2^2/2$.
    fn calculate_fit(
        &self,
        _μ : &DiscreteMeasure<Loc<F, N>, F>,
        residual : &Observable
    ) -> F {
        residual.norm2_squared_div2()
    }

    /// Calculates the data term value at $μ$.
    ///
    /// Unlike [`Self::calculate_fit`], no inertia, etc., should be applied to `μ`.
    fn calculate_fit_simple(
        &self,
        μ : &DiscreteMeasure<Loc<F, N>, F>,
    ) -> F;

    /// Returns the final iterate after any necessary postprocess pruning, merging, etc.
    fn postprocess(self, mut μ : DiscreteMeasure<Loc<F, N>, F>, merging : SpikeMergingMethod<F>)
    -> DiscreteMeasure<Loc<F, N>, F>
    where DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {
        μ.merge_spikes_fitness(merging,
                               |μ̃| self.calculate_fit_simple(μ̃),
                               |&v| v);
        μ.prune();
        μ
    }

    /// Returns the measure to be used for value calculations, which may differ from `μ`.
    fn value_μ<'c, 'b : 'c>(&'b self, μ : &'c DiscreteMeasure<Loc<F, N>, F>)
    -> &'c DiscreteMeasure<Loc<F, N>, F> {
        μ
    }
}
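
// How the generic driver uses an `FBSpecialisation` on each outer iteration (a simplified
// sketch of calls that appear in `generic_pointsource_fb` below, not additional API):
//
//     // after the insertion loop and optional spike merging:
//     let (residual, maybe_new_τ) = specialisation.update(&mut μ, &μ_base);
//     // for progress reporting:
//     let fit = specialisation.calculate_fit(&μ, &residual);
//     let value_μ = specialisation.value_μ(&μ);
//     // and once the outer iterator finishes:
//     return specialisation.postprocess(μ, config.final_merging);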

/// Specialisation of [`generic_pointsource_fb`] to basic μFB.
struct BasicFB<
    'a,
    F : Float + ToNalgebraRealField,
    A : ForwardModel<Loc<F, N>, F>,
    const N : usize
> {
    /// The data
    b : &'a A::Observable,
    /// The forward operator
    opA : &'a A,
}

/// Implementation of [`FBSpecialisation`] for basic μFB forward-backward splitting.
#[replace_float_literals(F::cast_from(literal))]
impl<'a, F : Float + ToNalgebraRealField, A : ForwardModel<Loc<F, N>, F>, const N : usize>
FBSpecialisation<F, A::Observable, N> for BasicFB<'a, F, A, N> {
    fn update(
        &mut self,
        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
        _μ_base : &DiscreteMeasure<Loc<F, N>, F>
    ) -> (A::Observable, Option<F>) {
        μ.prune();
        //*residual = self.opA.apply(μ) - self.b;
        let mut residual = self.b.clone();
        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
        (residual, None)
    }

    fn calculate_fit_simple(
        &self,
        μ : &DiscreteMeasure<Loc<F, N>, F>,
    ) -> F {
        let mut residual = self.b.clone();
        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
        residual.norm2_squared_div2()
    }
}

/// Specialisation of [`generic_pointsource_fb`] to FISTA.
struct FISTA<
    'a,
    F : Float + ToNalgebraRealField,
    A : ForwardModel<Loc<F, N>, F>,
    const N : usize
> {
    /// The data
    b : &'a A::Observable,
    /// The forward operator
    opA : &'a A,
    /// Current inertial parameter
    λ : F,
    /// Previous iterate without inertia applied.
    /// We need to store this here because the `μ_base` passed to [`FBSpecialisation::update`]
    /// will have inertia applied to it, so is not useful for this purpose.
    μ_prev : DiscreteMeasure<Loc<F, N>, F>,
}

/// Implementation of [`FBSpecialisation`] for μFISTA inertial forward-backward splitting.
#[replace_float_literals(F::cast_from(literal))]
impl<'a, F : Float + ToNalgebraRealField, A : ForwardModel<Loc<F, N>, F>, const N : usize>
FBSpecialisation<F, A::Observable, N> for FISTA<'a, F, A, N> {
    fn update(
        &mut self,
        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
        _μ_base : &DiscreteMeasure<Loc<F, N>, F>
    ) -> (A::Observable, Option<F>) {
        // Update the inertial parameters
        let λ_prev = self.λ;
        self.λ = 2.0 * λ_prev / ( λ_prev + (4.0 + λ_prev * λ_prev).sqrt() );
        let θ = self.λ / λ_prev - self.λ;
        // Perform inertial update on μ.
        // This computes μ ← (1 + θ) * μ - θ * μ_prev, pruning spikes where both μ
        // and μ_prev have zero weight. Since both have weights from the finite-dimensional
        // subproblem with a proximal projection step, this is likely to happen when the
        // spike is not needed. A copy of the pruned μ, without the arithmetic applied, is
        // stored in μ_prev.
        μ.pruning_sub(1.0 + θ, θ, &mut self.μ_prev);

        //*residual = self.opA.apply(μ) - self.b;
        let mut residual = self.b.clone();
        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
        (residual, None)
    }

    fn calculate_fit_simple(
        &self,
        μ : &DiscreteMeasure<Loc<F, N>, F>,
    ) -> F {
        let mut residual = self.b.clone();
        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
        residual.norm2_squared_div2()
    }

    fn calculate_fit(
        &self,
        _μ : &DiscreteMeasure<Loc<F, N>, F>,
        _residual : &A::Observable
    ) -> F {
        self.calculate_fit_simple(&self.μ_prev)
    }

    // For FISTA we need to do a final pruning as well, due to the limited
    // pruning that can be done on each step.
    fn postprocess(mut self, μ_base : DiscreteMeasure<Loc<F, N>, F>, merging : SpikeMergingMethod<F>)
    -> DiscreteMeasure<Loc<F, N>, F>
    where DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {
        let mut μ = self.μ_prev;
        self.μ_prev = μ_base;
        μ.merge_spikes_fitness(merging,
                               |μ̃| self.calculate_fit_simple(μ̃),
                               |&v| v);
        μ.prune();
        μ
    }

    fn value_μ<'c, 'b : 'c>(&'c self, _μ : &'c DiscreteMeasure<Loc<F, N>, F>)
    -> &'c DiscreteMeasure<Loc<F, N>, F> {
        &self.μ_prev
    }
}

/// Iteratively solve the point source localisation problem using forward-backward splitting.
///
/// The settings in `config` have their [respective documentation](FBConfig). `opA` is the
/// forward operator $A$, $b$ the observable, and $α$ the regularisation weight.
/// The operator `op𝒟` is used for forming the proximal term. Typically it is a convolution
/// operator. Finally, the `iterator` is an outer loop verbosity and iteration count control
/// as documented in [`alg_tools::iterate`].
///
/// For details on the mathematical formulation, see the [module level](self) documentation.
///
/// Returns the final iterate.
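///
/// A minimal calling sketch (constructing the forward model `opA`, the data `b`, the
/// seminorm operator `op𝒟`, the `iterator`, and the `plotter` is application-specific and
/// omitted here, so this is illustrative only):
/// ```ignore
/// let config : FBConfig<f64> = Default::default();
/// let μ = pointsource_fb(&opA, &b, α, &op𝒟, &config, iterator, plotter);
/// ```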
#[replace_float_literals(F::cast_from(literal))]
pub fn pointsource_fb<'a, F, I, A, GA, 𝒟, BTA, G𝒟, S, K, const N : usize>(
    opA : &'a A,
    b : &A::Observable,
    α : F,
    op𝒟 : &'a 𝒟,
    config : &FBConfig<F>,
    iterator : I,
    plotter : SeqPlotter<F, N>
) -> DiscreteMeasure<Loc<F, N>, F>
where F : Float + ToNalgebraRealField,
      I : AlgIteratorFactory<IterInfo<F, N>>,
      for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
                                  //+ std::ops::Mul<F, Output=A::Observable>, <-- FIXME: compiler overflow
      A::Observable : std::ops::MulAssign<F>,
      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
          + Lipschitz<𝒟, FloatType=F>,
      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
      G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
      𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
      𝒟::Codomain : RealMapping<F, N>,
      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
      K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
      Cube<F, N>: P2Minimise<Loc<F, N>, F>,
      PlotLookup : Plotting<N>,
      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {

    let initial_residual = -b;
    let τ = config.τ0/opA.lipschitz_factor(&op𝒟).unwrap();

    match config.meta {
        FBMetaAlgorithm::None => generic_pointsource_fb(
            opA, α, op𝒟, τ, &config.insertion, iterator, plotter, initial_residual,
            BasicFB{ b, opA }
        ),
        FBMetaAlgorithm::InertiaFISTA => generic_pointsource_fb(
            opA, α, op𝒟, τ, &config.insertion, iterator, plotter, initial_residual,
            FISTA{ b, opA, λ : 1.0, μ_prev : DiscreteMeasure::new() }
        ),
    }
}

/// Generic implementation of [`pointsource_fb`].
///
/// The method can be specialised even to primal-dual proximal splitting through the
/// [`FBSpecialisation`] parameter `specialisation`.
/// The settings in `config` have their [respective documentation](FBGenericConfig). `opA` is the
/// forward operator $A$, $α$ the regularisation weight, and `τ` the step length; `residual`
/// should hold the initial residual (for the zero initial measure this is $-b$, with $b$ the
/// observable). The operator `op𝒟` is used for forming the proximal term. Typically it is a
/// convolution operator. Finally, the `iterator` is an outer loop verbosity and iteration count
/// control as documented in [`alg_tools::iterate`].
///
/// The implementation relies on [`alg_tools::bisection_tree::BTFN`] representations of
/// sums of simple functions using bisection trees, and the related
/// [`alg_tools::bisection_tree::Aggregator`]s, to efficiently search for component functions
/// active at specific points, and to maximise their sums. Through the implementation of the
/// [`alg_tools::bisection_tree::BT`] bisection trees, it also relies on the copy-on-write features
/// of [`std::sync::Arc`] to only update relevant parts of the bisection tree when adding functions.
///
/// Returns the final iterate.
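///
/// A sketch of direct use with a custom [`FBSpecialisation`] (here `spec` stands for a
/// user-provided implementation and the remaining inputs are as for [`pointsource_fb`];
/// illustrative only):
/// ```ignore
/// let residual = -&b;    // residual corresponding to the zero initial measure
/// let μ = generic_pointsource_fb(
///     &opA, α, &op𝒟, τ, &config.insertion, iterator, plotter, residual, spec);
/// ```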
#[replace_float_literals(F::cast_from(literal))]
pub fn generic_pointsource_fb<'a, F, I, A, GA, 𝒟, BTA, G𝒟, S, K, Spec, const N : usize>(
    opA : &'a A,
    α : F,
    op𝒟 : &'a 𝒟,
    mut τ : F,
    config : &FBGenericConfig<F>,
    iterator : I,
    mut plotter : SeqPlotter<F, N>,
    mut residual : A::Observable,
    mut specialisation : Spec,
) -> DiscreteMeasure<Loc<F, N>, F>
where F : Float + ToNalgebraRealField,
      I : AlgIteratorFactory<IterInfo<F, N>>,
      Spec : FBSpecialisation<F, A::Observable, N>,
      A::Observable : std::ops::MulAssign<F>,
      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
          + Lipschitz<𝒟, FloatType=F>,
      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
      G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
      𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
      𝒟::Codomain : RealMapping<F, N>,
      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
      K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
      Cube<F, N>: P2Minimise<Loc<F, N>, F>,
      PlotLookup : Plotting<N>,
      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {

    // Set up parameters
    let quiet = iterator.is_quiet();
    let op𝒟norm = op𝒟.opnorm_bound();
    // We multiply tolerance by τ for FB since
    // our subproblems depending on tolerances are scaled by τ compared to the conditional
    // gradient approach.
    let tolerance = config.tolerance * τ * α;
    let mut ε = tolerance.initial();

    // Initialise operators
    let preadjA = opA.preadjoint();

    // Initialise iterates
    let mut μ = DiscreteMeasure::new();

    let mut inner_iters = 0;
    let mut this_iters = 0;
    let mut pruned = 0;
    let mut merged = 0;

    let μ_diff = |μ_new : &DiscreteMeasure<Loc<F, N>, F>,
                  μ_base : &DiscreteMeasure<Loc<F, N>, F>| {
        let mut ν : DiscreteMeasure<Loc<F, N>, F> = match config.insertion_style {
            InsertionStyle::Reuse => {
                μ_new.iter_spikes()
                     .zip(μ_base.iter_masses().chain(std::iter::repeat(0.0)))
                     .map(|(δ, α_base)| (δ.x, α_base - δ.α))
                     .collect()
            },
            InsertionStyle::Zero => {
                μ_new.iter_spikes()
                     .map(|δ| -δ)
                     .chain(μ_base.iter_spikes().copied())
                     .collect()
            }
        };
        ν.prune(); // Potential small performance improvement
        ν
    };

    // Run the algorithm
    iterator.iterate(|state| {
        // Calculate subproblem tolerances, and update the main tolerance for the next iteration
        let τα = τ * α;
        let target_bounds = Bounds(τα - ε, τα + ε);
        let merge_tolerance = config.merge_tolerance_mult * ε;
        let merge_target_bounds = Bounds(τα - merge_tolerance, τα + merge_tolerance);
        let inner_tolerance = ε * config.inner.tolerance_mult;
        let refinement_tolerance = ε * config.refinement.tolerance_mult;
        let maximise_above = τα + ε * config.insertion_cutoff_factor;
        let ε_prev = ε;
        ε = tolerance.update(ε, state.iteration());

        // Maximum insertion count and measure difference calculation depend on insertion style.
        let (m, warn_insertions) = match (state.iteration(), config.bootstrap_insertions) {
            (i, Some((l, k))) if i <= l => (k, false),
            _ => (config.max_insertions, !quiet),
        };
        let max_insertions = match config.insertion_style {
            InsertionStyle::Zero => {
                todo!("InsertionStyle::Zero does not currently work with FISTA, so it is disabled.");
                // let n = μ.len();
                // μ = DiscreteMeasure::new();
                // n + m
            },
            InsertionStyle::Reuse => m,
        };

        // Calculate the smooth part of the surrogate model.
        // Using `std::mem::replace` here is not ideal, and expects that `empty_observable`
        // has no significant overhead. For some reason Rust doesn't allow us to simply move
        // the residual and replace it below before the end of this closure.
        residual *= -τ;
        let r = std::mem::replace(&mut residual, opA.empty_observable());
        let minus_τv = preadjA.apply(r);   // minus_τv = -τA^*(Aμ^k-b)
        // TODO: should avoid a second copy of μ here; μ_base already stores a copy.
        let ω0 = op𝒟.apply(μ.clone());     // 𝒟μ^k
        //let g = &minus_τv + ω0;          // Linear term of surrogate model

        // Save the current base point
        let μ_base = μ.clone();

        // Add points to the support until within error tolerance or maximum insertion count reached.
        let mut count = 0;
        let (within_tolerances, d) = 'insertion: loop {
            if μ.len() > 0 {
                // Form the finite-dimensional subproblem. The subproblem references to the
                // original μ^k from the beginning of the iteration are all contained in the
                // immutable c and g.
                let à = op𝒟.findim_matrix(μ.iter_locations());
                let g̃ = DVector::from_iterator(μ.len(),
                                               μ.iter_locations()
                                                .map(|ζ| minus_τv.apply(ζ) + ω0.apply(ζ))
                                                .map(F::to_nalgebra_mixed));
                let mut x = μ.masses_dvector();

                // The gradient of the forward component of the inner objective is C^*𝒟Cx - g̃.
                // We have |C^*𝒟Cx|_2 = sup_{|z|_2 ≤ 1} ⟨z, C^*𝒟Cx⟩ = sup_{|z|_2 ≤ 1} ⟨Cz|𝒟Cx⟩
                // ≤ sup_{|z|_2 ≤ 1} |Cz|_ℳ |𝒟Cx|_∞ ≤ sup_{|z|_2 ≤ 1} |Cz|_ℳ |𝒟| |Cx|_ℳ
                // ≤ sup_{|z|_2 ≤ 1} |z|_1 |𝒟| |x|_1 ≤ sup_{|z|_2 ≤ 1} n |z|_2 |𝒟| |x|_2
                // = n |𝒟| |x|_2, where n is the number of points. Therefore
                let inner_τ = config.inner.τ0 / (op𝒟norm * F::cast_from(μ.len()));

                // Solve the finite-dimensional subproblem.
                let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
                inner_iters += quadratic_nonneg(config.inner.method, &Ã, &g̃, τ*α, &mut x,
                                                inner_τ, inner_it);

                // Update the masses of μ based on the solution of the finite-dimensional subproblem.
                μ.set_masses_dvector(&x);
            }

            // Form d = ω0 - τv - 𝒟μ = -𝒟(μ - μ^k) - τv for checking the proximate optimality
            // conditions in the predual space, and finding new points for insertion, if necessary.
            let mut d = &minus_τv + op𝒟.preapply(μ_diff(&μ, &μ_base));

            // If no merging heuristic is used, let's be more conservative about spike insertion,
            // and skip it after the first round. If merging is done, being more greedy about spike
            // insertion also seems to improve performance.
            let may_break = if let SpikeMergingMethod::None = config.merging {
                false
            } else {
                count > 0
            };

            // If the preliminary check indicates that we are within bounds, and if it otherwise
            // matches the insertion strategy, skip insertion.
            if may_break && target_bounds.superset(&d.bounds()) {
                break 'insertion (true, d)
            }

            // If the rough check didn't indicate stopping, find a maximising point, maintaining
            // for the calculations at the beginning of the loop that v_ξ = (ω0-τv-𝒟μ)(ξ) = d(ξ),
            // where 𝒟μ is now distinct from 𝒟μ^k after the insertions already performed.
            // We do not need to check lower bounds, as a solution of the finite-dimensional
            // subproblem should always satisfy them.

            // If μ has some spikes, only find a maximum of d if it is above a threshold
            // defined by the refinement tolerance.
            let (ξ, v_ξ) = match d.maximise_above(maximise_above, refinement_tolerance,
                                                  config.refinement.max_steps) {
                None => break 'insertion (true, d),
                Some(res) => res,
            };

            // Break if maximum insertion count reached
            if count >= max_insertions {
                let in_bounds2 = target_bounds.upper() >= v_ξ;
                break 'insertion (in_bounds2, d)
            }

            // No point in optimising the weight here; the finite-dimensional algorithm is fast.
            μ += DeltaMeasure { x : ξ, α : 0.0 };
            count += 1;
        };

        if !within_tolerances && warn_insertions {
            // Complain (but continue) if we failed to get within tolerances
            // by inserting more points.
            let err = format!("Maximum insertions reached without achieving \
                               subproblem solution tolerance");
            println!("{}", err.red());
        }

        // Merge spikes
        if state.iteration() % config.merge_every == 0 {
            let n_before_merge = μ.len();
            μ.merge_spikes(config.merging, |μ_candidate| {
                let mut d = &minus_τv + op𝒟.preapply(μ_diff(&μ_candidate, &μ_base));

                if merge_target_bounds.superset(&d.bounds()) {
                    return Some(())
                }

                let d_min_supp = μ_candidate.iter_spikes().filter_map(|&DeltaMeasure{ α, ref x }| {
                    (α != 0.0).then(|| d.apply(x))
                }).reduce(F::min);

                if d_min_supp.map_or(true, |b| b >= merge_target_bounds.lower()) &&
                   d.has_upper_bound(merge_target_bounds.upper(), refinement_tolerance,
                                     config.refinement.max_steps) {
                    Some(())
                } else {
                    None
                }
            });
            debug_assert!(μ.len() >= n_before_merge);
            merged += μ.len() - n_before_merge;
        }

        let n_before_prune = μ.len();
        (residual, τ) = match specialisation.update(&mut μ, &μ_base) {
            (r, None) => (r, τ),
            (r, Some(new_τ)) => (r, new_τ)
        };
        debug_assert!(μ.len() <= n_before_prune);
        pruned += n_before_prune - μ.len();

        this_iters += 1;

        // Give function value if needed
        state.if_verbose(|| {
            let value_μ = specialisation.value_μ(&μ);
            // Plot if so requested
            plotter.plot_spikes(
                format!("iter {} end; {}", state.iteration(), within_tolerances), &d,
                "start".to_string(), Some(&minus_τv),
                Some(target_bounds), value_μ,
            );
            // Calculate mean inner iterations and reset relevant counters.
            // Return the statistics
            let res = IterInfo {
                value : specialisation.calculate_fit(&μ, &residual) + α * value_μ.norm(Radon),
                n_spikes : value_μ.len(),
                inner_iters,
                this_iters,
                merged,
                pruned,
                ε : ε_prev,
                postprocessing: config.postprocessing.then(|| value_μ.clone()),
            };
            inner_iters = 0;
            this_iters = 0;
            merged = 0;
            pruned = 0;
            res
        })
    });

    specialisation.postprocess(μ, config.final_merging)
}