Tue, 29 Nov 2022 15:36:12 +0200
fubar
0 | 1 | //! Iterative algorithms for solving finite-dimensional subproblems. |
2 | ||
3 | use serde::{Serialize, Deserialize}; | |
4 | use nalgebra::{DVector, DMatrix}; | |
5 | use numeric_literals::replace_float_literals; | |
6 | use itertools::{izip, Itertools}; | |
7 | use colored::Colorize; | |
8 | ||
9 | use alg_tools::iter::Mappable; | |
10 | use alg_tools::error::NumericalError; | |
11 | use alg_tools::iterate::{ | |
12 | AlgIteratorFactory, | |
13 | AlgIteratorState, | |
14 | AlgIteratorOptions, | |
15 | Verbose, | |
16 | Step, | |
17 | }; | |
2 | 18 | use alg_tools::linops::{GEMV, Adjointable, AXPY}; |
0 | 19 | use alg_tools::nalgebra_support::ToNalgebraRealField; |
2 | 20 | use alg_tools::norms::{Projection, Linfinity}; |
21 | use alg_tools::euclidean::Euclidean; | |
0 | 22 | |
23 | use crate::types::*; | |
24 | ||
/// Method for solving finite-dimensional subproblems
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
#[allow(dead_code)]
pub enum InnerMethod {
    /// Forward-backward splitting
    FB,
    /// Semismooth Newton
    SSN,
}
34 | ||
/// Settings for the solution of finite-dimensional subproblems
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
pub struct InnerSettings<F : Float> {
    /// Iterative method to use (forward-backward or semismooth Newton)
    pub method : InnerMethod,
    /// Proportional step length (∈ [0, 1) for `InnerMethod::FB`).
    pub τ0 : F,
    /// Fraction of the outer algorithm's `tolerance` given to the inner algorithm
    pub tolerance_mult : F,
    /// Iterator options (iteration counts, verbosity, …)
    #[serde(flatten)]
    pub iterator_options : AlgIteratorOptions,
}
48 | ||
49 | #[replace_float_literals(F::cast_from(literal))] | |
50 | impl<F : Float> Default for InnerSettings<F> { | |
51 | fn default() -> Self { | |
52 | InnerSettings { | |
53 | τ0 : 0.99, | |
54 | iterator_options : AlgIteratorOptions { | |
55 | // max_iter cannot be very small, as initially FB needs many iterations, although | |
56 | // on later invocations even one or two tends to be enough | |
57 | max_iter : 2000, | |
58 | // verbose_iter affects testing of sufficient convergence, so we set it to | |
59 | // a small value… | |
60 | verbose_iter : Verbose::Every(1), | |
61 | // … but don't print out anything | |
62 | quiet : true, | |
63 | .. Default::default() | |
64 | }, | |
65 | method : InnerMethod::FB, | |
66 | tolerance_mult : 0.01, | |
67 | } | |
68 | } | |
69 | } | |
70 | ||
71 | /// Compute the proximal operator of $x \mapsto x + \delta\_{[0, \infty)}$, i.e., | |
72 | /// the non-negativity contrained soft-thresholding operator. | |
73 | #[inline] | |
74 | #[replace_float_literals(F::cast_from(literal))] | |
75 | fn nonneg_soft_thresholding<F : Float>(v : F, λ : F) -> F { | |
76 | (v - λ).max(0.0) | |
77 | } | |
78 | ||
/// Forward-backward splitting implementation of [`quadratic_nonneg`].
/// For detailed documentation of the inputs and outputs, refer to there.
///
/// The `λ` component of the model is handled in the proximal step instead of the gradient step
/// for potential performance improvements.
#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
pub fn quadratic_nonneg_fb<F, I>(
    mA : &DMatrix<F::MixedType>,
    g : &DVector<F::MixedType>,
    //c_ : F,
    λ_ : F,
    x : &mut DVector<F::MixedType>,
    τ_ : F,
    iterator : I
) -> usize
where F : Float + ToNalgebraRealField,
      I : AlgIteratorFactory<F>
{
    // Backup of the pre-prox iterate; only refreshed on verbose iterations,
    // where it is needed for the subdifferential estimate below.
    let mut xprev = x.clone();
    //let c = c_.to_nalgebra_mixed();
    // Convert high-level float parameters to nalgebra's mixed scalar type.
    let λ = λ_.to_nalgebra_mixed();
    let τ = τ_.to_nalgebra_mixed();
    let τλ = τ * λ;
    // Scratch vector for the forward (gradient) step.
    let mut v = DVector::zeros(x.len());
    let mut iters = 0;

    // λ > 0 is required for the model to be meaningful (and is relied upon by
    // the subdifferential calculation).
    assert!(λ > 0.0);

    iterator.iterate(|state| {
        // Replace `x` with $x - τ[Ax-g]= [x + τg]- τAx$
        v.copy_from(g);               // v = g
        v.axpy(1.0, x, τ);            // v = x + τ*g
        v.sygemv(-τ, mA, x, 1.0);     // v = [x + τg]- τAx
        let backup = state.if_verbose(|| {
            xprev.copy_from(x)
        });
        // Calculate the proximal map
        x.iter_mut().zip(v.iter()).for_each(|(x_i, &v_i)| {
            *x_i = nonneg_soft_thresholding(v_i, τλ);
        });

        iters +=1;

        backup.map(|_| {
            // The subdifferential of the objective is $Ax - g + λ + ∂ δ_{≥ 0}(x)$.
            // We return the minimal ∞-norm over all subderivatives.
            v.copy_from(g);                  // d = g
            mA.gemv(&mut v, 1.0, x, -1.0);   // d = Ax - g
            let mut val = 0.0;
            for (&v_i, &x_i) in izip!(v.iter(), x.iter()) {
                let d = v_i + λ;
                // At x_i = 0 with d ≥ 0, zero is itself a subderivative, so the
                // coordinate contributes nothing to the minimal norm.
                if x_i > 0.0 || d < 0.0 {
                    val = val.max(d.abs());
                }
            }
            F::from_nalgebra_mixed(val)
        })
    });

    iters
}
140 | ||
/// Semismooth Newton implementation of [`quadratic_nonneg`].
///
/// For detailed documentation of the inputs, refer to there.
/// This function returns the number of iterations taken if there was no inversion failure,
///
/// ## Method derivation
///
/// **The below may look like garbage. Sorry, but rustdoc is obsolete rubbish
/// that doesn't directly support by-now standard-in-markdown LaTeX math. Instead it
/// forces one into unreliable KaTeX autorender postprocessing and escape hell and that
/// it doesn't even process correctly.**
///
/// <p>
/// For the objective
/// $$
///     J(x) = \frac{1}{2} x^⊤Ax - g^⊤ x + λ{\vec 1}^⊤ x + c + δ_{≥ 0}(x),
/// $$
/// we have the optimality condition
/// $$
///     x - \mathop{\mathrm{prox}}_{τλ{\vec 1}^⊤ + δ_{≥ 0}}(x - τ[Ax-g^⊤]) = 0,
/// $$
/// which we write as
/// $$
///     x - [G ∘ F](x)=0
/// $$
/// for
/// $$
///     G(x) = \mathop{\mathrm{prox}}_{λ{\vec 1}^⊤ + δ_{≥ 0}}
///     \quad\text{and}\quad
///     F(x) = x - τ Ax + τ g^⊤
/// $$
/// We can use Newton derivative chain rule to compute
/// $D_N[G ∘ F](x) = D_N G(F(x)) D_N F(x)$, where
/// $D_N F(x) = \mathop{\mathrm{Id}} - τ A$,
/// and $[D_N G(F(x))]_i = 1$ for inactive coordinates and $=0$ for active coordinates.
/// </p>
///
/// <p>
/// The method itself involves solving $D_N[Id - G ∘ F](x^k) s^k = - [Id - G ∘ F](x^k)$ and
/// updating $x^{k+1} = x^k + s^k$. Consequently
/// $$
///     s^k - D_N G(F(x^k)) [s^k - τ As^k] = - x^k + [G ∘ F](x^k)
/// $$
/// For $𝒜$ the set of active coordinates and $ℐ$ the set of inactive coordinates, this
/// expands as
/// $$
///     [τ A_{ℐ × ℐ}]s^k_ℐ = - x^k_ℐ + [G ∘ F](x^k)_ℐ - [τ A_{ℐ × 𝒜}]s^k_𝒜
/// $$
/// and
/// $$
///     s^k_𝒜 = - x^k_𝒜 + [G ∘ F](x^k)_𝒜.
/// $$
/// Thus on $𝒜$ the update $[x^k + s^k]_𝒜 = [G ∘ F](x^k)_𝒜$ is just the forward-backward update.
/// </p>
///
/// <p>
/// We need to detect stopping by a subdifferential and return $x$ satisfying $x ≥ 0$,
/// which is in general not true for the SSN. We therefore use that $[G ∘ F](x^k)$ is a valid
/// forward-backward step.
/// </p>
#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
pub fn quadratic_nonneg_ssn<F, I>(
    mA : &DMatrix<F::MixedType>,
    g : &DVector<F::MixedType>,
    //c_ : F,
    λ_ : F,
    x : &mut DVector<F::MixedType>,
    τ_ : F,
    iterator : I
) -> Result<usize, NumericalError>
where F : Float + ToNalgebraRealField,
      I : AlgIteratorFactory<F>
{
    let n = x.len();
    let mut xprev = x.clone();
    // Scratch vector for the subdifferential estimate on verbose iterations.
    let mut v = DVector::zeros(n);
    //let c = c_.to_nalgebra_mixed();
    // Convert high-level float parameters to nalgebra's mixed scalar type.
    let λ = λ_.to_nalgebra_mixed();
    let τ = τ_.to_nalgebra_mixed();
    let τλ = τ * λ;
    // `inact[i]` records whether coordinate i was inactive (strictly positive
    // after the prox) on the previous iteration.
    let mut inact : Vec<bool> = Vec::from_iter(std::iter::repeat(false).take(n));
    // Pending SSN step on the inactive coordinates; empty until first computed.
    let mut s = DVector::zeros(0);
    // LU decomposition of A_{ℐ × ℐ}; rebuilt only when the active set changes.
    let mut decomp = nalgebra::linalg::LU::new(DMatrix::zeros(0, 0));
    let mut iters = 0;

    // λ > 0 is required for the model to be meaningful.
    assert!(λ > 0.0);

    let res = iterator.iterate_fallible(|state| {
        // 1. Perform delayed SSN-update based on previously computed step on active
        // coordinates. The step is delayed to the beginning of the loop because
        // the SSN step may violate constraints, so we arrange `x` to contain at the
        // end of the loop the valid FB step that forms part of the SSN step
        let mut si = s.iter();
        for (&ast, x_i, xprev_i) in izip!(inact.iter(), x.iter_mut(), xprev.iter_mut()) {
            if ast {
                *x_i = *xprev_i + *si.next().unwrap()
            }
            *xprev_i = *x_i;
        }

        //xprev.copy_from(x);

        // 2. Calculate FB step.
        // 2.1. Replace `x` with $x⁻ - τ[Ax⁻-g]= [x⁻ + τg]- τAx⁻$
        x.axpy(τ, g, 1.0);               // x = x⁻ + τ*g
        x.sygemv(-τ, mA, &xprev, 1.0);   // x = [x⁻ + τg]- τAx⁻
        // 2.2. Calculate prox and set of active coordinates at the same time
        let mut act_changed = false;
        let mut n_inact = 0;
        for (x_i, ast) in izip!(x.iter_mut(), inact.iter_mut()) {
            if *x_i > τλ {
                // Inactive coordinate: prox subtracts the threshold.
                *x_i -= τλ;
                if !*ast {
                    act_changed = true;
                    *ast = true;
                }
                n_inact += 1;
            } else {
                // Active coordinate: clipped to the constraint.
                *x_i = 0.0;
                if *ast {
                    act_changed = true;
                    *ast = false;
                }
            }
        }

        // *** x now contains forward-backward step ***

        // 3. Solve SSN step `s`.
        // 3.1 Construct [τ A_{ℐ × ℐ}] if the set of inactive coordinates has changed.
        if act_changed {
            let decomp_iter = inact.iter().cartesian_product(inact.iter()).zip(mA.iter());
            let decomp_constr = decomp_iter.filter_map(|((&i_inact, &j_inact), &mAij)| {
                //(i_inact && j_inact).then_some(mAij * τ)
                (i_inact && j_inact).then_some(mAij) // 🔺 below matches removal of τ
            });
            let mat = DMatrix::from_iterator(n_inact, n_inact, decomp_constr);
            decomp = nalgebra::linalg::LU::new(mat);
        }

        // 3.2 Solve `s` = $s_ℐ^k$ from
        // $[τ A_{ℐ × ℐ}]s^k_ℐ = - x^k_ℐ + [G ∘ F](x^k)_ℐ - [τ A_{ℐ × 𝒜}]s^k_𝒜$.
        // With current variable setup we have $[G ∘ F](x^k) = $`x` and $x^k = x⁻$ = `xprev`,
        // so the system to solve is $[τ A_{ℐ × ℐ}]s^k_ℐ = (x-x⁻)_ℐ - [τ A_{ℐ × 𝒜}](x-x⁻)_𝒜$
        // The matrix $[τ A_{ℐ × ℐ}]$ we have already LU-decomposed above into `decomp`.
        s = if n_inact > 0 {
            // 3.2.1 Construct `rhs` = $(x-x⁻)_ℐ - [τ A_{ℐ × 𝒜}](x-x⁻)_𝒜$
            let inactfilt = inact.iter().copied();
            let rhs_iter = izip!(x.iter(), xprev.iter(), mA.row_iter()).filter_zip(inactfilt);
            let rhs_constr = rhs_iter.map(|(&x_i, &xprev_i, mAi)| {
                // Calculate row i of [τ A_{ℐ × 𝒜}]s^k_𝒜 = [τ A_{ℐ × 𝒜}](x-xprev)_𝒜
                let actfilt = inact.iter().copied().map(std::ops::Not::not);
                let actit = izip!(x.iter(), xprev.iter(), mAi.iter()).filter_zip(actfilt);
                let actpart = actit.map(|(&x_j, &xprev_j, &mAij)| {
                    mAij * (x_j - xprev_j)
                }).sum();
                // Subtract it from [x-prev]_i
                //x_i - xprev_i - τ * actpart
                (x_i - xprev_i) / τ - actpart // 🔺 change matches removal of τ above
            });
            let mut rhs = DVector::from_iterator(n_inact, rhs_constr);
            assert_eq!(rhs.len(), n_inact);
            // Solve the system
            if !decomp.solve_mut(&mut rhs) {
                return Step::Failure(NumericalError(
                    "Failed to solve linear system for subproblem SSN."
                ))
            }
            rhs
        } else {
            // No inactive coordinates: the SSN step degenerates to the FB step.
            DVector::zeros(0)
        };

        iters += 1;

        // 4. Report solution quality
        state.if_verbose(|| {
            // Calculate subdifferential at the FB step `x` that hasn't yet had `s` yet added.
            // The subdifferential of the objective is $Ax - g + λ + ∂ δ_{≥ 0}(x)$.
            // We return the minimal ∞-norm over all subderivatives.
            v.copy_from(g);                  // d = g
            mA.gemv(&mut v, 1.0, x, -1.0);   // d = Ax - g
            let mut val = 0.0;
            for (&v_i, &x_i) in izip!(v.iter(), x.iter()) {
                let d = v_i + λ;
                // At x_i = 0 with d ≥ 0, zero is itself a subderivative.
                if x_i > 0.0 || d < 0.0 {
                    val = val.max(d.abs());
                }
            }
            F::from_nalgebra_mixed(val)
        })
    });

    res.map(|_| iters)
}
336 | ||
337 | /// This function applies an iterative method for the solution of the quadratic non-negativity | |
338 | /// constrained problem | |
339 | /// <div>$$ | |
340 | /// \min_{x ∈ ℝ^n} \frac{1}{2} x^⊤Ax - g^⊤ x + λ{\vec 1}^⊤ x + c + δ_{≥ 0}(x). | |
341 | /// $$</div> | |
342 | /// Semismooth Newton or forward-backward are supported based on the setting in `method`. | |
343 | /// The parameter `mA` is matrix $A$, and `g` and `λ` are as in the mathematical formulation. | |
344 | /// The constant $c$ does not need to be provided. The step length parameter is `τ` while | |
345 | /// `x` contains the initial iterate and on return the final one. The `iterator` controls | |
346 | /// stopping. The “verbose” value output by all methods is the $ℓ\_∞$ distance of some | |
347 | /// subdifferential of the objective to zero. | |
348 | /// | |
349 | /// Interior point methods could offer a further alternative, for example, the one in: | |
350 | /// | |
351 | /// * Valkonen T. - _A method for weighted projections to the positive definite | |
352 | /// cone_, <https://doi.org/10.1080/02331934.2014.929680>. | |
353 | /// | |
354 | /// This function returns the number of iterations taken. | |
355 | pub fn quadratic_nonneg<F, I>( | |
356 | method : InnerMethod, | |
357 | mA : &DMatrix<F::MixedType>, | |
358 | g : &DVector<F::MixedType>, | |
359 | //c_ : F, | |
360 | λ : F, | |
361 | x : &mut DVector<F::MixedType>, | |
362 | τ : F, | |
363 | iterator : I | |
364 | ) -> usize | |
365 | where F : Float + ToNalgebraRealField, | |
366 | I : AlgIteratorFactory<F> | |
367 | { | |
368 | ||
369 | match method { | |
370 | InnerMethod::FB => | |
371 | quadratic_nonneg_fb(mA, g, λ, x, τ, iterator), | |
372 | InnerMethod::SSN => | |
373 | quadratic_nonneg_ssn(mA, g, λ, x, τ, iterator).unwrap_or_else(|e| { | |
374 | println!("{}", format!("{e}. Using FB fallback.").red()); | |
375 | let ins = InnerSettings::<F>::default(); | |
376 | quadratic_nonneg_fb(mA, g, λ, x, τ, ins.iterator_options) | |
377 | }) | |
378 | } | |
379 | } | |
2 | 380 | |
381 | /// PDPSimplementation of [`l1_nonneg`]. | |
382 | /// For detailed documentation of the inputs and outputs, refer to there. | |
383 | #[replace_float_literals(F::cast_from(literal))] | |
384 | pub fn l1_nonneg_pdps<F, I, A, Y>( | |
385 | mA : &A, | |
386 | b : &Y, | |
387 | λ : F, | |
388 | x : &mut DVector<F>, | |
389 | τ : F, | |
390 | σ : F, | |
391 | iterator : I | |
392 | ) -> usize | |
393 | where F : Float + ToNalgebraRealField, | |
394 | I : AlgIteratorFactory<F>, | |
395 | Y : Euclidean<F, Output = Y> + Projection<F, Linfinity> + AXPY<F>, | |
396 | A : GEMV<F, DVector<F>, Codomain=Y> | |
397 | + Adjointable<DVector<F>, Y>, | |
398 | for<'a> A::Adjoint<'a> : GEMV<F, Y, Codomain=DVector<F>> | |
399 | { | |
400 | let mAt = mA.adjoint(); | |
401 | let mut xprev = x.clone(); | |
402 | let τλ = τ * λ; | |
403 | let mut v = DVector::zeros(x.len()); | |
404 | let mut y = b.similar_origin(); | |
405 | let mut iters = 0; | |
406 | ||
407 | assert!(λ > 0.0); | |
408 | ||
409 | iterator.iterate(|state| { | |
410 | // Replace `x` with $x - τA^*y | |
411 | xprev.copy_from(x); | |
412 | mAt.gemv(x, -τ, &y, 1.0); | |
413 | // Calculate the proximal map | |
414 | x.iter_mut().for_each(|x_i| *x_i = nonneg_soft_thresholding(*x_i, τλ)); | |
415 | // Calculate v = 2x - xprev | |
416 | v.copy_from(x); | |
417 | v.axpy(-1.0, &xprev, 2.0); | |
418 | // Calculate y = y + σ(A v - b) | |
419 | y.axpy(-σ, b, 1.0); | |
420 | mA.gemv(&mut y, σ, &v, 1.0); | |
421 | y.proj_ball_mut(1.0, Linfinity); | |
422 | ||
423 | iters +=1; | |
424 | ||
425 | state.if_verbose(|| { | |
426 | // The subdifferential of the objective is $A^* ∂\|.\|(Ax - g) + λ + ∂δ_{≥ 0}(x)$. | |
427 | // We return the minimal ∞-norm over all subderivatives. | |
428 | v.copy_from(b); // d = g | |
429 | mA.gemv(&mut ỹ, 1.0, x, -1.0); // d = Ax - b | |
430 | let mut val = 0.0; | |
431 | izip!(ỹ.iter(), x.iter()).for_each(|(&ỹ_i, &x_i)| { | |
432 | use std::cmp::Ordering::*; | |
433 | let tmp = match ỹ_i.partial_cmp(&0.0) { | |
434 | None => F::INFINITY, | |
435 | Some(c) => match (c, x_i > 0.0) { | |
436 | (Greater, true) => 1.0 + λ, | |
437 | (Greater, false) | (Equal, false) => 0.0, | |
438 | (Less, true) => -1.0 + λ, | |
439 | (Less, false) => 0.0.min(-1.0 + λ), | |
440 | (Equal, true) => 0.0.max(-1.0 + λ), // since λ > 0 | |
441 | } | |
442 | }; | |
443 | val = val.max(tmp); | |
444 | }); | |
445 | val | |
446 | }) | |
447 | }); | |
448 | ||
449 | iters | |
450 | } | |
451 | ||
/// Method for L1 weight optimisation
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
#[allow(dead_code)]
pub enum L1InnerMethod {
    /// Primal-dual proximal splitting
    PDPS,
}
459 | ||
460 | /// This function applies an iterative method for the solution of the L1 non-negativity | |
461 | /// constrained problem | |
462 | /// <div>$$ | |
463 | /// \min_{x ∈ ℝ^n} \|Ax - b\|_1 + λ{\vec 1}^⊤ x + δ_{≥ 0}(x). | |
464 | /// $$</div> | |
465 | /// | |
466 | /// This function returns the number of iterations taken. | |
467 | pub fn l1_nonneg<F, I>( | |
468 | method : L1InnerMethod, | |
469 | mA : &DMatrix<F::MixedType>, | |
470 | b : &DVector<F::MixedType>, | |
471 | λ : F, | |
472 | σ : F, | |
473 | x : &mut DVector<F::MixedType>, | |
474 | τ : F, | |
475 | iterator : I | |
476 | ) -> usize | |
477 | where F : Float + ToNalgebraRealField, | |
478 | I : AlgIteratorFactory<F> { | |
479 | ||
480 | match method { | |
481 | L1InnerMethod::PDPS => l1_nonneg_pdps(mA, b, λ, x, τ, σ, iterator) | |
482 | } | |
483 | } |