Clean up / remove various unused FB algorithm family hacks.

author       Tuomo Valkonen <tuomov@iki.fi>
date         Fri, 02 Dec 2022 18:14:03 +0200
changeset    8:ea3ca78873e8
parent       7:c32171f7cce5
child        9:21b0e537ac0e

src/fb.rs
src/frank_wolfe.rs
src/types.rs
--- a/src/fb.rs	Fri Dec 02 18:08:40 2022 +0200
+++ b/src/fb.rs	Fri Dec 02 18:14:03 2022 +0200
@@ -531,7 +531,7 @@
     // We multiply tolerance by τ for FB since
     // our subproblems depending on tolerances are scaled by τ compared to the conditional
     // gradient approach.
-    let mut tolerance = config.tolerance * τ * α;
+    let tolerance = config.tolerance * τ * α;
     let mut ε = tolerance.initial();
 
     // Initialise operators
@@ -569,28 +569,12 @@
     iterator.iterate(|state| {
         // Calculate subproblem tolerances, and update main tolerance for next iteration
         let τα = τ * α;
-        // if μ.len() == 0 /*state.iteration() == 1*/ {
-        //     let t = minus_τv.bounds().upper() * 0.001;
-        //     if t > 0.0 {
-        //         let (ξ, v_ξ) = minus_τv.maximise(t, config.refinement.max_steps);
-        //         if τα + ε > v_ξ && v_ξ > τα {
-        //             // The zero measure is already within bounds, so improve them
-        //             tolerance = config.tolerance * (v_ξ - τα);
-        //             ε = tolerance.initial();
-        //         }
-        //         μ += DeltaMeasure { x : ξ, α : 0.0 };
-        //     } else {
-        //         // Zero is the solution.
-        //         return Step::Terminated
-        //     }
-        // }
         let target_bounds = Bounds(τα - ε,  τα + ε);
         let merge_tolerance = config.merge_tolerance_mult * ε;
         let merge_target_bounds = Bounds(τα - merge_tolerance,  τα + merge_tolerance);
         let inner_tolerance = ε * config.inner.tolerance_mult;
         let refinement_tolerance = ε * config.refinement.tolerance_mult;
         let maximise_above = τα + ε * config.insertion_cutoff_factor;
-        let mut ε1 = ε;
         let ε_prev = ε;
         ε = tolerance.update(ε, state.iteration());
 
@@ -677,38 +661,14 @@
             // We do not need to check lower bounds, as a solution of the finite-dimensional
             // subproblem should always satisfy them.
 
-            // // Find the mimimum over the support of μ.
-            // let d_min_supp = d_max;μ.iter_spikes().filter_map(|&DeltaMeasure{ α, ref x }| {
-            //    (α != F::ZERO).then(|| d.value(x))
-            // }).reduce(F::min).unwrap_or(0.0);
-
-            let (ξ, v_ξ) = if false /* μ.len() == 0*/ /*count == 0 &&*/ {
-                // If μ has no spikes, just find the maximum of d. Then adjust the tolerance, if
-                // necessary, to adapt it to the problem.
-                let (ξ, v_ξ) = d.maximise(refinement_tolerance, config.refinement.max_steps);
-                //dbg!((τα, v_ξ, target_bounds.upper(), maximise_above));
-                if τα < v_ξ  && v_ξ < target_bounds.upper() {
-                    ε1 = v_ξ - τα;
-                    ε *= ε1 / ε_prev;
-                    tolerance *= ε1 / ε_prev;
-                }
-                (ξ, v_ξ)
-            } else {
-                // If μ has some spikes, only find a maximum of d if it is above a threshold
-                // defined by the refinment tolerance.
-                match d.maximise_above(maximise_above, refinement_tolerance,
-                                    config.refinement.max_steps) {
-                    None => break 'insertion (true, d),
-                    Some(res) => res,
-                }
+            // If μ has some spikes, only find a maximum of d if it is above a threshold
+            // defined by the refinement tolerance.
+            let (ξ, v_ξ) =  match d.maximise_above(maximise_above, refinement_tolerance,
+                                                   config.refinement.max_steps) {
+                None => break 'insertion (true, d),
+                Some(res) => res,
             };
 
-            // // Do a one final check whether we can stop already without inserting more points
-            // // because `d` actually in bounds based on a more refined estimate.
-            // if may_break && target_bounds.upper() >= v_ξ {
-            //     break (true, d)
-            // }
-
             // Break if maximum insertion count reached
             if count >= max_insertions {
                 let in_bounds2 = target_bounds.upper() >= v_ξ;
@@ -732,11 +692,9 @@
         if state.iteration() % config.merge_every == 0 {
             let n_before_merge = μ.len();
             μ.merge_spikes(config.merging, |μ_candidate| {
-                //println!("Merge attempt!");
                 let mut d = &minus_τv + op𝒟.preapply(μ_diff(&μ_candidate, &μ_base));
 
                 if merge_target_bounds.superset(&d.bounds()) {
-                    //println!("…Early Ok");
                     return Some(())
                 }
 
@@ -747,10 +705,8 @@
                 if d_min_supp.map_or(true, |b| b >= merge_target_bounds.lower()) &&
                 d.has_upper_bound(merge_target_bounds.upper(), refinement_tolerance,
                                     config.refinement.max_steps) {
-                    //println!("…Ok");
                     Some(())
                 } else {
-                    //println!("…Fail");
                     None
                 }
             });
@@ -777,7 +733,7 @@
                 "start".to_string(), Some(&minus_τv),
                 Some(target_bounds), value_μ,
             );
-            // Calculate mean inner iterations and reset relevant counters
+            // Calculate mean inner iterations and reset relevant counters.
             // Return the statistics
             let res = IterInfo {
                 value : specialisation.calculate_fit(&μ, &residual) + α * value_μ.norm(Radon),
@@ -787,7 +743,6 @@
                 merged,
                 pruned,
                 ε : ε_prev,
-                maybe_ε1 : Some(ε1),
                 postprocessing: config.postprocessing.then(|| value_μ.clone()),
             };
             inner_iters = 0;
--- a/src/frank_wolfe.rs	Fri Dec 02 18:08:40 2022 +0200
+++ b/src/frank_wolfe.rs	Fri Dec 02 18:14:03 2022 +0200
@@ -313,7 +313,6 @@
                 merged,
                 pruned,
                 ε : ε_prev,
-                maybe_ε1 : None,
                 postprocessing : None,
             };
             inner_iters = 0;
--- a/src/types.rs	Fri Dec 02 18:08:40 2022 +0200
+++ b/src/types.rs	Fri Dec 02 18:14:03 2022 +0200
@@ -39,22 +39,15 @@
     pub inner_iters : usize,
     /// Current tolerance
     pub ε : F,
-    /// Strict tolerance update if one was used
-    pub maybe_ε1 : Option<F>,
     /// Solve fin.dim problem for this measure to get the optimal `value`.
     pub postprocessing : Option<DiscreteMeasure<Loc<F, N>, F>>,
 }
 
 impl<F, const N : usize> LogRepr for IterInfo<F, N> where F : LogRepr + Float {
     fn logrepr(&self) -> ColoredString {
-        let eqsign = match self.maybe_ε1 {
-            Some(ε1) if ε1 < self.ε => '≛',
-            _ => '=',
-        };
-        format!("{}\t| N = {}, ε {} {:.8}, inner_iters_mean = {}, merged+pruned_mean = {}+{}",
+        format!("{}\t| N = {}, ε = {:.8}, inner_iters_mean = {}, merged+pruned_mean = {}+{}",
                 self.value.logrepr(),
                 self.n_spikes,
-                eqsign,
                 self.ε,
                 self.inner_iters as float / self.this_iters as float,
                 self.merged as float / self.this_iters as float,

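For reference, a minimal self-contained sketch of the log line produced by the simplified `logrepr` after this change (the '≛'/'=' switch driven by `maybe_ε1` is gone, so the tolerance is always printed with a plain '='). The struct name `IterInfoDemo` is hypothetical, and plain `f64`/`usize`/`String` stand in for the crate's generic `F`, its `float` alias, and `ColoredString`:

    /// Hypothetical stand-in for the crate's `IterInfo<F, N>`.
    struct IterInfoDemo {
        value : f64,
        n_spikes : usize,
        ε : f64,
        inner_iters : usize,
        merged : usize,
        pruned : usize,
        this_iters : usize,
    }

    impl IterInfoDemo {
        /// Formats one iteration-log line with the same template as the
        /// simplified `logrepr`, returning a plain `String`.
        fn logrepr(&self) -> String {
            format!("{}\t| N = {}, ε = {:.8}, inner_iters_mean = {}, merged+pruned_mean = {}+{}",
                    self.value,
                    self.n_spikes,
                    self.ε,
                    self.inner_iters as f64 / self.this_iters as f64,
                    self.merged as f64 / self.this_iters as f64,
                    self.pruned as f64 / self.this_iters as f64)
        }
    }

    fn main() {
        let info = IterInfoDemo {
            value : 0.123456, n_spikes : 7, ε : 0.00012345,
            inner_iters : 42, merged : 3, pruned : 1, this_iters : 10,
        };
        println!("{}", info.logrepr());
    }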