Update documentation references

author      Tuomo Valkonen <tuomov@iki.fi>
date        Mon, 17 Feb 2025 13:51:50 -0500
branch      dev
changeset   51:0693cc9ba9f0
parent      50:39c5e6c7759d
child       52:f0e8704d3f0e

Update documentation references

Also allow Zed to reindent.

src/fb.rs
src/measures/merging.rs
src/prox_penalty.rs
src/regularisation.rs
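
The documentation-reference updates in the diff below replace bare intra-doc links such as [`InnerSettings`] with fully qualified paths such as [`crate::subproblem::InnerSettings`], so rustdoc resolves them independently of re-exports; the remaining hunks are reindentation. A minimal sketch of the resulting link style on a hypothetical item (only the two paths are taken from the diff):

// Hypothetical item illustrating the fully qualified intra-doc link style
// used in the hunks below; the paths `crate::subproblem::InnerSettings` and
// `FBGenericConfig::inner` appear in the diff, the item itself does not.
/// The subproblem is solved with either SSN or FB, as determined by
/// [`crate::subproblem::InnerSettings`] in [`FBGenericConfig::inner`].
pub fn doc_link_style_example() {}
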
--- a/src/fb.rs	Mon Feb 17 13:45:11 2025 -0500
+++ b/src/fb.rs	Mon Feb 17 13:51:50 2025 -0500
@@ -74,69 +74,50 @@
 </p>
 
 We solve this with either SSN or FB as determined by
-[`InnerSettings`] in [`FBGenericConfig::inner`].
+[`crate::subproblem::InnerSettings`] in [`FBGenericConfig::inner`].
 */
 
+use colored::Colorize;
 use numeric_literals::replace_float_literals;
-use serde::{Serialize, Deserialize};
-use colored::Colorize;
+use serde::{Deserialize, Serialize};
 
+use alg_tools::euclidean::Euclidean;
+use alg_tools::instance::Instance;
 use alg_tools::iterate::AlgIteratorFactory;
-use alg_tools::euclidean::Euclidean;
 use alg_tools::linops::{Mapping, GEMV};
 use alg_tools::mapping::RealMapping;
 use alg_tools::nalgebra_support::ToNalgebraRealField;
-use alg_tools::instance::Instance;
 
-use crate::types::*;
-use crate::measures::{
-    DiscreteMeasure,
-    RNDM,
-};
+use crate::dataterm::{calculate_residual, DataTerm, L2Squared};
+use crate::forward_model::{AdjointProductBoundedBy, ForwardModel};
 use crate::measures::merging::SpikeMerging;
-use crate::forward_model::{
-    ForwardModel,
-    AdjointProductBoundedBy,
-};
-use crate::plot::{
-    SeqPlotter,
-    Plotting,
-    PlotLookup
-};
+use crate::measures::{DiscreteMeasure, RNDM};
+use crate::plot::{PlotLookup, Plotting, SeqPlotter};
+pub use crate::prox_penalty::{FBGenericConfig, ProxPenalty};
 use crate::regularisation::RegTerm;
-use crate::dataterm::{
-    calculate_residual,
-    L2Squared,
-    DataTerm,
-};
-pub use crate::prox_penalty::{
-    FBGenericConfig,
-    ProxPenalty
-};
+use crate::types::*;
 
 /// Settings for [`pointsource_fb_reg`].
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
 #[serde(default)]
-pub struct FBConfig<F : Float> {
+pub struct FBConfig<F: Float> {
     /// Step length scaling
-    pub τ0 : F,
+    pub τ0: F,
     /// Generic parameters
-    pub generic : FBGenericConfig<F>,
+    pub generic: FBGenericConfig<F>,
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float> Default for FBConfig<F> {
+impl<F: Float> Default for FBConfig<F> {
     fn default() -> Self {
         FBConfig {
-            τ0 : 0.99,
-            generic : Default::default(),
+            τ0: 0.99,
+            generic: Default::default(),
         }
     }
 }
 
-pub(crate) fn prune_with_stats<F : Float, const N : usize>(
-    μ : &mut RNDM<F, N>,
-) -> usize {
+pub(crate) fn prune_with_stats<F: Float, const N: usize>(μ: &mut RNDM<F, N>) -> usize {
     let n_before_prune = μ.len();
     μ.prune();
     debug_assert!(μ.len() <= n_before_prune);
@@ -145,25 +126,27 @@
 
 #[replace_float_literals(F::cast_from(literal))]
 pub(crate) fn postprocess<
-    F : Float,
-    V : Euclidean<F> + Clone,
-    A : GEMV<F, RNDM<F, N>, Codomain = V>,
-    D : DataTerm<F, V, N>,
-    const N : usize
-> (
-    mut μ : RNDM<F, N>,
-    config : &FBGenericConfig<F>,
-    dataterm : D,
-    opA : &A,
-    b : &V,
+    F: Float,
+    V: Euclidean<F> + Clone,
+    A: GEMV<F, RNDM<F, N>, Codomain = V>,
+    D: DataTerm<F, V, N>,
+    const N: usize,
+>(
+    mut μ: RNDM<F, N>,
+    config: &FBGenericConfig<F>,
+    dataterm: D,
+    opA: &A,
+    b: &V,
 ) -> RNDM<F, N>
 where
-    RNDM<F, N> : SpikeMerging<F>,
-    for<'a> &'a RNDM<F, N> : Instance<RNDM<F, N>>,
+    RNDM<F, N>: SpikeMerging<F>,
+    for<'a> &'a RNDM<F, N>: Instance<RNDM<F, N>>,
 {
-    μ.merge_spikes_fitness(config.final_merging_method(),
-                           |μ̃| dataterm.calculate_fit_op(μ̃, opA, b),
-                           |&v| v);
+    μ.merge_spikes_fitness(
+        config.final_merging_method(),
+        |μ̃| dataterm.calculate_fit_op(μ̃, opA, b),
+        |&v| v,
+    );
     μ.prune();
     μ
 }
@@ -187,33 +170,29 @@
 ///
 /// Returns the final iterate.
 #[replace_float_literals(F::cast_from(literal))]
-pub fn pointsource_fb_reg<
-    F, I, A, Reg, P, const N : usize
->(
-    opA : &A,
-    b : &A::Observable,
-    reg : Reg,
-    prox_penalty : &P,
-    fbconfig : &FBConfig<F>,
-    iterator : I,
-    mut plotter : SeqPlotter<F, N>,
+pub fn pointsource_fb_reg<F, I, A, Reg, P, const N: usize>(
+    opA: &A,
+    b: &A::Observable,
+    reg: Reg,
+    prox_penalty: &P,
+    fbconfig: &FBConfig<F>,
+    iterator: I,
+    mut plotter: SeqPlotter<F, N>,
 ) -> RNDM<F, N>
 where
-    F : Float + ToNalgebraRealField,
-    I : AlgIteratorFactory<IterInfo<F, N>>,
-    for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
-    A : ForwardModel<RNDM<F, N>, F>
-        + AdjointProductBoundedBy<RNDM<F, N>, P, FloatType=F>,
-    A::PreadjointCodomain : RealMapping<F, N>,
-    PlotLookup : Plotting<N>,
-    RNDM<F, N> : SpikeMerging<F>,
-    Reg : RegTerm<F, N>,
-    P : ProxPenalty<F, A::PreadjointCodomain, Reg, N>,
+    F: Float + ToNalgebraRealField,
+    I: AlgIteratorFactory<IterInfo<F, N>>,
+    for<'b> &'b A::Observable: std::ops::Neg<Output = A::Observable>,
+    A: ForwardModel<RNDM<F, N>, F> + AdjointProductBoundedBy<RNDM<F, N>, P, FloatType = F>,
+    A::PreadjointCodomain: RealMapping<F, N>,
+    PlotLookup: Plotting<N>,
+    RNDM<F, N>: SpikeMerging<F>,
+    Reg: RegTerm<F, N>,
+    P: ProxPenalty<F, A::PreadjointCodomain, Reg, N>,
 {
-
     // Set up parameters
     let config = &fbconfig.generic;
-    let τ = fbconfig.τ0/opA.adjoint_product_bound(prox_penalty).unwrap();
+    let τ = fbconfig.τ0 / opA.adjoint_product_bound(prox_penalty).unwrap();
     // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled
     // by τ compared to the conditional gradient approach.
     let tolerance = config.tolerance * τ * reg.tolerance_scaling();
@@ -224,14 +203,12 @@
     let mut residual = -b;
 
     // Statistics
-    let full_stats = |residual : &A::Observable,
-                      μ : &RNDM<F, N>,
-                      ε, stats| IterInfo {
-        value : residual.norm2_squared_div2() + reg.apply(μ),
-        n_spikes : μ.len(),
+    let full_stats = |residual: &A::Observable, μ: &RNDM<F, N>, ε, stats| IterInfo {
+        value: residual.norm2_squared_div2() + reg.apply(μ),
+        n_spikes: μ.len(),
         ε,
         //postprocessing: config.postprocessing.then(|| μ.clone()),
-        .. stats
+        ..stats
     };
     let mut stats = IterInfo::new();
 
@@ -242,19 +219,24 @@
 
         // Save current base point
         let μ_base = μ.clone();
-            
+
         // Insert and reweigh
         let (maybe_d, _within_tolerances) = prox_penalty.insert_and_reweigh(
-            &mut μ, &mut τv, &μ_base, None,
-            τ, ε,
-            config, &reg, &state, &mut stats
+            &mut μ, &mut τv, &μ_base, None, τ, ε, config, &reg, &state, &mut stats,
         );
 
         // Prune and possibly merge spikes
         if config.merge_now(&state) {
             stats.merged += prox_penalty.merge_spikes(
-                &mut μ, &mut τv, &μ_base, None, τ, ε, config, &reg,
-                Some(|μ̃ : &RNDM<F, N>| L2Squared.calculate_fit_op(μ̃, opA, b)),
+                &mut μ,
+                &mut τv,
+                &μ_base,
+                None,
+                τ,
+                ε,
+                config,
+                &reg,
+                Some(|μ̃: &RNDM<F, N>| L2Squared.calculate_fit_op(μ̃, opA, b)),
             );
         }
 
@@ -269,9 +251,14 @@
         // Give statistics if needed
         state.if_verbose(|| {
             plotter.plot_spikes(iter, maybe_d.as_ref(), Some(&τv), &μ);
-            full_stats(&residual, &μ, ε, std::mem::replace(&mut stats, IterInfo::new()))
+            full_stats(
+                &residual,
+                &μ,
+                ε,
+                std::mem::replace(&mut stats, IterInfo::new()),
+            )
         });
-        
+
         // Update main tolerance for next iteration
         ε = tolerance.update(ε, iter);
     }
@@ -298,33 +285,29 @@
 ///
 /// Returns the final iterate.
 #[replace_float_literals(F::cast_from(literal))]
-pub fn pointsource_fista_reg<
-    F, I, A, Reg, P, const N : usize
->(
-    opA : &A,
-    b : &A::Observable,
-    reg : Reg,
-    prox_penalty : &P,
-    fbconfig : &FBConfig<F>,
-    iterator : I,
-    mut plotter : SeqPlotter<F, N>,
+pub fn pointsource_fista_reg<F, I, A, Reg, P, const N: usize>(
+    opA: &A,
+    b: &A::Observable,
+    reg: Reg,
+    prox_penalty: &P,
+    fbconfig: &FBConfig<F>,
+    iterator: I,
+    mut plotter: SeqPlotter<F, N>,
 ) -> RNDM<F, N>
 where
-    F : Float + ToNalgebraRealField,
-    I : AlgIteratorFactory<IterInfo<F, N>>,
-    for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
-    A : ForwardModel<RNDM<F, N>, F>
-        + AdjointProductBoundedBy<RNDM<F, N>, P, FloatType=F>,
-    A::PreadjointCodomain : RealMapping<F, N>,
-    PlotLookup : Plotting<N>,
-    RNDM<F, N> : SpikeMerging<F>,
-    Reg : RegTerm<F, N>,
-    P : ProxPenalty<F, A::PreadjointCodomain, Reg, N>,
+    F: Float + ToNalgebraRealField,
+    I: AlgIteratorFactory<IterInfo<F, N>>,
+    for<'b> &'b A::Observable: std::ops::Neg<Output = A::Observable>,
+    A: ForwardModel<RNDM<F, N>, F> + AdjointProductBoundedBy<RNDM<F, N>, P, FloatType = F>,
+    A::PreadjointCodomain: RealMapping<F, N>,
+    PlotLookup: Plotting<N>,
+    RNDM<F, N>: SpikeMerging<F>,
+    Reg: RegTerm<F, N>,
+    P: ProxPenalty<F, A::PreadjointCodomain, Reg, N>,
 {
-
     // Set up parameters
     let config = &fbconfig.generic;
-    let τ = fbconfig.τ0/opA.adjoint_product_bound(prox_penalty).unwrap();
+    let τ = fbconfig.τ0 / opA.adjoint_product_bound(prox_penalty).unwrap();
     let mut λ = 1.0;
     // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled
     // by τ compared to the conditional gradient approach.
@@ -338,12 +321,12 @@
     let mut warned_merging = false;
 
     // Statistics
-    let full_stats = |ν : &RNDM<F, N>, ε, stats| IterInfo {
-        value : L2Squared.calculate_fit_op(ν, opA, b) + reg.apply(ν),
-        n_spikes : ν.len(),
+    let full_stats = |ν: &RNDM<F, N>, ε, stats| IterInfo {
+        value: L2Squared.calculate_fit_op(ν, opA, b) + reg.apply(ν),
+        n_spikes: ν.len(),
         ε,
         // postprocessing: config.postprocessing.then(|| ν.clone()),
-        .. stats
+        ..stats
     };
     let mut stats = IterInfo::new();
 
@@ -354,12 +337,10 @@
 
         // Save current base point
         let μ_base = μ.clone();
-            
+
         // Insert new spikes and reweigh
         let (maybe_d, _within_tolerances) = prox_penalty.insert_and_reweigh(
-            &mut μ, &mut τv, &μ_base, None,
-            τ, ε,
-            config, &reg, &state, &mut stats
+            &mut μ, &mut τv, &μ_base, None, τ, ε, config, &reg, &state, &mut stats,
         );
 
         // (Do not) merge spikes.
@@ -371,7 +352,7 @@
 
         // Update inertial parameters
         let λ_prev = λ;
-        λ = 2.0 * λ_prev / ( λ_prev + (4.0 + λ_prev * λ_prev).sqrt() );
+        λ = 2.0 * λ_prev / (λ_prev + (4.0 + λ_prev * λ_prev).sqrt());
         let θ = λ / λ_prev - λ;
 
         // Perform inertial update on μ.
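
For reference, the inertial parameter update reformatted in the `pointsource_fista_reg` hunk above follows the recurrence λ ← 2λ/(λ + √(4 + λ²)), θ = λ/λ_prev − λ, starting from λ = 1. A standalone sketch of that recurrence with plain f64 in place of the generic float type F (hypothetical helper, not part of the crate):

// Standalone sketch of the inertial parameter recurrence from the
// pointsource_fista_reg hunk above; plain f64 replaces the generic F.
fn inertial_update(λ_prev: f64) -> (f64, f64) {
    let λ = 2.0 * λ_prev / (λ_prev + (4.0 + λ_prev * λ_prev).sqrt());
    let θ = λ / λ_prev - λ;
    (λ, θ)
}

fn main() {
    // λ is initialised to 1.0, as in the algorithm above.
    let mut λ = 1.0_f64;
    for k in 1..=5 {
        let (λ_next, θ) = inertial_update(λ);
        println!("iteration {k}: λ = {λ_next:.4}, θ = {θ:.4}");
        λ = λ_next;
    }
}
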
--- a/src/measures/merging.rs	Mon Feb 17 13:45:11 2025 -0500
+++ b/src/measures/merging.rs	Mon Feb 17 13:51:50 2025 -0500
@@ -7,35 +7,34 @@
 */
 
 use numeric_literals::replace_float_literals;
+use serde::{Deserialize, Serialize};
 use std::cmp::Ordering;
-use serde::{Serialize, Deserialize};
 //use clap::builder::{PossibleValuesParser, PossibleValue};
 use alg_tools::nanleast::NaNLeast;
 
-use crate::types::*;
 use super::delta::*;
 use super::discrete::*;
+use crate::types::*;
 
 /// Spike merging heuristic selection
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
 #[allow(dead_code)]
 pub struct SpikeMergingMethod<F> {
     // Merging radius
-    pub(crate) radius : F,
+    pub(crate) radius: F,
     // Enabled
-    pub(crate) enabled : bool,
+    pub(crate) enabled: bool,
     // Interpolate merged points
-    pub(crate) interp : bool,
+    pub(crate) interp: bool,
 }
 
-
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float> Default for SpikeMergingMethod<F> {
+impl<F: Float> Default for SpikeMergingMethod<F> {
     fn default() -> Self {
-        SpikeMergingMethod{
-            radius : 0.01,
-            enabled : false,
-            interp : true,
+        SpikeMergingMethod {
+            radius: 0.01,
+            enabled: false,
+            interp: true,
         }
     }
 }
@@ -55,8 +54,10 @@
     /// removed spikes is set to zero, new ones inserted at the end of the spike vector.
     /// The merge may also be performed by increasing the weights of the existing spikes,
     /// without inserting new spikes.
-    fn merge_spikes<G>(&mut self, method : SpikeMergingMethod<F>, accept : G) -> usize
-    where G : FnMut(&'_ Self) -> bool {
+    fn merge_spikes<G>(&mut self, method: SpikeMergingMethod<F>, accept: G) -> usize
+    where
+        G: FnMut(&'_ Self) -> bool,
+    {
         if method.enabled {
             self.do_merge_spikes_radius(method.radius, method.interp, accept)
         } else {
@@ -72,13 +73,15 @@
     /// `self` is returned. Also the number of merges is returned.
     fn merge_spikes_fitness<G, H, V, O>(
         &mut self,
-        method : SpikeMergingMethod<F>,
-        value : G,
-        fitness : H
+        method: SpikeMergingMethod<F>,
+        value: G,
+        fitness: H,
     ) -> (V, usize)
-    where G : Fn(&'_ Self) -> V,
-          H : Fn(&'_ V) -> O,
-          O : PartialOrd {
+    where
+        G: Fn(&'_ Self) -> V,
+        H: Fn(&'_ V) -> O,
+        O: PartialOrd,
+    {
         let mut res = value(self);
         let initial_fitness = fitness(&res);
         let count = self.merge_spikes(method, |μ| {
@@ -90,15 +93,14 @@
 
     /// Attempt to merge spikes that are within radius $ρ$ of each other (unspecified norm).
     ///
-    /// This method implements [`SpikeMerging::merge_spikes`] for
-    /// [`SpikeMergingMethod::HeuristicRadius`]. The closure `accept` and the return value are
-    /// as for that method.
-    fn do_merge_spikes_radius<G>(&mut self, ρ : F, interp : bool, accept : G) -> usize
-    where G : FnMut(&'_ Self) -> bool;
+    /// This method implements [`SpikeMerging::merge_spikes`].
+    fn do_merge_spikes_radius<G>(&mut self, ρ: F, interp: bool, accept: G) -> usize
+    where
+        G: FnMut(&'_ Self) -> bool;
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float, const N : usize>  DiscreteMeasure<Loc<F, N>, F> {
+impl<F: Float, const N: usize> DiscreteMeasure<Loc<F, N>, F> {
     /// Attempts to merge spikes with indices `i` and `j`.
     ///
     /// This assumes that the weights of the two spikes have already been checked not to be zero.
@@ -110,14 +112,16 @@
     /// Returns the index of `self.spikes` storing the new spike.
     fn attempt_merge<G>(
         &mut self,
-        i : usize,
-        j : usize,
-        interp : bool,
-        accept : &mut G
+        i: usize,
+        j: usize,
+        interp: bool,
+        accept: &mut G,
     ) -> Option<usize>
-    where G : FnMut(&'_ Self) -> bool {
-        let &DeltaMeasure{ x : xi, α : αi } = &self.spikes[i];
-        let &DeltaMeasure{ x : xj, α : αj } = &self.spikes[j];
+    where
+        G: FnMut(&'_ Self) -> bool,
+    {
+        let &DeltaMeasure { x: xi, α: αi } = &self.spikes[i];
+        let &DeltaMeasure { x: xj, α: αj } = &self.spikes[j];
 
         if interp {
             // Merge inplace
@@ -125,9 +129,12 @@
             self.spikes[j].α = 0.0;
             let αia = αi.abs();
             let αja = αj.abs();
-            self.spikes.push(DeltaMeasure{ α : αi + αj, x : (xi * αia + xj * αja) / (αia + αja) });
+            self.spikes.push(DeltaMeasure {
+                α: αi + αj,
+                x: (xi * αia + xj * αja) / (αia + αja),
+            });
             if accept(self) {
-                Some(self.spikes.len()-1)
+                Some(self.spikes.len() - 1)
             } else {
                 // Merge not accepted, restore modification
                 self.spikes[i].α = αi;
@@ -164,8 +171,9 @@
 ///
 /// The closure `compare` operators on references to elements of `slice`.
 /// Returns the sorted vector of indices into `slice`.
-pub fn sort_indices_by<V, F>(slice : &[V], mut compare : F) -> Vec<usize>
-where F : FnMut(&V, &V) -> Ordering
+pub fn sort_indices_by<V, F>(slice: &[V], mut compare: F) -> Vec<usize>
+where
+    F: FnMut(&V, &V) -> Ordering,
 {
     let mut indices = Vec::from_iter(0..slice.len());
     indices.sort_by(|&i, &j| compare(&slice[i], &slice[j]));
@@ -173,15 +181,11 @@
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float> SpikeMerging<F> for DiscreteMeasure<Loc<F, 1>, F> {
-
-    fn do_merge_spikes_radius<G>(
-        &mut self,
-        ρ : F,
-        interp : bool,
-        mut accept : G
-    ) -> usize
-    where G : FnMut(&'_ Self) -> bool {
+impl<F: Float> SpikeMerging<F> for DiscreteMeasure<Loc<F, 1>, F> {
+    fn do_merge_spikes_radius<G>(&mut self, ρ: F, interp: bool, mut accept: G) -> usize
+    where
+        G: FnMut(&'_ Self) -> bool,
+    {
         // Sort by coordinate into an indexing array.
         let mut indices = sort_indices_by(&self.spikes, |&δ1, &δ2| {
             let &Loc([x1]) = &δ1.x;
@@ -195,20 +199,26 @@
 
         // Scan consecutive pairs and merge if close enough and accepted by `accept`.
         if indices.len() == 0 {
-            return count
+            return count;
         }
-        for k in 0..(indices.len()-1) {
+        for k in 0..(indices.len() - 1) {
             let i = indices[k];
-            let j = indices[k+1];
-            let &DeltaMeasure{ x : Loc([xi]), α : αi } = &self.spikes[i];
-            let &DeltaMeasure{ x : Loc([xj]), α : αj } = &self.spikes[j];
+            let j = indices[k + 1];
+            let &DeltaMeasure {
+                x: Loc([xi]),
+                α: αi,
+            } = &self.spikes[i];
+            let &DeltaMeasure {
+                x: Loc([xj]),
+                α: αj,
+            } = &self.spikes[j];
             debug_assert!(xi <= xj);
             // If close enough, attempt merging
             if αi != 0.0 && αj != 0.0 && xj <= xi + ρ {
                 if let Some(l) = self.attempt_merge(i, j, interp, &mut accept) {
                     // For this to work (the debug_assert! to not trigger above), the new
                     // coordinate produced by attempt_merge has to be at most xj.
-                    indices[k+1] = l;
+                    indices[k + 1] = l;
                     count += 1
                 }
             }
@@ -219,9 +229,9 @@
 }
 
 /// Orders `δ1` and `δ2` according to the first coordinate.
-fn compare_first_coordinate<F : Float>(
-    δ1 : &DeltaMeasure<Loc<F, 2>, F>,
-    δ2 : &DeltaMeasure<Loc<F, 2>, F>
+fn compare_first_coordinate<F: Float>(
+    δ1: &DeltaMeasure<Loc<F, 2>, F>,
+    δ2: &DeltaMeasure<Loc<F, 2>, F>,
 ) -> Ordering {
     let &Loc([x11, ..]) = &δ1.x;
     let &Loc([x21, ..]) = &δ2.x;
@@ -230,10 +240,11 @@
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float> SpikeMerging<F> for DiscreteMeasure<Loc<F, 2>, F> {
-
-    fn do_merge_spikes_radius<G>(&mut self, ρ : F, interp : bool, mut accept : G) -> usize
-    where G : FnMut(&'_ Self) -> bool {
+impl<F: Float> SpikeMerging<F> for DiscreteMeasure<Loc<F, 2>, F> {
+    fn do_merge_spikes_radius<G>(&mut self, ρ: F, interp: bool, mut accept: G) -> usize
+    where
+        G: FnMut(&'_ Self) -> bool,
+    {
         // Sort by first coordinate into an indexing array.
         let mut indices = sort_indices_by(&self.spikes, compare_first_coordinate);
 
@@ -243,15 +254,18 @@
 
         // Scan in order
         if indices.len() == 0 {
-            return count
+            return count;
         }
-        for k in 0..indices.len()-1 {
+        for k in 0..indices.len() - 1 {
             let i = indices[k];
-            let &DeltaMeasure{ x : Loc([xi1, xi2]), α : αi } = &self[i];
+            let &DeltaMeasure {
+                x: Loc([xi1, xi2]),
+                α: αi,
+            } = &self[i];
 
             if αi == 0.0 {
                 // Nothing to be done if the weight is already zero
-                continue
+                continue;
             }
 
             let mut closest = None;
@@ -261,35 +275,38 @@
             // the _closest_ mergeable spike might have index less than `k` in `indices`, and a
             // merge with it might not have been attempted with this spike if a different closer
             // spike was discovered based on the second coordinate.
-            'scan_2nd: for l in (start_scan_2nd+1)..indices.len() {
+            'scan_2nd: for l in (start_scan_2nd + 1)..indices.len() {
                 if l == k {
                     // Do not attempt to merge a spike with itself
-                    continue
+                    continue;
                 }
                 let j = indices[l];
-                let &DeltaMeasure{ x : Loc([xj1, xj2]), α : αj } = &self[j];
+                let &DeltaMeasure {
+                    x: Loc([xj1, xj2]),
+                    α: αj,
+                } = &self[j];
 
                 if xj1 < xi1 - ρ {
                     // Spike `j = indices[l]` has too low first coordinate. Update starting index
                     // for next iteration, and continue scanning.
                     start_scan_2nd = l;
-                    continue 'scan_2nd
+                    continue 'scan_2nd;
                 } else if xj1 > xi1 + ρ {
                     // Break out: spike `j = indices[l]` has already too high first coordinate, no
                     // more close enough spikes can be found due to the sorting of `indices`.
-                    break 'scan_2nd
+                    break 'scan_2nd;
                 }
 
                 // If also second coordinate is close enough, attempt merging if closer than
                 // previously discovered mergeable spikes.
-                let d2 = (xi2-xj2).abs();
+                let d2 = (xi2 - xj2).abs();
                 if αj != 0.0 && d2 <= ρ {
-                    let r1 = xi1-xj1;
-                    let d = (d2*d2 + r1*r1).sqrt();
+                    let r1 = xi1 - xj1;
+                    let d = (d2 * d2 + r1 * r1).sqrt();
                     match closest {
                         None => closest = Some((l, j, d)),
                         Some((_, _, r)) if r > d => closest = Some((l, j, d)),
-                        _ => {},
+                        _ => {}
                     }
                 }
             }
@@ -300,13 +317,12 @@
                     // If the merge was successful, make the new spike a candidate for merging.
                     indices[l] = n;
                     count += 1;
-                    let compare = |i, j| compare_first_coordinate(&self.spikes[i],
-                                                                  &self.spikes[j]);
+                    let compare = |i, j| compare_first_coordinate(&self.spikes[i], &self.spikes[j]);
                     // Re-sort relevant range of indices
                     if l < k {
                         indices[l..k].sort_by(|&i, &j| compare(i, j));
                     } else {
-                        indices[k+1..=l].sort_by(|&i, &j| compare(i, j));
+                        indices[k + 1..=l].sort_by(|&i, &j| compare(i, j));
                     }
                 }
             }
@@ -315,4 +331,3 @@
         count
     }
 }
-
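
The `attempt_merge` hunk above leaves the interpolating merge rule itself unchanged: the two weights are summed and the new location is their |α|-weighted average. A standalone one-dimensional sketch of just that rule, using plain (position, weight) pairs instead of the crate's `DeltaMeasure` (hypothetical helper, for illustration only):

// Standalone sketch of the interpolating merge rule from attempt_merge above:
// the weights add, and the new location is the |α|-weighted average of the two.
fn merge_interp((xi, αi): (f64, f64), (xj, αj): (f64, f64)) -> (f64, f64) {
    let (αia, αja) = (αi.abs(), αj.abs());
    ((xi * αia + xj * αja) / (αia + αja), αi + αj)
}

fn main() {
    // Merging a unit spike at 0.0 with a weight-3 spike at 0.1 lands at 0.075,
    // closer to the heavier spike, with total weight 4.0.
    let (x, α) = merge_interp((0.0, 1.0), (0.1, 3.0));
    println!("merged spike at x = {x:.3} with α = {α:.1}");
}
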
--- a/src/prox_penalty.rs	Mon Feb 17 13:45:11 2025 -0500
+++ b/src/prox_penalty.rs	Mon Feb 17 13:51:50 2025 -0500
@@ -2,119 +2,114 @@
 Proximal penalty abstraction
 */
 
+use alg_tools::types::*;
 use numeric_literals::replace_float_literals;
-use alg_tools::types::*;
-use serde::{Serialize, Deserialize};
+use serde::{Deserialize, Serialize};
 
+use crate::measures::merging::SpikeMergingMethod;
+use crate::measures::RNDM;
+use crate::regularisation::RegTerm;
+use crate::subproblem::InnerSettings;
+use crate::tolerance::Tolerance;
+use crate::types::{IterInfo, RefinementSettings};
+use alg_tools::iterate::{AlgIterator, AlgIteratorIteration};
 use alg_tools::mapping::RealMapping;
 use alg_tools::nalgebra_support::ToNalgebraRealField;
-use alg_tools::iterate::{
-    AlgIteratorIteration,
-    AlgIterator,
-};
-use crate::measures::RNDM;
-use crate::types::{
-    RefinementSettings,
-    IterInfo,
-};
-use crate::subproblem::InnerSettings;
-use crate::tolerance::Tolerance;
-use crate::measures::merging::SpikeMergingMethod;
-use crate::regularisation::RegTerm;
 
+pub mod radon_squared;
 pub mod wave;
-pub mod radon_squared;
 pub use radon_squared::RadonSquared;
 
 /// Settings for the solution of the stepwise optimality condition.
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
 #[serde(default)]
-pub struct FBGenericConfig<F : Float> {
+pub struct FBGenericConfig<F: Float> {
     /// Tolerance for point insertion.
-    pub tolerance : Tolerance<F>,
+    pub tolerance: Tolerance<F>,
 
     /// Stop looking for predual maximum (where to insert a new point) below
     /// `tolerance` multiplied by this factor.
     ///
-    /// Not used by [`super::radon_fb`].
-    pub insertion_cutoff_factor : F,
+    /// Not used by [`crate::prox_penalty::radon_squared`].
+    pub insertion_cutoff_factor: F,
 
     /// Settings for branch and bound refinement when looking for predual maxima
-    pub refinement : RefinementSettings<F>,
+    pub refinement: RefinementSettings<F>,
 
     /// Maximum insertions within each outer iteration
     ///
-    /// Not used by [`super::radon_fb`].
-    pub max_insertions : usize,
+    /// Not used by [`crate::prox_penalty::radon_squared`].
+    pub max_insertions: usize,
 
     /// Pair `(n, m)` for maximum insertions `m` on first `n` iterations.
     ///
-    /// Not used by [`super::radon_fb`].
-    pub bootstrap_insertions : Option<(usize, usize)>,
+    /// Not used by [`crate::prox_penalty::radon_squared`].
+    pub bootstrap_insertions: Option<(usize, usize)>,
 
     /// Inner method settings
-    pub inner : InnerSettings<F>,
+    pub inner: InnerSettings<F>,
 
     /// Spike merging method
-    pub merging : SpikeMergingMethod<F>,
+    pub merging: SpikeMergingMethod<F>,
 
     /// Tolerance multiplier for merges
-    pub merge_tolerance_mult : F,
+    pub merge_tolerance_mult: F,
 
     /// Merge spikes after last step (even if merging not generally enabled)
-    pub final_merging : bool,
+    pub final_merging: bool,
 
     /// Use fitness as merging criterion. Implies worse convergence guarantees.
-    pub fitness_merging : bool,
+    pub fitness_merging: bool,
 
     /// Iterations between merging heuristic tries
-    pub merge_every : usize,
-
+    pub merge_every: usize,
     // /// Save $μ$ for postprocessing optimisation
     // pub postprocessing : bool
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float> Default for FBGenericConfig<F> {
+impl<F: Float> Default for FBGenericConfig<F> {
     fn default() -> Self {
         FBGenericConfig {
-            tolerance : Default::default(),
-            insertion_cutoff_factor : 1.0,
-            refinement : Default::default(),
-            max_insertions : 100,
+            tolerance: Default::default(),
+            insertion_cutoff_factor: 1.0,
+            refinement: Default::default(),
+            max_insertions: 100,
             //bootstrap_insertions : None,
-            bootstrap_insertions : Some((10, 1)),
-            inner : Default::default(),
-            merging : Default::default(),
-            final_merging : true,
-            fitness_merging : false,
-            merge_every : 10,
-            merge_tolerance_mult : 2.0,
+            bootstrap_insertions: Some((10, 1)),
+            inner: Default::default(),
+            merging: Default::default(),
+            final_merging: true,
+            fitness_merging: false,
+            merge_every: 10,
+            merge_tolerance_mult: 2.0,
             // postprocessing : false,
         }
     }
 }
 
-impl<F : Float> FBGenericConfig<F> {
+impl<F: Float> FBGenericConfig<F> {
     /// Check if merging should be attempted this iteration
-    pub fn merge_now<I : AlgIterator>(&self, state : &AlgIteratorIteration<I>) -> bool {
+    pub fn merge_now<I: AlgIterator>(&self, state: &AlgIteratorIteration<I>) -> bool {
         self.merging.enabled && state.iteration() % self.merge_every == 0
     }
 
     /// Returns the final merging method
     pub fn final_merging_method(&self) -> SpikeMergingMethod<F> {
-        SpikeMergingMethod{ enabled : self.final_merging, ..self.merging}
+        SpikeMergingMethod {
+            enabled: self.final_merging,
+            ..self.merging
+        }
     }
 }
 
-
 /// Trait for proximal penalties
-pub trait ProxPenalty<F, PreadjointCodomain, Reg, const N : usize>
+pub trait ProxPenalty<F, PreadjointCodomain, Reg, const N: usize>
 where
-    F : Float + ToNalgebraRealField,
-    Reg : RegTerm<F, N>,
+    F: Float + ToNalgebraRealField,
+    Reg: RegTerm<F, N>,
 {
-    type ReturnMapping : RealMapping<F, N>;
+    type ReturnMapping: RealMapping<F, N>;
 
     /// Insert new spikes into `μ` to approximately satisfy optimality conditions
     /// with the forward step term fixed to `τv`.
@@ -125,24 +120,23 @@
     /// `μ_base + ν_delta` is the base point, where `μ` and `μ_base` are assumed to have the same
     /// spike locations, while `ν_delta` may have different locations.
     ///
-    /// `τv` is mutable to allow [`alg_tools::bisection_tree::btfn::BTFN`] refinement.
+    /// `τv` is mutable to allow [`alg_tools::bisection_tree::BTFN`] refinement.
     /// Actual values of `τv` are not supposed to be mutated.
     fn insert_and_reweigh<I>(
         &self,
-        μ : &mut RNDM<F, N>,
-        τv : &mut PreadjointCodomain,
-        μ_base : &RNDM<F, N>,
+        μ: &mut RNDM<F, N>,
+        τv: &mut PreadjointCodomain,
+        μ_base: &RNDM<F, N>,
         ν_delta: Option<&RNDM<F, N>>,
-        τ : F,
-        ε : F,
-        config : &FBGenericConfig<F>,
-        reg : &Reg,
-        state : &AlgIteratorIteration<I>,
-        stats : &mut IterInfo<F, N>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+        reg: &Reg,
+        state: &AlgIteratorIteration<I>,
+        stats: &mut IterInfo<F, N>,
     ) -> (Option<Self::ReturnMapping>, bool)
     where
-        I : AlgIterator;
-
+        I: AlgIterator;
 
     /// Merge spikes, if possible.
     ///
@@ -151,39 +145,48 @@
     /// is set.
     fn merge_spikes(
         &self,
-        μ : &mut RNDM<F, N>,
-        τv : &mut PreadjointCodomain,
-        μ_base : &RNDM<F, N>,
+        μ: &mut RNDM<F, N>,
+        τv: &mut PreadjointCodomain,
+        μ_base: &RNDM<F, N>,
         ν_delta: Option<&RNDM<F, N>>,
-        τ : F,
-        ε : F,
-        config : &FBGenericConfig<F>,
-        reg : &Reg,
-        fitness : Option<impl Fn(&RNDM<F, N>) -> F>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+        reg: &Reg,
+        fitness: Option<impl Fn(&RNDM<F, N>) -> F>,
     ) -> usize;
 
     /// Merge spikes, if possible.
     ///
-    /// Unlike [`merge_spikes`], this variant only supports optimality condition based merging
+    /// Unlike [`Self::merge_spikes`], this variant only supports optimality condition based merging
     #[inline]
     fn merge_spikes_no_fitness(
         &self,
-        μ : &mut RNDM<F, N>,
-        τv : &mut PreadjointCodomain,
-        μ_base : &RNDM<F, N>,
+        μ: &mut RNDM<F, N>,
+        τv: &mut PreadjointCodomain,
+        μ_base: &RNDM<F, N>,
         ν_delta: Option<&RNDM<F, N>>,
-        τ : F,
-        ε : F,
-        config : &FBGenericConfig<F>,
-        reg : &Reg,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+        reg: &Reg,
     ) -> usize {
         /// This is a hack to create a `None` of the same type as a `Some`
         // for the `impl Fn` parameter of `merge_spikes`.
         #[inline]
-        fn into_none<T>(_ : Option<T>) -> Option<T>{
+        fn into_none<T>(_: Option<T>) -> Option<T> {
             None
         }
-        self.merge_spikes(μ, τv, μ_base, ν_delta, τ, ε, config, reg,
-                          into_none(Some(|_ : &RNDM<F, N>| F::ZERO)))
+        self.merge_spikes(
+            μ,
+            τv,
+            μ_base,
+            ν_delta,
+            τ,
+            ε,
+            config,
+            reg,
+            into_none(Some(|_: &RNDM<F, N>| F::ZERO)),
+        )
     }
 }
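
`FBGenericConfig::merge_now` above attempts merging only when the heuristic is enabled and the iteration count is a multiple of `merge_every` (10 by default, with merging itself disabled by default). A standalone sketch of that schedule, with the `AlgIteratorIteration` state reduced to a bare iteration counter (hypothetical type, for illustration only):

// Standalone sketch of the merge scheduling in FBGenericConfig::merge_now:
// merging is attempted only when enabled and every `merge_every` iterations.
struct MergeSchedule {
    enabled: bool,
    merge_every: usize,
}

impl MergeSchedule {
    fn merge_now(&self, iteration: usize) -> bool {
        self.enabled && iteration % self.merge_every == 0
    }
}

fn main() {
    // merge_every = 10 as in the Default impl above; merging enabled here
    // to show which iterations would attempt a merge.
    let schedule = MergeSchedule { enabled: true, merge_every: 10 };
    let iters: Vec<usize> = (1..=30).filter(|&i| schedule.merge_now(i)).collect();
    println!("merging attempted on iterations {iters:?}"); // [10, 20, 30]
}
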
--- a/src/regularisation.rs	Mon Feb 17 13:45:11 2025 -0500
+++ b/src/regularisation.rs	Mon Feb 17 13:51:50 2025 -0500
@@ -2,53 +2,41 @@
 Regularisation terms
 */
 
-use numeric_literals::replace_float_literals;
-use serde::{Serialize, Deserialize};
-use alg_tools::norms::Norm;
-use alg_tools::linops::Mapping;
-use alg_tools::instance::Instance;
-use alg_tools::loc::Loc;
-use crate::types::*;
-use crate::measures::{
-    RNDM,
-    DeltaMeasure,
-    Radon,
-};
-use crate::fb::FBGenericConfig;
 #[allow(unused_imports)] // Used by documentation.
 use crate::fb::pointsource_fb_reg;
+use crate::fb::FBGenericConfig;
+use crate::measures::{DeltaMeasure, Radon, RNDM};
 #[allow(unused_imports)] // Used by documentation.
 use crate::sliding_fb::pointsource_sliding_fb_reg;
+use crate::types::*;
+use alg_tools::instance::Instance;
+use alg_tools::linops::Mapping;
+use alg_tools::loc::Loc;
+use alg_tools::norms::Norm;
+use numeric_literals::replace_float_literals;
+use serde::{Deserialize, Serialize};
 
-use nalgebra::{DVector, DMatrix};
-use alg_tools::nalgebra_support::ToNalgebraRealField;
+use crate::subproblem::{
+    l1squared_nonneg::l1squared_nonneg, l1squared_unconstrained::l1squared_unconstrained,
+    nonneg::quadratic_nonneg, unconstrained::quadratic_unconstrained,
+};
 use alg_tools::bisection_tree::{
-    BTFN,
-    Bounds,
-    BTSearch,
-    P2Minimise,
-    SupportGenerator,
-    LocalAnalysis,
-    Bounded,
-};
-use crate::subproblem::{
-    nonneg::quadratic_nonneg,
-    unconstrained::quadratic_unconstrained,
-    l1squared_unconstrained::l1squared_unconstrained,
-    l1squared_nonneg::l1squared_nonneg
+    BTSearch, Bounded, Bounds, LocalAnalysis, P2Minimise, SupportGenerator, BTFN,
 };
 use alg_tools::iterate::AlgIteratorFactory;
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use nalgebra::{DMatrix, DVector};
 
-use std::cmp::Ordering::{Greater, Less, Equal};
+use std::cmp::Ordering::{Equal, Greater, Less};
 
 /// The regularisation term $α\\|μ\\|\_{ℳ(Ω)} + δ_{≥ 0}(μ)$ for [`pointsource_fb_reg`] and other
 /// algorithms.
 ///
 /// The only member of the struct is the regularisation parameter α.
 #[derive(Copy, Clone, Debug, Serialize, Deserialize)]
-pub struct NonnegRadonRegTerm<F : Float>(pub F /* α */);
+pub struct NonnegRadonRegTerm<F: Float>(pub F /* α */);
 
-impl<'a, F : Float> NonnegRadonRegTerm<F> {
+impl<'a, F: Float> NonnegRadonRegTerm<F> {
     /// Returns the regularisation parameter
     pub fn α(&self) -> F {
         let &NonnegRadonRegTerm(α) = self;
@@ -56,25 +44,24 @@
     }
 }
 
-impl<'a, F : Float, const N : usize> Mapping<RNDM<F, N>>
-for NonnegRadonRegTerm<F> {
+impl<'a, F: Float, const N: usize> Mapping<RNDM<F, N>> for NonnegRadonRegTerm<F> {
     type Codomain = F;
-    
-    fn apply<I>(&self, μ : I) -> F
-    where I : Instance<RNDM<F, N>> {
+
+    fn apply<I>(&self, μ: I) -> F
+    where
+        I: Instance<RNDM<F, N>>,
+    {
         self.α() * μ.eval(|x| x.norm(Radon))
     }
 }
 
-
 /// The regularisation term $α\|μ\|_{ℳ(Ω)}$ for [`pointsource_fb_reg`].
 ///
 /// The only member of the struct is the regularisation parameter α.
 #[derive(Copy, Clone, Debug, Serialize, Deserialize)]
-pub struct RadonRegTerm<F : Float>(pub F /* α */);
+pub struct RadonRegTerm<F: Float>(pub F /* α */);
 
-
-impl<'a, F : Float> RadonRegTerm<F> {
+impl<'a, F: Float> RadonRegTerm<F> {
     /// Returns the regularisation parameter
     pub fn α(&self) -> F {
         let &RadonRegTerm(α) = self;
@@ -82,31 +69,33 @@
     }
 }
 
-impl<'a, F : Float, const N : usize> Mapping<RNDM<F, N>>
-for RadonRegTerm<F> {
+impl<'a, F: Float, const N: usize> Mapping<RNDM<F, N>> for RadonRegTerm<F> {
     type Codomain = F;
-    
-    fn apply<I>(&self, μ : I) -> F
-    where I : Instance<RNDM<F, N>> {
+
+    fn apply<I>(&self, μ: I) -> F
+    where
+        I: Instance<RNDM<F, N>>,
+    {
         self.α() * μ.eval(|x| x.norm(Radon))
     }
 }
 
 /// Regularisation term configuration
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
-pub enum Regularisation<F : Float> {
+pub enum Regularisation<F: Float> {
     /// $α \\|μ\\|\_{ℳ(Ω)}$
     Radon(F),
     /// $α\\|μ\\|\_{ℳ(Ω)} + δ_{≥ 0}(μ)$
     NonnegRadon(F),
 }
 
-impl<'a, F : Float, const N : usize> Mapping<RNDM<F, N>>
-for Regularisation<F> {
+impl<'a, F: Float, const N: usize> Mapping<RNDM<F, N>> for Regularisation<F> {
     type Codomain = F;
 
-    fn apply<I>(&self, μ : I) -> F
-    where I : Instance<RNDM<F, N>> {
+    fn apply<I>(&self, μ: I) -> F
+    where
+        I: Instance<RNDM<F, N>>,
+    {
         match *self {
             Self::Radon(α) => RadonRegTerm(α).apply(μ),
             Self::NonnegRadon(α) => NonnegRadonRegTerm(α).apply(μ),
@@ -115,8 +104,9 @@
 }
 
 /// Abstraction of regularisation terms.
-pub trait RegTerm<F : Float + ToNalgebraRealField, const N : usize>
-: Mapping<RNDM<F, N>, Codomain = F> {
+pub trait RegTerm<F: Float + ToNalgebraRealField, const N: usize>:
+    Mapping<RNDM<F, N>, Codomain = F>
+{
     /// Approximately solve the problem
     /// <div>$$
     ///     \min_{x ∈ ℝ^n} \frac{1}{2} x^⊤Ax - g^⊤ x + τ G(x)
@@ -129,13 +119,13 @@
     /// Returns the number of iterations taken.
     fn solve_findim(
         &self,
-        mA : &DMatrix<F::MixedType>,
-        g : &DVector<F::MixedType>,
-        τ : F,
-        x : &mut DVector<F::MixedType>,
-        mA_normest : F,
-        ε : F,
-        config : &FBGenericConfig<F>
+        mA: &DMatrix<F::MixedType>,
+        g: &DVector<F::MixedType>,
+        τ: F,
+        x: &mut DVector<F::MixedType>,
+        mA_normest: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
     ) -> usize;
 
     /// Approximately solve the problem
@@ -147,12 +137,12 @@
     /// Returns the number of iterations taken.
     fn solve_findim_l1squared(
         &self,
-        y : &DVector<F::MixedType>,
-        g : &DVector<F::MixedType>,
-        τ : F,
-        x : &mut DVector<F::MixedType>,
-        ε : F,
-        config : &FBGenericConfig<F>
+        y: &DVector<F::MixedType>,
+        g: &DVector<F::MixedType>,
+        τ: F,
+        x: &mut DVector<F::MixedType>,
+        ε: F,
+        config: &FBGenericConfig<F>,
     ) -> usize;
 
     /// Find a point where `d` may violate the tolerance `ε`.
@@ -166,22 +156,24 @@
     /// and a boolean indicating whether the found point is in bounds.
     fn find_tolerance_violation<G, BT>(
         &self,
-        d : &mut BTFN<F, G, BT, N>,
-        τ : F,
-        ε : F,
-        skip_by_rough_check : bool,
-        config : &FBGenericConfig<F>,
+        d: &mut BTFN<F, G, BT, N>,
+        τ: F,
+        ε: F,
+        skip_by_rough_check: bool,
+        config: &FBGenericConfig<F>,
     ) -> Option<(Loc<F, N>, F, bool)>
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N> {
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
         self.find_tolerance_violation_slack(d, τ, ε, skip_by_rough_check, config, F::ZERO)
     }
 
     /// Find a point where `d` may violate the tolerance `ε`.
     ///
     /// This version includes a `slack` parameter to expand the tolerances.
-    /// It is used for Radon-norm squared proximal term in [`crate::radon_fb`].
+    /// It is used for Radon-norm squared proximal term in [`crate::prox_penalty::radon_squared`].
     ///
     /// If `skip_by_rough_check` is set, do not find the point if a rough check indicates that we
     /// are in bounds. `ε` is the current main tolerance and `τ` a scaling factor for the
@@ -192,56 +184,58 @@
     /// and a boolean indicating whether the found point is in bounds.
     fn find_tolerance_violation_slack<G, BT>(
         &self,
-        d : &mut BTFN<F, G, BT, N>,
-        τ : F,
-        ε : F,
-        skip_by_rough_check : bool,
-        config : &FBGenericConfig<F>,
-        slack : F,
+        d: &mut BTFN<F, G, BT, N>,
+        τ: F,
+        ε: F,
+        skip_by_rough_check: bool,
+        config: &FBGenericConfig<F>,
+        slack: F,
     ) -> Option<(Loc<F, N>, F, bool)>
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N>;
-
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>;
 
     /// Verify that `d` is in bounds `ε` for a merge candidate `μ`
     ///
     /// `ε` is the current main tolerance and `τ` a scaling factor for the regulariser.
     fn verify_merge_candidate<G, BT>(
         &self,
-        d : &mut BTFN<F, G, BT, N>,
-        μ : &RNDM<F, N>,
-        τ : F,
-        ε : F,
-        config : &FBGenericConfig<F>,
+        d: &mut BTFN<F, G, BT, N>,
+        μ: &RNDM<F, N>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
     ) -> bool
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N>;
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>;
 
     /// Verify that `d` is in bounds `ε` for a merge candidate `μ`
     ///
-    /// This version is s used for Radon-norm squared proximal term in [`crate::radon_fb`].
+    /// This version is used for the Radon-norm squared proximal term in
+    /// [`crate::prox_penalty::radon_squared`].
     /// The [measures][crate::measures::DiscreteMeasure] `μ` and `radon_μ` are supposed to have
     /// same coordinates at same agreeing indices.
     ///
     /// `ε` is the current main tolerance and `τ` a scaling factor for the regulariser.
     fn verify_merge_candidate_radonsq<G, BT>(
         &self,
-        d : &mut BTFN<F, G, BT, N>,
-        μ : &RNDM<F, N>,
-        τ : F,
-        ε : F,
-        config : &FBGenericConfig<F>,
-        radon_μ :&RNDM<F, N>,
+        d: &mut BTFN<F, G, BT, N>,
+        μ: &RNDM<F, N>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+        radon_μ: &RNDM<F, N>,
     ) -> bool
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N>;
-
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>;
 
     /// TODO: document this
-    fn target_bounds(&self, τ : F, ε : F) -> Option<Bounds<F>>;
+    fn target_bounds(&self, τ: F, ε: F) -> Option<Bounds<F>>;
 
     /// Returns a scaling factor for the tolerance sequence.
     ///
@@ -250,78 +244,77 @@
 }
 
 /// Abstraction of regularisation terms for [`pointsource_sliding_fb_reg`].
-pub trait SlidingRegTerm<F : Float + ToNalgebraRealField, const N : usize>
-: RegTerm<F, N> {
+pub trait SlidingRegTerm<F: Float + ToNalgebraRealField, const N: usize>: RegTerm<F, N> {
     /// Calculate $τ[w(z) - w(y)]$ for some w in the subdifferential of the regularisation
     /// term, such that $-ε ≤ τw - d ≤ ε$.
     fn goodness<G, BT>(
         &self,
-        d : &mut BTFN<F, G, BT, N>,
-        μ : &RNDM<F, N>,
-        y : &Loc<F, N>,
-        z : &Loc<F, N>,
-        τ : F,
-        ε : F,
-        config : &FBGenericConfig<F>,
+        d: &mut BTFN<F, G, BT, N>,
+        μ: &RNDM<F, N>,
+        y: &Loc<F, N>,
+        z: &Loc<F, N>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
     ) -> F
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N>;
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>;
 
     /// Convert bound on the regulariser to a bound on the Radon norm
-    fn radon_norm_bound(&self, b : F) -> F;
+    fn radon_norm_bound(&self, b: F) -> F;
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float + ToNalgebraRealField, const N : usize> RegTerm<F, N>
-for NonnegRadonRegTerm<F>
-where Cube<F, N> : P2Minimise<Loc<F, N>, F> {
+impl<F: Float + ToNalgebraRealField, const N: usize> RegTerm<F, N> for NonnegRadonRegTerm<F>
+where
+    Cube<F, N>: P2Minimise<Loc<F, N>, F>,
+{
     fn solve_findim(
         &self,
-        mA : &DMatrix<F::MixedType>,
-        g : &DVector<F::MixedType>,
-        τ : F,
-        x : &mut DVector<F::MixedType>,
-        mA_normest : F,
-        ε : F,
-        config : &FBGenericConfig<F>
+        mA: &DMatrix<F::MixedType>,
+        g: &DVector<F::MixedType>,
+        τ: F,
+        x: &mut DVector<F::MixedType>,
+        mA_normest: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
     ) -> usize {
         let inner_tolerance = ε * config.inner.tolerance_mult;
         let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
-        quadratic_nonneg(mA, g, τ * self.α(), x,
-                         mA_normest, &config.inner, inner_it)
-
+        quadratic_nonneg(mA, g, τ * self.α(), x, mA_normest, &config.inner, inner_it)
     }
 
     fn solve_findim_l1squared(
         &self,
-        y : &DVector<F::MixedType>,
-        g : &DVector<F::MixedType>,
-        τ : F,
-        x : &mut DVector<F::MixedType>,
-        ε : F,
-        config : &FBGenericConfig<F>
+        y: &DVector<F::MixedType>,
+        g: &DVector<F::MixedType>,
+        τ: F,
+        x: &mut DVector<F::MixedType>,
+        ε: F,
+        config: &FBGenericConfig<F>,
     ) -> usize {
         let inner_tolerance = ε * config.inner.tolerance_mult;
         let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
-        l1squared_nonneg(y, g, τ * self.α(), 1.0, x,
-                         &config.inner, inner_it)
+        l1squared_nonneg(y, g, τ * self.α(), 1.0, x, &config.inner, inner_it)
     }
 
-
     #[inline]
     fn find_tolerance_violation_slack<G, BT>(
         &self,
-        d : &mut BTFN<F, G, BT, N>,
-        τ : F,
-        ε : F,
-        skip_by_rough_check : bool,
-        config : &FBGenericConfig<F>,
-        slack : F
+        d: &mut BTFN<F, G, BT, N>,
+        τ: F,
+        ε: F,
+        skip_by_rough_check: bool,
+        config: &FBGenericConfig<F>,
+        slack: F,
     ) -> Option<(Loc<F, N>, F, bool)>
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N> {
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
         let τα = τ * self.α();
         let keep_above = -τα - slack - ε;
         let minimise_below = -τα - slack - ε * config.insertion_cutoff_factor;
@@ -333,22 +326,28 @@
             None
         } else {
             // If the rough check didn't indicate no insertion needed, find minimising point.
-            d.minimise_below(minimise_below, refinement_tolerance, config.refinement.max_steps)
-             .map(|(ξ, v_ξ)| (ξ, v_ξ, v_ξ >= keep_above))
+            d.minimise_below(
+                minimise_below,
+                refinement_tolerance,
+                config.refinement.max_steps,
+            )
+            .map(|(ξ, v_ξ)| (ξ, v_ξ, v_ξ >= keep_above))
         }
     }
 
     fn verify_merge_candidate<G, BT>(
         &self,
-        d : &mut BTFN<F, G, BT, N>,
-        μ : &RNDM<F, N>,
-        τ : F,
-        ε : F,
-        config : &FBGenericConfig<F>,
+        d: &mut BTFN<F, G, BT, N>,
+        μ: &RNDM<F, N>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
     ) -> bool
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N> {
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
         let τα = τ * self.α();
         let refinement_tolerance = ε * config.refinement.tolerance_mult;
         let merge_tolerance = config.merge_tolerance_mult * ε;
@@ -356,31 +355,32 @@
         let keep_supp_below = -τα + merge_tolerance;
         let bnd = d.bounds();
 
-        return (
-            bnd.upper() <= keep_supp_below
-            ||
-            μ.iter_spikes().all(|&DeltaMeasure{ α, ref x }| {
-                (α == 0.0) || d.apply(x) <= keep_supp_below
-            })
-         ) && (
-            bnd.lower() >= keep_above
-            ||
-            d.has_lower_bound(keep_above, refinement_tolerance, config.refinement.max_steps)
-        )
+        return (bnd.upper() <= keep_supp_below
+            || μ
+                .iter_spikes()
+                .all(|&DeltaMeasure { α, ref x }| (α == 0.0) || d.apply(x) <= keep_supp_below))
+            && (bnd.lower() >= keep_above
+                || d.has_lower_bound(
+                    keep_above,
+                    refinement_tolerance,
+                    config.refinement.max_steps,
+                ));
     }
 
     fn verify_merge_candidate_radonsq<G, BT>(
         &self,
-        d : &mut BTFN<F, G, BT, N>,
-        μ : &RNDM<F, N>,
-        τ : F,
-        ε : F,
-        config : &FBGenericConfig<F>,
-        radon_μ :&RNDM<F, N>,
+        d: &mut BTFN<F, G, BT, N>,
+        μ: &RNDM<F, N>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+        radon_μ: &RNDM<F, N>,
     ) -> bool
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N> {
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
         let τα = τ * self.α();
         let refinement_tolerance = ε * config.refinement.tolerance_mult;
         let merge_tolerance = config.merge_tolerance_mult * ε;
@@ -388,9 +388,8 @@
         let bnd = d.bounds();
 
         return {
-            μ.both_matching(radon_μ)
-             .all(|(α, rα, x)| {
-                let v = -d.apply(x); // TODO: observe ad hoc negation here, after minus_τv 
+            μ.both_matching(radon_μ).all(|(α, rα, x)| {
+                let v = -d.apply(x); // TODO: observe ad hoc negation here, after minus_τv
                                      // switch to τv.
                 let (l1, u1) = match α.partial_cmp(&0.0).unwrap_or(Equal) {
                     Greater => (τα, τα),
@@ -405,17 +404,20 @@
                 // TODO: both fail.
                 (l1 + l2 - merge_tolerance <= v) && (v <= u1 + u2 + merge_tolerance)
             })
-         } && {
+        } && {
             let keep_above = -τα - slack - merge_tolerance;
             bnd.lower() <= keep_above
-            ||
-            d.has_lower_bound(keep_above, refinement_tolerance, config.refinement.max_steps)
-         }
+                || d.has_lower_bound(
+                    keep_above,
+                    refinement_tolerance,
+                    config.refinement.max_steps,
+                )
+        };
     }
 
-    fn target_bounds(&self, τ : F, ε : F) -> Option<Bounds<F>> {
+    fn target_bounds(&self, τ: F, ε: F) -> Option<Bounds<F>> {
         let τα = τ * self.α();
-        Some(Bounds(τα - ε,  τα + ε))
+        Some(Bounds(τα - ε, τα + ε))
     }
 
     fn tolerance_scaling(&self) -> F {
@@ -424,78 +426,82 @@
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float + ToNalgebraRealField, const N : usize> SlidingRegTerm<F, N>
-for NonnegRadonRegTerm<F>
-where Cube<F, N> : P2Minimise<Loc<F, N>, F> {
-
+impl<F: Float + ToNalgebraRealField, const N: usize> SlidingRegTerm<F, N> for NonnegRadonRegTerm<F>
+where
+    Cube<F, N>: P2Minimise<Loc<F, N>, F>,
+{
     fn goodness<G, BT>(
         &self,
-        d : &mut BTFN<F, G, BT, N>,
-        _μ : &RNDM<F, N>,
-        y : &Loc<F, N>,
-        z : &Loc<F, N>,
-        τ : F,
-        ε : F,
-        _config : &FBGenericConfig<F>,
+        d: &mut BTFN<F, G, BT, N>,
+        _μ: &RNDM<F, N>,
+        y: &Loc<F, N>,
+        z: &Loc<F, N>,
+        τ: F,
+        ε: F,
+        _config: &FBGenericConfig<F>,
     ) -> F
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N> {
-        let w = |x| 1.0.min((ε + d.apply(x))/(τ * self.α()));
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
+        let w = |x| 1.0.min((ε + d.apply(x)) / (τ * self.α()));
         w(z) - w(y)
     }
 
-    fn radon_norm_bound(&self, b : F) -> F {
+    fn radon_norm_bound(&self, b: F) -> F {
         b / self.α()
     }
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float + ToNalgebraRealField, const N : usize> RegTerm<F, N> for RadonRegTerm<F>
-where Cube<F, N> : P2Minimise<Loc<F, N>, F> {
+impl<F: Float + ToNalgebraRealField, const N: usize> RegTerm<F, N> for RadonRegTerm<F>
+where
+    Cube<F, N>: P2Minimise<Loc<F, N>, F>,
+{
     fn solve_findim(
         &self,
-        mA : &DMatrix<F::MixedType>,
-        g : &DVector<F::MixedType>,
-        τ : F,
-        x : &mut DVector<F::MixedType>,
+        mA: &DMatrix<F::MixedType>,
+        g: &DVector<F::MixedType>,
+        τ: F,
+        x: &mut DVector<F::MixedType>,
         mA_normest: F,
-        ε : F,
-        config : &FBGenericConfig<F>
+        ε: F,
+        config: &FBGenericConfig<F>,
     ) -> usize {
         let inner_tolerance = ε * config.inner.tolerance_mult;
         let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
-        quadratic_unconstrained(mA, g, τ * self.α(), x,
-                                mA_normest, &config.inner, inner_it)
+        quadratic_unconstrained(mA, g, τ * self.α(), x, mA_normest, &config.inner, inner_it)
     }
 
     fn solve_findim_l1squared(
         &self,
-        y : &DVector<F::MixedType>,
-        g : &DVector<F::MixedType>,
-        τ : F,
-        x : &mut DVector<F::MixedType>,
-        ε : F,
-        config : &FBGenericConfig<F>
+        y: &DVector<F::MixedType>,
+        g: &DVector<F::MixedType>,
+        τ: F,
+        x: &mut DVector<F::MixedType>,
+        ε: F,
+        config: &FBGenericConfig<F>,
     ) -> usize {
         let inner_tolerance = ε * config.inner.tolerance_mult;
         let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
-        l1squared_unconstrained(y, g, τ * self.α(), 1.0, x,
-                                &config.inner, inner_it)
+        l1squared_unconstrained(y, g, τ * self.α(), 1.0, x, &config.inner, inner_it)
     }
 
     fn find_tolerance_violation_slack<G, BT>(
         &self,
-        d : &mut BTFN<F, G, BT, N>,
-        τ : F,
-        ε : F,
-        skip_by_rough_check : bool,
-        config : &FBGenericConfig<F>,
-        slack : F,
+        d: &mut BTFN<F, G, BT, N>,
+        τ: F,
+        ε: F,
+        skip_by_rough_check: bool,
+        config: &FBGenericConfig<F>,
+        slack: F,
     ) -> Option<(Loc<F, N>, F, bool)>
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N> {
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
         let τα = τ * self.α();
         let keep_below = τα + slack + ε;
         let keep_above = -(τα + slack) - ε;
@@ -509,10 +515,16 @@
             None
         } else {
             // If the rough check didn't indicate no insertion needed, find maximising point.
-            let mx = d.maximise_above(maximise_above, refinement_tolerance,
-                                      config.refinement.max_steps);
-            let mi = d.minimise_below(minimise_below, refinement_tolerance,
-                                      config.refinement.max_steps);
+            let mx = d.maximise_above(
+                maximise_above,
+                refinement_tolerance,
+                config.refinement.max_steps,
+            );
+            let mi = d.minimise_below(
+                minimise_below,
+                refinement_tolerance,
+                config.refinement.max_steps,
+            );
 
             match (mx, mi) {
                 (None, None) => None,
@@ -531,15 +543,17 @@
 
     fn verify_merge_candidate<G, BT>(
         &self,
-        d : &mut BTFN<F, G, BT, N>,
-        μ : &RNDM<F, N>,
-        τ : F,
-        ε : F,
-        config : &FBGenericConfig<F>,
+        d: &mut BTFN<F, G, BT, N>,
+        μ: &RNDM<F, N>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
     ) -> bool
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F> + LocalAnalysis<F, Bounds<F>, N> {
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
         let τα = τ * self.α();
         let refinement_tolerance = ε * config.refinement.tolerance_mult;
         let merge_tolerance = config.merge_tolerance_mult * ε;
@@ -549,41 +563,42 @@
         let keep_supp_neg_below = -τα + merge_tolerance;
         let bnd = d.bounds();
 
-        return (
-            (bnd.lower() >= keep_supp_pos_above && bnd.upper() <= keep_supp_neg_below)
-            ||
-            μ.iter_spikes().all(|&DeltaMeasure{ α : β, ref x }| {
-                match β.partial_cmp(&0.0) {
+        return ((bnd.lower() >= keep_supp_pos_above && bnd.upper() <= keep_supp_neg_below)
+            || μ
+                .iter_spikes()
+                .all(|&DeltaMeasure { α: β, ref x }| match β.partial_cmp(&0.0) {
                     Some(Greater) => d.apply(x) >= keep_supp_pos_above,
                     Some(Less) => d.apply(x) <= keep_supp_neg_below,
                     _ => true,
-                }
-            })
-        ) && (
-            bnd.upper() <= keep_below
-            ||
-            d.has_upper_bound(keep_below, refinement_tolerance,
-                              config.refinement.max_steps)
-        ) && (
-            bnd.lower() >= keep_above
-            ||
-            d.has_lower_bound(keep_above, refinement_tolerance,
-                              config.refinement.max_steps)
-        )
+                }))
+            && (bnd.upper() <= keep_below
+                || d.has_upper_bound(
+                    keep_below,
+                    refinement_tolerance,
+                    config.refinement.max_steps,
+                ))
+            && (bnd.lower() >= keep_above
+                || d.has_lower_bound(
+                    keep_above,
+                    refinement_tolerance,
+                    config.refinement.max_steps,
+                ));
     }
 
     fn verify_merge_candidate_radonsq<G, BT>(
         &self,
-        d : &mut BTFN<F, G, BT, N>,
-        μ : &RNDM<F, N>,
-        τ : F,
-        ε : F,
-        config : &FBGenericConfig<F>,
-        radon_μ : &RNDM<F, N>,
+        d: &mut BTFN<F, G, BT, N>,
+        μ: &RNDM<F, N>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+        radon_μ: &RNDM<F, N>,
     ) -> bool
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F> + LocalAnalysis<F, Bounds<F>, N> {
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
         let τα = τ * self.α();
         let refinement_tolerance = ε * config.refinement.tolerance_mult;
         let merge_tolerance = config.merge_tolerance_mult * ε;
@@ -591,8 +606,7 @@
         let bnd = d.bounds();
 
         return {
-            μ.both_matching(radon_μ)
-             .all(|(α, rα, x)| {
+            μ.both_matching(radon_μ).all(|(α, rα, x)| {
                 let v = d.apply(x);
                 let (l1, u1) = match α.partial_cmp(&0.0).unwrap_or(Equal) {
                     Greater => (τα, τα),
@@ -606,24 +620,28 @@
                 };
                 (l1 + l2 - merge_tolerance <= v) && (v <= u1 + u2 + merge_tolerance)
             })
-         } && {
+        } && {
             let keep_below = τα + slack + merge_tolerance;
             bnd.upper() <= keep_below
-            ||
-            d.has_upper_bound(keep_below, refinement_tolerance,
-                              config.refinement.max_steps)
+                || d.has_upper_bound(
+                    keep_below,
+                    refinement_tolerance,
+                    config.refinement.max_steps,
+                )
         } && {
             let keep_above = -τα - slack - merge_tolerance;
             bnd.lower() >= keep_above
-            ||
-            d.has_lower_bound(keep_above, refinement_tolerance,
-                              config.refinement.max_steps)
-        }
+                || d.has_lower_bound(
+                    keep_above,
+                    refinement_tolerance,
+                    config.refinement.max_steps,
+                )
+        };
     }
 
-    fn target_bounds(&self, τ : F, ε : F) -> Option<Bounds<F>> {
+    fn target_bounds(&self, τ: F, ε: F) -> Option<Bounds<F>> {
         let τα = τ * self.α();
-        Some(Bounds(-τα - ε,  τα + ε))
+        Some(Bounds(-τα - ε, τα + ε))
     }
 
     fn tolerance_scaling(&self) -> F {
@@ -632,34 +650,34 @@
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float + ToNalgebraRealField, const N : usize> SlidingRegTerm<F, N>
-for RadonRegTerm<F>
-where Cube<F, N> : P2Minimise<Loc<F, N>, F> {
-
+impl<F: Float + ToNalgebraRealField, const N: usize> SlidingRegTerm<F, N> for RadonRegTerm<F>
+where
+    Cube<F, N>: P2Minimise<Loc<F, N>, F>,
+{
     fn goodness<G, BT>(
         &self,
-        d : &mut BTFN<F, G, BT, N>,
-        _μ : &RNDM<F, N>,
-        y : &Loc<F, N>,
-        z : &Loc<F, N>,
-        τ : F,
-        ε : F,
-        _config : &FBGenericConfig<F>,
+        d: &mut BTFN<F, G, BT, N>,
+        _μ: &RNDM<F, N>,
+        y: &Loc<F, N>,
+        z: &Loc<F, N>,
+        τ: F,
+        ε: F,
+        _config: &FBGenericConfig<F>,
     ) -> F
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N> {
-
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
         let α = self.α();
         let w = |x| {
             let dx = d.apply(x);
-            ((-ε + dx)/(τ * α)).max(1.0.min(ε + dx)/(τ * α))
+            ((-ε + dx) / (τ * α)).max(1.0.min(ε + dx) / (τ * α))
         };
         w(z) - w(y)
     }
 
-    fn radon_norm_bound(&self, b : F) -> F {
+    fn radon_norm_bound(&self, b: F) -> F {
         b / self.α()
     }
-}
\ No newline at end of file
+}
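
Side note (not part of the changeset above): the hunks for `find_tolerance_violation_slack` and `target_bounds` both revolve around keeping the dual variable d inside the band |d| ≤ τα, widened by the slack and the current tolerance ε. A minimal standalone sketch of that interval logic follows, using plain f64 in place of the crate's generic `F: Float` and hypothetical inputs `alpha`, `tau`, `eps`, `slack`; it is an illustration of the formulas visible in the diff, not code from the repository.

// Sketch only: mirrors the band arithmetic from the diff above with concrete f64 values.

/// Closed interval [lower, upper], standing in for `alg_tools`' `Bounds<F>`.
#[derive(Debug, Clone, Copy)]
struct Bounds(f64, f64);

/// Dual-variable bounds |d| ≤ τα + ε for the unconstrained Radon-norm term,
/// as returned by `target_bounds` in the diff.
fn target_bounds(alpha: f64, tau: f64, eps: f64) -> Bounds {
    let tau_alpha = tau * alpha;
    Bounds(-tau_alpha - eps, tau_alpha + eps)
}

/// True if a sampled dual value `d` stays inside the slack-widened band,
/// i.e. the rough check would report no tolerance violation at that point.
fn within_tolerance(d: f64, alpha: f64, tau: f64, eps: f64, slack: f64) -> bool {
    let tau_alpha = tau * alpha;
    let keep_below = tau_alpha + slack + eps;
    let keep_above = -(tau_alpha + slack) - eps;
    keep_above <= d && d <= keep_below
}

fn main() {
    // Hypothetical parameter values, chosen only for the demonstration.
    let (alpha, tau, eps, slack) = (0.1, 0.99, 1e-3, 0.0);
    println!("{:?}", target_bounds(alpha, tau, eps));
    assert!(within_tolerance(0.05, alpha, tau, eps, slack));
    assert!(!within_tolerance(0.2, alpha, tau, eps, slack));
}

In the actual code the check is not pointwise: the violation search walks the branch-and-bound representation of d (`BTFN` with `maximise_above`/`minimise_below`) to locate a maximising or minimising point outside the band, which this sketch deliberately omits.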
