Make some math in documentation render v2.0.0-pre

author      Tuomo Valkonen <tuomov@iki.fi>
date        Mon, 17 Feb 2025 14:10:52 -0500
changeset   54:b3312eee105c
parent      53:92cae2e8f598
child       55:19e2140ba458

Make some math in documentation render

src/seminorms.rs
src/subproblem/l1squared_unconstrained.rs
--- a/src/seminorms.rs	Mon Feb 17 14:10:45 2025 -0500
+++ b/src/seminorms.rs	Mon Feb 17 14:10:52 2025 -0500
@@ -4,31 +4,31 @@
 the trait [`DiscreteMeasureOp`].
 */
 
-use std::iter::Zip;
-use std::ops::RangeFrom;
-use alg_tools::types::*;
+use crate::measures::{DeltaMeasure, DiscreteMeasure, Radon, SpikeIter, RNDM};
+use alg_tools::bisection_tree::*;
+use alg_tools::instance::Instance;
+use alg_tools::iter::{FilterMapX, Mappable};
+use alg_tools::linops::{BoundedLinear, Linear, Mapping};
 use alg_tools::loc::Loc;
-use alg_tools::sets::Cube;
-use alg_tools::bisection_tree::*;
 use alg_tools::mapping::RealMapping;
-use alg_tools::iter::{Mappable, FilterMapX};
-use alg_tools::linops::{Mapping, Linear, BoundedLinear};
-use alg_tools::instance::Instance;
 use alg_tools::nalgebra_support::ToNalgebraRealField;
 use alg_tools::norms::Linfinity;
-use crate::measures::{DiscreteMeasure, DeltaMeasure, SpikeIter, Radon, RNDM};
+use alg_tools::sets::Cube;
+use alg_tools::types::*;
+use itertools::Itertools;
 use nalgebra::DMatrix;
+use std::iter::Zip;
 use std::marker::PhantomData;
-use itertools::Itertools;
+use std::ops::RangeFrom;
 
 /// Abstraction for operators $𝒟 ∈ 𝕃(𝒵(Ω); C_c(Ω))$.
 ///
 /// Here $𝒵(Ω) ⊂ ℳ(Ω)$ is the space of sums of delta measures, represented by [`DiscreteMeasure`].
-pub trait DiscreteMeasureOp<Domain, F>
-    : BoundedLinear<DiscreteMeasure<Domain, F>, Radon, Linfinity, F>
+pub trait DiscreteMeasureOp<Domain, F>:
+    BoundedLinear<DiscreteMeasure<Domain, F>, Radon, Linfinity, F>
 where
-    F : Float + ToNalgebraRealField,
-    Domain : 'static + Clone + PartialEq,
+    F: Float + ToNalgebraRealField,
+    Domain: 'static + Clone + PartialEq,
 {
     /// The output type of [`Self::preapply`].
     type PreCodomain;
@@ -40,12 +40,13 @@
 /// for $x_1, …, x_n$ the coordinates given by the iterator `I`, and $a=(α_1,…,α_n)$.
 /// Here $C_* ∈ 𝕃(C_c(Ω); ℝ^n)$ stands for the preadjoint.
     /// </p>
-    fn findim_matrix<'a, I>(&self, points : I) -> DMatrix<F::MixedType>
-    where I : ExactSizeIterator<Item=&'a Domain> + Clone;
+    fn findim_matrix<'a, I>(&self, points: I) -> DMatrix<F::MixedType>
+    where
+        I: ExactSizeIterator<Item = &'a Domain> + Clone;
 
     /// [`Mapping`] that typically returns an uninitialised [`PreBTFN`]
     /// instead of a full [`BTFN`].
-    fn preapply(&self, μ : DiscreteMeasure<Domain, F>) -> Self::PreCodomain;
+    fn preapply(&self, μ: DiscreteMeasure<Domain, F>) -> Self::PreCodomain;
 }
 
 // Blanket implementation of a measure as a linear functional over a predual
@@ -67,28 +68,37 @@
 //
 
 /// A trait alias for simple convolution kernels.
-pub trait SimpleConvolutionKernel<F : Float, const N : usize>
-: RealMapping<F, N> + Support<F, N> + Bounded<F> + Clone + 'static {}
+pub trait SimpleConvolutionKernel<F: Float, const N: usize>:
+    RealMapping<F, N> + Support<F, N> + Bounded<F> + Clone + 'static
+{
+}
 
-impl<T, F : Float, const N : usize> SimpleConvolutionKernel<F, N> for T
-where T : RealMapping<F, N> + Support<F, N> + Bounded<F> + Clone + 'static {}
+impl<T, F: Float, const N: usize> SimpleConvolutionKernel<F, N> for T where
+    T: RealMapping<F, N> + Support<F, N> + Bounded<F> + Clone + 'static
+{
+}
 
 /// [`SupportGenerator`] for [`ConvolutionOp`].
-#[derive(Clone,Debug)]
-pub struct ConvolutionSupportGenerator<F : Float, K, const N : usize>
-where K : SimpleConvolutionKernel<F, N> {
-    kernel : K,
-    centres : RNDM<F, N>,
+#[derive(Clone, Debug)]
+pub struct ConvolutionSupportGenerator<F: Float, K, const N: usize>
+where
+    K: SimpleConvolutionKernel<F, N>,
+{
+    kernel: K,
+    centres: RNDM<F, N>,
 }
 
-impl<F : Float, K, const N : usize> ConvolutionSupportGenerator<F, K, N>
-where K : SimpleConvolutionKernel<F, N> {
-
+impl<F: Float, K, const N: usize> ConvolutionSupportGenerator<F, K, N>
+where
+    K: SimpleConvolutionKernel<F, N>,
+{
     /// Construct the convolution kernel corresponding to `δ`, i.e., one centered at `δ.x` and
     /// weighted by `δ.α`.
     #[inline]
-    fn construct_kernel<'a>(&'a self, δ : &'a DeltaMeasure<Loc<F, N>, F>)
-    -> Weighted<Shift<K, F, N>, F> {
+    fn construct_kernel<'a>(
+        &'a self,
+        δ: &'a DeltaMeasure<Loc<F, N>, F>,
+    ) -> Weighted<Shift<K, F, N>, F> {
         self.kernel.clone().shift(δ.x).weigh(δ.α)
     }
 
@@ -98,22 +108,27 @@
     #[inline]
     fn construct_kernel_and_id_filtered<'a>(
         &'a self,
-        (id, δ) : (usize, &'a DeltaMeasure<Loc<F, N>, F>)
+        (id, δ): (usize, &'a DeltaMeasure<Loc<F, N>, F>),
     ) -> Option<(usize, Weighted<Shift<K, F, N>, F>)> {
         (δ.α != F::ZERO).then(|| (id.into(), self.construct_kernel(δ)))
     }
 }
 
-impl<F : Float, K, const N : usize> SupportGenerator<F, N>
-for ConvolutionSupportGenerator<F, K, N>
-where K : SimpleConvolutionKernel<F, N> {
+impl<F: Float, K, const N: usize> SupportGenerator<F, N> for ConvolutionSupportGenerator<F, K, N>
+where
+    K: SimpleConvolutionKernel<F, N>,
+{
     type Id = usize;
     type SupportType = Weighted<Shift<K, F, N>, F>;
-    type AllDataIter<'a> = FilterMapX<'a, Zip<RangeFrom<usize>, SpikeIter<'a, Loc<F, N>, F>>,
-                                      Self, (Self::Id, Self::SupportType)>;
+    type AllDataIter<'a> = FilterMapX<
+        'a,
+        Zip<RangeFrom<usize>, SpikeIter<'a, Loc<F, N>, F>>,
+        Self,
+        (Self::Id, Self::SupportType),
+    >;
 
     #[inline]
-    fn support_for(&self, d : Self::Id) -> Self::SupportType {
+    fn support_for(&self, d: Self::Id) -> Self::SupportType {
         self.construct_kernel(&self.centres[d])
     }
 
@@ -124,51 +139,53 @@
 
     #[inline]
     fn all_data(&self) -> Self::AllDataIter<'_> {
-        (0..).zip(self.centres.iter_spikes())
-             .filter_mapX(self, Self::construct_kernel_and_id_filtered)
+        (0..)
+            .zip(self.centres.iter_spikes())
+            .filter_mapX(self, Self::construct_kernel_and_id_filtered)
     }
 }
 
 /// Representation of a convolution operator $𝒟$.
-#[derive(Clone,Debug)]
-pub struct ConvolutionOp<F, K, BT, const N : usize>
-where F : Float + ToNalgebraRealField,
-      BT : BTImpl<F, N, Data=usize>,
-      K : SimpleConvolutionKernel<F, N> {
+#[derive(Clone, Debug)]
+pub struct ConvolutionOp<F, K, BT, const N: usize>
+where
+    F: Float + ToNalgebraRealField,
+    BT: BTImpl<F, N, Data = usize>,
+    K: SimpleConvolutionKernel<F, N>,
+{
 /// Depth of the [`BT`] bisection tree for the outputs of [`Mapping::apply`].
-    depth : BT::Depth,
+    depth: BT::Depth,
 /// Domain of the [`BT`] bisection tree for the outputs of [`Mapping::apply`].
-    domain : Cube<F, N>,
+    domain: Cube<F, N>,
     /// The convolution kernel
-    kernel : K,
-    _phantoms : PhantomData<(F,BT)>,
+    kernel: K,
+    _phantoms: PhantomData<(F, BT)>,
 }
 
-impl<F, K, BT, const N : usize> ConvolutionOp<F, K, BT, N>
-where F : Float + ToNalgebraRealField,
-      BT : BTImpl<F, N, Data=usize>,
-      K : SimpleConvolutionKernel<F, N> {
-
+impl<F, K, BT, const N: usize> ConvolutionOp<F, K, BT, N>
+where
+    F: Float + ToNalgebraRealField,
+    BT: BTImpl<F, N, Data = usize>,
+    K: SimpleConvolutionKernel<F, N>,
+{
     /// Creates a new convolution operator $𝒟$ with `kernel` on `domain`.
     ///
     /// The output of [`Mapping::apply`] is a [`BT`] of given `depth`.
-    pub fn new(depth : BT::Depth, domain : Cube<F, N>, kernel : K) -> Self {
+    pub fn new(depth: BT::Depth, domain: Cube<F, N>, kernel: K) -> Self {
         ConvolutionOp {
-            depth : depth,
-            domain : domain,
-            kernel : kernel,
-            _phantoms : PhantomData
+            depth: depth,
+            domain: domain,
+            kernel: kernel,
+            _phantoms: PhantomData,
         }
     }
 
     /// Returns the support generator for this convolution operator.
-    fn support_generator(&self, μ : RNDM<F, N>)
-    -> ConvolutionSupportGenerator<F, K, N> {
-
+    fn support_generator(&self, μ: RNDM<F, N>) -> ConvolutionSupportGenerator<F, K, N> {
         // TODO: can we avoid cloning μ?
         ConvolutionSupportGenerator {
-            kernel : self.kernel.clone(),
-            centres : μ
+            kernel: self.kernel.clone(),
+            centres: μ,
         }
     }
 
@@ -178,43 +195,43 @@
     }
 }
 
-impl<F, K, BT, const N : usize> Mapping<RNDM<F, N>>
-for ConvolutionOp<F, K, BT, N>
+impl<F, K, BT, const N: usize> Mapping<RNDM<F, N>> for ConvolutionOp<F, K, BT, N>
 where
-    F : Float + ToNalgebraRealField,
-    BT : BTImpl<F, N, Data=usize>,
-    K : SimpleConvolutionKernel<F, N>,
-    Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N>
+    F: Float + ToNalgebraRealField,
+    BT: BTImpl<F, N, Data = usize>,
+    K: SimpleConvolutionKernel<F, N>,
+    Weighted<Shift<K, F, N>, F>: LocalAnalysis<F, BT::Agg, N>,
 {
-
     type Codomain = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;
 
-    fn apply<I>(&self, μ : I) -> Self::Codomain
-    where I : Instance<RNDM<F, N>> {
+    fn apply<I>(&self, μ: I) -> Self::Codomain
+    where
+        I: Instance<RNDM<F, N>>,
+    {
         let g = self.support_generator(μ.own());
         BTFN::construct(self.domain.clone(), self.depth, g)
     }
 }
 
 /// [`ConvolutionOp`]s as linear operators over [`DiscreteMeasure`]s.
-impl<F, K, BT, const N : usize> Linear<RNDM<F, N>>
-for ConvolutionOp<F, K, BT, N>
+impl<F, K, BT, const N: usize> Linear<RNDM<F, N>> for ConvolutionOp<F, K, BT, N>
 where
-    F : Float + ToNalgebraRealField,
-    BT : BTImpl<F, N, Data=usize>,
-    K : SimpleConvolutionKernel<F, N>,
-    Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N>
-{ }
+    F: Float + ToNalgebraRealField,
+    BT: BTImpl<F, N, Data = usize>,
+    K: SimpleConvolutionKernel<F, N>,
+    Weighted<Shift<K, F, N>, F>: LocalAnalysis<F, BT::Agg, N>,
+{
+}
 
-impl<F, K, BT, const N : usize>
-BoundedLinear<RNDM<F, N>, Radon, Linfinity, F>
-for ConvolutionOp<F, K, BT, N>
-where F : Float + ToNalgebraRealField,
-      BT : BTImpl<F, N, Data=usize>,
-      K : SimpleConvolutionKernel<F, N>,
-      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
-
-    fn opnorm_bound(&self, _ : Radon, _ : Linfinity) -> F {
+impl<F, K, BT, const N: usize> BoundedLinear<RNDM<F, N>, Radon, Linfinity, F>
+    for ConvolutionOp<F, K, BT, N>
+where
+    F: Float + ToNalgebraRealField,
+    BT: BTImpl<F, N, Data = usize>,
+    K: SimpleConvolutionKernel<F, N>,
+    Weighted<Shift<K, F, N>, F>: LocalAnalysis<F, BT::Agg, N>,
+{
+    fn opnorm_bound(&self, _: Radon, _: Linfinity) -> F {
         // With μ = ∑_i α_i δ_{x_i}, we have
         // |𝒟μ|_∞
         // = sup_z |∑_i α_i φ(z - x_i)|
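
Note: the hunk cuts this derivation short. Reading the visible lines, the estimate presumably continues in the standard way,

$$
\sup_z \Big|∑_i α_i φ(z - x_i)\Big| ≤ \Big(\sup_z |φ(z)|\Big) ∑_i |α_i| = \|φ\|_∞ |μ|(Ω),
$$

so the kernel's sup-norm bound is a valid operator norm bound from the Radon norm to the $L^∞$ norm, matching the `Radon`/`Linfinity` signature of `opnorm_bound`.
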
@@ -225,31 +242,33 @@
     }
 }
 
-
-impl<F, K, BT, const N : usize> DiscreteMeasureOp<Loc<F, N>, F>
-for ConvolutionOp<F, K, BT, N>
-where F : Float + ToNalgebraRealField,
-      BT : BTImpl<F, N, Data=usize>,
-      K : SimpleConvolutionKernel<F, N>,
-      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+impl<F, K, BT, const N: usize> DiscreteMeasureOp<Loc<F, N>, F> for ConvolutionOp<F, K, BT, N>
+where
+    F: Float + ToNalgebraRealField,
+    BT: BTImpl<F, N, Data = usize>,
+    K: SimpleConvolutionKernel<F, N>,
+    Weighted<Shift<K, F, N>, F>: LocalAnalysis<F, BT::Agg, N>,
+{
     type PreCodomain = PreBTFN<F, ConvolutionSupportGenerator<F, K, N>, N>;
 
-    fn findim_matrix<'a, I>(&self, points : I) -> DMatrix<F::MixedType>
-    where I : ExactSizeIterator<Item=&'a Loc<F,N>> + Clone {
+    fn findim_matrix<'a, I>(&self, points: I) -> DMatrix<F::MixedType>
+    where
+        I: ExactSizeIterator<Item = &'a Loc<F, N>> + Clone,
+    {
         // TODO: Preliminary implementation. It would be best to use sparse matrices or
         // possibly explicit operators without matrices.
         let n = points.len();
         let points_clone = points.clone();
         let pairs = points.cartesian_product(points_clone);
         let kernel = &self.kernel;
-        let values = pairs.map(|(x, y)| kernel.apply(y-x).to_nalgebra_mixed());
+        let values = pairs.map(|(x, y)| kernel.apply(y - x).to_nalgebra_mixed());
         DMatrix::from_iterator(n, n, values)
     }
 
     /// A version of [`Mapping::apply`] that does not instantiate the [`BTFN`] codomain with
     /// a bisection tree, instead returning a [`PreBTFN`]. This can improve performance when
     /// the output is to be added as the right-hand-side operand to a proper BTFN.
-    fn preapply(&self, μ : RNDM<F, N>) -> Self::PreCodomain {
+    fn preapply(&self, μ: RNDM<F, N>) -> Self::PreCodomain {
         BTFN::new_pre(self.support_generator(μ))
     }
 }
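
Note: given the `cartesian_product` ordering and nalgebra's column-major `from_iterator` fill in `findim_matrix` above, the assembled matrix has entries

$$
A_{ij} = φ(x_i - x_j),
$$

which is symmetric whenever the kernel $φ$ is even, as convolution kernels typically are.
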
@@ -258,47 +277,46 @@
 /// for [`ConvolutionSupportGenerator`].
 macro_rules! make_convolutionsupportgenerator_scalarop_rhs {
     ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
-        impl<F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
-        std::ops::$trait_assign<F>
-        for ConvolutionSupportGenerator<F, K, N> {
-            fn $fn_assign(&mut self, t : F) {
+        impl<F: Float, K: SimpleConvolutionKernel<F, N>, const N: usize> std::ops::$trait_assign<F>
+            for ConvolutionSupportGenerator<F, K, N>
+        {
+            fn $fn_assign(&mut self, t: F) {
                 self.centres.$fn_assign(t);
             }
         }
 
-        impl<F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
-        std::ops::$trait<F>
-        for ConvolutionSupportGenerator<F, K, N> {
+        impl<F: Float, K: SimpleConvolutionKernel<F, N>, const N: usize> std::ops::$trait<F>
+            for ConvolutionSupportGenerator<F, K, N>
+        {
             type Output = ConvolutionSupportGenerator<F, K, N>;
-            fn $fn(mut self, t : F) -> Self::Output {
+            fn $fn(mut self, t: F) -> Self::Output {
                 std::ops::$trait_assign::$fn_assign(&mut self.centres, t);
                 self
             }
         }
-        impl<'a, F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
-        std::ops::$trait<F>
-        for &'a ConvolutionSupportGenerator<F, K, N> {
+        impl<'a, F: Float, K: SimpleConvolutionKernel<F, N>, const N: usize> std::ops::$trait<F>
+            for &'a ConvolutionSupportGenerator<F, K, N>
+        {
             type Output = ConvolutionSupportGenerator<F, K, N>;
-            fn $fn(self, t : F) -> Self::Output {
-                ConvolutionSupportGenerator{
-                    kernel : self.kernel.clone(),
-                    centres : (&self.centres).$fn(t),
+            fn $fn(self, t: F) -> Self::Output {
+                ConvolutionSupportGenerator {
+                    kernel: self.kernel.clone(),
+                    centres: (&self.centres).$fn(t),
                 }
             }
         }
-    }
+    };
 }
 
 make_convolutionsupportgenerator_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
 make_convolutionsupportgenerator_scalarop_rhs!(Div, div, DivAssign, div_assign);
 
-
 /// Generates a unary operation (e.g. [`std::ops::Neg`]) for [`ConvolutionSupportGenerator`].
 macro_rules! make_convolutionsupportgenerator_unaryop {
     ($trait:ident, $fn:ident) => {
-        impl<F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
-        std::ops::$trait
-        for ConvolutionSupportGenerator<F, K, N> {
+        impl<F: Float, K: SimpleConvolutionKernel<F, N>, const N: usize> std::ops::$trait
+            for ConvolutionSupportGenerator<F, K, N>
+        {
             type Output = ConvolutionSupportGenerator<F, K, N>;
             fn $fn(mut self) -> Self::Output {
                 self.centres = self.centres.$fn();
@@ -306,19 +324,18 @@
             }
         }
 
-        impl<'a, F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
-        std::ops::$trait
-        for &'a ConvolutionSupportGenerator<F, K, N> {
+        impl<'a, F: Float, K: SimpleConvolutionKernel<F, N>, const N: usize> std::ops::$trait
+            for &'a ConvolutionSupportGenerator<F, K, N>
+        {
             type Output = ConvolutionSupportGenerator<F, K, N>;
             fn $fn(self) -> Self::Output {
-                ConvolutionSupportGenerator{
-                    kernel : self.kernel.clone(),
-                    centres : (&self.centres).$fn(),
+                ConvolutionSupportGenerator {
+                    kernel: self.kernel.clone(),
+                    centres: (&self.centres).$fn(),
                 }
             }
         }
-    }
+    };
 }
 
 make_convolutionsupportgenerator_unaryop!(Neg, neg);
-
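
To summarise this file: for $μ = ∑_i α_i δ_{x_i}$, the operator built here evaluates to

$$
(𝒟μ)(z) = ∑_i α_i φ(z - x_i),
$$

with each summand produced by `construct_kernel` (a `shift` by $x_i$ and a `weigh` by $α_i$) and the sum assembled lazily through the bisection tree.
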
--- a/src/subproblem/l1squared_unconstrained.rs	Mon Feb 17 14:10:45 2025 -0500
+++ b/src/subproblem/l1squared_unconstrained.rs	Mon Feb 17 14:10:52 2025 -0500
@@ -2,76 +2,74 @@
 Iterative algorithms for solving the finite-dimensional subproblem without constraints.
 */
 
+use itertools::izip;
 use nalgebra::DVector;
 use numeric_literals::replace_float_literals;
-use itertools::izip;
 use std::cmp::Ordering::*;
 
-use std::iter::zip;
-use alg_tools::iterate::{
-    AlgIteratorFactory,
-    AlgIteratorState,
-};
+use alg_tools::iterate::{AlgIteratorFactory, AlgIteratorState};
 use alg_tools::nalgebra_support::ToNalgebraRealField;
 use alg_tools::nanleast::NaNLeast;
 use alg_tools::norms::{Dist, L1};
+use std::iter::zip;
 
+use super::l1squared_nonneg::max_interval_dist_to_zero;
+use super::unconstrained::soft_thresholding;
+use super::{InnerMethod, InnerSettings};
 use crate::types::*;
-use super::{
-    InnerMethod,
-    InnerSettings
-};
-use super::unconstrained::soft_thresholding;
-use super::l1squared_nonneg::max_interval_dist_to_zero;
 
-/// Calculate $prox_f(x)$ for $f(x)=\frac{β}{2}\norm{x-y}_1^2$.
+/// Calculate $\prox\_f(x)$ for $f(x)=\frac{β}{2}\norm{x-y}\_1^2$.
 ///
 /// To derive an algorithm for this, we can assume that $y=0$, as
-/// $prox_f(x) = prox_{f_0}(x - y) - y$ for $f_0=\frac{β}{2}\norm{x}_1^2$.
-/// Now, the optimality conditions for $w = prox_f(x)$ are
-/// $$\tag{*}
-///     0 ∈ w-x + β\norm{w}_1\sign w.
+/// $\prox\_f(x) = \prox\_{f\_0}(x - y) + y$ for $f\_0=\frac{β}{2}\norm{x}\_1^2$.
+/// Now, the optimality conditions for $w = \prox\_f(x)$ are
 /// $$
-/// Clearly then $w = \soft_{β\norm{w}_1}(x)$.
+///     0 ∈ w-x + β\norm{w}\_1\sign w.
+/// $$
+/// Clearly then $w = \soft\_{β\norm{w}\_1}(x)$.
 /// Thus the components of $x$ with smallest absolute value will be zeroed out.
 /// Denoting by $w'$ the non-zero components, and by $x'$ the corresponding components
 /// of $x$, and by $m$ their count, multiplying the corresponding lines of the optimality condition by $\sign x'$,
 /// we obtain
 /// $$
-///     \norm{x'}_1 = (1+βm)\norm{w'}_1.
+///     \norm{x'}\_1 = (1+βm)\norm{w'}\_1.
 /// $$
-/// That is, $\norm{w}_1=\norm{w'}_1=\norm{x'}_1/(1+βm)$.
+/// That is, $\norm{w}\_1=\norm{w'}\_1=\norm{x'}\_1/(1+βm)$.
 /// Thus, sorting $x$ by absolute value, and sequentially in order eliminating the smallest
-/// elements, we can easily calculate what $\norm{w}_1$ should be for that choice, and
-/// then easily calculate $w = \soft_{β\norm{w}_1}(x)$. We just have to verify that
+/// elements, we can easily calculate what $\norm{w}\_1$ should be for that choice, and
+/// then easily calculate $w = \soft\_{β\norm{w}\_1}(x)$. We just have to verify that
 /// the resulting $w$ has the same norm. There's a shortcut to this, as we work
 /// sequentially: just check that the smallest assumed-nonzero component $i$ satisfies the
 /// condition of soft-thresholding to remain non-zero: $|x\_i|>β\norm{x'}\_1/(1+βm)$.
-/// Clearly, if this condition fails for x_i, it will fail for all the components
+/// condition of soft-thresholding to remain non-zero: $|x\_i|>τ\norm{x'}/(1+τm)$.
+/// Clearly, if this condition fails for $x\_i$, it will fail for all the components
 /// already excluded. Whereas if it holds, it will hold for all components not excluded.
 #[replace_float_literals(F::cast_from(literal))]
-pub(super) fn l1squared_prox<F :Float + nalgebra::RealField>(
-    sorted_abs : &mut DVector<F>,
-    x : &mut DVector<F>,
-    y : &DVector<F>,
-    β : F
+pub(super) fn l1squared_prox<F: Float + nalgebra::RealField>(
+    sorted_abs: &mut DVector<F>,
+    x: &mut DVector<F>,
+    y: &DVector<F>,
+    β: F,
 ) {
     sorted_abs.copy_from(x);
     sorted_abs.axpy(-1.0, y, 1.0);
     sorted_abs.apply(|z_i| *z_i = num_traits::abs(*z_i));
-    sorted_abs.as_mut_slice().sort_unstable_by(|a, b| NaNLeast(*a).cmp(&NaNLeast(*b)));
+    sorted_abs
+        .as_mut_slice()
+        .sort_unstable_by(|a, b| NaNLeast(*a).cmp(&NaNLeast(*b)));
 
     let mut n = sorted_abs.sum();
     for (m, az_i) in zip((1..=x.len() as u32).rev(), sorted_abs) {
         // test first
-        let tmp = β*n/(1.0 + β*F::cast_from(m));
+        let tmp = β * n / (1.0 + β * F::cast_from(m));
         if *az_i <= tmp {
             // Fail
             n -= *az_i;
         } else {
             // Success
-            x.zip_apply(y, |x_i, y_i| *x_i = y_i + soft_thresholding(*x_i-y_i, tmp));
-            return
+            x.zip_apply(y, |x_i, y_i| {
+                *x_i = y_i + soft_thresholding(*x_i - y_i, tmp)
+            });
+            return;
         }
     }
     // m = 0 should always work, but x is zero.
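
The sort-and-eliminate procedure described in the doc comment above is compact enough to illustrate standalone. A minimal sketch on plain slices (same math, but not the crate's API, which works on nalgebra vectors; assumes no NaNs and equal-length inputs):

    // prox of f(x) = (β/2)‖x−y‖₁², following the derivation above: sort |x−y|,
    // exclude the smallest entries while they fail the soft-thresholding test,
    // then threshold at τ = β‖x'‖₁/(1+βm).
    fn l1squared_prox_demo(x: &mut [f64], y: &[f64], beta: f64) {
        let mut z: Vec<f64> = x.iter().zip(y).map(|(a, b)| (a - b).abs()).collect();
        z.sort_by(|a, b| a.partial_cmp(b).unwrap()); // ascending; assumes no NaN
        let mut n: f64 = z.iter().sum(); // ‖x'‖₁ over the assumed-nonzero components
        for (m, zi) in (1..=z.len()).rev().zip(&z) {
            let tau = beta * n / (1.0 + beta * m as f64); // candidate β‖w‖₁
            if *zi <= tau {
                n -= zi; // smallest component would be zeroed: exclude it and retry
            } else {
                // all remaining components survive soft-thresholding at τ
                for (xi, yi) in x.iter_mut().zip(y) {
                    let d = *xi - yi;
                    *xi = yi + if d > tau { d - tau } else if d < -tau { d + tau } else { 0.0 };
                }
                return;
            }
        }
        x.copy_from_slice(y); // every component zeroed: the prox is y itself
    }

    fn main() {
        let y = [0.0, 0.0, 0.0];
        let mut x = [3.0, -1.0, 0.5];
        l1squared_prox_demo(&mut x, &y, 1.0);
        println!("{:?}", x); // [1.5, 0.0, 0.0]; note that β‖w‖₁ = 1.5 is the threshold
    }
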
@@ -82,35 +80,52 @@
 ///
 /// `v` will be modified and cannot be trusted to contain useful values afterwards.
 #[replace_float_literals(F::cast_from(literal))]
-fn min_subdifferential<F : Float + nalgebra::RealField>(
-    y : &DVector<F>,
-    x : &DVector<F>,
-    g : &DVector<F>,
-    λ : F,
-    β : F
+fn min_subdifferential<F: Float + nalgebra::RealField>(
+    y: &DVector<F>,
+    x: &DVector<F>,
+    g: &DVector<F>,
+    λ: F,
+    β: F,
 ) -> F {
     let mut val = 0.0;
-    let tmp = β*y.dist(x, L1);
+    let tmp = β * y.dist(x, L1);
     for (&g_i, &x_i, y_i) in izip!(g.iter(), x.iter(), y.iter()) {
         let (mut lb, mut ub) = (-g_i, -g_i);
         match x_i.partial_cmp(y_i) {
-            Some(Greater) => { lb += tmp; ub += tmp },
-            Some(Less) => { lb -= tmp; ub -= tmp },
-            Some(Equal) => { lb -= tmp; ub += tmp },
-            None => {},
+            Some(Greater) => {
+                lb += tmp;
+                ub += tmp
+            }
+            Some(Less) => {
+                lb -= tmp;
+                ub -= tmp
+            }
+            Some(Equal) => {
+                lb -= tmp;
+                ub += tmp
+            }
+            None => {}
         }
         match x_i.partial_cmp(&0.0) {
-            Some(Greater) => { lb += λ; ub += λ },
-            Some(Less) => { lb -= λ; ub -= λ },
-            Some(Equal) => { lb -= λ; ub += λ },
-            None => {},
+            Some(Greater) => {
+                lb += λ;
+                ub += λ
+            }
+            Some(Less) => {
+                lb -= λ;
+                ub -= λ
+            }
+            Some(Equal) => {
+                lb -= λ;
+                ub += λ
+            }
+            None => {}
         };
         val = max_interval_dist_to_zero(val, lb, ub);
     }
     val
 }
 
-
 /// PDPS implementation of [`l1squared_unconstrained`].
 /// For detailed documentation of the inputs and outputs, refer to that function.
 ///
@@ -118,17 +133,18 @@
 /// for potential performance improvements.
 #[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
 pub fn l1squared_unconstrained_pdps<F, I>(
-    y : &DVector<F::MixedType>,
-    g : &DVector<F::MixedType>,
-    λ_ : F,
-    β_ : F,
-    x : &mut DVector<F::MixedType>,
-    τ_ : F,
-    σ_ : F,
-    iterator : I
+    y: &DVector<F::MixedType>,
+    g: &DVector<F::MixedType>,
+    λ_: F,
+    β_: F,
+    x: &mut DVector<F::MixedType>,
+    τ_: F,
+    σ_: F,
+    iterator: I,
 ) -> usize
-where F : Float + ToNalgebraRealField,
-      I : AlgIteratorFactory<F>
+where
+    F: Float + ToNalgebraRealField,
+    I: AlgIteratorFactory<F>,
 {
     let λ = λ_.to_nalgebra_mixed();
     let β = β_.to_nalgebra_mixed();
@@ -143,19 +159,17 @@
         // Primal step: x^{k+1} = prox_{τ|.-y|_1^2}(x^k - τ (w^k - g))
         x.axpy(-τ, &w, 1.0);
         x.axpy(τ, g, 1.0);
-        l1squared_prox(&mut tmp, x, y, τ*β);
-        
+        l1squared_prox(&mut tmp, x, y, τ * β);
+
         // Dual step: w^{k+1} = proj_{[-λ,λ]}(w^k + σ(2x^{k+1}-x^k))
-        w.axpy(2.0*σ, x, 1.0);
+        w.axpy(2.0 * σ, x, 1.0);
         w.axpy(-σ, &xprev, 1.0);
         w.apply(|w_i| *w_i = num_traits::clamp(*w_i, -λ, λ));
         xprev.copy_from(x);
-        
-        iters +=1;
 
-        state.if_verbose(|| {
-            F::from_nalgebra_mixed(min_subdifferential(y, x, g, λ, β))
-        })
+        iters += 1;
+
+        state.if_verbose(|| F::from_nalgebra_mixed(min_subdifferential(y, x, g, λ, β)))
     });
 
     iters
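
For context, these updates are the standard primal-dual proximal splitting (PDPS) iterations for the saddle-point reformulation (a reading of the code; the source states only the individual steps)

$$
\min_{x ∈ ℝ^n} \max_{\|w\|_∞ ≤ λ} \frac{β}{2}\|x-y\|_1^2 - g^⊤ x + ⟨w, x⟩,
$$

using $λ\|x\|_1 = \max_{\|w\|_∞ ≤ λ} ⟨w, x⟩$; the prox of the conjugate of $λ\|·\|_1$ is then exactly the clamp to $[-λ, λ]$ in the dual step above.
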
@@ -180,26 +194,27 @@
 /// $$</div>
 #[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
 pub fn l1squared_unconstrained_pdps_alt<F, I>(
-    y : &DVector<F::MixedType>,
-    g : &DVector<F::MixedType>,
-    λ_ : F,
-    β_ : F,
-    x : &mut DVector<F::MixedType>,
-    τ_ : F,
-    σ_ : F,
-    θ_ : F,
-    iterator : I
+    y: &DVector<F::MixedType>,
+    g: &DVector<F::MixedType>,
+    λ_: F,
+    β_: F,
+    x: &mut DVector<F::MixedType>,
+    τ_: F,
+    σ_: F,
+    θ_: F,
+    iterator: I,
 ) -> usize
-where F : Float + ToNalgebraRealField,
-      I : AlgIteratorFactory<F>
+where
+    F: Float + ToNalgebraRealField,
+    I: AlgIteratorFactory<F>,
 {
     let λ = λ_.to_nalgebra_mixed();
     let τ = τ_.to_nalgebra_mixed();
     let σ = σ_.to_nalgebra_mixed();
     let θ = θ_.to_nalgebra_mixed();
     let β = β_.to_nalgebra_mixed();
-    let σθ = σ*θ;
-    let τθ = τ*θ;
+    let σθ = σ * θ;
+    let τθ = τ * θ;
     let mut w = DVector::zeros(x.len());
     let mut tmp = DVector::zeros(x.len());
     let mut xprev = x.clone();
@@ -209,8 +224,8 @@
         // Primal step: x^{k+1} = soft_τλ(x^k - τ(θ w^k -g))
         x.axpy(-τθ, &w, 1.0);
         x.axpy(τ, g, 1.0);
-        x.apply(|x_i| *x_i = soft_thresholding(*x_i, τ*λ));
-        
+        x.apply(|x_i| *x_i = soft_thresholding(*x_i, τ * λ));
+
         // Dual step: with g(x) = (β/(2θ))‖x-y‖₁² and q = w^k + σ(2x^{k+1}-x^k),
         // we compute w^{k+1} = prox_{σg^*}(q) for
         //                    = q - σ prox_{g/σ}(q/σ)
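
The identity invoked in this comment is Moreau's decomposition: for proper convex lower semicontinuous $g$ and $σ > 0$,

$$
q = \prox_{σg^*}(q) + σ \prox_{g/σ}(q/σ),
$$

which lets the dual step be evaluated through the primal prox, here `l1squared_prox` a few lines below.
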
@@ -221,22 +236,19 @@
         w.axpy(2.0, x, 1.0);
         w.axpy(-1.0, &xprev, 1.0);
         xprev.copy_from(&w); // use xprev as temporary variable
-        l1squared_prox(&mut tmp, &mut xprev, y, β/σθ);
+        l1squared_prox(&mut tmp, &mut xprev, y, β / σθ);
         w -= &xprev;
         w *= σ;
         xprev.copy_from(x);
-        
+
         iters += 1;
 
-        state.if_verbose(|| {
-            F::from_nalgebra_mixed(min_subdifferential(y, x, g, λ, β))
-        })
+        state.if_verbose(|| F::from_nalgebra_mixed(min_subdifferential(y, x, g, λ, β)))
     });
 
     iters
 }
 
-
 /// This function applies an iterative method for the solution of the problem
 /// <div>$$
 ///     \min_{x ∈ ℝ^n} \frac{β}{2} |x-y|_1^2 - g^⊤ x + λ\|x\|₁.
@@ -246,16 +258,17 @@
 /// This function returns the number of iterations taken.
 #[replace_float_literals(F::cast_from(literal))]
 pub fn l1squared_unconstrained<F, I>(
-    y : &DVector<F::MixedType>,
-    g : &DVector<F::MixedType>,
-    λ : F,
-    β : F,
-    x : &mut DVector<F::MixedType>,
-    inner : &InnerSettings<F>,
-    iterator : I
+    y: &DVector<F::MixedType>,
+    g: &DVector<F::MixedType>,
+    λ: F,
+    β: F,
+    x: &mut DVector<F::MixedType>,
+    inner: &InnerSettings<F>,
+    iterator: I,
 ) -> usize
-where F : Float + ToNalgebraRealField,
-      I : AlgIteratorFactory<F>
+where
+    F: Float + ToNalgebraRealField,
+    I: AlgIteratorFactory<F>,
 {
     // Estimate of ‖K‖ for K=θ Id.
     let inner_θ = 1.0;
@@ -264,8 +277,9 @@
     let (inner_τ, inner_σ) = (inner.pdps_τσ0.0 / normest, inner.pdps_τσ0.1 / normest);
 
     match inner.method {
-        InnerMethod::PDPS =>
-            l1squared_unconstrained_pdps_alt(y, g, λ, β, x, inner_τ, inner_σ, inner_θ, iterator),
+        InnerMethod::PDPS => {
+            l1squared_unconstrained_pdps_alt(y, g, λ, β, x, inner_τ, inner_σ, inner_θ, iterator)
+        }
         other => unimplemented!("{other:?} is unimplemented"),
     }
 }
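
Both files lean on `soft_thresholding` from `super::unconstrained`, the scalar prox of the absolute value. A standalone sketch of what it presumably computes (inferred from its uses above, e.g. `soft_τλ` in the primal step; not the crate's actual definition):

    // prox_{τ|·|}(v) = sign(v) · max(|v| − τ, 0), written branch-wise
    fn soft_thresholding(v: f64, tau: f64) -> f64 {
        if v > tau {
            v - tau
        } else if v < -tau {
            v + tau
        } else {
            0.0
        }
    }

    fn main() {
        assert_eq!(soft_thresholding(3.0, 1.0), 2.0);
        assert_eq!(soft_thresholding(-0.5, 1.0), 0.0); // small inputs are zeroed
    }
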
