Fri, 14 Feb 2025 23:46:43 -0500
Some documentation and factor changes related to ℓ_F and ℓ_r.
Also allow Zed to re-indent the modified files.
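For reference, the two components returned by `curvature_bound_components` are now read as ℓ_F and Θ², and the sliding solvers form ℓ_r = Θ²·(maximum transport) at the call site, instead of folding everything into a single factor (the first component was previously named ℓ_v in the solvers). A minimal standalone sketch of the resulting transport step length, simplified from `calculate_θ` in sliding_fb.rs below; plain f64 stands in for the generic `Float` type, and the numbers are purely illustrative:

    // ℓ_r = Θ² · (maximum transport so far),  θ = θ₀ / (τ (ℓ + ℓ_F + ℓ_r)).
    fn transport_step_length(
        theta0: f64, tau: f64, l: f64, l_f: f64, theta_sq: f64, max_transport: f64,
    ) -> f64 {
        let l_r = theta_sq * max_transport;
        theta0 / (tau * (l + l_f + l_r))
    }

    fn main() {
        let theta = transport_step_length(0.9, 0.5, 0.0, 2.0, 1.5, 3.0);
        println!("θ ≈ {theta:.4}"); // ≈ 0.2769
    }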
--- a/src/forward_model.rs Fri Feb 14 23:16:14 2025 -0500 +++ b/src/forward_model.rs Fri Feb 14 23:46:43 2025 -0500 @@ -2,56 +2,56 @@ Forward models from discrete measures to observations. */ -pub use alg_tools::linops::*; +use alg_tools::error::DynError; use alg_tools::euclidean::Euclidean; -use alg_tools::error::DynError; use alg_tools::instance::Instance; -use alg_tools::norms::{NormExponent, L2, Norm}; +pub use alg_tools::linops::*; +use alg_tools::norms::{Norm, NormExponent, L2}; +use crate::measures::Radon; use crate::types::*; -use crate::measures::Radon; +pub mod bias; pub mod sensor_grid; -pub mod bias; /// `ForwardeModel`s are bounded preadjointable linear operators $A ∈ 𝕃(𝒵(Ω); E)$ /// where $𝒵(Ω) ⊂ ℳ(Ω)$ is the space of sums of delta measures, presented by /// [`crate::measures::DiscreteMeasure`], and $E$ is a [`Euclidean`] space. -pub trait ForwardModel<Domain : Space, F : Float = f64, E : NormExponent = Radon> - : BoundedLinear<Domain, E, L2, F, Codomain=Self::Observable> +pub trait ForwardModel<Domain: Space, F: Float = f64, E: NormExponent = Radon>: + BoundedLinear<Domain, E, L2, F, Codomain = Self::Observable> + GEMV<F, Domain, Self::Observable> + Preadjointable<Domain, Self::Observable> where - for<'a> Self::Observable : Instance<Self::Observable>, - Domain : Norm<F, E>, + for<'a> Self::Observable: Instance<Self::Observable>, + Domain: Norm<F, E>, { /// The codomain or value space (of “observables”) for this operator. /// It is assumed to be a [`Euclidean`] space, and therefore also (identified with) /// the domain of the preadjoint. - type Observable : Euclidean<F, Output=Self::Observable> - + AXPY<F> - + Space - + Clone; + type Observable: Euclidean<F, Output = Self::Observable> + AXPY<F> + Space + Clone; /// Write an observable into a file. - fn write_observable(&self, b : &Self::Observable, prefix : String) -> DynError; + fn write_observable(&self, b: &Self::Observable, prefix: String) -> DynError; /// Returns a zero observable fn zero_observable(&self) -> Self::Observable; } /// Trait for operators $A$ for which $A_*A$ is bounded by some other operator. -pub trait AdjointProductBoundedBy<Domain : Space, D> : Linear<Domain> { - type FloatType : Float; +pub trait AdjointProductBoundedBy<Domain: Space, D>: Linear<Domain> { + type FloatType: Float; /// Return $L$ such that $A_*A ≤ LD$. - fn adjoint_product_bound(&self, other : &D) -> Option<Self::FloatType>; + fn adjoint_product_bound(&self, other: &D) -> Option<Self::FloatType>; } /// Trait for operators $A$ for which $A_*A$ is bounded by a diagonal operator. -pub trait AdjointProductPairBoundedBy<Domain : Space, D1, D2> : Linear<Domain> { - type FloatType : Float; +pub trait AdjointProductPairBoundedBy<Domain: Space, D1, D2>: Linear<Domain> { + type FloatType: Float; /// Return $(L, L_z)$ such that $A_*A ≤ (L_1 D_1, L_2 D_2)$. - fn adjoint_product_pair_bound(&self, other1 : &D1, other_2 : &D2) - -> Option<(Self::FloatType, Self::FloatType)>; + fn adjoint_product_pair_bound( + &self, + other1: &D1, + other_2: &D2, + ) -> Option<(Self::FloatType, Self::FloatType)>; } /* @@ -72,14 +72,12 @@ } */ -/// Trait for [`ForwardModel`]s that satisfy bounds on the curvature $𝒦_F$. +/// Trait for [`ForwardModel`]s that satisfy bounds on curvature. pub trait BoundedCurvature { - type FloatType : Float; + type FloatType: Float; - /// Returns components for a bound $ℓ_F$ on the curvature - /// $$ - /// 𝒦_F(μ, γ) = ∫ B_{F'(μ)} dγ + B_F(μ, μ+Δ). - /// $$ - /// such that $𝒦_F(μ, γ) ≤ ℓ_F ∫ c_2 d|γ|$. 
+ /// Returns factors $ℓ_F$ and $Θ²$ such that + /// $B_{F'(μ)} dγ ≤ ℓ_F c_2$ and $⟨F'(μ)+F'(μ+Δ)|Δ⟩ ≤ Θ²|γ|(c_2)‖γ‖ = ℓ_r|γ|(c_2)$, + /// where $Δ=(π_♯^1-π_♯^0)γ$ and $ℓ_r = Θ²‖γ‖$. fn curvature_bound_components(&self) -> (Option<Self::FloatType>, Option<Self::FloatType>); }
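Spelled out (a summary of the new comment above and the removed one, not a new estimate), the old and new documented curvature bounds relate as
$$ \text{old:}\quad 𝒦_F(μ, γ) = ∫ B_{F'(μ)}\,dγ + B_F(μ, μ+Δ) ≤ ℓ_F ∫ c_2\,d|γ|, $$
$$ \text{new:}\quad B_{F'(μ)}\,dγ ≤ ℓ_F c_2 \quad\text{and}\quad ⟨F'(μ)+F'(μ+Δ)\,|\,Δ⟩ ≤ Θ^2\,|γ|(c_2)\,‖γ‖ = ℓ_r\,|γ|(c_2), $$
with $Δ = (π_♯^1-π_♯^0)γ$. Correspondingly, the sliding solvers below replace the old step length denominator $τ(ℓ + ℓ_F)$ (with the transport term folded into ℓ_F) by $τ(ℓ + ℓ_F + ℓ_r)$.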
--- a/src/forward_model/bias.rs Fri Feb 14 23:16:14 2025 -0500 +++ b/src/forward_model/bias.rs Fri Feb 14 23:46:43 2025 -0500 @@ -2,28 +2,28 @@ Simple parametric forward model. */ -use numeric_literals::replace_float_literals; -use alg_tools::types::{Float, ClosedAdd}; -use alg_tools::mapping::Space; +use super::{AdjointProductBoundedBy, AdjointProductPairBoundedBy, BoundedCurvature, ForwardModel}; +use crate::measures::RNDM; use alg_tools::direct_product::Pair; -use alg_tools::linops::{Linear, RowOp, IdOp, ZeroOp, AXPY}; use alg_tools::error::DynError; -use alg_tools::norms::{L2, Norm, PairNorm, NormExponent}; -use crate::measures::RNDM; -use super::{ForwardModel, AdjointProductBoundedBy, AdjointProductPairBoundedBy, BoundedCurvature}; +use alg_tools::linops::{IdOp, Linear, RowOp, ZeroOp, AXPY}; +use alg_tools::mapping::Space; +use alg_tools::norms::{Norm, NormExponent, PairNorm, L2}; +use alg_tools::types::{ClosedAdd, Float}; +use numeric_literals::replace_float_literals; impl<Domain, F, A, E> ForwardModel<Pair<Domain, A::Observable>, F, PairNorm<E, L2, L2>> -for RowOp<A, IdOp<A::Observable>> + for RowOp<A, IdOp<A::Observable>> where - E : NormExponent, - Domain : Space + Norm<F, E>, - F : Float, - A::Observable : ClosedAdd + Norm<F, L2> + 'static, - A : ForwardModel<Domain, F, E> + 'static + E: NormExponent, + Domain: Space + Norm<F, E>, + F: Float, + A::Observable: ClosedAdd + Norm<F, L2> + 'static, + A: ForwardModel<Domain, F, E> + 'static, { type Observable = A::Observable; - fn write_observable(&self, b : &Self::Observable, prefix : String) -> DynError { + fn write_observable(&self, b: &Self::Observable, prefix: String) -> DynError { self.0.write_observable(b, prefix) } @@ -35,17 +35,17 @@ #[replace_float_literals(F::cast_from(literal))] impl<Domain, F, A, D, Z> AdjointProductPairBoundedBy<Pair<Domain, Z>, D, IdOp<Z>> -for RowOp<A, IdOp<Z>> + for RowOp<A, IdOp<Z>> where - Domain : Space, - F : Float, - Z : Clone + Space + ClosedAdd, - A : AdjointProductBoundedBy<Domain, D, FloatType=F, Codomain = Z>, - A::Codomain : ClosedAdd, + Domain: Space, + F: Float, + Z: Clone + Space + ClosedAdd, + A: AdjointProductBoundedBy<Domain, D, FloatType = F, Codomain = Z>, + A::Codomain: ClosedAdd, { type FloatType = F; - fn adjoint_product_pair_bound(&self, d : &D, _ : &IdOp<Z>) -> Option<(F, F)> { + fn adjoint_product_pair_bound(&self, d: &D, _: &IdOp<Z>) -> Option<(F, F)> { self.0.adjoint_product_bound(d).map(|l_0| { // [A_*; B_*][A, B] = [A_*A, A_* B; B_* A, B_* B] ≤ diag(2A_*A, 2B_*B) // ≤ diag(2l_A𝒟_A, 2l_B𝒟_B), where now 𝒟_B=Id and l_B=1. @@ -79,12 +79,11 @@ } */ -impl<F, A, Z> BoundedCurvature -for RowOp<A, IdOp<Z>> +impl<F, A, Z> BoundedCurvature for RowOp<A, IdOp<Z>> where - F : Float, - Z : Clone + Space + ClosedAdd, - A : BoundedCurvature<FloatType = F>, + F: Float, + Z: Clone + Space + ClosedAdd, + A: BoundedCurvature<FloatType = F>, { type FloatType = F; @@ -94,17 +93,16 @@ } #[replace_float_literals(F::cast_from(literal))] -impl<'a, F, D, XD, Y, const N : usize> AdjointProductBoundedBy<RNDM<F, N>, D> -for ZeroOp<'a, RNDM<F, N>, XD, Y, F> +impl<'a, F, D, XD, Y, const N: usize> AdjointProductBoundedBy<RNDM<F, N>, D> + for ZeroOp<'a, RNDM<F, N>, XD, Y, F> where - F : Float, - Y : AXPY<F> + Clone, - D : Linear<RNDM<F, N>>, + F: Float, + Y: AXPY<F> + Clone, + D: Linear<RNDM<F, N>>, { type FloatType = F; /// Return $L$ such that $A_*A ≤ L𝒟$ is bounded by some `other` operator $𝒟$. 
- fn adjoint_product_bound(&self, _ : &D) -> Option<F> { + fn adjoint_product_bound(&self, _: &D) -> Option<F> { Some(0.0) } } -
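The factor two in `adjoint_product_pair_bound` above comes from the elementary estimate (spelled out here for reference)
$$ ‖Ax + Bz‖^2 ≤ 2‖Ax‖^2 + 2‖Bz‖^2, \qquad\text{i.e.}\qquad \begin{pmatrix} A_* \\ B_* \end{pmatrix} \begin{pmatrix} A & B \end{pmatrix} ≤ \begin{pmatrix} 2A_*A & 0 \\ 0 & 2B_*B \end{pmatrix}, $$
which, together with $A_*A ≤ l_A 𝒟_A$, gives the `diag(2l_A𝒟_A, 2l_B𝒟_B)` of the comment, with $𝒟_B = \mathrm{Id}$ and $l_B = 1$ since the second block is the identity.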
--- a/src/forward_model/sensor_grid.rs Fri Feb 14 23:16:14 2025 -0500 +++ b/src/forward_model/sensor_grid.rs Fri Feb 14 23:46:43 2025 -0500 @@ -2,124 +2,120 @@ Sensor grid forward model */ +use nalgebra::base::{DMatrix, DVector}; use numeric_literals::replace_float_literals; -use nalgebra::base::{ - DMatrix, - DVector -}; use std::iter::Zip; use std::ops::RangeFrom; +use alg_tools::bisection_tree::*; +use alg_tools::error::DynError; +use alg_tools::instance::Instance; +use alg_tools::iter::{MapX, Mappable}; +use alg_tools::lingrid::*; pub use alg_tools::linops::*; -use alg_tools::norms::{ - L1, Linfinity, L2, Norm -}; -use alg_tools::bisection_tree::*; -use alg_tools::mapping::{ - RealMapping, - DifferentiableMapping -}; -use alg_tools::lingrid::*; -use alg_tools::iter::{MapX, Mappable}; +use alg_tools::mapping::{DifferentiableMapping, RealMapping}; +use alg_tools::maputil::map2; use alg_tools::nalgebra_support::ToNalgebraRealField; +use alg_tools::norms::{Linfinity, Norm, L1, L2}; use alg_tools::tabledump::write_csv; -use alg_tools::error::DynError; -use alg_tools::maputil::map2; -use alg_tools::instance::Instance; -use crate::types::*; +use super::{AdjointProductBoundedBy, BoundedCurvature, ForwardModel}; +use crate::frank_wolfe::FindimQuadraticModel; +use crate::kernels::{AutoConvolution, BoundedBy, Convolution}; use crate::measures::{DiscreteMeasure, Radon}; -use crate::seminorms::{ - ConvolutionOp, - SimpleConvolutionKernel, -}; -use crate::kernels::{ - Convolution, - AutoConvolution, - BoundedBy, -}; use crate::preadjoint_helper::PreadjointHelper; -use super::{ - ForwardModel, - BoundedCurvature, - AdjointProductBoundedBy -}; -use crate::frank_wolfe::FindimQuadraticModel; +use crate::seminorms::{ConvolutionOp, SimpleConvolutionKernel}; +use crate::types::*; -type RNDM<F, const N : usize> = DiscreteMeasure<Loc<F,N>, F>; +type RNDM<F, const N: usize> = DiscreteMeasure<Loc<F, N>, F>; -pub type ShiftedSensor<F, S, P, const N : usize> = Shift<Convolution<S, P>, F, N>; +pub type ShiftedSensor<F, S, P, const N: usize> = Shift<Convolution<S, P>, F, N>; /// Trait for physical convolution models. Has blanket implementation for all cases. -pub trait Spread<F : Float, const N : usize> -: 'static + Clone + Support<F, N> + RealMapping<F, N> + Bounded<F> {} +pub trait Spread<F: Float, const N: usize>: + 'static + Clone + Support<F, N> + RealMapping<F, N> + Bounded<F> +{ +} -impl<F, T, const N : usize> Spread<F, N> for T -where F : Float, - T : 'static + Clone + Support<F, N> + Bounded<F> + RealMapping<F, N> {} +impl<F, T, const N: usize> Spread<F, N> for T +where + F: Float, + T: 'static + Clone + Support<F, N> + Bounded<F> + RealMapping<F, N>, +{ +} /// Trait for compactly supported sensors. Has blanket implementation for all cases. 
-pub trait Sensor<F : Float, const N : usize> : Spread<F, N> + Norm<F, L1> + Norm<F, Linfinity> {} +pub trait Sensor<F: Float, const N: usize>: + Spread<F, N> + Norm<F, L1> + Norm<F, Linfinity> +{ +} -impl<F, T, const N : usize> Sensor<F, N> for T -where F : Float, - T : Spread<F, N> + Norm<F, L1> + Norm<F, Linfinity> {} - +impl<F, T, const N: usize> Sensor<F, N> for T +where + F: Float, + T: Spread<F, N> + Norm<F, L1> + Norm<F, Linfinity>, +{ +} -pub trait SensorGridBT<F, S, P, const N : usize> : -Clone + BTImpl<F, N, Data=usize, Agg=Bounds<F>> -where F : Float, - S : Sensor<F, N>, - P : Spread<F, N> {} +pub trait SensorGridBT<F, S, P, const N: usize>: + Clone + BTImpl<F, N, Data = usize, Agg = Bounds<F>> +where + F: Float, + S: Sensor<F, N>, + P: Spread<F, N>, +{ +} -impl<F, S, P, T, const N : usize> -SensorGridBT<F, S, P, N> -for T -where T : Clone + BTImpl<F, N, Data=usize, Agg=Bounds<F>>, - F : Float, - S : Sensor<F, N>, - P : Spread<F, N> {} +impl<F, S, P, T, const N: usize> SensorGridBT<F, S, P, N> for T +where + T: Clone + BTImpl<F, N, Data = usize, Agg = Bounds<F>>, + F: Float, + S: Sensor<F, N>, + P: Spread<F, N>, +{ +} // We need type alias bounds to access associated types #[allow(type_alias_bounds)] -pub type SensorGridBTFN<F, S, P, BT : SensorGridBT<F, S, P, N>, const N : usize> -= BTFN<F, SensorGridSupportGenerator<F, S, P, N>, BT, N>; +pub type SensorGridBTFN<F, S, P, BT: SensorGridBT<F, S, P, N>, const N: usize> = + BTFN<F, SensorGridSupportGenerator<F, S, P, N>, BT, N>; /// Sensor grid forward model #[derive(Clone)] -pub struct SensorGrid<F, S, P, BT, const N : usize> -where F : Float, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N>, - BT : SensorGridBT<F, S, P, N>, +pub struct SensorGrid<F, S, P, BT, const N: usize> +where + F: Float, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N>, + BT: SensorGridBT<F, S, P, N>, { - domain : Cube<F, N>, - sensor_count : [usize; N], - sensor : S, - spread : P, - base_sensor : Convolution<S, P>, - bt : BT, + domain: Cube<F, N>, + sensor_count: [usize; N], + sensor: S, + spread: P, + base_sensor: Convolution<S, P>, + bt: BT, } -impl<F, S, P, BT, const N : usize> SensorGrid<F, S, P, BT, N> -where F : Float, - BT : SensorGridBT<F, S, P, N>, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N> + LocalAnalysis<F, BT::Agg, N>, +impl<F, S, P, BT, const N: usize> SensorGrid<F, S, P, BT, N> +where + F: Float, + BT: SensorGridBT<F, S, P, N>, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N> + LocalAnalysis<F, BT::Agg, N>, { - /// Create a new sensor grid. /// /// The parameter `depth` indicates the search depth of the created [`BT`]s /// for the adjoint values. pub fn new( - domain : Cube<F, N>, - sensor_count : [usize; N], - sensor : S, - spread : P, - depth : BT::Depth + domain: Cube<F, N>, + sensor_count: [usize; N], + sensor: S, + spread: P, + depth: BT::Depth, ) -> Self { let base_sensor = Convolution(sensor.clone(), spread.clone()); let bt = BT::new(domain, depth); @@ -141,15 +137,14 @@ } } - -impl<F, S, P, BT, const N : usize> SensorGrid<F, S, P, BT, N> -where F : Float, - BT : SensorGridBT<F, S, P, N>, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N> +impl<F, S, P, BT, const N: usize> SensorGrid<F, S, P, BT, N> +where + F: Float, + BT: SensorGridBT<F, S, P, N>, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N>, { - /// Return the grid of sensor locations. 
pub fn grid(&self) -> LinGrid<F, N> { lingrid_centered(&self.domain, &self.sensor_count) @@ -162,7 +157,7 @@ /// Constructs a sensor shifted by `x`. #[inline] - fn shifted_sensor(&self, x : Loc<F, N>) -> ShiftedSensor<F, S, P, N> { + fn shifted_sensor(&self, x: Loc<F, N>) -> ShiftedSensor<F, S, P, N> { self.base_sensor.clone().shift(x) } @@ -174,57 +169,55 @@ /// Returns the maximum number of overlapping sensors $N_\psi$. pub fn max_overlapping(&self) -> F { let w = self.base_sensor.support_hint().width(); - let d = map2(self.domain.width(), &self.sensor_count, |wi, &i| wi/F::cast_from(i)); + let d = map2(self.domain.width(), &self.sensor_count, |wi, &i| { + wi / F::cast_from(i) + }); w.iter() - .zip(d.iter()) - .map(|(&wi, &di)| (wi/di).ceil()) - .reduce(F::mul) - .unwrap() + .zip(d.iter()) + .map(|(&wi, &di)| (wi / di).ceil()) + .reduce(F::mul) + .unwrap() } } -impl<F, S, P, BT, const N : usize> Mapping<RNDM<F, N>> for SensorGrid<F, S, P, BT, N> +impl<F, S, P, BT, const N: usize> Mapping<RNDM<F, N>> for SensorGrid<F, S, P, BT, N> where - F : Float, - BT : SensorGridBT<F, S, P, N>, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N>, + F: Float, + BT: SensorGridBT<F, S, P, N>, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N>, { - - type Codomain = DVector<F>; + type Codomain = DVector<F>; #[inline] - fn apply<I : Instance<RNDM<F, N>>>(&self, μ : I) -> DVector<F> { + fn apply<I: Instance<RNDM<F, N>>>(&self, μ: I) -> DVector<F> { let mut y = self._zero_observable(); self.apply_add(&mut y, μ); y } } - -impl<F, S, P, BT, const N : usize> Linear<RNDM<F, N>> for SensorGrid<F, S, P, BT, N> +impl<F, S, P, BT, const N: usize> Linear<RNDM<F, N>> for SensorGrid<F, S, P, BT, N> where - F : Float, - BT : SensorGridBT<F, S, P, N>, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N>, -{ } - + F: Float, + BT: SensorGridBT<F, S, P, N>, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N>, +{ +} #[replace_float_literals(F::cast_from(literal))] -impl<F, S, P, BT, const N : usize> GEMV<F, RNDM<F, N>, DVector<F>> for SensorGrid<F, S, P, BT, N> -where F : Float, - BT : SensorGridBT<F, S, P, N>, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N>, +impl<F, S, P, BT, const N: usize> GEMV<F, RNDM<F, N>, DVector<F>> for SensorGrid<F, S, P, BT, N> +where + F: Float, + BT: SensorGridBT<F, S, P, N>, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N>, { - - fn gemv<I : Instance<RNDM<F, N>>>( - &self, y : &mut DVector<F>, α : F, μ : I, β : F - ) { + fn gemv<I: Instance<RNDM<F, N>>>(&self, y: &mut DVector<F>, α: F, μ: I, β: F) { let grid = self.grid(); if β == 0.0 { y.fill(0.0) @@ -243,9 +236,7 @@ } } - fn apply_add<I : Instance<RNDM<F, N>>>( - &self, y : &mut DVector<F>, μ : I - ) { + fn apply_add<I: Instance<RNDM<F, N>>>(&self, y: &mut DVector<F>, μ: I) { let grid = self.grid(); for δ in μ.ref_instance() { for &d in self.bt.iter_at(&δ.x) { @@ -254,23 +245,20 @@ } } } - } - -impl<F, S, P, BT, const N : usize> -BoundedLinear<RNDM<F, N>, Radon, L2, F> -for SensorGrid<F, S, P, BT, N> -where F : Float, - BT : SensorGridBT<F, S, P, N, Agg=Bounds<F>>, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N> + LocalAnalysis<F, BT::Agg, N> +impl<F, S, P, BT, const N: usize> BoundedLinear<RNDM<F, N>, Radon, L2, F> + for SensorGrid<F, S, P, BT, N> +where + F: Float, + BT: SensorGridBT<F, S, P, N, Agg = Bounds<F>>, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, 
N> + LocalAnalysis<F, BT::Agg, N>, { - /// An estimate on the operator norm in $𝕃(ℳ(Ω); ℝ^n)$ with $ℳ(Ω)$ equipped /// with the Radon norm, and $ℝ^n$ with the Euclidean norm. - fn opnorm_bound(&self, _ : Radon, _ : L2) -> F { + fn opnorm_bound(&self, _: Radon, _: L2) -> F { // With {x_i}_{i=1}^n the grid centres and φ the kernel, we have // |Aμ|_2 = sup_{|z|_2 ≤ 1} ⟨z,Αμ⟩ = sup_{|z|_2 ≤ 1} ⟨A^*z|μ⟩ // ≤ sup_{|z|_2 ≤ 1} |A^*z|_∞ |μ|_ℳ @@ -287,20 +275,22 @@ } } -type SensorGridPreadjoint<'a, A, F, const N : usize> = PreadjointHelper<'a, A, RNDM<F,N>>; - +type SensorGridPreadjoint<'a, A, F, const N: usize> = PreadjointHelper<'a, A, RNDM<F, N>>; -impl<F, S, P, BT, const N : usize> -Preadjointable<RNDM<F, N>, DVector<F>> -for SensorGrid<F, S, P, BT, N> -where F : Float, - BT : SensorGridBT<F, S, P, N>, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N> + LocalAnalysis<F, BT::Agg, N> +impl<F, S, P, BT, const N: usize> Preadjointable<RNDM<F, N>, DVector<F>> + for SensorGrid<F, S, P, BT, N> +where + F: Float, + BT: SensorGridBT<F, S, P, N>, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N> + LocalAnalysis<F, BT::Agg, N>, { type PreadjointCodomain = BTFN<F, SensorGridSupportGenerator<F, S, P, N>, BT, N>; - type Preadjoint<'a> = SensorGridPreadjoint<'a, Self, F, N> where Self : 'a; + type Preadjoint<'a> + = SensorGridPreadjoint<'a, Self, F, N> + where + Self: 'a; fn preadjoint(&self) -> Self::Preadjoint<'_> { PreadjointHelper::new(self) @@ -318,7 +308,7 @@ Convolution<S, P> : Spread<F, N> + Lipschitz<L2, FloatType=F> + DifferentiableMapping<Loc<F,N>> + LocalAnalysis<F, BT::Agg, N>, for<'b> <Convolution<S, P> as DifferentiableMapping<Loc<F,N>>>::Differential<'b> : Lipschitz<L2, FloatType=F>, { - + type FloatType = F; fn value_unit_lipschitz_factor(&self) -> Option<F> { @@ -342,96 +332,92 @@ */ #[replace_float_literals(F::cast_from(literal))] -impl<'a, F, S, P, BT, const N : usize> BoundedCurvature -for SensorGrid<F, S, P, BT, N> -where F : Float, - BT : SensorGridBT<F, S, P, N>, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N> + Lipschitz<L2, FloatType=F> + DifferentiableMapping<Loc<F,N>> + LocalAnalysis<F, BT::Agg, N>, - for<'b> <Convolution<S, P> as DifferentiableMapping<Loc<F,N>>>::Differential<'b> : Lipschitz<L2, FloatType=F>, +impl<'a, F, S, P, BT, const N: usize> BoundedCurvature for SensorGrid<F, S, P, BT, N> +where + F: Float, + BT: SensorGridBT<F, S, P, N>, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N> + + Lipschitz<L2, FloatType = F> + + DifferentiableMapping<Loc<F, N>> + + LocalAnalysis<F, BT::Agg, N>, + for<'b> <Convolution<S, P> as DifferentiableMapping<Loc<F, N>>>::Differential<'b>: + Lipschitz<L2, FloatType = F>, { - type FloatType = F; - /// Returns a bound $ℓ_F$ on the curvature - /// $$ - /// 𝒦_F(μ, γ) = ∫ B_{F'(μ)} dγ + B_F(μ, μ+Δ). - /// $$ - /// such that $𝒦_F(μ, γ) ≤ ℓ_F ∫ c_2 d|γ|$. + /// Returns factors $ℓ_F$ and $Θ²$ such that + /// $B_{F'(μ)} dγ ≤ ℓ_F c_2$ and $⟨F'(μ)+F'(μ+Δ)|Δ⟩ ≤ Θ²|γ|(c_2)‖γ‖$, + /// where $Δ=(π_♯^1-π_♯^0)γ$. /// - /// For $F(μ)=(1/2)‖Aμ-b‖^2$, we have $B_F(μ, μ+Δ)=(1/2)‖AΔ‖^2$, where $Δ = (π_♯^1-π_♯^0)γ$. - /// So we use Lemma 3.8 for that, bounding - /// $(1/2)‖AΔ‖^2 ≤ Θ ∫ c_2 dγ$ for $Θ=2N_ψML_ψ^2$, where - /// * $L_ψ$ is the 2-norm Lipschitz factor of $ψ$ (sensor * base_spread), and - /// * $N_ψ$ the maximum overlap, - /// * M is a bound on $|γ|(Ω^2)$. - /// - /// We also have $B_{F'(μ)}(x, y) = v(y) - v(x) ⟨∇v(x), x-y⟩$ for $v(x)=A^*(Aμ-b)$. 
- /// This we want the Lipschitz factor of $∇v$. - /// By Example 4.15, it makes sense to estimate this by $√(2N_ψ)L_{∇ψ}‖b‖$, where - /// $L_{∇ψ}$ is the Lipshitz factor of $∇ψ$. + /// See Lemma 3.8, Lemma 5.10, Remark 5.14, and Example 5.15. fn curvature_bound_components(&self) -> (Option<Self::FloatType>, Option<Self::FloatType>) { let n_ψ = self.max_overlapping(); let ψ_diff_lip = self.base_sensor.diff_ref().lipschitz_factor(L2); let ψ_lip = self.base_sensor.lipschitz_factor(L2); - let a = ψ_diff_lip.map(|l| (2.0 * n_ψ).sqrt() * l); - let b = ψ_lip.map(|l| 2.0 * n_ψ * l.powi(2)); + let ℓ_F = ψ_diff_lip.map(|l| (2.0 * n_ψ).sqrt() * l); + let θ2 = ψ_lip.map(|l| 4.0 * n_ψ * l.powi(2)); - (a, b) + (ℓ_F, θ2) } } - - -#[derive(Clone,Debug)] -pub struct SensorGridSupportGenerator<F, S, P, const N : usize> -where F : Float, - S : Sensor<F, N>, - P : Spread<F, N> +#[derive(Clone, Debug)] +pub struct SensorGridSupportGenerator<F, S, P, const N: usize> +where + F: Float, + S: Sensor<F, N>, + P: Spread<F, N>, { - base_sensor : Convolution<S, P>, - grid : LinGrid<F, N>, - weights : DVector<F> + base_sensor: Convolution<S, P>, + grid: LinGrid<F, N>, + weights: DVector<F>, } -impl<F, S, P, const N : usize> SensorGridSupportGenerator<F, S, P, N> -where F : Float, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N> +impl<F, S, P, const N: usize> SensorGridSupportGenerator<F, S, P, N> +where + F: Float, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N>, { - #[inline] - fn construct_sensor(&self, id : usize, w : F) -> Weighted<ShiftedSensor<F, S, P, N>, F> { + fn construct_sensor(&self, id: usize, w: F) -> Weighted<ShiftedSensor<F, S, P, N>, F> { let x = self.grid.entry_linear_unchecked(id); self.base_sensor.clone().shift(x).weigh(w) } #[inline] - fn construct_sensor_and_id<'a>(&'a self, (id, w) : (usize, &'a F)) - -> (usize, Weighted<ShiftedSensor<F, S, P, N>, F>) { + fn construct_sensor_and_id<'a>( + &'a self, + (id, w): (usize, &'a F), + ) -> (usize, Weighted<ShiftedSensor<F, S, P, N>, F>) { (id.into(), self.construct_sensor(id, *w)) } } -impl<F, S, P, const N : usize> SupportGenerator<F, N> -for SensorGridSupportGenerator<F, S, P, N> -where F : Float, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N> +impl<F, S, P, const N: usize> SupportGenerator<F, N> for SensorGridSupportGenerator<F, S, P, N> +where + F: Float, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N>, { type Id = usize; type SupportType = Weighted<ShiftedSensor<F, S, P, N>, F>; - type AllDataIter<'a> = MapX<'a, Zip<RangeFrom<usize>, - std::slice::Iter<'a, F>>, - Self, - (Self::Id, Self::SupportType)> - where Self : 'a; + type AllDataIter<'a> + = MapX< + 'a, + Zip<RangeFrom<usize>, std::slice::Iter<'a, F>>, + Self, + (Self::Id, Self::SupportType), + > + where + Self: 'a; #[inline] - fn support_for(&self, d : Self::Id) -> Self::SupportType { + fn support_for(&self, d: Self::Id) -> Self::SupportType { self.construct_sensor(d, self.weights[d]) } @@ -442,21 +428,24 @@ #[inline] fn all_data(&self) -> Self::AllDataIter<'_> { - (0..).zip(self.weights.as_slice().iter()).mapX(self, Self::construct_sensor_and_id) + (0..) 
+ .zip(self.weights.as_slice().iter()) + .mapX(self, Self::construct_sensor_and_id) } } -impl<F, S, P, BT, const N : usize> ForwardModel<DiscreteMeasure<Loc<F, N>, F>, F> -for SensorGrid<F, S, P, BT, N> -where F : Float + ToNalgebraRealField<MixedType=F> + nalgebra::RealField, - BT : SensorGridBT<F, S, P, N>, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N> + LocalAnalysis<F, BT::Agg, N>, +impl<F, S, P, BT, const N: usize> ForwardModel<DiscreteMeasure<Loc<F, N>, F>, F> + for SensorGrid<F, S, P, BT, N> +where + F: Float + ToNalgebraRealField<MixedType = F> + nalgebra::RealField, + BT: SensorGridBT<F, S, P, N>, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N> + LocalAnalysis<F, BT::Agg, N>, { type Observable = DVector<F>; - fn write_observable(&self, b : &Self::Observable, prefix : String) -> DynError { + fn write_observable(&self, b: &Self::Observable, prefix: String) -> DynError { let it = self.grid().into_iter().zip(b.iter()).map(|(x, &v)| (x, v)); write_csv(it, prefix + ".txt") } @@ -467,19 +456,18 @@ } } -impl<F, S, P, BT, const N : usize> FindimQuadraticModel<Loc<F, N>, F> -for SensorGrid<F, S, P, BT, N> -where F : Float + ToNalgebraRealField<MixedType=F> + nalgebra::RealField, - BT : SensorGridBT<F, S, P, N>, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N> + LocalAnalysis<F, BT::Agg, N> +impl<F, S, P, BT, const N: usize> FindimQuadraticModel<Loc<F, N>, F> for SensorGrid<F, S, P, BT, N> +where + F: Float + ToNalgebraRealField<MixedType = F> + nalgebra::RealField, + BT: SensorGridBT<F, S, P, N>, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N> + LocalAnalysis<F, BT::Agg, N>, { - fn findim_quadratic_model( &self, - μ : &DiscreteMeasure<Loc<F, N>, F>, - b : &Self::Observable + μ: &DiscreteMeasure<Loc<F, N>, F>, + b: &Self::Observable, ) -> (DMatrix<F::MixedType>, DVector<F::MixedType>) { assert_eq!(b.len(), self.n_sensors()); let mut mA = DMatrix::zeros(self.n_sensors(), μ.len()); @@ -500,25 +488,24 @@ /// /// **This assumes (but does not check) that the sensors are not overlapping.** #[replace_float_literals(F::cast_from(literal))] -impl<F, BT, S, P, K, const N : usize> -AdjointProductBoundedBy<RNDM<F, N>, ConvolutionOp<F, K, BT, N>> -for SensorGrid<F, S, P, BT, N> -where F : Float + nalgebra::RealField + ToNalgebraRealField, - BT : SensorGridBT<F, S, P, N>, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N>, - K : SimpleConvolutionKernel<F, N>, - AutoConvolution<P> : BoundedBy<F, K> +impl<F, BT, S, P, K, const N: usize> AdjointProductBoundedBy<RNDM<F, N>, ConvolutionOp<F, K, BT, N>> + for SensorGrid<F, S, P, BT, N> +where + F: Float + nalgebra::RealField + ToNalgebraRealField, + BT: SensorGridBT<F, S, P, N>, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N>, + K: SimpleConvolutionKernel<F, N>, + AutoConvolution<P>: BoundedBy<F, K>, { - type FloatType = F; - fn adjoint_product_bound(&self, seminorm : &ConvolutionOp<F, K, BT, N>) -> Option<F> { + fn adjoint_product_bound(&self, seminorm: &ConvolutionOp<F, K, BT, N>) -> Option<F> { // Sensors should not take on negative values to allow // A_*A to be upper bounded by a simple convolution of `spread`. if self.sensor.bounds().lower() < 0.0 { - return None + return None; } // Calculate the factor $L_1$ for betwee $ℱ[ψ * ψ] ≤ L_1 ℱ[ρ]$ for $ψ$ the base spread @@ -536,49 +523,51 @@ macro_rules! 
make_sensorgridsupportgenerator_scalarop_rhs { ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => { - impl<F, S, P, const N : usize> - std::ops::$trait_assign<F> - for SensorGridSupportGenerator<F, S, P, N> - where F : Float, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N> { - fn $fn_assign(&mut self, t : F) { + impl<F, S, P, const N: usize> std::ops::$trait_assign<F> + for SensorGridSupportGenerator<F, S, P, N> + where + F: Float, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N>, + { + fn $fn_assign(&mut self, t: F) { self.weights.$fn_assign(t); } } - impl<F, S, P, const N : usize> - std::ops::$trait<F> - for SensorGridSupportGenerator<F, S, P, N> - where F : Float, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N> { + impl<F, S, P, const N: usize> std::ops::$trait<F> for SensorGridSupportGenerator<F, S, P, N> + where + F: Float, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N>, + { type Output = SensorGridSupportGenerator<F, S, P, N>; - fn $fn(mut self, t : F) -> Self::Output { + fn $fn(mut self, t: F) -> Self::Output { std::ops::$trait_assign::$fn_assign(&mut self.weights, t); self } } - impl<'a, F, S, P, const N : usize> - std::ops::$trait<F> - for &'a SensorGridSupportGenerator<F, S, P, N> - where F : Float, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N> { + impl<'a, F, S, P, const N: usize> std::ops::$trait<F> + for &'a SensorGridSupportGenerator<F, S, P, N> + where + F: Float, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N>, + { type Output = SensorGridSupportGenerator<F, S, P, N>; - fn $fn(self, t : F) -> Self::Output { - SensorGridSupportGenerator{ - base_sensor : self.base_sensor.clone(), - grid : self.grid, - weights : (&self.weights).$fn(t) + fn $fn(self, t: F) -> Self::Output { + SensorGridSupportGenerator { + base_sensor: self.base_sensor.clone(), + grid: self.grid, + weights: (&self.weights).$fn(t), } } } - } + }; } make_sensorgridsupportgenerator_scalarop_rhs!(Mul, mul, MulAssign, mul_assign); @@ -586,13 +575,13 @@ macro_rules! 
make_sensorgridsupportgenerator_unaryop { ($trait:ident, $fn:ident) => { - impl<F, S, P, const N : usize> - std::ops::$trait - for SensorGridSupportGenerator<F, S, P, N> - where F : Float, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N> { + impl<F, S, P, const N: usize> std::ops::$trait for SensorGridSupportGenerator<F, S, P, N> + where + F: Float, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N>, + { type Output = SensorGridSupportGenerator<F, S, P, N>; fn $fn(mut self) -> Self::Output { self.weights = self.weights.$fn(); @@ -600,55 +589,57 @@ } } - impl<'a, F, S, P, const N : usize> - std::ops::$trait - for &'a SensorGridSupportGenerator<F, S, P, N> - where F : Float, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N> { + impl<'a, F, S, P, const N: usize> std::ops::$trait + for &'a SensorGridSupportGenerator<F, S, P, N> + where + F: Float, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N>, + { type Output = SensorGridSupportGenerator<F, S, P, N>; fn $fn(self) -> Self::Output { - SensorGridSupportGenerator{ - base_sensor : self.base_sensor.clone(), - grid : self.grid, - weights : (&self.weights).$fn() + SensorGridSupportGenerator { + base_sensor: self.base_sensor.clone(), + grid: self.grid, + weights: (&self.weights).$fn(), } } } - } + }; } make_sensorgridsupportgenerator_unaryop!(Neg, neg); -impl<'a, F, S, P, BT, const N : usize> Mapping<DVector<F>> -for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F,N>> -where F : Float, - BT : SensorGridBT<F, S, P, N>, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N> + LocalAnalysis<F, Bounds<F>, N>, +impl<'a, F, S, P, BT, const N: usize> Mapping<DVector<F>> + for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F, N>> +where + F: Float, + BT: SensorGridBT<F, S, P, N>, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N> + LocalAnalysis<F, Bounds<F>, N>, { - type Codomain = SensorGridBTFN<F, S, P, BT, N>; - fn apply<I : Instance<DVector<F>>>(&self, x : I) -> Self::Codomain { + fn apply<I: Instance<DVector<F>>>(&self, x: I) -> Self::Codomain { let fwd = &self.forward_op; - let generator = SensorGridSupportGenerator{ - base_sensor : fwd.base_sensor.clone(), - grid : fwd.grid(), - weights : x.own() + let generator = SensorGridSupportGenerator { + base_sensor: fwd.base_sensor.clone(), + grid: fwd.grid(), + weights: x.own(), }; BTFN::new_refresh(&fwd.bt, generator) } } -impl<'a, F, S, P, BT, const N : usize> Linear<DVector<F>> -for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F,N>> -where F : Float, - BT : SensorGridBT<F, S, P, N>, - S : Sensor<F, N>, - P : Spread<F, N>, - Convolution<S, P> : Spread<F, N> + LocalAnalysis<F, Bounds<F>, N>, -{ } - +impl<'a, F, S, P, BT, const N: usize> Linear<DVector<F>> + for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F, N>> +where + F: Float, + BT: SensorGridBT<F, S, P, N>, + S: Sensor<F, N>, + P: Spread<F, N>, + Convolution<S, P>: Spread<F, N> + LocalAnalysis<F, Bounds<F>, N>, +{ +}
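For a concrete feel for N_ψ (`max_overlapping` above), here is a standalone sketch with made-up 2D numbers: a 1×1 domain, a 16×16 sensor grid (spacing 1/16), and a base sensor ψ = sensor ∗ spread whose support is 0.25 wide in each dimension. Plain f64 and fixed-size arrays stand in for the generic types:

    // N_ψ = ∏_i ceil(w_i / d_i): per dimension, how many grid cells the sensor
    // support spans, multiplied over dimensions.
    fn max_overlapping(domain_width: [f64; 2], sensor_count: [usize; 2], support_width: [f64; 2]) -> f64 {
        domain_width
            .iter()
            .zip(sensor_count.iter())
            .map(|(&wi, &ni)| wi / ni as f64)   // grid spacing d_i
            .zip(support_width.iter())
            .map(|(di, &wi)| (wi / di).ceil())  // sensors overlapped in dimension i
            .product()
    }

    fn main() {
        let n_psi = max_overlapping([1.0, 1.0], [16, 16], [0.25, 0.25]);
        println!("N_ψ = {n_psi}"); // ceil(0.25/0.0625) = 4 per dimension, so 4·4 = 16
    }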
--- a/src/sliding_fb.rs Fri Feb 14 23:16:14 2025 -0500 +++ b/src/sliding_fb.rs Fri Feb 14 23:46:43 2025 -0500 @@ -4,56 +4,43 @@ */ use numeric_literals::replace_float_literals; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; //use colored::Colorize; //use nalgebra::{DVector, DMatrix}; use itertools::izip; use std::iter::Iterator; +use alg_tools::euclidean::Euclidean; use alg_tools::iterate::AlgIteratorFactory; -use alg_tools::euclidean::Euclidean; -use alg_tools::mapping::{Mapping, DifferentiableRealMapping, Instance}; +use alg_tools::mapping::{DifferentiableRealMapping, Instance, Mapping}; +use alg_tools::nalgebra_support::ToNalgebraRealField; use alg_tools::norms::Norm; -use alg_tools::nalgebra_support::ToNalgebraRealField; -use crate::types::*; -use crate::measures::{DiscreteMeasure, Radon, RNDM}; +use crate::forward_model::{AdjointProductBoundedBy, BoundedCurvature, ForwardModel}; use crate::measures::merging::SpikeMerging; -use crate::forward_model::{ - ForwardModel, - AdjointProductBoundedBy, - BoundedCurvature, -}; +use crate::measures::{DiscreteMeasure, Radon, RNDM}; +use crate::types::*; //use crate::tolerance::Tolerance; -use crate::plot::{ - SeqPlotter, - Plotting, - PlotLookup -}; +use crate::dataterm::{calculate_residual, calculate_residual2, DataTerm, L2Squared}; use crate::fb::*; +use crate::plot::{PlotLookup, Plotting, SeqPlotter}; use crate::regularisation::SlidingRegTerm; -use crate::dataterm::{ - L2Squared, - DataTerm, - calculate_residual, - calculate_residual2, -}; //use crate::transport::TransportLipschitz; /// Transport settings for [`pointsource_sliding_fb_reg`]. #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)] #[serde(default)] -pub struct TransportConfig<F : Float> { +pub struct TransportConfig<F: Float> { /// Transport step length $θ$ normalised to $(0, 1)$. - pub θ0 : F, + pub θ0: F, /// Factor in $(0, 1)$ for decreasing transport to adapt to tolerance. - pub adaptation : F, + pub adaptation: F, /// A posteriori transport tolerance multiplier (C_pos) - pub tolerance_mult_con : F, + pub tolerance_mult_con: F, } #[replace_float_literals(F::cast_from(literal))] -impl <F : Float> TransportConfig<F> { +impl<F: Float> TransportConfig<F> { /// Check that the parameters are ok. Panics if not. pub fn check(&self) { assert!(self.θ0 > 0.0); @@ -63,12 +50,12 @@ } #[replace_float_literals(F::cast_from(literal))] -impl<F : Float> Default for TransportConfig<F> { +impl<F: Float> Default for TransportConfig<F> { fn default() -> Self { TransportConfig { - θ0 : 0.9, - adaptation : 0.9, - tolerance_mult_con : 100.0, + θ0: 0.9, + adaptation: 0.9, + tolerance_mult_con: 100.0, } } } @@ -76,55 +63,54 @@ /// Settings for [`pointsource_sliding_fb_reg`]. 
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)] #[serde(default)] -pub struct SlidingFBConfig<F : Float> { +pub struct SlidingFBConfig<F: Float> { /// Step length scaling - pub τ0 : F, + pub τ0: F, /// Transport parameters - pub transport : TransportConfig<F>, + pub transport: TransportConfig<F>, /// Generic parameters - pub insertion : FBGenericConfig<F>, + pub insertion: FBGenericConfig<F>, } #[replace_float_literals(F::cast_from(literal))] -impl<F : Float> Default for SlidingFBConfig<F> { +impl<F: Float> Default for SlidingFBConfig<F> { fn default() -> Self { SlidingFBConfig { - τ0 : 0.99, - transport : Default::default(), - insertion : Default::default() + τ0: 0.99, + transport: Default::default(), + insertion: Default::default(), } } } /// Internal type of adaptive transport step length calculation -pub(crate) enum TransportStepLength<F : Float, G : Fn(F, F) -> F> { +pub(crate) enum TransportStepLength<F: Float, G: Fn(F, F) -> F> { /// Fixed, known step length #[allow(dead_code)] Fixed(F), /// Adaptive step length, only wrt. maximum transport. /// Content of `l` depends on use case, while `g` calculates the step length from `l`. - AdaptiveMax{ l : F, max_transport : F, g : G }, + AdaptiveMax { l: F, max_transport: F, g: G }, /// Adaptive step length. /// Content of `l` depends on use case, while `g` calculates the step length from `l`. - FullyAdaptive{ l : F, max_transport : F, g : G }, + FullyAdaptive { l: F, max_transport: F, g: G }, } /// Constrution of initial transport `γ1` from initial measure `μ` and `v=F'(μ)` /// with step lengh τ and transport step length `θ_or_adaptive`. #[replace_float_literals(F::cast_from(literal))] -pub(crate) fn initial_transport<F, G, D, const N : usize>( - γ1 : &mut RNDM<F, N>, - μ : &mut RNDM<F, N>, - τ : F, - θ_or_adaptive : &mut TransportStepLength<F, G>, - v : D, +pub(crate) fn initial_transport<F, G, D, const N: usize>( + γ1: &mut RNDM<F, N>, + μ: &mut RNDM<F, N>, + τ: F, + θ_or_adaptive: &mut TransportStepLength<F, G>, + v: D, ) -> (Vec<F>, RNDM<F, N>) where - F : Float + ToNalgebraRealField, - G : Fn(F, F) -> F, - D : DifferentiableRealMapping<F, N>, + F: Float + ToNalgebraRealField, + G: Fn(F, F) -> F, + D: DifferentiableRealMapping<F, N>, { - use TransportStepLength::*; // Save current base point and shift μ to new positions. Idea is that @@ -132,10 +118,10 @@ // μ_base_minus_γ0 = μ^k - π_♯^0γ^{k+1} // γ1 = π_♯^1γ^{k+1} // μ = μ^{k+1} - let μ_base_masses : Vec<F> = μ.iter_masses().collect(); + let μ_base_masses: Vec<F> = μ.iter_masses().collect(); let mut μ_base_minus_γ0 = μ.clone(); // Weights will be set in the loop below. - // Construct μ^{k+1} and π_♯^1γ^{k+1} initial candidates - //let mut sum_norm_dv = 0.0; + // Construct μ^{k+1} and π_♯^1γ^{k+1} initial candidates + //let mut sum_norm_dv = 0.0; let γ_prev_len = γ1.len(); assert!(μ.len() >= γ_prev_len); γ1.extend(μ[γ_prev_len..].iter().cloned()); @@ -149,7 +135,7 @@ } else { δ.α }; - }; + } // Calculate transport rays. 
match *θ_or_adaptive { @@ -158,15 +144,23 @@ for (δ, ρ) in izip!(μ.iter_spikes(), γ1.iter_spikes_mut()) { ρ.x = δ.x - v.differential(&δ.x) * (ρ.α.signum() * θτ); } - }, - AdaptiveMax{ l : ℓ_F, ref mut max_transport, g : ref calculate_θ } => { + } + AdaptiveMax { + l: ℓ_F, + ref mut max_transport, + g: ref calculate_θ, + } => { *max_transport = max_transport.max(γ1.norm(Radon)); let θτ = τ * calculate_θ(ℓ_F, *max_transport); for (δ, ρ) in izip!(μ.iter_spikes(), γ1.iter_spikes_mut()) { ρ.x = δ.x - v.differential(&δ.x) * (ρ.α.signum() * θτ); } - }, - FullyAdaptive{ l : ref mut adaptive_ℓ_F, ref mut max_transport, g : ref calculate_θ } => { + } + FullyAdaptive { + l: ref mut adaptive_ℓ_F, + ref mut max_transport, + g: ref calculate_θ, + } => { *max_transport = max_transport.max(γ1.norm(Radon)); let mut θ = calculate_θ(*adaptive_ℓ_F, *max_transport); // Do two runs through the spikes to update θ, breaking if first run did not cause @@ -187,7 +181,7 @@ } } if !changes { - break + break; } } } @@ -203,24 +197,29 @@ } } // Calculate μ^k-π_♯^0γ^{k+1} and v̆ = A_*(A[μ_transported + μ_transported_base]-b) - μ_base_minus_γ0.set_masses(μ_base_masses.iter().zip(γ1.iter_masses()) - .map(|(&a,b)| a - b)); + μ_base_minus_γ0.set_masses( + μ_base_masses + .iter() + .zip(γ1.iter_masses()) + .map(|(&a, b)| a - b), + ); (μ_base_masses, μ_base_minus_γ0) } /// A posteriori transport adaptation. #[replace_float_literals(F::cast_from(literal))] -pub(crate) fn aposteriori_transport<F, const N : usize>( - γ1 : &mut RNDM<F, N>, - μ : &mut RNDM<F, N>, - μ_base_minus_γ0 : &mut RNDM<F, N>, - μ_base_masses : &Vec<F>, - extra : Option<F>, - ε : F, - tconfig : &TransportConfig<F> +pub(crate) fn aposteriori_transport<F, const N: usize>( + γ1: &mut RNDM<F, N>, + μ: &mut RNDM<F, N>, + μ_base_minus_γ0: &mut RNDM<F, N>, + μ_base_masses: &Vec<F>, + extra: Option<F>, + ε: F, + tconfig: &TransportConfig<F>, ) -> bool -where F : Float + ToNalgebraRealField { - +where + F: Float + ToNalgebraRealField, +{ // 1. If π_♯^1γ^{k+1} = γ1 has non-zero mass at some point y, but μ = μ^{k+1} does not, // then the ansatz ∇w̃_x(y) = w^{k+1}(y) may not be satisfied. So set the mass of γ1 // at that point to zero, and retry. @@ -238,19 +237,22 @@ let nγ = γ1.norm(Radon); let nΔ = μ_base_minus_γ0.norm(Radon) + μ.dist_matching(&γ1) + extra.unwrap_or(0.0); let t = ε * tconfig.tolerance_mult_con; - if nγ*nΔ > t { + if nγ * nΔ > t { // Since t/(nγ*nΔ)<1, and the constant tconfig.adaptation < 1, // this will guarantee that eventually ‖γ‖ decreases sufficiently that we // will not enter here. - *γ1 *= tconfig.adaptation * t / ( nγ * nΔ ); + *γ1 *= tconfig.adaptation * t / (nγ * nΔ); all_ok = false } if !all_ok { // Update weights for μ_base_minus_γ0 = μ^k - π_♯^0γ^{k+1} - μ_base_minus_γ0.set_masses(μ_base_masses.iter().zip(γ1.iter_masses()) - .map(|(&a,b)| a - b)); - + μ_base_minus_γ0.set_masses( + μ_base_masses + .iter() + .zip(γ1.iter_masses()) + .map(|(&a, b)| a - b), + ); } all_ok @@ -262,29 +264,28 @@ /// The parametrisation is as for [`pointsource_fb_reg`]. /// Inertia is currently not supported. 
#[replace_float_literals(F::cast_from(literal))] -pub fn pointsource_sliding_fb_reg<F, I, A, Reg, P, const N : usize>( - opA : &A, - b : &A::Observable, - reg : Reg, - prox_penalty : &P, - config : &SlidingFBConfig<F>, - iterator : I, - mut plotter : SeqPlotter<F, N>, +pub fn pointsource_sliding_fb_reg<F, I, A, Reg, P, const N: usize>( + opA: &A, + b: &A::Observable, + reg: Reg, + prox_penalty: &P, + config: &SlidingFBConfig<F>, + iterator: I, + mut plotter: SeqPlotter<F, N>, ) -> RNDM<F, N> where - F : Float + ToNalgebraRealField, - I : AlgIteratorFactory<IterInfo<F, N>>, - A : ForwardModel<RNDM<F, N>, F> - + AdjointProductBoundedBy<RNDM<F, N>, P, FloatType=F> - + BoundedCurvature<FloatType=F>, - for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable> + Instance<A::Observable>, - A::PreadjointCodomain : DifferentiableRealMapping<F, N>, - RNDM<F, N> : SpikeMerging<F>, - Reg : SlidingRegTerm<F, N>, - P : ProxPenalty<F, A::PreadjointCodomain, Reg, N>, - PlotLookup : Plotting<N>, + F: Float + ToNalgebraRealField, + I: AlgIteratorFactory<IterInfo<F, N>>, + A: ForwardModel<RNDM<F, N>, F> + + AdjointProductBoundedBy<RNDM<F, N>, P, FloatType = F> + + BoundedCurvature<FloatType = F>, + for<'b> &'b A::Observable: std::ops::Neg<Output = A::Observable> + Instance<A::Observable>, + A::PreadjointCodomain: DifferentiableRealMapping<F, N>, + RNDM<F, N>: SpikeMerging<F>, + Reg: SlidingRegTerm<F, N>, + P: ProxPenalty<F, A::PreadjointCodomain, Reg, N>, + PlotLookup: Plotting<N>, { - // Check parameters assert!(config.τ0 > 0.0, "Invalid step length parameter"); config.transport.check(); @@ -301,23 +302,23 @@ //let ℓ = opA.transport.lipschitz_factor(L2Squared) * max_transport; let ℓ = 0.0; let τ = config.τ0 / opA.adjoint_product_bound(prox_penalty).unwrap(); - let (maybe_ℓ_v0, maybe_transport_lip) = opA.curvature_bound_components(); + let (maybe_ℓ_F0, maybe_transport_lip) = opA.curvature_bound_components(); let transport_lip = maybe_transport_lip.unwrap(); - let calculate_θ = |ℓ_v, max_transport| { - let ℓ_F = ℓ_v + transport_lip * max_transport; - config.transport.θ0 / (τ*(ℓ + ℓ_F)) + let calculate_θ = |ℓ_F, max_transport| { + let ℓ_r = transport_lip * max_transport; + config.transport.θ0 / (τ * (ℓ + ℓ_F + ℓ_r)) }; - let mut θ_or_adaptive = match maybe_ℓ_v0 { - //Some(ℓ_v0) => TransportStepLength::Fixed(calculate_θ(ℓ_v0 * b.norm2(), 0.0)), - Some(ℓ_v0) => TransportStepLength::AdaptiveMax { - l: ℓ_v0 * b.norm2(), // TODO: could estimate computing the real reesidual - max_transport : 0.0, - g : calculate_θ + let mut θ_or_adaptive = match maybe_ℓ_F0 { + //Some(ℓ_F0) => TransportStepLength::Fixed(calculate_θ(ℓ_F0 * b.norm2(), 0.0)), + Some(ℓ_F0) => TransportStepLength::AdaptiveMax { + l: ℓ_F0 * b.norm2(), // TODO: could estimate computing the real reesidual + max_transport: 0.0, + g: calculate_θ, }, None => TransportStepLength::FullyAdaptive { - l : 10.0 * F::EPSILON, // Start with something very small to estimate differentials - max_transport : 0.0, - g : calculate_θ + l: 10.0 * F::EPSILON, // Start with something very small to estimate differentials + max_transport: 0.0, + g: calculate_θ, }, }; // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled @@ -326,14 +327,12 @@ let mut ε = tolerance.initial(); // Statistics - let full_stats = |residual : &A::Observable, - μ : &RNDM<F, N>, - ε, stats| IterInfo { - value : residual.norm2_squared_div2() + reg.apply(μ), - n_spikes : μ.len(), + let full_stats = |residual: &A::Observable, μ: &RNDM<F, N>, ε, stats| IterInfo { + 
value: residual.norm2_squared_div2() + reg.apply(μ), + n_spikes: μ.len(), ε, // postprocessing: config.insertion.postprocessing.then(|| μ.clone()), - .. stats + ..stats }; let mut stats = IterInfo::new(); @@ -341,9 +340,8 @@ for state in iterator.iter_init(|| full_stats(&residual, &μ, ε, stats.clone())) { // Calculate initial transport let v = opA.preadjoint().apply(residual); - let (μ_base_masses, mut μ_base_minus_γ0) = initial_transport( - &mut γ1, &mut μ, τ, &mut θ_or_adaptive, v - ); + let (μ_base_masses, mut μ_base_minus_γ0) = + initial_transport(&mut γ1, &mut μ, τ, &mut θ_or_adaptive, v); // Solve finite-dimensional subproblem several times until the dual variable for the // regularisation term conforms to the assumptions made for the transport above. @@ -354,18 +352,29 @@ // Construct μ^{k+1} by solving finite-dimensional subproblems and insert new spikes. let (maybe_d, within_tolerances) = prox_penalty.insert_and_reweigh( - &mut μ, &mut τv̆, &γ1, Some(&μ_base_minus_γ0), - τ, ε, &config.insertion, - ®, &state, &mut stats, + &mut μ, + &mut τv̆, + &γ1, + Some(&μ_base_minus_γ0), + τ, + ε, + &config.insertion, + ®, + &state, + &mut stats, ); // A posteriori transport adaptation. if aposteriori_transport( - &mut γ1, &mut μ, &mut μ_base_minus_γ0, &μ_base_masses, + &mut γ1, + &mut μ, + &mut μ_base_minus_γ0, + &μ_base_masses, None, - ε, &config.transport + ε, + &config.transport, ) { - break 'adapt_transport (maybe_d, within_tolerances, τv̆) + break 'adapt_transport (maybe_d, within_tolerances, τv̆); } }; @@ -387,8 +396,15 @@ let ins = &config.insertion; if ins.merge_now(&state) { stats.merged += prox_penalty.merge_spikes( - &mut μ, &mut τv̆, &γ1, Some(&μ_base_minus_γ0), τ, ε, ins, ®, - Some(|μ̃ : &RNDM<F, N>| L2Squared.calculate_fit_op(μ̃, opA, b)), + &mut μ, + &mut τv̆, + &γ1, + Some(&μ_base_minus_γ0), + τ, + ε, + ins, + ®, + Some(|μ̃: &RNDM<F, N>| L2Squared.calculate_fit_op(μ̃, opA, b)), ); } @@ -412,7 +428,12 @@ // Give statistics if requested state.if_verbose(|| { plotter.plot_spikes(iter, maybe_d.as_ref(), Some(&τv̆), &μ); - full_stats(&residual, &μ, ε, std::mem::replace(&mut stats, IterInfo::new())) + full_stats( + &residual, + &μ, + ε, + std::mem::replace(&mut stats, IterInfo::new()), + ) }); // Update main tolerance for next iteration
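The a posteriori check in `aposteriori_transport` above shrinks the transport plan whenever nγ·nΔ exceeds t = ε·C_pos, where nγ = ‖γ1‖ and nΔ collects the Radon norm of μ^k − π_♯^0γ, the matching distance, and an optional extra term. A toy illustration with made-up numbers (nΔ is held fixed here for simplicity; in the real loop it also changes as γ1 is rescaled):

    fn main() {
        let (epsilon, tolerance_mult_con, adaptation) = (0.01, 100.0, 0.9);
        let (n_gamma, n_delta) = (2.0, 1.0);
        let t = epsilon * tolerance_mult_con;     // t = 1.0
        if n_gamma * n_delta > t {
            let scale = adaptation * t / (n_gamma * n_delta);
            println!("scale γ1 by {scale}");      // 0.45, so ‖γ1‖ drops from 2.0 to 0.9
            assert!(n_gamma * scale * n_delta <= t);
        }
    }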
--- a/src/sliding_pdps.rs Fri Feb 14 23:16:14 2025 -0500 +++ b/src/sliding_pdps.rs Fri Feb 14 23:46:43 2025 -0500 @@ -4,83 +4,69 @@ */ use numeric_literals::replace_float_literals; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; //use colored::Colorize; //use nalgebra::{DVector, DMatrix}; use std::iter::Iterator; -use alg_tools::iterate::AlgIteratorFactory; +use alg_tools::convex::{Conjugable, Prox}; +use alg_tools::direct_product::Pair; use alg_tools::euclidean::Euclidean; -use alg_tools::mapping::{Mapping, DifferentiableRealMapping, Instance}; -use alg_tools::norms::{Norm, Dist}; -use alg_tools::direct_product::Pair; +use alg_tools::iterate::AlgIteratorFactory; +use alg_tools::linops::{Adjointable, BoundedLinear, IdOp, AXPY, GEMV}; +use alg_tools::mapping::{DifferentiableRealMapping, Instance, Mapping}; use alg_tools::nalgebra_support::ToNalgebraRealField; -use alg_tools::linops::{ - BoundedLinear, AXPY, GEMV, Adjointable, IdOp, -}; -use alg_tools::convex::{Conjugable, Prox}; -use alg_tools::norms::{L2, PairNorm}; +use alg_tools::norms::{Dist, Norm}; +use alg_tools::norms::{PairNorm, L2}; +use crate::forward_model::{AdjointProductPairBoundedBy, BoundedCurvature, ForwardModel}; +use crate::measures::merging::SpikeMerging; +use crate::measures::{DiscreteMeasure, Radon, RNDM}; use crate::types::*; -use crate::measures::{DiscreteMeasure, Radon, RNDM}; -use crate::measures::merging::SpikeMerging; -use crate::forward_model::{ - ForwardModel, - AdjointProductPairBoundedBy, - BoundedCurvature, -}; // use crate::transport::TransportLipschitz; //use crate::tolerance::Tolerance; -use crate::plot::{ - SeqPlotter, - Plotting, - PlotLookup -}; use crate::fb::*; +use crate::plot::{PlotLookup, Plotting, SeqPlotter}; use crate::regularisation::SlidingRegTerm; // use crate::dataterm::L2Squared; +use crate::dataterm::{calculate_residual, calculate_residual2}; use crate::sliding_fb::{ - TransportConfig, - TransportStepLength, - initial_transport, - aposteriori_transport, + aposteriori_transport, initial_transport, TransportConfig, TransportStepLength, }; -use crate::dataterm::{ - calculate_residual2, - calculate_residual, -}; - /// Settings for [`pointsource_sliding_pdps_pair`]. #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)] #[serde(default)] -pub struct SlidingPDPSConfig<F : Float> { +pub struct SlidingPDPSConfig<F: Float> { /// Primal step length scaling. - pub τ0 : F, + pub τ0: F, /// Primal step length scaling. - pub σp0 : F, + pub σp0: F, /// Dual step length scaling. 
- pub σd0 : F, + pub σd0: F, /// Transport parameters - pub transport : TransportConfig<F>, + pub transport: TransportConfig<F>, /// Generic parameters - pub insertion : FBGenericConfig<F>, + pub insertion: FBGenericConfig<F>, } #[replace_float_literals(F::cast_from(literal))] -impl<F : Float> Default for SlidingPDPSConfig<F> { +impl<F: Float> Default for SlidingPDPSConfig<F> { fn default() -> Self { SlidingPDPSConfig { - τ0 : 0.99, - σd0 : 0.05, - σp0 : 0.99, - transport : TransportConfig { θ0 : 0.9, ..Default::default()}, - insertion : Default::default() + τ0: 0.99, + σd0: 0.05, + σp0: 0.99, + transport: TransportConfig { + θ0: 0.9, + ..Default::default() + }, + insertion: Default::default(), } } } -type MeasureZ<F, Z, const N : usize> = Pair<RNDM<F, N>, Z>; +type MeasureZ<F, Z, const N: usize> = Pair<RNDM<F, N>, Z>; /// Iteratively solve the pointsource localisation with an additional variable /// using sliding primal-dual proximal splitting @@ -88,39 +74,45 @@ /// The parametrisation is as for [`crate::forward_pdps::pointsource_forward_pdps_pair`]. #[replace_float_literals(F::cast_from(literal))] pub fn pointsource_sliding_pdps_pair< - F, I, A, S, Reg, P, Z, R, Y, /*KOpM, */ KOpZ, H, const N : usize + F, + I, + A, + S, + Reg, + P, + Z, + R, + Y, + /*KOpM, */ KOpZ, + H, + const N: usize, >( - opA : &A, - b : &A::Observable, - reg : Reg, - prox_penalty : &P, - config : &SlidingPDPSConfig<F>, - iterator : I, - mut plotter : SeqPlotter<F, N>, + opA: &A, + b: &A::Observable, + reg: Reg, + prox_penalty: &P, + config: &SlidingPDPSConfig<F>, + iterator: I, + mut plotter: SeqPlotter<F, N>, //opKμ : KOpM, - opKz : &KOpZ, - fnR : &R, - fnH : &H, - mut z : Z, - mut y : Y, + opKz: &KOpZ, + fnR: &R, + fnH: &H, + mut z: Z, + mut y: Y, ) -> MeasureZ<F, Z, N> where - F : Float + ToNalgebraRealField, - I : AlgIteratorFactory<IterInfo<F, N>>, - A : ForwardModel< - MeasureZ<F, Z, N>, - F, - PairNorm<Radon, L2, L2>, - PreadjointCodomain = Pair<S, Z>, - > - + AdjointProductPairBoundedBy<MeasureZ<F, Z, N>, P, IdOp<Z>, FloatType=F> - + BoundedCurvature<FloatType=F>, - S : DifferentiableRealMapping<F, N>, - for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable> + Instance<A::Observable>, - PlotLookup : Plotting<N>, - RNDM<F, N> : SpikeMerging<F>, - Reg : SlidingRegTerm<F, N>, - P : ProxPenalty<F, S, Reg, N>, + F: Float + ToNalgebraRealField, + I: AlgIteratorFactory<IterInfo<F, N>>, + A: ForwardModel<MeasureZ<F, Z, N>, F, PairNorm<Radon, L2, L2>, PreadjointCodomain = Pair<S, Z>> + + AdjointProductPairBoundedBy<MeasureZ<F, Z, N>, P, IdOp<Z>, FloatType = F> + + BoundedCurvature<FloatType = F>, + S: DifferentiableRealMapping<F, N>, + for<'b> &'b A::Observable: std::ops::Neg<Output = A::Observable> + Instance<A::Observable>, + PlotLookup: Plotting<N>, + RNDM<F, N>: SpikeMerging<F>, + Reg: SlidingRegTerm<F, N>, + P: ProxPenalty<F, S, Reg, N>, // KOpM : Linear<RNDM<F, N>, Codomain=Y> // + GEMV<F, RNDM<F, N>> // + Preadjointable< @@ -131,27 +123,28 @@ // + AdjointProductBoundedBy<RNDM<F, N>, 𝒟, FloatType=F>, // for<'b> KOpM::Preadjoint<'b> : GEMV<F, Y>, // Since Z is Hilbert, we may just as well use adjoints for K_z. 
- KOpZ : BoundedLinear<Z, L2, L2, F, Codomain=Y> + KOpZ: BoundedLinear<Z, L2, L2, F, Codomain = Y> + GEMV<F, Z> + Adjointable<Z, Y, AdjointCodomain = Z>, - for<'b> KOpZ::Adjoint<'b> : GEMV<F, Y>, - Y : AXPY<F> + Euclidean<F, Output=Y> + Clone + ClosedAdd, - for<'b> &'b Y : Instance<Y>, - Z : AXPY<F, Owned=Z> + Euclidean<F, Output=Z> + Clone + Norm<F, L2> + Dist<F, L2>, - for<'b> &'b Z : Instance<Z>, - R : Prox<Z, Codomain=F>, - H : Conjugable<Y, F, Codomain=F>, - for<'b> H::Conjugate<'b> : Prox<Y>, + for<'b> KOpZ::Adjoint<'b>: GEMV<F, Y>, + Y: AXPY<F> + Euclidean<F, Output = Y> + Clone + ClosedAdd, + for<'b> &'b Y: Instance<Y>, + Z: AXPY<F, Owned = Z> + Euclidean<F, Output = Z> + Clone + Norm<F, L2> + Dist<F, L2>, + for<'b> &'b Z: Instance<Z>, + R: Prox<Z, Codomain = F>, + H: Conjugable<Y, F, Codomain = F>, + for<'b> H::Conjugate<'b>: Prox<Y>, { - // Check parameters - assert!(config.τ0 > 0.0 && - config.τ0 < 1.0 && - config.σp0 > 0.0 && - config.σp0 < 1.0 && - config.σd0 > 0.0 && - config.σp0 * config.σd0 <= 1.0, - "Invalid step length parameters"); + assert!( + config.τ0 > 0.0 + && config.τ0 < 1.0 + && config.σp0 > 0.0 + && config.σp0 < 1.0 + && config.σd0 > 0.0 + && config.σp0 * config.σd0 <= 1.0, + "Invalid step length parameters" + ); config.transport.check(); // Initialise iterates @@ -168,7 +161,9 @@ let nKz = opKz.opnorm_bound(L2, L2); let ℓ = 0.0; let opIdZ = IdOp::new(); - let (l, l_z) = opA.adjoint_product_pair_bound(prox_penalty, &opIdZ).unwrap(); + let (l, l_z) = opA + .adjoint_product_pair_bound(prox_penalty, &opIdZ) + .unwrap(); // We need to satisfy // // τσ_dM(1-σ_p L_z)/(1 - τ L) + [σ_p L_z + σ_pσ_d‖K_z‖^2] < 1 @@ -184,7 +179,7 @@ // ⟺ τ [ σ_d M (1-σ_p L_z) + (1-σ_{p,0}) L ] < (1-σ_{p,0}) let φ = 1.0 - config.σp0; let a = 1.0 - σ_p * l_z; - let τ = config.τ0 * φ / ( σ_d * bigM * a + φ * l ); + let τ = config.τ0 * φ / (σ_d * bigM * a + φ * l); let ψ = 1.0 - τ * l; let β = σ_p * config.σd0 * nKz / a; // σ_p * σ_d * (nKz * nK_z) / a; assert!(β < 1.0); @@ -192,23 +187,23 @@ let κ = τ * σ_d * ψ / ((1.0 - β) * ψ - τ * σ_d * bigM); // The factor two in the manuscript disappears due to the definition of 𝚹 being // for ‖x-y‖₂² instead of c_2(x, y)=‖x-y‖₂²/2. - let (maybe_ℓ_v0, maybe_transport_lip) = opA.curvature_bound_components(); + let (maybe_ℓ_F0, maybe_transport_lip) = opA.curvature_bound_components(); let transport_lip = maybe_transport_lip.unwrap(); - let calculate_θ = |ℓ_v, max_transport| { - let ℓ_F = ℓ_v + transport_lip * max_transport; - config.transport.θ0 / (τ*(ℓ + ℓ_F) + κ * bigθ * max_transport) + let calculate_θ = |ℓ_F, max_transport| { + let ℓ_r = transport_lip * max_transport; + config.transport.θ0 / (τ * (ℓ + ℓ_F + ℓ_r) + κ * bigθ * max_transport) }; - let mut θ_or_adaptive = match maybe_ℓ_v0 { + let mut θ_or_adaptive = match maybe_ℓ_F0 { // We assume that the residual is decreasing. 
- Some(ℓ_v0) => TransportStepLength::AdaptiveMax { - l: ℓ_v0 * b.norm2(), // TODO: could estimate computing the real reesidual - max_transport : 0.0, - g : calculate_θ + Some(ℓ_F0) => TransportStepLength::AdaptiveMax { + l: ℓ_F0 * b.norm2(), // TODO: could estimate computing the real reesidual + max_transport: 0.0, + g: calculate_θ, }, None => TransportStepLength::FullyAdaptive { - l : F::EPSILON, - max_transport : 0.0, - g : calculate_θ + l: F::EPSILON, + max_transport: 0.0, + g: calculate_θ, }, }; // Acceleration is not currently supported @@ -223,13 +218,15 @@ let starH = fnH.conjugate(); // Statistics - let full_stats = |residual : &A::Observable, μ : &RNDM<F, N>, z : &Z, ε, stats| IterInfo { - value : residual.norm2_squared_div2() + fnR.apply(z) - + reg.apply(μ) + fnH.apply(/* opKμ.apply(μ) + */ opKz.apply(z)), - n_spikes : μ.len(), + let full_stats = |residual: &A::Observable, μ: &RNDM<F, N>, z: &Z, ε, stats| IterInfo { + value: residual.norm2_squared_div2() + + fnR.apply(z) + + reg.apply(μ) + + fnH.apply(/* opKμ.apply(μ) + */ opKz.apply(z)), + n_spikes: μ.len(), ε, // postprocessing: config.insertion.postprocessing.then(|| μ.clone()), - .. stats + ..stats }; let mut stats = IterInfo::new(); @@ -244,40 +241,49 @@ // where A_ν^* becomes a multiplier. // This is much easier with K_μ = 0, which is the only reason why are enforcing it. // TODO: Write a version of initial_transport that can deal with K_μ ≠ 0. - - let (μ_base_masses, mut μ_base_minus_γ0) = initial_transport( - &mut γ1, &mut μ, τ, &mut θ_or_adaptive, v, - ); + + let (μ_base_masses, mut μ_base_minus_γ0) = + initial_transport(&mut γ1, &mut μ, τ, &mut θ_or_adaptive, v); // Solve finite-dimensional subproblem several times until the dual variable for the // regularisation term conforms to the assumptions made for the transport above. let (maybe_d, _within_tolerances, mut τv̆, z_new) = 'adapt_transport: loop { // Calculate τv̆ = τA_*(A[μ_transported + μ_transported_base]-b) - let residual_μ̆ = calculate_residual2(Pair(&γ1, &z), - Pair(&μ_base_minus_γ0, &zero_z), - opA, b); + let residual_μ̆ = + calculate_residual2(Pair(&γ1, &z), Pair(&μ_base_minus_γ0, &zero_z), opA, b); let Pair(mut τv̆, τz̆) = opA.preadjoint().apply(residual_μ̆ * τ); // opKμ.preadjoint().gemv(&mut τv̆, τ, y, 1.0); // Construct μ^{k+1} by solving finite-dimensional subproblems and insert new spikes. let (maybe_d, within_tolerances) = prox_penalty.insert_and_reweigh( - &mut μ, &mut τv̆, &γ1, Some(&μ_base_minus_γ0), - τ, ε, &config.insertion, - ®, &state, &mut stats, + &mut μ, + &mut τv̆, + &γ1, + Some(&μ_base_minus_γ0), + τ, + ε, + &config.insertion, + ®, + &state, + &mut stats, ); // Do z variable primal update here to able to estimate B_{v̆^k-v^{k+1}} let mut z_new = τz̆; - opKz.adjoint().gemv(&mut z_new, -σ_p, &y, -σ_p/τ); + opKz.adjoint().gemv(&mut z_new, -σ_p, &y, -σ_p / τ); z_new = fnR.prox(σ_p, z_new + &z); // A posteriori transport adaptation. 
if aposteriori_transport( - &mut γ1, &mut μ, &mut μ_base_minus_γ0, &μ_base_masses, + &mut γ1, + &mut μ, + &mut μ_base_minus_γ0, + &μ_base_masses, Some(z_new.dist(&z, L2)), - ε, &config.transport + ε, + &config.transport, ) { - break 'adapt_transport (maybe_d, within_tolerances, τv̆, z_new) + break 'adapt_transport (maybe_d, within_tolerances, τv̆, z_new); } }; @@ -299,7 +305,14 @@ let ins = &config.insertion; if ins.merge_now(&state) { stats.merged += prox_penalty.merge_spikes_no_fitness( - &mut μ, &mut τv̆, &γ1, Some(&μ_base_minus_γ0), τ, ε, ins, ®, + &mut μ, + &mut τv̆, + &γ1, + Some(&μ_base_minus_γ0), + τ, + ε, + ins, + ®, //Some(|μ̃ : &RNDM<F, N>| calculate_residual(Pair(μ̃, &z), opA, b).norm2_squared_div2()), ); } @@ -317,9 +330,9 @@ // Do dual update // opKμ.gemv(&mut y, σ_d*(1.0 + ω), &μ, 1.0); // y = y + σ_d K[(1+ω)(μ,z)^{k+1}] - opKz.gemv(&mut y, σ_d*(1.0 + ω), &z_new, 1.0); + opKz.gemv(&mut y, σ_d * (1.0 + ω), &z_new, 1.0); // opKμ.gemv(&mut y, -σ_d*ω, μ_base, 1.0);// y = y + σ_d K[(1+ω)(μ,z)^{k+1} - ω (μ,z)^k]-b - opKz.gemv(&mut y, -σ_d*ω, z, 1.0);// y = y + σ_d K[(1+ω)(μ,z)^{k+1} - ω (μ,z)^k]-b + opKz.gemv(&mut y, -σ_d * ω, z, 1.0); // y = y + σ_d K[(1+ω)(μ,z)^{k+1} - ω (μ,z)^k]-b y = starH.prox(σ_d, y); z = z_new; @@ -335,14 +348,20 @@ state.if_verbose(|| { plotter.plot_spikes(iter, maybe_d.as_ref(), Some(&τv̆), &μ); - full_stats(&residual, &μ, &z, ε, std::mem::replace(&mut stats, IterInfo::new())) + full_stats( + &residual, + &μ, + &z, + ε, + std::mem::replace(&mut stats, IterInfo::new()), + ) }); // Update main tolerance for next iteration ε = tolerance.update(ε, iter); } - let fit = |μ̃ : &RNDM<F, N>| { + let fit = |μ̃: &RNDM<F, N>| { (opA.apply(Pair(μ̃, &z))-b).norm2_squared_div2() //+ fnR.apply(z) + reg.apply(μ) + fnH.apply(/* opKμ.apply(&μ̃) + */ opKz.apply(&z))