Tue, 29 Apr 2025 07:55:18 -0500
sketch
| src/convex.rs | file | annotate | diff | comparison | revisions | |
| src/mapping.rs | file | annotate | diff | comparison | revisions | |
| src/mapping/dataterm.rs | file | annotate | diff | comparison | revisions |
--- a/src/convex.rs Tue Apr 29 00:03:12 2025 -0500 +++ b/src/convex.rs Tue Apr 29 07:55:18 2025 -0500 @@ -319,15 +319,15 @@ } /// The squared Euclidean norm divided by two -pub struct Norm222<Domain: Space, F: Float>(PhantomData<(Domain, F)>); +pub struct Norm222<F: Float>(PhantomData<F>); -impl<Domain: Euclidean<F>, F: Float> Norm222<Domain, F> { +impl</*Domain: Euclidean<F>,*/ F: Float> Norm222<F> { pub fn new() -> Self { Norm222(PhantomData) } } -impl<Domain: Euclidean<F>, F: Float> Mapping<Domain> for Norm222<Domain, F> { +impl<Domain: Euclidean<F>, F: Float> Mapping<Domain> for Norm222<F> { type Codomain = F; /// Compute the value of `self` at `x`. @@ -336,9 +336,9 @@ } } -impl<Domain: Euclidean<F>, F: Float> ConvexMapping<Domain, F> for Norm222<Domain, F> {} +impl<Domain: Euclidean<F>, F: Float> ConvexMapping<Domain, F> for Norm222<F> {} -impl<Domain: Euclidean<F>, F: Float> Conjugable<Domain, F> for Norm222<Domain, F> { +impl<Domain: Euclidean<F>, F: Float> Conjugable<Domain, F> for Norm222<F> { type Conjugate<'a> = Self where @@ -350,7 +350,7 @@ } } -impl<Domain: Euclidean<F>, F: Float> Preconjugable<Domain, Domain, F> for Norm222<Domain, F> { +impl<Domain: Euclidean<F>, F: Float> Preconjugable<Domain, Domain, F> for Norm222<F> { type Preconjugate<'a> = Self where @@ -362,7 +362,7 @@ } } -impl<Domain, F> Prox<Domain> for Norm222<Domain, F> +impl<Domain, F> Prox<Domain> for Norm222<F> where F: Float, Domain: Euclidean<F, Output = Domain>,
--- a/src/mapping.rs Tue Apr 29 00:03:12 2025 -0500 +++ b/src/mapping.rs Tue Apr 29 07:55:18 2025 -0500 @@ -9,6 +9,7 @@ use crate::types::{ClosedMul, Float, Num}; use std::borrow::Cow; use std::marker::PhantomData; +use std::ops::Mul; /// A mapping from `Domain` to `Self::Codomain`. pub trait Mapping<Domain: Space> { @@ -283,8 +284,39 @@ } } +/// Chain-rule differential for a [`Composition`] of differentiable mappings +impl<S, T, X, E, Y> DifferentiableImpl<X> for Composition<S, T, E> +where + X: Space, + T: DifferentiableImpl<X> + Mapping<X>, + S: DifferentiableImpl<T::Codomain>, + E: Copy, + //Composition<S::Derivative, T::Derivative, E>: Space, + S::Derivative: Mul<T::Derivative, Output = Y>, + Y: Space, +{ + //type Derivative = Composition<S::Derivative, T::Derivative, E>; + type Derivative = Y; + + /// Compute the differential of `self` at `x`, consuming the input. + fn differential_impl<I: Instance<X>>(&self, x: I) -> Self::Derivative { + // Composition { + // outer: self + // .outer + // .differential_impl(self.inner.apply(x.ref_instance())), + // inner: self.inner.differential_impl(x), + // intermediate_norm_exponent: self.intermediate_norm_exponent, + // } + self.outer + .differential_impl(self.inner.apply(x.ref_instance())) + * self.inner.differential_impl(x) + } +} + mod quadratic; pub use quadratic::Quadratic; +mod dataterm; +pub use dataterm::DataTerm; /// Trait for indicating that `Self` is Lipschitz with respect to the (semi)norm `D`. pub trait Lipschitz<M> {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/mapping/dataterm.rs Tue Apr 29 07:55:18 2025 -0500 @@ -0,0 +1,124 @@ +/*! +General data terms of the form $g(Ax-b)$ for an operator $A$ +to a [`Euclidean`] space, and a function g on that space. +*/ + +#![allow(non_snake_case)] + +use super::{DifferentiableImpl, DifferentiableMapping, /*LipschitzDifferentiableImpl,*/ Mapping,}; +use crate::convex::ConvexMapping; +use crate::instance::{Instance, Space}; +use crate::linops::{/*BoundedLinear,*/ Linear, Preadjointable}; +//use crate::norms::{Norm, NormExponent, L2}; +use crate::types::Float; +use std::ops::Sub; +//use serde::{Deserialize, Serialize}; + +/// Functions of the form $\frac{1}{2}\|Ax-b\|_2^2$ for an operator $A$ +/// to a [`Euclidean`] space. +pub struct DataTerm< + F: Float, + Domain: Space, + A: Mapping<Domain>, + G: Mapping<A::Codomain, Codomain = F>, +> { + opA: A, + b: <A as Mapping<Domain>>::Codomain, + g: G, +} + +#[allow(non_snake_case)] +impl<F: Float, Domain: Space, A: Mapping<Domain>, G: Mapping<A::Codomain, Codomain = F>> + DataTerm<F, Domain, A, G> +{ + pub fn new(opA: A, b: A::Codomain, g: G) -> Self { + DataTerm { opA, b, g } + } + + pub fn operator(&self) -> &'_ A { + &self.opA + } + + pub fn data(&self) -> &'_ <A as Mapping<Domain>>::Codomain { + &self.b + } + + pub fn fidelity(&self) -> &'_ G { + &self.g + } +} + +//+ AdjointProductBoundedBy<RNDM<F, N>, P, FloatType = F>, + +impl<F, X, A, G> Mapping<X> for DataTerm<F, X, A, G> +where + F: Float, + X: Space, + A: Mapping<X>, + G: Mapping<A::Codomain, Codomain = F>, + A::Codomain: for<'a> Sub<&'a A::Codomain, Output = A::Codomain>, +{ + type Codomain = F; + + fn apply<I: Instance<X>>(&self, x: I) -> F { + // TODO: possibly (if at all more efficient) use GEMV once generalised + // to not require preallocation. However, Rust should be pretty efficient + // at not doing preallocations or anything here, as the result of self.opA.apply() + // can be consumed, so maybe GEMV is no use. 
+ self.g.apply(self.opA.apply(x) - &self.b) + } } + +impl<F, X, A, G> ConvexMapping<X, F> for DataTerm<F, X, A, G> +where + F: Float, + X: Space, + A: Linear<X>, + G: ConvexMapping<A::Codomain, F>, + A::Codomain: for<'a> Sub<&'a A::Codomain, Output = A::Codomain>, +{ +} + +impl<F, X, Y, A, G> DifferentiableImpl<X> for DataTerm<F, X, A, G> +where + F: Float, + X: Space, + Y: Space + for<'a> Sub<&'a Y, Output = Y>, + //<A as Mapping<X>>::Codomain: Euclidean<F>, + A: Linear<X, Codomain = Y> + Preadjointable<X, G::DerivativeDomain>, + //<<A as Mapping<X>>::Codomain as Euclidean<F>>::Output: Instance<<A as Mapping<X>>::Codomain>, + G: DifferentiableMapping<Y, Codomain = F>, +{ + type Derivative = A::PreadjointCodomain; + + fn differential_impl<I: Instance<X>>(&self, x: I) -> Self::Derivative { + // TODO: possibly (if at all more efficient) use GEMV once generalised + // to not require preallocation. However, Rust should be pretty efficient + // at not doing preallocations or anything here, as the result of self.opA.apply() + // can be consumed, so maybe GEMV is no use. + //self.opA.preadjoint().apply(self.opA.apply(x) - self.b) + self.opA + .preadjoint() + .apply(self.g.diff_ref().apply(self.opA.apply(x) - &self.b)) + } +} + +/* +impl<'a, F, X, ExpX, Y, ExpY, A, G> LipschitzDifferentiableImpl<X, ExpX> for DataTerm<F, X, A, G> +where + F: Float, + X: Space + Clone + Norm<F, ExpX>, + Y: Space + Norm<F, ExpY>, + ExpX: NormExponent, + ExpY: NormExponent, + A: Clone + BoundedLinear<X, ExpX, L2, F, Codomain = Y>, + G: Mapping<Y, Codomain = F> + LipschitzDifferentiableImpl<Y, ExpY>, + Self: DifferentiableImpl<X>, +{ + type FloatType = F; + + fn diff_lipschitz_factor(&self, seminorm: ExpX) -> Option<F> { + Some(self.opA.opnorm_bound(seminorm, L2).powi(2)) + } +} +*/