src/seminorms.rs

changeset 54:b3312eee105c
parent 35:b087e3eab191
comparing 53:92cae2e8f598 with 54:b3312eee105c

The principal data type of the module is [`ConvolutionOp`], and the main abstraction is
the trait [`DiscreteMeasureOp`].
*/

use crate::measures::{DeltaMeasure, DiscreteMeasure, Radon, SpikeIter, RNDM};
use alg_tools::bisection_tree::*;
use alg_tools::instance::Instance;
use alg_tools::iter::{FilterMapX, Mappable};
use alg_tools::linops::{BoundedLinear, Linear, Mapping};
use alg_tools::loc::Loc;
use alg_tools::mapping::RealMapping;
use alg_tools::nalgebra_support::ToNalgebraRealField;
use alg_tools::norms::Linfinity;
use alg_tools::sets::Cube;
use alg_tools::types::*;
use itertools::Itertools;
use nalgebra::DMatrix;
use std::iter::Zip;
use std::marker::PhantomData;
use std::ops::RangeFrom;

/// Abstraction for operators $𝒟 ∈ 𝕃(𝒵(Ω); C_c(Ω))$.
///
/// Here $𝒵(Ω) ⊂ ℳ(Ω)$ is the space of sums of delta measures, presented by [`DiscreteMeasure`].
pub trait DiscreteMeasureOp<Domain, F>:
    BoundedLinear<DiscreteMeasure<Domain, F>, Radon, Linfinity, F>
where
    F: Float + ToNalgebraRealField,
    Domain: 'static + Clone + PartialEq,
{
    /// The output type of [`Self::preapply`].
    type PreCodomain;

    /// Creates a finite-dimensional presentation of the operator restricted to a fixed support.
    ///
    /// <p>
    /// This returns the matrix $C_*𝒟C$, where $C ∈ 𝕃(ℝ^n; 𝒵(Ω))$, $Ca = ∑_{i=1}^n α_i δ_{x_i}$
    /// for $x_1, …, x_n$ the coordinates given by the iterator `I`, and $a = (α_1, …, α_n)$.
    /// Here $C_* ∈ 𝕃(C_c(Ω); ℝ^n)$ stands for the preadjoint.
    /// </p>
    fn findim_matrix<'a, I>(&self, points: I) -> DMatrix<F::MixedType>
    where
        I: ExactSizeIterator<Item = &'a Domain> + Clone;

    /// [`Mapping`] that typically returns an uninitialised [`PreBTFN`]
    /// instead of a full [`BTFN`].
    fn preapply(&self, μ: DiscreteMeasure<Domain, F>) -> Self::PreCodomain;
}
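
For the convolution case implemented below by [`ConvolutionOp`], the matrix $C_*𝒟C$ described
above has entries $φ(x_j - x_i)$, so applying it to the weight vector $a$ evaluates $𝒟(Ca)$ at
the spike locations. A minimal standalone sketch of this identity, assuming a hypothetical hat
kernel `phi` and plain `f64` coordinates rather than this crate's types:

fn phi(t: f64) -> f64 {
    (1.0 - t.abs()).max(0.0)
}

fn main() {
    let x = [0.0_f64, 0.4, 1.5]; // spike locations x_1, …, x_n
    let a = [1.0_f64, -0.5, 2.0]; // weights α_1, …, α_n of μ = Ca

    // Row j of C_*𝒟C applied to a equals (𝒟μ)(x_j) = ∑_i α_i φ(x_j - x_i).
    for (j, &xj) in x.iter().enumerate() {
        let dmu_at_xj: f64 = x.iter().zip(&a).map(|(&xi, &ai)| ai * phi(xj - xi)).sum();
        println!("(𝒟μ)(x_{}) = {:.3}", j + 1, dmu_at_xj);
    }
}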

// Blanket implementation of a measure as a linear functional over a predual
// (that by assumption is a linear functional over a measure).
/*impl<F, Domain, Predual> Linear<Predual>
  … [commented-out blanket implementation elided in the changeset view] …

//
// Convolutions for discrete measures
//

/// A trait alias for simple convolution kernels.
pub trait SimpleConvolutionKernel<F: Float, const N: usize>:
    RealMapping<F, N> + Support<F, N> + Bounded<F> + Clone + 'static
{
}

impl<T, F: Float, const N: usize> SimpleConvolutionKernel<F, N> for T where
    T: RealMapping<F, N> + Support<F, N> + Bounded<F> + Clone + 'static
{
}

/// [`SupportGenerator`] for [`ConvolutionOp`].
#[derive(Clone, Debug)]
pub struct ConvolutionSupportGenerator<F: Float, K, const N: usize>
where
    K: SimpleConvolutionKernel<F, N>,
{
    kernel: K,
    centres: RNDM<F, N>,
}

impl<F: Float, K, const N: usize> ConvolutionSupportGenerator<F, K, N>
where
    K: SimpleConvolutionKernel<F, N>,
{
    /// Construct the convolution kernel corresponding to `δ`, i.e., one centered at `δ.x` and
    /// weighted by `δ.α`.
    #[inline]
    fn construct_kernel<'a>(
        &'a self,
        δ: &'a DeltaMeasure<Loc<F, N>, F>,
    ) -> Weighted<Shift<K, F, N>, F> {
        self.kernel.clone().shift(δ.x).weigh(δ.α)
    }

    /// This is a helper method for the implementation of [`ConvolutionSupportGenerator::all_data`].
    /// It filters out `δ` with zero weight, and otherwise returns the corresponding convolution
    /// kernel. The `id` is passed through as-is.
    #[inline]
    fn construct_kernel_and_id_filtered<'a>(
        &'a self,
        (id, δ): (usize, &'a DeltaMeasure<Loc<F, N>, F>),
    ) -> Option<(usize, Weighted<Shift<K, F, N>, F>)> {
        (δ.α != F::ZERO).then(|| (id.into(), self.construct_kernel(δ)))
    }
}

impl<F: Float, K, const N: usize> SupportGenerator<F, N> for ConvolutionSupportGenerator<F, K, N>
where
    K: SimpleConvolutionKernel<F, N>,
{
    type Id = usize;
    type SupportType = Weighted<Shift<K, F, N>, F>;
    type AllDataIter<'a> = FilterMapX<
        'a,
        Zip<RangeFrom<usize>, SpikeIter<'a, Loc<F, N>, F>>,
        Self,
        (Self::Id, Self::SupportType),
    >;

    #[inline]
    fn support_for(&self, d: Self::Id) -> Self::SupportType {
        self.construct_kernel(&self.centres[d])
    }

    #[inline]
    fn support_count(&self) -> usize {
        self.centres.len()
    }

    #[inline]
    fn all_data(&self) -> Self::AllDataIter<'_> {
        (0..)
            .zip(self.centres.iter_spikes())
            .filter_mapX(self, Self::construct_kernel_and_id_filtered)
    }
}
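
The generator pairs each spike with a running index, drops zero-weight spikes, and yields the
corresponding shifted, weighted kernel. A minimal standalone sketch of that enumerate-filter-shift
pattern, assuming spikes as plain `(weight, position)` tuples and a hypothetical kernel `phi`
instead of the crate's measure and kernel types:

fn phi(t: f64) -> f64 {
    (-t * t).exp() // a Gaussian-like bump, used only for illustration
}

fn main() {
    let spikes = [(1.0_f64, 0.2_f64), (0.0, 0.7), (-2.0, 1.3)]; // (α_i, x_i)

    // Pair each spike with a running id, drop zero-weight spikes, and build the
    // weighted, shifted kernel z ↦ α·φ(z - x) for the rest, as `all_data` does.
    let supports: Vec<(usize, Box<dyn Fn(f64) -> f64>)> = (0usize..)
        .zip(spikes.iter().copied())
        .filter(|&(_, (α, _))| α != 0.0)
        .map(|(id, (α, x))| {
            (id, Box::new(move |z: f64| α * phi(z - x)) as Box<dyn Fn(f64) -> f64>)
        })
        .collect();

    for (id, f) in &supports {
        println!("support {} at z = 1.0: {:.3}", id, f(1.0));
    }
}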

/// Representation of a convolution operator $𝒟$.
#[derive(Clone, Debug)]
pub struct ConvolutionOp<F, K, BT, const N: usize>
where
    F: Float + ToNalgebraRealField,
    BT: BTImpl<F, N, Data = usize>,
    K: SimpleConvolutionKernel<F, N>,
{
    /// Depth of the [`BT`] bisection tree for the outputs of [`Mapping::apply`].
    depth: BT::Depth,
    /// Domain of the [`BT`] bisection tree for the outputs of [`Mapping::apply`].
    domain: Cube<F, N>,
    /// The convolution kernel.
    kernel: K,
    _phantoms: PhantomData<(F, BT)>,
}

impl<F, K, BT, const N: usize> ConvolutionOp<F, K, BT, N>
where
    F: Float + ToNalgebraRealField,
    BT: BTImpl<F, N, Data = usize>,
    K: SimpleConvolutionKernel<F, N>,
{
    /// Creates a new convolution operator $𝒟$ with `kernel` on `domain`.
    ///
    /// The output of [`Mapping::apply`] is a [`BT`] of the given `depth`.
    pub fn new(depth: BT::Depth, domain: Cube<F, N>, kernel: K) -> Self {
        ConvolutionOp {
            depth,
            domain,
            kernel,
            _phantoms: PhantomData,
        }
    }

    /// Returns the support generator for this convolution operator.
    fn support_generator(&self, μ: RNDM<F, N>) -> ConvolutionSupportGenerator<F, K, N> {
        // TODO: can we avoid cloning μ?
        ConvolutionSupportGenerator {
            kernel: self.kernel.clone(),
            centres: μ,
        }
    }

    /// Returns a reference to the kernel of this convolution operator.
    pub fn kernel(&self) -> &K {
        &self.kernel
    }
}

impl<F, K, BT, const N: usize> Mapping<RNDM<F, N>> for ConvolutionOp<F, K, BT, N>
where
    F: Float + ToNalgebraRealField,
    BT: BTImpl<F, N, Data = usize>,
    K: SimpleConvolutionKernel<F, N>,
    Weighted<Shift<K, F, N>, F>: LocalAnalysis<F, BT::Agg, N>,
{
    type Codomain = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;

    fn apply<I>(&self, μ: I) -> Self::Codomain
    where
        I: Instance<RNDM<F, N>>,
    {
        let g = self.support_generator(μ.own());
        BTFN::construct(self.domain.clone(), self.depth, g)
    }
}

/// [`ConvolutionOp`]s as linear operators over [`DiscreteMeasure`]s.
impl<F, K, BT, const N: usize> Linear<RNDM<F, N>> for ConvolutionOp<F, K, BT, N>
where
    F: Float + ToNalgebraRealField,
    BT: BTImpl<F, N, Data = usize>,
    K: SimpleConvolutionKernel<F, N>,
    Weighted<Shift<K, F, N>, F>: LocalAnalysis<F, BT::Agg, N>,
{
}

impl<F, K, BT, const N: usize> BoundedLinear<RNDM<F, N>, Radon, Linfinity, F>
    for ConvolutionOp<F, K, BT, N>
where
    F: Float + ToNalgebraRealField,
    BT: BTImpl<F, N, Data = usize>,
    K: SimpleConvolutionKernel<F, N>,
    Weighted<Shift<K, F, N>, F>: LocalAnalysis<F, BT::Agg, N>,
{
    fn opnorm_bound(&self, _: Radon, _: Linfinity) -> F {
        // With μ = ∑_i α_i δ_{x_i}, we have
        //   |𝒟μ|_∞ = sup_z |∑_i α_i φ(z - x_i)|
        //          ≤ sup_z ∑_i |α_i| |φ(z - x_i)|
        //          ≤ ∑_i |α_i| |φ|_∞
        //          = |μ|_ℳ |φ|_∞,
        // so the uniform bound of the kernel bounds the (Radon, L∞) operator norm.
        self.kernel.bounds().uniform()
    }
}
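
The estimate above can be checked numerically for any concrete kernel: the supremum of $|𝒟μ|$
never exceeds $|μ|_ℳ · \sup|φ|$. A minimal standalone sketch, assuming a hypothetical kernel with
supremum 1 and a crude grid search in place of the crate's `Bounded` machinery:

fn phi(t: f64) -> f64 {
    1.0 / (1.0 + t * t) // sup |φ| = 1, attained at t = 0
}

fn main() {
    let spikes = [(1.5_f64, -0.3_f64), (-0.7, 0.4), (2.0, 1.1)]; // (α_i, x_i)
    let radon_norm: f64 = spikes.iter().map(|(α, _)| α.abs()).sum(); // |μ|_ℳ
    let phi_sup = 1.0_f64; // sup |φ| for this particular kernel

    // Crude grid search for sup_z |(𝒟μ)(z)| = sup_z |∑_i α_i φ(z - x_i)|.
    let sup_dmu = (-400..=400)
        .map(|k| k as f64 * 0.01)
        .map(|z| spikes.iter().map(|&(α, x)| α * phi(z - x)).sum::<f64>().abs())
        .fold(0.0_f64, f64::max);

    println!("sup |𝒟μ| ≈ {:.3} ≤ |μ|_ℳ·sup|φ| = {:.3}", sup_dmu, radon_norm * phi_sup);
    assert!(sup_dmu <= radon_norm * phi_sup);
}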

impl<F, K, BT, const N: usize> DiscreteMeasureOp<Loc<F, N>, F> for ConvolutionOp<F, K, BT, N>
where
    F: Float + ToNalgebraRealField,
    BT: BTImpl<F, N, Data = usize>,
    K: SimpleConvolutionKernel<F, N>,
    Weighted<Shift<K, F, N>, F>: LocalAnalysis<F, BT::Agg, N>,
{
    type PreCodomain = PreBTFN<F, ConvolutionSupportGenerator<F, K, N>, N>;

    fn findim_matrix<'a, I>(&self, points: I) -> DMatrix<F::MixedType>
    where
        I: ExactSizeIterator<Item = &'a Loc<F, N>> + Clone,
    {
        // TODO: Preliminary implementation. It would be best to use sparse matrices,
        // or possibly explicit operators without matrices.
        let n = points.len();
        let points_clone = points.clone();
        let pairs = points.cartesian_product(points_clone);
        let kernel = &self.kernel;
        let values = pairs.map(|(x, y)| kernel.apply(y - x).to_nalgebra_mixed());
        DMatrix::from_iterator(n, n, values)
    }

    /// A version of [`Mapping::apply`] that does not instantiate the [`BTFN`] codomain with
    /// a bisection tree, instead returning a [`PreBTFN`]. This can improve performance when
    /// the output is to be added as the right-hand-side operand to a proper BTFN.
    fn preapply(&self, μ: RNDM<F, N>) -> Self::PreCodomain {
        BTFN::new_pre(self.support_generator(μ))
    }
}
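
A standalone mirror of the `findim_matrix` assembly above, using the same `cartesian_product`
and column-major `DMatrix::from_iterator` pattern, but with a hypothetical scalar kernel `phi`
in place of `self.kernel.apply` (it only needs the `itertools` and `nalgebra` crates, which this
module already depends on):

use itertools::Itertools;
use nalgebra::DMatrix;

fn phi(t: f64) -> f64 {
    (1.0 - t.abs()).max(0.0)
}

fn main() {
    let points = [0.0_f64, 0.25, 0.9];
    let n = points.len();
    // The outer iterator varies slowest, so under the column-major fill the entry in
    // row j, column i is φ(x_j - x_i), matching the (x, y) ↦ φ(y - x) map above.
    let values = points
        .iter()
        .cartesian_product(points.iter())
        .map(|(&x, &y)| phi(y - x));
    let m: DMatrix<f64> = DMatrix::from_iterator(n, n, values);
    println!("{}", m);
}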

/// Generates a scalar operation (e.g. [`std::ops::Mul`], [`std::ops::Div`])
/// for [`ConvolutionSupportGenerator`].
macro_rules! make_convolutionsupportgenerator_scalarop_rhs {
    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
        impl<F: Float, K: SimpleConvolutionKernel<F, N>, const N: usize> std::ops::$trait_assign<F>
            for ConvolutionSupportGenerator<F, K, N>
        {
            fn $fn_assign(&mut self, t: F) {
                self.centres.$fn_assign(t);
            }
        }

        impl<F: Float, K: SimpleConvolutionKernel<F, N>, const N: usize> std::ops::$trait<F>
            for ConvolutionSupportGenerator<F, K, N>
        {
            type Output = ConvolutionSupportGenerator<F, K, N>;
            fn $fn(mut self, t: F) -> Self::Output {
                std::ops::$trait_assign::$fn_assign(&mut self.centres, t);
                self
            }
        }

        impl<'a, F: Float, K: SimpleConvolutionKernel<F, N>, const N: usize> std::ops::$trait<F>
            for &'a ConvolutionSupportGenerator<F, K, N>
        {
            type Output = ConvolutionSupportGenerator<F, K, N>;
            fn $fn(self, t: F) -> Self::Output {
                ConvolutionSupportGenerator {
                    kernel: self.kernel.clone(),
                    centres: (&self.centres).$fn(t),
                }
            }
        }
    };
}

make_convolutionsupportgenerator_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
make_convolutionsupportgenerator_scalarop_rhs!(Div, div, DivAssign, div_assign);

/// Generates a unary operation (e.g. [`std::ops::Neg`]) for [`ConvolutionSupportGenerator`].
macro_rules! make_convolutionsupportgenerator_unaryop {
    ($trait:ident, $fn:ident) => {
        impl<F: Float, K: SimpleConvolutionKernel<F, N>, const N: usize> std::ops::$trait
            for ConvolutionSupportGenerator<F, K, N>
        {
            type Output = ConvolutionSupportGenerator<F, K, N>;
            fn $fn(mut self) -> Self::Output {
                self.centres = self.centres.$fn();
                self
            }
        }

        impl<'a, F: Float, K: SimpleConvolutionKernel<F, N>, const N: usize> std::ops::$trait
            for &'a ConvolutionSupportGenerator<F, K, N>
        {
            type Output = ConvolutionSupportGenerator<F, K, N>;
            fn $fn(self) -> Self::Output {
                ConvolutionSupportGenerator {
                    kernel: self.kernel.clone(),
                    centres: (&self.centres).$fn(),
                }
            }
        }
    };
}

make_convolutionsupportgenerator_unaryop!(Neg, neg);