New version of sliding. dev

Tue, 31 Dec 2024 09:25:45 -0500

author
Tuomo Valkonen <tuomov@iki.fi>
date
Tue, 31 Dec 2024 09:25:45 -0500
branch
dev
changeset 35
b087e3eab191
parent 34
efa60bc4f743
child 36
fb911f72e698

New version of sliding.

Cargo.lock file | annotate | diff | comparison | revisions
Cargo.toml file | annotate | diff | comparison | revisions
README.md file | annotate | diff | comparison | revisions
src/dataterm.rs file | annotate | diff | comparison | revisions
src/experiments.rs file | annotate | diff | comparison | revisions
src/fb.rs file | annotate | diff | comparison | revisions
src/forward_model.rs file | annotate | diff | comparison | revisions
src/forward_model/bias.rs file | annotate | diff | comparison | revisions
src/forward_model/sensor_grid.rs file | annotate | diff | comparison | revisions
src/forward_pdps.rs file | annotate | diff | comparison | revisions
src/fourier.rs file | annotate | diff | comparison | revisions
src/frank_wolfe.rs file | annotate | diff | comparison | revisions
src/kernels.rs file | annotate | diff | comparison | revisions
src/kernels/ball_indicator.rs file | annotate | diff | comparison | revisions
src/kernels/base.rs file | annotate | diff | comparison | revisions
src/kernels/gaussian.rs file | annotate | diff | comparison | revisions
src/kernels/hat.rs file | annotate | diff | comparison | revisions
src/kernels/hat_convolution.rs file | annotate | diff | comparison | revisions
src/kernels/linear.rs file | annotate | diff | comparison | revisions
src/kernels/mollifier.rs file | annotate | diff | comparison | revisions
src/main.rs file | annotate | diff | comparison | revisions
src/measures.rs file | annotate | diff | comparison | revisions
src/measures/base.rs file | annotate | diff | comparison | revisions
src/measures/delta.rs file | annotate | diff | comparison | revisions
src/measures/discrete.rs file | annotate | diff | comparison | revisions
src/pdps.rs file | annotate | diff | comparison | revisions
src/plot.rs file | annotate | diff | comparison | revisions
src/preadjoint_helper.rs file | annotate | diff | comparison | revisions
src/radon_fb.rs file | annotate | diff | comparison | revisions
src/regularisation.rs file | annotate | diff | comparison | revisions
src/run.rs file | annotate | diff | comparison | revisions
src/seminorms.rs file | annotate | diff | comparison | revisions
src/sliding_fb.rs file | annotate | diff | comparison | revisions
src/sliding_pdps.rs file | annotate | diff | comparison | revisions
src/types.rs file | annotate | diff | comparison | revisions
--- a/Cargo.lock	Thu Aug 29 00:00:00 2024 -0500
+++ b/Cargo.lock	Tue Dec 31 09:25:45 2024 -0500
@@ -4,9 +4,9 @@
 
 [[package]]
 name = "GSL"
-version = "6.0.0"
+version = "7.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c9becaf6d7d1ba36a457288e661fa6a0472e8328629276f45369eafcd48ef1ce"
+checksum = "db3943d5a15b5c46e991124abee6a1bc89c7c9ffb25dbb8aeb4eab926fd9b307"
 dependencies = [
  "GSL-sys",
  "paste",
@@ -23,24 +23,19 @@
 ]
 
 [[package]]
-name = "adler"
-version = "1.0.2"
+name = "aho-corasick"
+version = "1.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
-
-[[package]]
-name = "aho-corasick"
-version = "0.7.20"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
 dependencies = [
  "memchr",
 ]
 
 [[package]]
 name = "alg_tools"
-version = "0.2.0-dev"
+version = "0.3.0-dev"
 dependencies = [
+ "anyhow",
  "colored",
  "cpu-time",
  "csv",
@@ -52,7 +47,7 @@
  "rayon",
  "serde",
  "serde_json",
- "trait-set",
+ "simba",
 ]
 
 [[package]]
@@ -71,6 +66,61 @@
 ]
 
 [[package]]
+name = "anstream"
+version = "0.6.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b"
+dependencies = [
+ "anstyle",
+ "anstyle-parse",
+ "anstyle-query",
+ "anstyle-wincon",
+ "colorchoice",
+ "is_terminal_polyfill",
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle"
+version = "1.0.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
+
+[[package]]
+name = "anstyle-parse"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9"
+dependencies = [
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle-query"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c"
+dependencies = [
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "anstyle-wincon"
+version = "3.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125"
+dependencies = [
+ "anstyle",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.95"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04"
+
+[[package]]
 name = "approx"
 version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -86,60 +136,30 @@
 checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
 
 [[package]]
-name = "bit_field"
-version = "0.10.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc827186963e592360843fb5ba4b973e145841266c1357f7180c43526f2e5b61"
-
-[[package]]
-name = "bitflags"
-version = "1.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
-
-[[package]]
 name = "bitflags"
 version = "2.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635"
 
 [[package]]
-name = "bstr"
-version = "0.2.17"
+name = "bumpalo"
+version = "3.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223"
-dependencies = [
- "lazy_static",
- "memchr",
- "regex-automata",
- "serde",
-]
-
-[[package]]
-name = "bumpalo"
-version = "3.14.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec"
+checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
 
 [[package]]
 name = "bytemuck"
-version = "1.14.0"
+version = "1.20.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6"
-
-[[package]]
-name = "byteorder"
-version = "1.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
+checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a"
 
 [[package]]
 name = "cc"
-version = "1.0.83"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
+checksum = "1aeb932158bd710538c73702db6945cb68a8fb08c519e6e12706b94263b36db8"
 dependencies = [
- "libc",
+ "shlex",
 ]
 
 [[package]]
@@ -150,9 +170,9 @@
 
 [[package]]
 name = "chrono"
-version = "0.4.31"
+version = "0.4.39"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38"
+checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825"
 dependencies = [
  "android-tzdata",
  "iana-time-zone",
@@ -160,22 +180,29 @@
  "num-traits",
  "serde",
  "wasm-bindgen",
- "windows-targets",
+ "windows-targets 0.52.6",
 ]
 
 [[package]]
 name = "clap"
-version = "4.0.32"
+version = "4.5.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7db700bc935f9e43e88d00b0850dae18a63773cfbec6d8e070fccf7fef89a39"
+checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84"
 dependencies = [
- "bitflags 1.3.2",
+ "clap_builder",
  "clap_derive",
+]
+
+[[package]]
+name = "clap_builder"
+version = "4.5.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838"
+dependencies = [
+ "anstream",
+ "anstyle",
  "clap_lex",
- "is-terminal",
- "once_cell",
  "strsim",
- "termcolor",
  "terminal_size",
  "unicase",
  "unicode-width",
@@ -183,57 +210,43 @@
 
 [[package]]
 name = "clap_derive"
-version = "4.0.21"
+version = "4.5.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014"
+checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab"
 dependencies = [
  "heck",
- "proc-macro-error",
  "proc-macro2",
  "quote",
- "syn 1.0.109",
+ "syn 2.0.93",
 ]
 
 [[package]]
 name = "clap_lex"
-version = "0.3.3"
+version = "0.7.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "033f6b7a4acb1f358c742aaca805c939ee73b4c6209ae4318ec7aca81c42e646"
-dependencies = [
- "os_str_bytes",
-]
+checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
 
 [[package]]
-name = "color_quant"
-version = "1.1.0"
+name = "colorchoice"
+version = "1.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b"
-
-[[package]]
-name = "colorbrewer"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "525be5012d97bc222e124ded87f18601e6fbd24a406761bcb1664475663919a6"
-dependencies = [
- "rgb",
-]
+checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990"
 
 [[package]]
 name = "colored"
-version = "2.0.4"
+version = "2.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6"
+checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8"
 dependencies = [
- "is-terminal",
  "lazy_static",
- "windows-sys",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
 name = "core-foundation-sys"
-version = "0.8.4"
+version = "0.8.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa"
+checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
 
 [[package]]
 name = "cpu-time"
@@ -246,15 +259,6 @@
 ]
 
 [[package]]
-name = "crc32fast"
-version = "1.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
-dependencies = [
- "cfg-if",
-]
-
-[[package]]
 name = "crossbeam-deque"
 version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -288,20 +292,13 @@
 ]
 
 [[package]]
-name = "crunchy"
-version = "0.2.2"
+name = "csv"
+version = "1.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
-
-[[package]]
-name = "csv"
-version = "1.1.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1"
+checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf"
 dependencies = [
- "bstr",
  "csv-core",
- "itoa 0.4.8",
+ "itoa",
  "ryu",
  "serde",
 ]
@@ -317,9 +314,9 @@
 
 [[package]]
 name = "either"
-version = "1.9.0"
+version = "1.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07"
+checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
 
 [[package]]
 name = "errno"
@@ -328,42 +325,7 @@
 checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860"
 dependencies = [
  "libc",
- "windows-sys",
-]
-
-[[package]]
-name = "exr"
-version = "1.71.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "832a761f35ab3e6664babfbdc6cef35a4860e816ec3916dcfd0882954e98a8a8"
-dependencies = [
- "bit_field",
- "flume",
- "half",
- "lebe",
- "miniz_oxide",
- "rayon-core",
- "smallvec",
- "zune-inflate",
-]
-
-[[package]]
-name = "fdeflate"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d329bdeac514ee06249dabc27877490f17f5d371ec693360768b838e19f3ae10"
-dependencies = [
- "simd-adler32",
-]
-
-[[package]]
-name = "flate2"
-version = "1.0.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e"
-dependencies = [
- "crc32fast",
- "miniz_oxide",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -376,15 +338,6 @@
 ]
 
 [[package]]
-name = "flume"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181"
-dependencies = [
- "spin",
-]
-
-[[package]]
 name = "getrandom"
 version = "0.2.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -396,48 +349,23 @@
 ]
 
 [[package]]
-name = "gif"
-version = "0.12.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "80792593675e051cf94a4b111980da2ba60d4a83e43e0048c5693baab3977045"
-dependencies = [
- "color_quant",
- "weezl",
-]
-
-[[package]]
-name = "half"
-version = "2.2.1"
+name = "heck"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "02b4af3693f1b705df946e9fe5631932443781d0aabb423b62fcd4d73f6d2fd0"
-dependencies = [
- "crunchy",
-]
-
-[[package]]
-name = "heck"
-version = "0.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
-
-[[package]]
-name = "hermit-abi"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7"
+checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
 
 [[package]]
 name = "iana-time-zone"
-version = "0.1.57"
+version = "0.1.61"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613"
+checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220"
 dependencies = [
  "android_system_properties",
  "core-foundation-sys",
  "iana-time-zone-haiku",
  "js-sys",
  "wasm-bindgen",
- "windows",
+ "windows-core",
 ]
 
 [[package]]
@@ -450,82 +378,33 @@
 ]
 
 [[package]]
-name = "image"
-version = "0.24.7"
+name = "is_terminal_polyfill"
+version = "1.70.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6f3dfdbdd72063086ff443e297b61695500514b1e41095b6fb9a5ab48a70a711"
-dependencies = [
- "bytemuck",
- "byteorder",
- "color_quant",
- "exr",
- "gif",
- "jpeg-decoder",
- "num-rational",
- "num-traits",
- "png",
- "qoi",
- "tiff",
-]
-
-[[package]]
-name = "io-lifetimes"
-version = "1.0.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
-dependencies = [
- "hermit-abi",
- "libc",
- "windows-sys",
-]
-
-[[package]]
-name = "is-terminal"
-version = "0.4.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
-dependencies = [
- "hermit-abi",
- "rustix 0.38.19",
- "windows-sys",
-]
+checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
 
 [[package]]
 name = "itertools"
-version = "0.10.5"
+version = "0.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
+checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
 dependencies = [
  "either",
 ]
 
 [[package]]
 name = "itoa"
-version = "0.4.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
-
-[[package]]
-name = "itoa"
 version = "1.0.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
 
 [[package]]
-name = "jpeg-decoder"
-version = "0.3.0"
+name = "js-sys"
+version = "0.3.76"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc0000e42512c92e31c2252315bda326620a4e034105e900c98ec492fa077b3e"
+checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7"
 dependencies = [
- "rayon",
-]
-
-[[package]]
-name = "js-sys"
-version = "0.3.64"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a"
-dependencies = [
+ "once_cell",
  "wasm-bindgen",
 ]
 
@@ -536,12 +415,6 @@
 checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
 
 [[package]]
-name = "lebe"
-version = "0.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8"
-
-[[package]]
 name = "libc"
 version = "0.2.149"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -555,31 +428,15 @@
 
 [[package]]
 name = "linux-raw-sys"
-version = "0.3.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
-
-[[package]]
-name = "linux-raw-sys"
 version = "0.4.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f"
 
 [[package]]
-name = "lock_api"
-version = "0.4.10"
+name = "log"
+version = "0.4.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16"
-dependencies = [
- "autocfg",
- "scopeguard",
-]
-
-[[package]]
-name = "log"
-version = "0.4.20"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
 
 [[package]]
 name = "matrixmultiply"
@@ -607,20 +464,10 @@
 ]
 
 [[package]]
-name = "miniz_oxide"
-version = "0.7.1"
+name = "nalgebra"
+version = "0.33.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
-dependencies = [
- "adler",
- "simd-adler32",
-]
-
-[[package]]
-name = "nalgebra"
-version = "0.31.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "20bd243ab3dbb395b39ee730402d2e5405e448c75133ec49cc977762c4cba3d1"
+checksum = "26aecdf64b707efd1310e3544d709c5c0ac61c13756046aaaba41be5c4f66a3b"
 dependencies = [
  "approx",
  "matrixmultiply",
@@ -635,20 +482,20 @@
 
 [[package]]
 name = "nalgebra-macros"
-version = "0.1.0"
+version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01fcc0b8149b4632adc89ac3b7b31a12fb6099a0317a4eb2ebff574ef7de7218"
+checksum = "254a5372af8fc138e36684761d3c0cdb758a4410e938babcff1c860ce14ddbfc"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 1.0.109",
+ "syn 2.0.93",
 ]
 
 [[package]]
 name = "num"
-version = "0.4.1"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af"
+checksum = "3135b08af27d103b0a51f2ae0f8632117b7b185ccf931445affa8df530576a41"
 dependencies = [
  "num-bigint",
  "num-complex",
@@ -660,39 +507,37 @@
 
 [[package]]
 name = "num-bigint"
-version = "0.4.4"
+version = "0.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0"
+checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9"
 dependencies = [
- "autocfg",
  "num-integer",
  "num-traits",
 ]
 
 [[package]]
 name = "num-complex"
-version = "0.4.4"
+version = "0.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214"
+checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495"
 dependencies = [
  "num-traits",
 ]
 
 [[package]]
 name = "num-integer"
-version = "0.1.45"
+version = "0.1.46"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
+checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f"
 dependencies = [
- "autocfg",
  "num-traits",
 ]
 
 [[package]]
 name = "num-iter"
-version = "0.1.43"
+version = "0.1.45"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252"
+checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf"
 dependencies = [
  "autocfg",
  "num-integer",
@@ -713,9 +558,9 @@
 
 [[package]]
 name = "num-traits"
-version = "0.2.17"
+version = "0.2.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
 dependencies = [
  "autocfg",
  "libm",
@@ -733,15 +578,9 @@
 
 [[package]]
 name = "once_cell"
-version = "1.18.0"
+version = "1.20.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
-
-[[package]]
-name = "os_str_bytes"
-version = "6.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4d5d9eb14b174ee9aa2ef96dc2b94637a2d4b6e7cb873c7e171f0c20c6cf3eac"
+checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"
 
 [[package]]
 name = "paste"
@@ -756,106 +595,48 @@
 checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964"
 
 [[package]]
-name = "png"
-version = "0.17.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd75bf2d8dd3702b9707cdbc56a5b9ef42cec752eb8b3bafc01234558442aa64"
-dependencies = [
- "bitflags 1.3.2",
- "crc32fast",
- "fdeflate",
- "flate2",
- "miniz_oxide",
-]
-
-[[package]]
 name = "pointsource_algs"
 version = "2.0.0-dev"
 dependencies = [
  "GSL",
  "alg_tools",
+ "anyhow",
  "chrono",
  "clap",
- "colorbrewer",
  "colored",
  "cpu-time",
  "float_extras",
- "image",
  "itertools",
  "nalgebra",
  "num-traits",
  "numeric_literals",
- "poloto",
  "rand",
  "rand_distr",
  "regex",
- "rgb",
  "serde",
  "serde_json",
 ]
 
 [[package]]
-name = "poloto"
-version = "3.13.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2541c28c0622b297e342444bd8b1d87b02c8478dd3ed0ecc3eee47dc4d13282"
-dependencies = [
- "tagger",
-]
-
-[[package]]
 name = "ppv-lite86"
 version = "0.2.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
 
 [[package]]
-name = "proc-macro-error"
-version = "1.0.4"
+name = "proc-macro2"
+version = "1.0.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
-dependencies = [
- "proc-macro-error-attr",
- "proc-macro2",
- "quote",
- "syn 1.0.109",
- "version_check",
-]
-
-[[package]]
-name = "proc-macro-error-attr"
-version = "1.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
-dependencies = [
- "proc-macro2",
- "quote",
- "version_check",
-]
-
-[[package]]
-name = "proc-macro2"
-version = "1.0.69"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da"
+checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0"
 dependencies = [
  "unicode-ident",
 ]
 
 [[package]]
-name = "qoi"
-version = "0.4.1"
+name = "quote"
+version = "1.0.38"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f6d64c71eb498fe9eae14ce4ec935c555749aef511cca85b5568910d6e48001"
-dependencies = [
- "bytemuck",
-]
-
-[[package]]
-name = "quote"
-version = "1.0.33"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
+checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc"
 dependencies = [
  "proc-macro2",
 ]
@@ -908,9 +689,9 @@
 
 [[package]]
 name = "rayon"
-version = "1.8.0"
+version = "1.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1"
+checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
 dependencies = [
  "either",
  "rayon-core",
@@ -918,9 +699,9 @@
 
 [[package]]
 name = "rayon-core"
-version = "1.12.0"
+version = "1.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed"
+checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
 dependencies = [
  "crossbeam-deque",
  "crossbeam-utils",
@@ -928,9 +709,21 @@
 
 [[package]]
 name = "regex"
-version = "1.7.3"
+version = "1.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d"
+checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -938,39 +731,10 @@
 ]
 
 [[package]]
-name = "regex-automata"
-version = "0.1.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
-
-[[package]]
 name = "regex-syntax"
-version = "0.6.29"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
-
-[[package]]
-name = "rgb"
-version = "0.8.36"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "20ec2d3e3fc7a92ced357df9cebd5a10b6fb2aa1ee797bf7e9ce2f17dffc8f59"
-dependencies = [
- "bytemuck",
-]
-
-[[package]]
-name = "rustix"
-version = "0.37.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4eb579851244c2c03e7c24f501c3432bed80b8f720af1d6e5b0e0f01555a035"
-dependencies = [
- "bitflags 1.3.2",
- "errno",
- "io-lifetimes",
- "libc",
- "linux-raw-sys 0.3.8",
- "windows-sys",
-]
+checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
 
 [[package]]
 name = "rustix"
@@ -978,11 +742,11 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "745ecfa778e66b2b63c88a61cb36e0eea109e803b0b86bf9879fbc77c70e86ed"
 dependencies = [
- "bitflags 2.4.0",
+ "bitflags",
  "errno",
  "libc",
- "linux-raw-sys 0.4.10",
- "windows-sys",
+ "linux-raw-sys",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -1023,7 +787,7 @@
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.38",
+ "syn 2.0.93",
 ]
 
 [[package]]
@@ -1032,16 +796,22 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65"
 dependencies = [
- "itoa 1.0.9",
+ "itoa",
  "ryu",
  "serde",
 ]
 
 [[package]]
+name = "shlex"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
+
+[[package]]
 name = "simba"
-version = "0.7.3"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f3fd720c48c53cace224ae62bef1bbff363a70c68c4802a78b5cc6159618176"
+checksum = "b3a386a501cd104797982c15ae17aafe8b9261315b5d07e3ec803f2ea26be0fa"
 dependencies = [
  "approx",
  "num-complex",
@@ -1051,31 +821,10 @@
 ]
 
 [[package]]
-name = "simd-adler32"
-version = "0.3.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe"
-
-[[package]]
-name = "smallvec"
-version = "1.11.1"
+name = "strsim"
+version = "0.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a"
-
-[[package]]
-name = "spin"
-version = "0.9.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
-dependencies = [
- "lock_api",
-]
-
-[[package]]
-name = "strsim"
-version = "0.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
+checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
 
 [[package]]
 name = "syn"
@@ -1090,9 +839,9 @@
 
 [[package]]
 name = "syn"
-version = "2.0.38"
+version = "2.0.93"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b"
+checksum = "9c786062daee0d6db1132800e623df74274a0a87322d8e183338e01b3d98d058"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -1100,50 +849,13 @@
 ]
 
 [[package]]
-name = "tagger"
-version = "4.3.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "094c9f64d6de9a8506b1e49b63a29333b37ed9e821ee04be694d431b3264c3c5"
-
-[[package]]
-name = "termcolor"
-version = "1.3.0"
+name = "terminal_size"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64"
-dependencies = [
- "winapi-util",
-]
-
-[[package]]
-name = "terminal_size"
-version = "0.2.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e6bf6f19e9f8ed8d4048dc22981458ebcf406d67e94cd422e5ecd73d63b3237"
+checksum = "5352447f921fda68cf61b4101566c0bdb5104eff6804d0678e5227580ab6a4e9"
 dependencies = [
- "rustix 0.37.25",
- "windows-sys",
-]
-
-[[package]]
-name = "tiff"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d172b0f4d3fba17ba89811858b9d3d97f928aece846475bbda076ca46736211"
-dependencies = [
- "flate2",
- "jpeg-decoder",
- "weezl",
-]
-
-[[package]]
-name = "trait-set"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "875c4c873cc824e362fa9a9419ffa59807244824275a44ad06fec9684fff08f2"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 1.0.109",
+ "rustix",
+ "windows-sys 0.59.0",
 ]
 
 [[package]]
@@ -1169,9 +881,15 @@
 
 [[package]]
 name = "unicode-width"
-version = "0.1.11"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85"
+checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd"
+
+[[package]]
+name = "utf8parse"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
 
 [[package]]
 name = "version_check"
@@ -1187,34 +905,34 @@
 
 [[package]]
 name = "wasm-bindgen"
-version = "0.2.87"
+version = "0.2.99"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342"
+checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396"
 dependencies = [
  "cfg-if",
+ "once_cell",
  "wasm-bindgen-macro",
 ]
 
 [[package]]
 name = "wasm-bindgen-backend"
-version = "0.2.87"
+version = "0.2.99"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd"
+checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79"
 dependencies = [
  "bumpalo",
  "log",
- "once_cell",
  "proc-macro2",
  "quote",
- "syn 2.0.38",
+ "syn 2.0.93",
  "wasm-bindgen-shared",
 ]
 
 [[package]]
 name = "wasm-bindgen-macro"
-version = "0.2.87"
+version = "0.2.99"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d"
+checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe"
 dependencies = [
  "quote",
  "wasm-bindgen-macro-support",
@@ -1222,28 +940,22 @@
 
 [[package]]
 name = "wasm-bindgen-macro-support"
-version = "0.2.87"
+version = "0.2.99"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b"
+checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.38",
+ "syn 2.0.93",
  "wasm-bindgen-backend",
  "wasm-bindgen-shared",
 ]
 
 [[package]]
 name = "wasm-bindgen-shared"
-version = "0.2.87"
+version = "0.2.99"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1"
-
-[[package]]
-name = "weezl"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9193164d4de03a926d909d3bc7c30543cecb35400c02114792c2cae20d5e2dbb"
+checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6"
 
 [[package]]
 name = "wide"
@@ -1272,27 +984,18 @@
 checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
 
 [[package]]
-name = "winapi-util"
-version = "0.1.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596"
-dependencies = [
- "winapi",
-]
-
-[[package]]
 name = "winapi-x86_64-pc-windows-gnu"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
 
 [[package]]
-name = "windows"
-version = "0.48.0"
+name = "windows-core"
+version = "0.52.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f"
+checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9"
 dependencies = [
- "windows-targets",
+ "windows-targets 0.52.6",
 ]
 
 [[package]]
@@ -1301,7 +1004,16 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
 dependencies = [
- "windows-targets",
+ "windows-targets 0.48.5",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.59.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
+dependencies = [
+ "windows-targets 0.52.6",
 ]
 
 [[package]]
@@ -1310,13 +1022,29 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
 dependencies = [
- "windows_aarch64_gnullvm",
- "windows_aarch64_msvc",
- "windows_i686_gnu",
- "windows_i686_msvc",
- "windows_x86_64_gnu",
- "windows_x86_64_gnullvm",
- "windows_x86_64_msvc",
+ "windows_aarch64_gnullvm 0.48.5",
+ "windows_aarch64_msvc 0.48.5",
+ "windows_i686_gnu 0.48.5",
+ "windows_i686_msvc 0.48.5",
+ "windows_x86_64_gnu 0.48.5",
+ "windows_x86_64_gnullvm 0.48.5",
+ "windows_x86_64_msvc 0.48.5",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm 0.52.6",
+ "windows_aarch64_msvc 0.52.6",
+ "windows_i686_gnu 0.52.6",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc 0.52.6",
+ "windows_x86_64_gnu 0.52.6",
+ "windows_x86_64_gnullvm 0.52.6",
+ "windows_x86_64_msvc 0.52.6",
 ]
 
 [[package]]
@@ -1326,46 +1054,85 @@
 checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
 
 [[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
 name = "windows_aarch64_msvc"
 version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
 
 [[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
+[[package]]
 name = "windows_i686_gnu"
 version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
 
 [[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
+[[package]]
 name = "windows_i686_msvc"
 version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
 
 [[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
 name = "windows_x86_64_gnu"
 version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
 
 [[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
 name = "windows_x86_64_gnullvm"
 version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
 
 [[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
+[[package]]
 name = "windows_x86_64_msvc"
 version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
 
 [[package]]
-name = "zune-inflate"
-version = "0.2.54"
+name = "windows_x86_64_msvc"
+version = "0.52.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "73ab332fe2f6680068f3582b16a24f90ad7096d5d39b974d1c0aff0125116f02"
-dependencies = [
- "simd-adler32",
-]
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
--- a/Cargo.toml	Thu Aug 29 00:00:00 2024 -0500
+++ b/Cargo.toml	Tue Dec 31 09:25:45 2024 -0500
@@ -21,29 +21,31 @@
 ]
 categories = ["mathematics", "science", "computer-vision"]
 
+[dependencies.alg_tools]
+version = "~0.3.0-dev"
+path = "../alg_tools"
+default-features =  false
+features = ["nightly"]
+
 [dependencies]
-alg_tools = { version = "~0.2.0-dev", path = "../alg_tools", default-features = false }
 serde = { version = "1.0", features = ["derive"] }
 num-traits = { version = "~0.2.14", features = ["std"] }
 rand = "~0.8.5"
-colored = "~2.0.0"
+colored = "~2.1.0"
 rand_distr = "~0.4.3"
-nalgebra = { version = "~0.31.0", features = ["rand-no-std"] }
-itertools = "~0.10.3" 
+nalgebra = { version = "~0.33.0", features = ["rand-no-std"] }
+itertools = "~0.13.0"
 numeric_literals = "~0.2.0"
-poloto = "~3.13.1"
-GSL = "~6.0.0"
+GSL = "~7.0.0"
 float_extras = "~0.1.6"
-clap = { version = "~4.0.27", features = ["derive", "unicode", "wrap_help"] }
-image = "~0.24.3"
+clap = { version = "~4.5.0", features = ["derive", "unicode", "wrap_help"] }
 cpu-time = "~1.0.0"
-colorbrewer = "~0.2.0"
-rgb = "~0.8.33"
 serde_json = "~1.0.85"
 chrono = { version = "~0.4.23", features = ["alloc", "std", "serde"] }
+anyhow = "1.0.95"
 
 [build-dependencies]
-regex = "~1.7.0"
+regex = "~1.11.0"
 
 [profile.release]
 debug = true
--- a/README.md	Thu Aug 29 00:00:00 2024 -0500
+++ b/README.md	Tue Dec 31 09:25:45 2024 -0500
@@ -52,35 +52,29 @@
 
 ### Building and running the experiments
 
-To compile the code and run the experiments in the manuscript, use
+To compile and install the program, use
+```console
+cargo install --path=.
+```
+When doing this for the first time, several dependencies will be downloaded.
+Now you can run the default experiment with
+```
+pointsource_algs -o results
+```
+The `-o results` option tells `pointsource_algs` to write results in the
+`results` directory. The option is required.
+
+Alternatively, you may build and run the program without installing with
 ```console
 cargo run --release -- -o results
 ```
-When doing this for the first time, several dependencies will be downloaded.
-The double-dash (`--`) separates the arguments of Cargo and this software,
-`pointsource_algs`. The `--release` option to Cargo is required for `rustc` to
-build optimised high performance code. Without that flag the performance will
-be significantly worse. The `-o results` option tells `pointsource_algs` to
-write results in the `results` directory. The option is required.
-
-Alternatively, you may build the executable with
-```console
-cargo build --release
-```
-and then run it with
-```
-target/release/pointsource_algs -o results
-```
+The double-dash separates the options for the Cargo build system 
+and `pointsource_algs`.
 
 ### Documentation
 
 Use the `--help` option to get an extensive listing of command line options to
-customise algorithm parameters and the experiments performed. As above with
-`-o`, if using `cargo` to run the executable, you have to pass any arguments
-to `pointsource_algs` after a double-dash:
-```console
-cargo run --release -- --help
-```
+customise algorithm parameters and the experiments performed.
 
 ## Internals
 
--- a/src/dataterm.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/dataterm.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -4,25 +4,26 @@
 
 use numeric_literals::replace_float_literals;
 
-use alg_tools::loc::Loc;
 use alg_tools::euclidean::Euclidean;
 use alg_tools::linops::GEMV;
 pub use alg_tools::norms::L1;
 use alg_tools::norms::Norm;
+use alg_tools::instance::{Instance, Space};
 
 use crate::types::*;
 pub use crate::types::L2Squared;
-use crate::measures::DiscreteMeasure;
+use crate::measures::RNDM;
 
 /// Calculates the residual $Aμ-b$.
 #[replace_float_literals(F::cast_from(literal))]
 pub(crate) fn calculate_residual<
+    X : Space,
+    I : Instance<X>,
     F : Float,
     V : Euclidean<F> + Clone,
-    A : GEMV<F, DiscreteMeasure<Loc<F, N>, F>, Codomain = V>,
-    const N : usize
+    A : GEMV<F, X, Codomain = V>,
 >(
-    μ : &DiscreteMeasure<Loc<F, N>, F>,
+    μ : I,
     opA : &A,
     b : &V
 ) -> V {
@@ -35,12 +36,14 @@
 #[replace_float_literals(F::cast_from(literal))]
 pub(crate) fn calculate_residual2<
     F : Float,
+    X : Space,
+    I : Instance<X>,
+    J : Instance<X>,
     V : Euclidean<F> + Clone,
-    A : GEMV<F, DiscreteMeasure<Loc<F, N>, F>, Codomain = V>,
-    const N : usize
+    A : GEMV<F, X, Codomain = V>,
 >(
-    μ : &DiscreteMeasure<Loc<F, N>, F>,
-    μ_delta : &DiscreteMeasure<Loc<F, N>, F>,
+    μ : I,
+    μ_delta : J,
     opA : &A,
     b : &V
 ) -> V {
@@ -58,14 +61,17 @@
     fn calculate_fit(&self, _residual : &V) -> F;
 
     /// Calculates $F(Aμ-b)$, where $F$ is the data fidelity.
-    fn calculate_fit_op<A : GEMV<F, DiscreteMeasure<Loc<F, N>, F>, Codomain = V>>(
+    fn calculate_fit_op<I, A : GEMV<F, RNDM<F, N>, Codomain = V>>(
         &self,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        μ : I,
         opA : &A,
         b : &V
     ) -> F
-    where V : Euclidean<F> + Clone {
-        let r = calculate_residual(&μ, opA, b);
+    where
+        V : Euclidean<F> + Clone,
+        I : Instance<RNDM<F, N>>,
+    {
+        let r = calculate_residual(μ, opA, b);
         self.calculate_fit(&r)
     }
 }
--- a/src/experiments.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/experiments.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -21,6 +21,7 @@
 use crate::run::{
     RunnableExperiment,
     ExperimentV2,
+    ExperimentBiased,
     Named,
     DefaultAlgorithm,
     AlgorithmConfig
@@ -28,6 +29,10 @@
 //use crate::fb::FBGenericConfig;
 use crate::rand_distr::{SerializableNormal, SaltAndPepper};
 use crate::regularisation::Regularisation;
+use alg_tools::euclidean::Euclidean;
+use alg_tools::instance::Instance;
+use alg_tools::mapping::Mapping;
+use alg_tools::operator_arithmetic::{MappingSum, Weighted};
 
 /// Experiments shorthands, to be used with the command line parser
 
@@ -58,6 +63,9 @@
     /// Two dimensions, “fast” spread, 1-norm data fidelity
     #[clap(name = "2d_l1_fast")]
     Experiment2D_L1_Fast,
+     /// One dimension, “fast” spread, 2-norm-squared data fidelity with extra TV-regularised bias
+    #[clap(name = "1d_tv_fast")]
+    Experiment1D_TV_Fast,
 }
 
 macro_rules! make_float_constant {
@@ -92,6 +100,25 @@
     ([0.30, 0.70], 5.0)
 ];
 
+/// The $\{0,1\}$-valued characteristic function of a ball as a [`Mapping`].
+#[derive(Debug,Copy,Clone,Serialize,PartialEq)]
+struct BallCharacteristic<F : Float, const N : usize> {
+    pub center : Loc<F, N>,
+    pub radius : F,
+}
+
+impl<F : Float, const N : usize> Mapping<Loc<F, N>> for BallCharacteristic<F, N> {
+    type Codomain =F;
+
+    fn apply<I : Instance<Loc<F, N>>>(&self, i : I) -> F {
+        if self.center.dist2(i) <= self.radius {
+            F::ONE
+        } else {
+            F::ZERO
+        }
+    }
+}
+
 //#[replace_float_literals(F::cast_from(literal))]
 impl DefaultExperiment {
     /// Convert the experiment shorthand into a runnable experiment configuration.
@@ -115,6 +142,7 @@
         make_float_constant!(Variance1 = 0.05.powi(2));
         make_float_constant!(CutOff1 = 0.15);
         make_float_constant!(Hat1 = 0.16);
+        make_float_constant!(HatBias = 0.05);
 
         // We use a different step length for PDPS in 2D experiments
         let pdps_2d = || {
@@ -294,6 +322,30 @@
                     ]),
                 }})
             },
+            Experiment1D_TV_Fast => {
+                let base_spread = HatConv { radius : HatBias };
+                Box::new(Named { name, data : ExperimentBiased {
+                    λ : 0.02,
+                    bias : MappingSum::new([
+                        Weighted::new(1.0, BallCharacteristic{ center : 0.3.into(), radius : 0.2 }),
+                        Weighted::new(0.5, BallCharacteristic{ center : 0.6.into(), radius : 0.3 }),
+                    ]),
+                    base : ExperimentV2 {
+                        domain : [[0.0, 1.0]].into(),
+                        sensor_count : [N_SENSORS_1D],
+                        regularisation : Regularisation::NonnegRadon(cli.alpha.unwrap_or(0.2)),
+                        noise_distr : SerializableNormal::new(0.0, cli.variance.unwrap_or(0.1))?,
+                        dataterm : DataTerm::L2Squared,
+                        μ_hat : MU_TRUE_1D_BASIC.into(),
+                        sensor : BallIndicator { r : SensorWidth1D, exponent : Linfinity },
+                        spread : base_spread,
+                        kernel : base_spread,
+                        kernel_plot_width,
+                        noise_seed,
+                        algorithm_defaults: HashMap::new(),
+                    },
+                }})
+            },
         })
     }
 }
--- a/src/fb.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/fb.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -6,10 +6,7 @@
  * Valkonen T. - _Proximal methods for point source localisation_,
    [arXiv:2212.02991](https://arxiv.org/abs/2212.02991).
 
-The main routine is [`pointsource_fb_reg`]. It is based on [`generic_pointsource_fb_reg`], which is
-also used by our [primal-dual proximal splitting][crate::pdps] implementation.
-
-FISTA-type inertia can also be enabled through [`FBConfig::meta`].
+The main routine is [`pointsource_fb_reg`].
 
 ## Problem
 
@@ -76,7 +73,7 @@
 $$
 </p>
 
-We solve this with either SSN or FB via [`quadratic_nonneg`] as determined by
+We solve this with either SSN or FB as determined by
 [`InnerSettings`] in [`FBGenericConfig::inner`].
 */
 
@@ -87,10 +84,11 @@
 
 use alg_tools::iterate::{
     AlgIteratorFactory,
-    AlgIteratorState,
+    AlgIteratorIteration,
+    AlgIterator,
 };
 use alg_tools::euclidean::Euclidean;
-use alg_tools::linops::{Apply, GEMV};
+use alg_tools::linops::{Mapping, GEMV};
 use alg_tools::sets::Cube;
 use alg_tools::loc::Loc;
 use alg_tools::bisection_tree::{
@@ -107,17 +105,24 @@
 };
 use alg_tools::mapping::RealMapping;
 use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::instance::Instance;
+use alg_tools::norms::Linfinity;
 
 use crate::types::*;
 use crate::measures::{
     DiscreteMeasure,
+    RNDM,
     DeltaMeasure,
+    Radon,
 };
 use crate::measures::merging::{
     SpikeMergingMethod,
     SpikeMerging,
 };
-use crate::forward_model::ForwardModel;
+use crate::forward_model::{
+    ForwardModel,
+    AdjointProductBoundedBy
+};
 use crate::seminorms::DiscreteMeasureOp;
 use crate::subproblem::{
     InnerSettings,
@@ -146,8 +151,7 @@
     pub generic : FBGenericConfig<F>,
 }
 
-/// Settings for the solution of the stepwise optimality condition in algorithms based on
-/// [`generic_pointsource_fb_reg`].
+/// Settings for the solution of the stepwise optimality condition.
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
 #[serde(default)]
 pub struct FBGenericConfig<F : Float> {
@@ -188,8 +192,8 @@
     /// Iterations between merging heuristic tries
     pub merge_every : usize,
 
-    /// Save $μ$ for postprocessing optimisation
-    pub postprocessing : bool
+    // /// Save $μ$ for postprocessing optimisation
+    // pub postprocessing : bool
 }
 
 #[replace_float_literals(F::cast_from(literal))]
@@ -221,29 +225,36 @@
             final_merging : Default::default(),
             merge_every : 10,
             merge_tolerance_mult : 2.0,
-            postprocessing : false,
+            // postprocessing : false,
         }
     }
 }
 
+impl<F : Float> FBGenericConfig<F> {
+    /// Check if merging should be attempted this iteration
+    pub fn merge_now<I : AlgIterator>(&self, state : &AlgIteratorIteration<I>) -> bool {
+        state.iteration() % self.merge_every == 0
+    }
+}
+
 /// TODO: document.
 /// `μ_base + ν_delta` is the base point, where `μ` and `μ_base` are assumed to have the same spike
 /// locations, while `ν_delta` may have different locations.
 #[replace_float_literals(F::cast_from(literal))]
 pub(crate) fn insert_and_reweigh<
-    'a, F, GA, 𝒟, BTA, G𝒟, S, K, Reg, State, const N : usize
+    'a, F, GA, 𝒟, BTA, G𝒟, S, K, Reg, I, const N : usize
 >(
-    μ : &mut DiscreteMeasure<Loc<F, N>, F>,
-    minus_τv : &BTFN<F, GA, BTA, N>,
-    μ_base : &DiscreteMeasure<Loc<F, N>, F>,
-    ν_delta: Option<&DiscreteMeasure<Loc<F, N>, F>>,
+    μ : &mut RNDM<F, N>,
+    τv : &BTFN<F, GA, BTA, N>,
+    μ_base : &RNDM<F, N>,
+    ν_delta: Option<&RNDM<F, N>>,
     op𝒟 : &'a 𝒟,
     op𝒟norm : F,
     τ : F,
     ε : F,
     config : &FBGenericConfig<F>,
     reg : &Reg,
-    state : &State,
+    state : &AlgIteratorIteration<I>,
     stats : &mut IterInfo<F, N>,
 ) -> (BTFN<F, BothGenerators<GA, G𝒟>, BTA, N>, bool)
 where F : Float + ToNalgebraRealField,
@@ -255,9 +266,8 @@
       S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
       K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
       BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
       Reg : RegTerm<F, N>,
-      State : AlgIteratorState {
+      I : AlgIterator {
 
     // Maximum insertion count and measure difference calculation depend on insertion style.
     let (max_insertions, warn_insertions) = match (state.iteration(), config.bootstrap_insertions) {
@@ -265,11 +275,10 @@
         _ => (config.max_insertions, !state.is_quiet()),
     };
 
-    // TODO: should avoid a copy of μ_base here.
-    let ω0 = op𝒟.apply(match ν_delta {
-        None => μ_base.clone(),
-        Some(ν_d) => &*μ_base + ν_d,
-    });
+    let ω0 = match ν_delta {
+        None => op𝒟.apply(μ_base),
+        Some(ν) => op𝒟.apply(μ_base + ν),
+    };
 
     // Add points to support until within error tolerance or maximum insertion count reached.
     let mut count = 0;
@@ -277,10 +286,12 @@
         if μ.len() > 0 {
             // Form finite-dimensional subproblem. The subproblem references to the original μ^k
             // from the beginning of the iteration are all contained in the immutable c and g.
+            // TODO: observe negation of -τv after switch from minus_τv: finite-dimensional
+            // problems have not yet been updated to sign change.
             let à = op𝒟.findim_matrix(μ.iter_locations());
             let g̃ = DVector::from_iterator(μ.len(),
                                            μ.iter_locations()
-                                            .map(|ζ| minus_τv.apply(ζ) + ω0.apply(ζ))
+                                            .map(|ζ| ω0.apply(ζ) - τv.apply(ζ))
                                             .map(F::to_nalgebra_mixed));
             let mut x = μ.masses_dvector();
 
@@ -298,12 +309,12 @@
             μ.set_masses_dvector(&x);
         }
 
-        // Form d = ω0 - τv - 𝒟μ = -𝒟(μ - μ^k) - τv for checking the proximate optimality
+        // Form d = τv + 𝒟μ - ω0 = τv + 𝒟(μ - μ^k) for checking the proximate optimality
         // conditions in the predual space, and finding new points for insertion, if necessary.
-        let mut d = minus_τv + op𝒟.preapply(match ν_delta {
-            None => μ_base.sub_matching(μ),
-            Some(ν) =>  μ_base.sub_matching(μ) + ν
-        });
+        let mut d = τv + match ν_delta {
+            None => op𝒟.preapply(μ.sub_matching(μ_base)),
+            Some(ν) => op𝒟.preapply(μ.sub_matching(μ_base) - ν)
+        };
 
         // If no merging heuristic is used, let's be more conservative about spike insertion,
         // and skip it after first round. If merging is done, being more greedy about spike
@@ -330,11 +341,9 @@
         // No point in optimising the weight here; the finite-dimensional algorithm is fast.
         *μ += DeltaMeasure { x : ξ, α : 0.0 };
         count += 1;
+        stats.inserted += 1;
     };
 
-    // TODO: should redo everything if some transports cause a problem.
-    // Maybe implementation should call above loop as a closure.
-
     if !within_tolerances && warn_insertions {
         // Complain (but continue) if we failed to get within tolerances
         // by inserting more points.
@@ -346,61 +355,33 @@
     (d, within_tolerances)
 }
 
-#[replace_float_literals(F::cast_from(literal))]
-pub(crate) fn prune_and_maybe_simple_merge<
-    'a, F, GA, 𝒟, BTA, G𝒟, S, K, Reg, State, const N : usize
->(
-    μ : &mut DiscreteMeasure<Loc<F, N>, F>,
-    minus_τv : &BTFN<F, GA, BTA, N>,
-    μ_base : &DiscreteMeasure<Loc<F, N>, F>,
-    op𝒟 : &'a 𝒟,
-    τ : F,
-    ε : F,
-    config : &FBGenericConfig<F>,
-    reg : &Reg,
-    state : &State,
-    stats : &mut IterInfo<F, N>,
-)
-where F : Float + ToNalgebraRealField,
-      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
-      G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
-      𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
-      𝒟::Codomain : RealMapping<F, N>,
-      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
-      Reg : RegTerm<F, N>,
-      State : AlgIteratorState {
-    if state.iteration() % config.merge_every == 0 {
-        stats.merged += μ.merge_spikes(config.merging, |μ_candidate| {
-            let mut d = minus_τv + op𝒟.preapply(μ_base.sub_matching(&μ_candidate));
-            reg.verify_merge_candidate(&mut d, μ_candidate, τ, ε, &config)
-        });
-    }
-
+pub(crate) fn prune_with_stats<F : Float, const N : usize>(
+    μ : &mut RNDM<F, N>,
+) -> usize {
     let n_before_prune = μ.len();
     μ.prune();
     debug_assert!(μ.len() <= n_before_prune);
-    stats.pruned += n_before_prune - μ.len();
+    n_before_prune - μ.len()
 }
 
 #[replace_float_literals(F::cast_from(literal))]
 pub(crate) fn postprocess<
     F : Float,
     V : Euclidean<F> + Clone,
-    A : GEMV<F, DiscreteMeasure<Loc<F, N>, F>, Codomain = V>,
+    A : GEMV<F, RNDM<F, N>, Codomain = V>,
     D : DataTerm<F, V, N>,
     const N : usize
 > (
-    mut μ : DiscreteMeasure<Loc<F, N>, F>,
+    mut μ : RNDM<F, N>,
     config : &FBGenericConfig<F>,
     dataterm : D,
     opA : &A,
     b : &V,
-) -> DiscreteMeasure<Loc<F, N>, F>
-where DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {
+) -> RNDM<F, N>
+where
+    RNDM<F, N> : SpikeMerging<F>,
+    for<'a> &'a RNDM<F, N> : Instance<RNDM<F, N>>,
+{
     μ.merge_spikes_fitness(config.merging,
                            |μ̃| dataterm.calculate_fit_op(μ̃, opA, b),
                            |&v| v);
@@ -437,15 +418,13 @@
     fbconfig : &FBConfig<F>,
     iterator : I,
     mut plotter : SeqPlotter<F, N>,
-) -> DiscreteMeasure<Loc<F, N>, F>
+) -> RNDM<F, N>
 where F : Float + ToNalgebraRealField,
       I : AlgIteratorFactory<IterInfo<F, N>>,
       for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
-                                  //+ std::ops::Mul<F, Output=A::Observable>,  <-- FIXME: compiler overflow
-      A::Observable : std::ops::MulAssign<F>,
       GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
-          + Lipschitz<&'a 𝒟, FloatType=F>,
+      A : ForwardModel<RNDM<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
+          + AdjointProductBoundedBy<RNDM<F, N>, 𝒟, FloatType=F>,
       BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
       G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
       𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
@@ -455,13 +434,13 @@
       BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
       Cube<F, N>: P2Minimise<Loc<F, N>, F>,
       PlotLookup : Plotting<N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
+      RNDM<F, N> : SpikeMerging<F>,
       Reg : RegTerm<F, N> {
 
     // Set up parameters
     let config = &fbconfig.generic;
-    let op𝒟norm = op𝒟.opnorm_bound();
-    let τ = fbconfig.τ0/opA.lipschitz_factor(&op𝒟).unwrap();
+    let op𝒟norm = op𝒟.opnorm_bound(Radon, Linfinity);
+    let τ = fbconfig.τ0/opA.adjoint_product_bound(&op𝒟).unwrap();
     // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled
     // by τ compared to the conditional gradient approach.
     let tolerance = config.tolerance * τ * reg.tolerance_scaling();
@@ -470,66 +449,59 @@
     // Initialise iterates
     let mut μ = DiscreteMeasure::new();
     let mut residual = -b;
+
+    // Statistics
+    let full_stats = |residual : &A::Observable,
+                      μ : &RNDM<F, N>,
+                      ε, stats| IterInfo {
+        value : residual.norm2_squared_div2() + reg.apply(μ),
+        n_spikes : μ.len(),
+        ε,
+        //postprocessing: config.postprocessing.then(|| μ.clone()),
+        .. stats
+    };
     let mut stats = IterInfo::new();
 
     // Run the algorithm
-    iterator.iterate(|state| {
+    for state in iterator.iter_init(|| full_stats(&residual, &μ, ε, stats.clone())) {
         // Calculate smooth part of surrogate model.
-        // Using `std::mem::replace` here is not ideal, and expects that `empty_observable`
-        // has no significant overhead. For some reosn Rust doesn't allow us simply moving
-        // the residual and replacing it below before the end of this closure.
-        residual *= -τ;
-        let r = std::mem::replace(&mut residual, opA.empty_observable());
-        let minus_τv = opA.preadjoint().apply(r);
+        let τv = opA.preadjoint().apply(residual * τ);
 
         // Save current base point
         let μ_base = μ.clone();
             
         // Insert and reweigh
-        let (d, within_tolerances) = insert_and_reweigh(
-            &mut μ, &minus_τv, &μ_base, None,
+        let (d, _within_tolerances) = insert_and_reweigh(
+            &mut μ, &τv, &μ_base, None,
             op𝒟, op𝒟norm,
             τ, ε,
-            config, &reg, state, &mut stats
+            config, &reg, &state, &mut stats
         );
 
         // Prune and possibly merge spikes
-        prune_and_maybe_simple_merge(
-            &mut μ, &minus_τv, &μ_base,
-            op𝒟,
-            τ, ε,
-            config, &reg, state, &mut stats
-        );
+        if config.merge_now(&state) {
+            stats.merged += μ.merge_spikes(config.merging, |μ_candidate| {
+                let mut d = &τv + op𝒟.preapply(μ_candidate.sub_matching(&μ_base));
+                reg.verify_merge_candidate(&mut d, μ_candidate, τ, ε, &config)
+            });
+        }
+        stats.pruned += prune_with_stats(&mut μ);
 
         // Update residual
         residual = calculate_residual(&μ, opA, b);
 
-        // Update main tolerance for next iteration
-        let ε_prev = ε;
-        ε = tolerance.update(ε, state.iteration());
+        let iter = state.iteration();
         stats.this_iters += 1;
 
-        // Give function value if needed
+        // Give statistics if needed
         state.if_verbose(|| {
-            // Plot if so requested
-            plotter.plot_spikes(
-                format!("iter {} end; {}", state.iteration(), within_tolerances), &d,
-                "start".to_string(), Some(&minus_τv),
-                reg.target_bounds(τ, ε_prev), &μ,
-            );
-            // Calculate mean inner iterations and reset relevant counters.
-            // Return the statistics
-            let res = IterInfo {
-                value : residual.norm2_squared_div2() + reg.apply(&μ),
-                n_spikes : μ.len(),
-                ε : ε_prev,
-                postprocessing: config.postprocessing.then(|| μ.clone()),
-                .. stats
-            };
-            stats = IterInfo::new();
-            res
-        })
-    });
+            plotter.plot_spikes(iter, Some(&d), Some(&τv), &μ);
+            full_stats(&residual, &μ, ε, std::mem::replace(&mut stats, IterInfo::new()))
+        });
+        
+        // Update main tolerance for next iteration
+        ε = tolerance.update(ε, iter);
+    }
 
     postprocess(μ, config, L2Squared, opA, b)
 }
@@ -563,15 +535,13 @@
     fbconfig : &FBConfig<F>,
     iterator : I,
     mut plotter : SeqPlotter<F, N>,
-) -> DiscreteMeasure<Loc<F, N>, F>
+) -> RNDM<F, N>
 where F : Float + ToNalgebraRealField,
       I : AlgIteratorFactory<IterInfo<F, N>>,
       for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
-                                  //+ std::ops::Mul<F, Output=A::Observable>,  <-- FIXME: compiler overflow
-      A::Observable : std::ops::MulAssign<F>,
       GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
-          + Lipschitz<&'a 𝒟, FloatType=F>,
+      A : ForwardModel<RNDM<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
+          + AdjointProductBoundedBy<RNDM<F, N>, 𝒟, FloatType=F>,
       BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
       G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
       𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
@@ -581,13 +551,13 @@
       BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
       Cube<F, N>: P2Minimise<Loc<F, N>, F>,
       PlotLookup : Plotting<N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
+      RNDM<F, N> : SpikeMerging<F>,
       Reg : RegTerm<F, N> {
 
     // Set up parameters
     let config = &fbconfig.generic;
-    let op𝒟norm = op𝒟.opnorm_bound();
-    let τ = fbconfig.τ0/opA.lipschitz_factor(&op𝒟).unwrap();
+    let op𝒟norm = op𝒟.opnorm_bound(Radon, Linfinity);
+    let τ = fbconfig.τ0/opA.adjoint_product_bound(&op𝒟).unwrap();
     let mut λ = 1.0;
     // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled
     // by τ compared to the conditional gradient approach.
@@ -598,32 +568,36 @@
     let mut μ = DiscreteMeasure::new();
     let mut μ_prev = DiscreteMeasure::new();
     let mut residual = -b;
+    let mut warned_merging = false;
+
+    // Statistics
+    let full_stats = |ν : &RNDM<F, N>, ε, stats| IterInfo {
+        value : L2Squared.calculate_fit_op(ν, opA, b) + reg.apply(ν),
+        n_spikes : ν.len(),
+        ε,
+        // postprocessing: config.postprocessing.then(|| ν.clone()),
+        .. stats
+    };
     let mut stats = IterInfo::new();
-    let mut warned_merging = false;
 
     // Run the algorithm
-    iterator.iterate(|state| {
+    for state in iterator.iter_init(|| full_stats(&μ, ε, stats.clone())) {
         // Calculate smooth part of surrogate model.
-        // Using `std::mem::replace` here is not ideal, and expects that `empty_observable`
-        // has no significant overhead. For some reosn Rust doesn't allow us simply moving
-        // the residual and replacing it below before the end of this closure.
-        residual *= -τ;
-        let r = std::mem::replace(&mut residual, opA.empty_observable());
-        let minus_τv = opA.preadjoint().apply(r);
+        let τv = opA.preadjoint().apply(residual * τ);
 
         // Save current base point
         let μ_base = μ.clone();
             
         // Insert new spikes and reweigh
-        let (d, within_tolerances) = insert_and_reweigh(
-            &mut μ, &minus_τv, &μ_base, None,
+        let (d, _within_tolerances) = insert_and_reweigh(
+            &mut μ, &τv, &μ_base, None,
             op𝒟, op𝒟norm,
             τ, ε,
-            config, &reg, state, &mut stats
+            config, &reg, &state, &mut stats
         );
 
         // (Do not) merge spikes.
-        if state.iteration() % config.merge_every == 0 {
+        if config.merge_now(&state) {
             match config.merging {
                 SpikeMergingMethod::None => { },
                 _ => if !warned_merging {
@@ -653,32 +627,18 @@
         // Update residual
         residual = calculate_residual(&μ, opA, b);
 
-        // Update main tolerance for next iteration
-        let ε_prev = ε;
-        ε = tolerance.update(ε, state.iteration());
+        let iter = state.iteration();
         stats.this_iters += 1;
 
-        // Give function value if needed
+        // Give statistics if needed
         state.if_verbose(|| {
-            // Plot if so requested
-            plotter.plot_spikes(
-                format!("iter {} end; {}", state.iteration(), within_tolerances), &d,
-                "start".to_string(), Some(&minus_τv),
-                reg.target_bounds(τ, ε_prev), &μ_prev,
-            );
-            // Calculate mean inner iterations and reset relevant counters.
-            // Return the statistics
-            let res = IterInfo {
-                value : L2Squared.calculate_fit_op(&μ_prev, opA, b) + reg.apply(&μ_prev),
-                n_spikes : μ_prev.len(),
-                ε : ε_prev,
-                postprocessing: config.postprocessing.then(|| μ_prev.clone()),
-                .. stats
-            };
-            stats = IterInfo::new();
-            res
-        })
-    });
+            plotter.plot_spikes(iter, Some(&d), Some(&τv), &μ_prev);
+            full_stats(&μ_prev, ε, std::mem::replace(&mut stats, IterInfo::new()))
+        });
+
+        // Update main tolerance for next iteration
+        ε = tolerance.update(ε, iter);
+    }
 
     postprocess(μ_prev, config, L2Squared, opA, b)
 }
--- a/src/forward_model.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/forward_model.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -2,705 +2,71 @@
 Forward models from discrete measures to observations.
 */
 
-use numeric_literals::replace_float_literals;
-use nalgebra::base::{
-    DMatrix,
-    DVector
-};
-use std::iter::Zip;
-use std::ops::RangeFrom;
-use std::marker::PhantomData;
-
 pub use alg_tools::linops::*;
 use alg_tools::euclidean::Euclidean;
-use alg_tools::norms::{
-    L1, Linfinity, L2, Norm
-};
-use alg_tools::bisection_tree::*;
-use alg_tools::mapping::RealMapping;
-use alg_tools::lingrid::*;
-use alg_tools::iter::{MapX, Mappable};
-use alg_tools::nalgebra_support::ToNalgebraRealField;
-use alg_tools::tabledump::write_csv;
 use alg_tools::error::DynError;
-use alg_tools::maputil::map2;
+use alg_tools::instance::Instance;
+use alg_tools::norms::{NormExponent, L2, Norm};
 
 use crate::types::*;
-use crate::measures::*;
-use crate::seminorms::{
-    ConvolutionOp,
-    SimpleConvolutionKernel,
-};
-use crate::kernels::{
-    Convolution,
-    AutoConvolution,
-    BoundedBy,
-};
-use crate::types::L2Squared;
-use crate::transport::TransportLipschitz;
-
-pub type RNDM<F, const N : usize> = DiscreteMeasure<Loc<F,N>, F>;
+use crate::measures::Radon;
+pub mod sensor_grid;
+pub mod bias;
 
 /// `ForwardeModel`s are bounded preadjointable linear operators  $A ∈ 𝕃(𝒵(Ω); E)$
 /// where $𝒵(Ω) ⊂ ℳ(Ω)$ is the space of sums of delta measures, presented by
-/// [`DiscreteMeasure`], and $E$ is a [`Euclidean`] space.
-pub trait ForwardModel<Domain, F : Float + ToNalgebraRealField>
-: BoundedLinear<DiscreteMeasure<Domain, F>, Codomain=Self::Observable, FloatType=F>
-+ GEMV<F, DiscreteMeasure<Domain, F>, Self::Observable>
-+ Linear<DeltaMeasure<Domain, F>, Codomain=Self::Observable>
-+ Preadjointable<DiscreteMeasure<Domain, F>, Self::Observable> {
+/// [`crate::measures::DiscreteMeasure`], and $E$ is a [`Euclidean`] space.
+pub trait ForwardModel<Domain : Space, F : Float = f64, E : NormExponent = Radon>
+    : BoundedLinear<Domain, E, L2, F, Codomain=Self::Observable>
+    + GEMV<F, Domain, Self::Observable>
+    + Preadjointable<Domain, Self::Observable>
+where
+    for<'a> Self::Observable : Instance<Self::Observable>,
+    Domain : Norm<F, E>,
+{
     /// The codomain or value space (of “observables”) for this operator.
     /// It is assumed to be a [`Euclidean`] space, and therefore also (identified with)
     /// the domain of the preadjoint.
     type Observable : Euclidean<F, Output=Self::Observable>
                       + AXPY<F>
+                      + Space
                       + Clone;
 
-    /// Return A_*A and A_* b
-    fn findim_quadratic_model(
-        &self,
-        μ : &DiscreteMeasure<Domain, F>,
-        b : &Self::Observable
-    ) -> (DMatrix<F::MixedType>, DVector<F::MixedType>);
-
     /// Write an observable into a file.
     fn write_observable(&self, b : &Self::Observable, prefix : String) -> DynError;
 
     /// Returns a zero observable
     fn zero_observable(&self) -> Self::Observable;
-
-    /// Returns an empty (uninitialised) observable.
-    ///
-    /// This is used as a placeholder for temporary [`std::mem::replace`] move operations.
-    fn empty_observable(&self) -> Self::Observable;
-}
-
-pub type ShiftedSensor<F, S, P, const N : usize> = Shift<Convolution<S, P>, F, N>;
-
-/// Trait for physical convolution models. Has blanket implementation for all cases.
-pub trait Spread<F : Float, const N : usize>
-: 'static + Clone + Support<F, N> + RealMapping<F, N> + Bounded<F> {}
-
-impl<F, T, const N : usize> Spread<F, N> for T
-where F : Float,
-      T : 'static + Clone + Support<F, N> + Bounded<F> + RealMapping<F, N> {}
-
-/// Trait for compactly supported sensors. Has blanket implementation for all cases.
-pub trait Sensor<F : Float, const N : usize> : Spread<F, N> + Norm<F, L1> + Norm<F, Linfinity> {}
-
-impl<F, T, const N : usize> Sensor<F, N> for T
-where F : Float,
-      T : Spread<F, N> + Norm<F, L1> + Norm<F, Linfinity> {}
-
-
-pub trait SensorGridBT<F, S, P, const N : usize> :
-Clone + BTImpl<F, N, Data=usize, Agg=Bounds<F>>
-where F : Float,
-      S : Sensor<F, N>,
-      P : Spread<F, N> {}
-
-impl<F, S, P, T, const N : usize>
-SensorGridBT<F, S, P, N>
-for T
-where T : Clone + BTImpl<F, N, Data=usize, Agg=Bounds<F>>,
-      F : Float,
-      S : Sensor<F, N>,
-      P : Spread<F, N> {}
-
-// We need type alias bounds to access associated types
-#[allow(type_alias_bounds)]
-type SensorGridBTFN<F, S, P, BT : SensorGridBT<F, S, P, N>, const N : usize>
-= BTFN<F, SensorGridSupportGenerator<F, S, P, N>, BT, N>;
-
-/// Sensor grid forward model
-#[derive(Clone)]
-pub struct SensorGrid<F, S, P, BT, const N : usize>
-where F : Float,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      BT : SensorGridBT<F, S, P, N>, {
-    domain : Cube<F, N>,
-    sensor_count : [usize; N],
-    sensor : S,
-    spread : P,
-    base_sensor : Convolution<S, P>,
-    bt : BT,
 }
 
-impl<F, S, P, BT, const N : usize> SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-
-    pub fn new(
-        domain : Cube<F, N>,
-        sensor_count : [usize; N],
-        sensor : S,
-        spread : P,
-        depth : BT::Depth
-    ) -> Self {
-        let base_sensor = Convolution(sensor.clone(), spread.clone());
-        let bt = BT::new(domain, depth);
-        let mut sensorgrid = SensorGrid {
-            domain,
-            sensor_count,
-            sensor,
-            spread,
-            base_sensor,
-            bt,
-        };
-
-        for (x, id) in sensorgrid.grid().into_iter().zip(0usize..) {
-            let s = sensorgrid.shifted_sensor(x);
-            sensorgrid.bt.insert(id, &s);
-        }
-
-        sensorgrid
-    }
-
-    pub fn grid(&self) -> LinGrid<F, N> {
-        lingrid_centered(&self.domain, &self.sensor_count)
-    }
-
-    pub fn n_sensors(&self) -> usize {
-        self.sensor_count.iter().product()
-    }
-
-    #[inline]
-    fn shifted_sensor(&self, x : Loc<F, N>) -> ShiftedSensor<F, S, P, N> {
-        self.base_sensor.clone().shift(x)
-    }
-
-    #[inline]
-    fn _zero_observable(&self) -> DVector<F> {
-        DVector::zeros(self.n_sensors())
-    }
-}
-
-impl<F, S, P, BT, const N : usize> Apply<RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-
-    type Output =  DVector<F>;
-
-    #[inline]
-    fn apply(&self, μ : RNDM<F, N>) -> DVector<F> {
-        self.apply(&μ)
-    }
+/// Trait for operators $A$ for which $A_*A$ is bounded by some other operator.
+pub trait AdjointProductBoundedBy<Domain : Space, D> : Linear<Domain> {
+    type FloatType : Float;
+    /// Return $L$ such that $A_*A ≤ LD$.
+    fn adjoint_product_bound(&self, other : &D) -> Option<Self::FloatType>;
 }
 
-impl<'a, F, S, P, BT, const N : usize> Apply<&'a RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-
-    type Output =  DVector<F>;
-
-    fn apply(&self, μ : &'a RNDM<F, N>) ->  DVector<F> {
-        let mut res = self._zero_observable();
-        self.apply_add(&mut res, μ);
-        res
-    }
-}
-
-impl<F, S, P, BT, const N : usize> Linear<RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-    type Codomain = DVector<F>;
-}
-
-
-#[replace_float_literals(F::cast_from(literal))]
-impl<F, S, P, BT, const N : usize> GEMV<F, RNDM<F, N>, DVector<F>> for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-
-    fn gemv(&self, y : &mut DVector<F>, α : F, μ : &RNDM<F, N>, β : F) {
-        let grid = self.grid();
-        if β == 0.0 {
-            y.fill(0.0)
-        } else if β != 1.0 {
-            *y *= β; // Need to multiply first, as we have to be able to add to y.
-        }
-        if α == 1.0 {
-            self.apply_add(y, μ)
-        } else {
-            for δ in μ.iter_spikes() {
-                for &d in self.bt.iter_at(&δ.x) {
-                    let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
-                    y[d] += sensor.apply(&δ.x) * (α * δ.α);
-                }
-            }
-        }
-    }
-
-    fn apply_add(&self, y : &mut DVector<F>, μ : &RNDM<F, N>) {
-        let grid = self.grid();
-        for δ in μ.iter_spikes() {
-            for &d in self.bt.iter_at(&δ.x) {
-                let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
-                y[d] += sensor.apply(&δ.x) * δ.α;
-            }
-        }
-    }
-
+/// Trait for operators $A$ for which $A_*A$ is bounded by a diagonal operator.
+pub trait AdjointProductPairBoundedBy<Domain : Space, D1, D2> : Linear<Domain> {
+    type FloatType : Float;
+    /// Return $(L_1, L_2)$ such that $A_*A ≤ (L_1 D_1, L_2 D_2)$.
+    fn adjoint_product_pair_bound(&self, other1 : &D1, other_2 : &D2)
+        -> Option<(Self::FloatType, Self::FloatType)>;
 }
 
-impl<F, S, P, BT, const N : usize> Apply<DeltaMeasure<Loc<F, N>, F>>
-for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-
-    type Output =  DVector<F>;
-
-    #[inline]
-    fn apply(&self, δ : DeltaMeasure<Loc<F, N>, F>) -> DVector<F> {
-        self.apply(&δ)
+/// Trait for [`ForwardModel`]s whose preadjoint has Lipschitz values.
+pub trait LipschitzValues {
+    type FloatType : Float;
+    /// Return (if one exists) a factor $L$ such that $A_*z$ is $L$-Lipschitz for all
+    /// $z$ in the unit ball.
+    fn value_unit_lipschitz_factor(&self) -> Option<Self::FloatType> {
+        None
     }
-}
-
-impl<'a, F, S, P, BT, const N : usize> Apply<&'a DeltaMeasure<Loc<F, N>, F>>
-for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-
-    type Output =  DVector<F>;
-
-    fn apply(&self, δ : &DeltaMeasure<Loc<F, N>, F>) -> DVector<F> {
-        let mut res = DVector::zeros(self.n_sensors());
-        let grid = self.grid();
-        for &d in self.bt.iter_at(&δ.x) {
-            let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
-            res[d] += sensor.apply(&δ.x) * δ.α;
-        }
-        res
-    }
-}
 
-impl<F, S, P, BT, const N : usize> Linear<DeltaMeasure<Loc<F, N>, F>> for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-    type Codomain = DVector<F>;
-}
-
-impl<F, S, P, BT, const N : usize> BoundedLinear<RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N, Agg=Bounds<F>>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-    type FloatType = F;
-
-    /// An estimate on the operator norm in $𝕃(ℳ(Ω); ℝ^n)$ with $ℳ(Ω)$ equipped
-    /// with the Radon norm, and $ℝ^n$ with the Euclidean norm.
-    fn opnorm_bound(&self) -> F {
-        // With {x_i}_{i=1}^n the grid centres and φ the kernel, we have
-        // |Aμ|_2 = sup_{|z|_2 ≤ 1} ⟨z,Αμ⟩ = sup_{|z|_2 ≤ 1} ⟨A^*z|μ⟩
-        // ≤ sup_{|z|_2 ≤ 1} |A^*z|_∞ |μ|_ℳ
-        // = sup_{|z|_2 ≤ 1} |∑ φ(· - x_i)z_i|_∞ |μ|_ℳ
-        // ≤ sup_{|z|_2 ≤ 1} |φ|_∞ ∑ |z_i| |μ|_ℳ
-        // ≤ sup_{|z|_2 ≤ 1} |φ|_∞ √n |z|_2 |μ|_ℳ
-        // = |φ|_∞ √n |μ|_ℳ.
-        // Hence
-        let n = F::cast_from(self.n_sensors());
-        self.base_sensor.bounds().uniform() * n.sqrt()
-    }
-}
-
-type SensorGridPreadjoint<'a, A, F, const N : usize> = PreadjointHelper<'a, A, RNDM<F,N>>;
-
-
-impl<F, S, P, BT, const N : usize>
-Preadjointable<RNDM<F, N>, DVector<F>>
-for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
-      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {
-    type PreadjointCodomain = BTFN<F, SensorGridSupportGenerator<F, S, P, N>, BT, N>;
-    type Preadjoint<'a> = SensorGridPreadjoint<'a, Self, F, N> where Self : 'a;
-
-    fn preadjoint(&self) -> Self::Preadjoint<'_> {
-        PreadjointHelper::new(self)
+    /// Return (if one exists) a factor $L$ such that $∇A_*z$ is $L$-Lipschitz for all
+    /// $z$ in the unit ball.
+    fn value_diff_unit_lipschitz_factor(&self) -> Option<Self::FloatType> {
+        None
     }
 }
 
-#[derive(Clone,Debug)]
-pub struct SensorGridSupportGenerator<F, S, P, const N : usize>
-where F : Float,
-      S : Sensor<F, N>,
-      P : Spread<F, N> {
-    base_sensor : Convolution<S, P>,
-    grid : LinGrid<F, N>,
-    weights : DVector<F>
-}
-
-impl<F, S, P, const N : usize> SensorGridSupportGenerator<F, S, P, N>
-where F : Float,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N> {
-
-    #[inline]
-    fn construct_sensor(&self, id : usize, w : F) -> Weighted<ShiftedSensor<F, S, P, N>, F> {
-        let x = self.grid.entry_linear_unchecked(id);
-        self.base_sensor.clone().shift(x).weigh(w)
-    }
-
-    #[inline]
-    fn construct_sensor_and_id<'a>(&'a self, (id, w) : (usize, &'a F))
-    -> (usize, Weighted<ShiftedSensor<F, S, P, N>, F>) {
-        (id.into(), self.construct_sensor(id, *w))
-    }
-}
-
-impl<F, S, P, const N : usize> SupportGenerator<F, N>
-for SensorGridSupportGenerator<F, S, P, N>
-where F : Float,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N> {
-    type Id = usize;
-    type SupportType = Weighted<ShiftedSensor<F, S, P, N>, F>;
-    type AllDataIter<'a> = MapX<'a, Zip<RangeFrom<usize>,
-                                        std::slice::Iter<'a, F>>,
-                                Self,
-                                (Self::Id, Self::SupportType)>
-                           where Self : 'a;
-
-    #[inline]
-    fn support_for(&self, d : Self::Id) -> Self::SupportType {
-        self.construct_sensor(d, self.weights[d])
-    }
-
-    #[inline]
-    fn support_count(&self) -> usize {
-        self.weights.len()
-    }
-
-    #[inline]
-    fn all_data(&self) -> Self::AllDataIter<'_> {
-        (0..).zip(self.weights.as_slice().iter()).mapX(self, Self::construct_sensor_and_id)
-    }
-}
-
-/// Helper structure for constructing preadjoints of `S` where `S : Linear<X>`.
-/// [`Linear`] needs to be implemented for each instance, but [`Adjointable`]
-/// and [`BoundedLinear`] have blanket implementations.
-#[derive(Clone,Debug)]
-pub struct PreadjointHelper<'a, S : 'a, X> {
-    forward_op : &'a S,
-    _domain : PhantomData<X>
-}
-
-impl<'a, S : 'a, X> PreadjointHelper<'a, S, X> {
-    pub fn new(forward_op : &'a S) -> Self {
-        PreadjointHelper { forward_op, _domain: PhantomData }
-    }
-}
-
-impl<'a, X, Ypre, S> Adjointable<Ypre, X>
-for PreadjointHelper<'a, S, X>
-where Self : Linear<Ypre>,
-      S : Clone + Linear<X> {
-    type AdjointCodomain = S::Codomain;
-    type Adjoint<'b> = S where Self : 'b;
-    fn adjoint(&self) -> Self::Adjoint<'_> {
-        self.forward_op.clone()
-    }
-}
-
-impl<'a, X, Ypre, S> BoundedLinear<Ypre>
-for PreadjointHelper<'a, S, X>
-where Self : Linear<Ypre>,
-      S : 'a + Clone + BoundedLinear<X> {
-    type FloatType = S::FloatType;
-    fn opnorm_bound(&self) -> Self::FloatType {
-        self.forward_op.opnorm_bound()
-    }
-}
-
-
-impl<'a, 'b, F, S, P, BT, const N : usize> Apply<&'b DVector<F>>
-for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F,N>>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
-      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {
-
-    type Output = SensorGridBTFN<F, S, P, BT, N>;
-
-    fn apply(&self, x : &'b DVector<F>) -> Self::Output {
-        self.apply(x.clone())
-    }
-}
-
-impl<'a, F, S, P, BT, const N : usize> Apply<DVector<F>>
-for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F,N>>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
-      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {
-
-    type Output = SensorGridBTFN<F, S, P, BT, N>;
-
-    fn apply(&self, x : DVector<F>) -> Self::Output {
-        let fwd = &self.forward_op;
-        let generator = SensorGridSupportGenerator{
-            base_sensor : fwd.base_sensor.clone(),
-            grid : fwd.grid(),
-            weights : x
-        };
-        BTFN::new_refresh(&fwd.bt, generator)
-    }
-}
-
-impl<'a, F, S, P, BT, const N : usize> Linear<DVector<F>>
-for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F,N>>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
-      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {
-
-    type Codomain = SensorGridBTFN<F, S, P, BT, N>;
-}
-
-impl<F, S, P, BT, const N : usize> ForwardModel<Loc<F, N>, F>
-for SensorGrid<F, S, P, BT, N>
-where F : Float + ToNalgebraRealField<MixedType=F> + nalgebra::RealField,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
-      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {
-    type Observable = DVector<F>;
-
-    fn findim_quadratic_model(
-        &self,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
-        b : &Self::Observable
-    ) -> (DMatrix<F::MixedType>, DVector<F::MixedType>) {
-        assert_eq!(b.len(), self.n_sensors());
-        let mut mA = DMatrix::zeros(self.n_sensors(), μ.len());
-        let grid = self.grid();
-        for (mut mAcol, δ) in mA.column_iter_mut().zip(μ.iter_spikes()) {
-            for &d in self.bt.iter_at(&δ.x) {
-                let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
-                mAcol[d] += sensor.apply(&δ.x);
-            }
-        }
-        let mAt = mA.transpose();
-        (&mAt * mA, &mAt * b)
-    }
-
-    fn write_observable(&self, b : &Self::Observable, prefix : String) -> DynError {
-        let it = self.grid().into_iter().zip(b.iter()).map(|(x, &v)| (x, v));
-        write_csv(it, prefix + ".txt")
-    }
-
-    #[inline]
-    fn zero_observable(&self) -> Self::Observable {
-        self._zero_observable()
-    }
-
-    #[inline]
-    fn empty_observable(&self) -> Self::Observable {
-        DVector::zeros(0)
-    }
-
-}
-
-/// Implements the calculation a factor $L$ such that $A_*A ≤ L 𝒟$ for $A$ the forward model
-/// and $𝒟$ a seminorm of suitable form.
-///
-/// **This assumes (but does not check) that the sensors are not overlapping.**
-#[replace_float_literals(F::cast_from(literal))]
-impl<'a, F, BT, S, P, K, const N : usize> Lipschitz<&'a ConvolutionOp<F, K, BT, N>>
-for SensorGrid<F, S, P, BT, N>
-where F : Float + nalgebra::RealField + ToNalgebraRealField,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      K : SimpleConvolutionKernel<F, N>,
-      AutoConvolution<P> : BoundedBy<F, K> {
-
-    type FloatType = F;
-
-    fn lipschitz_factor(&self, seminorm : &'a ConvolutionOp<F, K, BT, N>) -> Option<F> {
-        // Sensors should not take on negative values to allow
-        // A_*A to be upper bounded by a simple convolution of `spread`.
-        if self.sensor.bounds().lower() < 0.0 {
-            return None
-        }
-
-        // Calculate the factor $L_1$ for betwee $ℱ[ψ * ψ] ≤ L_1 ℱ[ρ]$ for $ψ$ the base spread
-        // and $ρ$ the kernel of the seminorm.
-        let l1 = AutoConvolution(self.spread.clone()).bounding_factor(seminorm.kernel())?;
-
-        // Calculate the factor for transitioning from $A_*A$ to `AutoConvolution<P>`, where A
-        // consists of several `Convolution<S, P>` for the physical model `P` and the sensor `S`.
-        let l0 = self.sensor.norm(Linfinity) * self.sensor.norm(L1);
-
-        // The final transition factor is:
-        Some(l0 * l1)
-    }
-}
-
-#[replace_float_literals(F::cast_from(literal))]
-impl<F, BT, S, P, const N : usize> TransportLipschitz<L2Squared>
-for SensorGrid<F, S, P, BT, N>
-where F : Float + ToNalgebraRealField,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N> + Lipschitz<L2, FloatType = F> {
-    type FloatType = F;
-
-    fn transport_lipschitz_factor(&self, L2Squared : L2Squared) -> Self::FloatType {
-        // We estimate the factor by N_ψL^2, where L is the 2-norm Lipschitz factor of
-        // the base sensor (sensor * base_spread), and N_ψ the maximum overlap.
-        let l = self.base_sensor.lipschitz_factor(L2).unwrap();
-        let w = self.base_sensor.support_hint().width();
-        let d = map2(self.domain.width(), &self.sensor_count, |wi, &i| wi/F::cast_from(i));
-        let n = w.iter()
-                 .zip(d.iter())
-                 .map(|(&wi, &di)| (wi/di).ceil())
-                 .reduce(F::mul)
-                 .unwrap();
-        2.0 * n * l.powi(2)
-    }
-}
-
-
-macro_rules! make_sensorgridsupportgenerator_scalarop_rhs {
-    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
-        impl<F, S, P, const N : usize>
-        std::ops::$trait_assign<F>
-        for SensorGridSupportGenerator<F, S, P, N>
-        where F : Float,
-              S : Sensor<F, N>,
-              P : Spread<F, N>,
-              Convolution<S, P> : Spread<F, N> {
-            fn $fn_assign(&mut self, t : F) {
-                self.weights.$fn_assign(t);
-            }
-        }
-
-        impl<F, S, P, const N : usize>
-        std::ops::$trait<F>
-        for SensorGridSupportGenerator<F, S, P, N>
-        where F : Float,
-              S : Sensor<F, N>,
-              P : Spread<F, N>,
-              Convolution<S, P> : Spread<F, N> {
-            type Output = SensorGridSupportGenerator<F, S, P, N>;
-            fn $fn(mut self, t : F) -> Self::Output {
-                std::ops::$trait_assign::$fn_assign(&mut self.weights, t);
-                self
-            }
-        }
-
-        impl<'a, F, S, P, const N : usize>
-        std::ops::$trait<F>
-        for &'a SensorGridSupportGenerator<F, S, P, N>
-        where F : Float,
-              S : Sensor<F, N>,
-              P : Spread<F, N>,
-              Convolution<S, P> : Spread<F, N> {
-            type Output = SensorGridSupportGenerator<F, S, P, N>;
-            fn $fn(self, t : F) -> Self::Output {
-                SensorGridSupportGenerator{
-                    base_sensor : self.base_sensor.clone(),
-                    grid : self.grid,
-                    weights : (&self.weights).$fn(t)
-                }
-            }
-        }
-    }
-}
-
-make_sensorgridsupportgenerator_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
-make_sensorgridsupportgenerator_scalarop_rhs!(Div, div, DivAssign, div_assign);
-
-macro_rules! make_sensorgridsupportgenerator_unaryop {
-    ($trait:ident, $fn:ident) => {
-        impl<F, S, P, const N : usize>
-        std::ops::$trait
-        for SensorGridSupportGenerator<F, S, P, N>
-        where F : Float,
-              S : Sensor<F, N>,
-              P : Spread<F, N>,
-              Convolution<S, P> : Spread<F, N> {
-            type Output = SensorGridSupportGenerator<F, S, P, N>;
-            fn $fn(mut self) -> Self::Output {
-                self.weights = self.weights.$fn();
-                self
-            }
-        }
-
-        impl<'a, F, S, P, const N : usize>
-        std::ops::$trait
-        for &'a SensorGridSupportGenerator<F, S, P, N>
-        where F : Float,
-              S : Sensor<F, N>,
-              P : Spread<F, N>,
-              Convolution<S, P> : Spread<F, N> {
-            type Output = SensorGridSupportGenerator<F, S, P, N>;
-            fn $fn(self) -> Self::Output {
-                SensorGridSupportGenerator{
-                    base_sensor : self.base_sensor.clone(),
-                    grid : self.grid,
-                    weights : (&self.weights).$fn()
-                }
-            }
-        }
-    }
-}
-
-make_sensorgridsupportgenerator_unaryop!(Neg, neg);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/forward_model/bias.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -0,0 +1,110 @@
+/*!
+Simple parametric forward model.
+ */
+
+use numeric_literals::replace_float_literals;
+use alg_tools::types::{Float, ClosedAdd};
+use alg_tools::mapping::Space;
+use alg_tools::direct_product::Pair;
+use alg_tools::linops::{Linear, RowOp, ColOp, IdOp, ZeroOp, AXPY};
+use alg_tools::error::DynError;
+use alg_tools::norms::{L2, Norm, PairNorm, NormExponent};
+use crate::types::L2Squared;
+use crate::measures::RNDM;
+use super::{ForwardModel, AdjointProductBoundedBy, AdjointProductPairBoundedBy, LipschitzValues};
+use crate::transport::TransportLipschitz;
+
+impl<Domain, F, A, E> ForwardModel<Pair<Domain, A::Observable>, F, PairNorm<E, L2, L2>>
+for RowOp<A, IdOp<A::Observable>>
+where
+    E : NormExponent,
+    Domain : Space + Norm<F, E>,
+    F : Float,
+    A::Observable : ClosedAdd + Norm<F, L2> + 'static,
+    A : ForwardModel<Domain, F, E> + 'static
+{
+    type Observable = A::Observable;
+
+    fn write_observable(&self, b : &Self::Observable, prefix : String) -> DynError {
+        self.0.write_observable(b, prefix)
+    }
+
+    /// Returns a zero observable
+    fn zero_observable(&self) -> Self::Observable {
+        self.0.zero_observable()
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<Domain, F, A, D, Z> AdjointProductPairBoundedBy<Pair<Domain, Z>, D, IdOp<Z>>
+for RowOp<A, IdOp<Z>>
+where
+    Domain : Space,
+    F : Float,
+    Z : Clone + Space + ClosedAdd,
+    A : AdjointProductBoundedBy<Domain, D, FloatType=F, Codomain = Z>,
+    D : Linear<Domain>,
+    A::Codomain : ClosedAdd,
+{
+    type FloatType = F;
+
+    fn adjoint_product_pair_bound(&self, d : &D, _ : &IdOp<Z>) -> Option<(F, F)> {
+        self.0.adjoint_product_bound(d).map(|l_0| {
+            // [A_*; B_*][A, B] = [A_*A, A_* B; B_* A, B_* B] ≤ diag(2A_*A, 2B_*B)
+            // ≤ diag(2l_A𝒟_A, 2l_B𝒟_B), where now 𝒟_B=Id and l_B=1.
+            (2.0 * l_0, 2.0)
+        })
+    }
+}
+
+/// This `impl` is a bit of an abuse as the codomain of `Apre` is a [`Pair`] of a measure predual,
+/// to which this `impl` applies, and another space.
+impl<F, Apre, Z> LipschitzValues
+for ColOp<Apre, IdOp<Z>>
+where
+    F : Float,
+    Z : Clone + Space + ClosedAdd,
+    Apre : LipschitzValues<FloatType = F>,
+{
+    type FloatType = F;
+    /// Return (if one exists) a factor $L$ such that $A_*z$ is $L$-Lipschitz for all
+    /// $z$ in the unit ball.
+    fn value_unit_lipschitz_factor(&self) -> Option<Self::FloatType> {
+        self.0.value_unit_lipschitz_factor()
+    }
+
+    /// Return (if one exists) a factor $L$ such that $∇A_*z$ is $L$-Lipschitz for all
+    /// $z$ in the unit ball.
+    fn value_diff_unit_lipschitz_factor(&self) -> Option<Self::FloatType> {
+        self.0.value_diff_unit_lipschitz_factor()
+    }
+}
+
+
+
+impl<'a, F : Float, Y : Space, XD, const N : usize> TransportLipschitz<L2Squared> for
+ZeroOp<'a, RNDM<F, N>, XD, Y, F> {
+    type FloatType = F;
+
+    fn transport_lipschitz_factor(&self, _ : L2Squared) -> Self::FloatType {
+        F::ZERO
+    }
+}
+
+
+/// TODO: should assume `D` to be positive semi-definite and self-adjoint.
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F, D, XD, Y, const N : usize> AdjointProductBoundedBy<RNDM<F, N>, D>
+for ZeroOp<'a, RNDM<F, N>, XD, Y, F>
+where
+    F : Float,
+    Y : AXPY<F> + Clone,
+    D : Linear<RNDM<F, N>>,
+{
+    type FloatType = F;
+    /// Return $L$ such that $A_*A ≤ L𝒟$ is bounded by some `other` operator $𝒟$.
+    fn adjoint_product_bound(&self, _ : &D) -> Option<F> {
+        Some(0.0)
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/forward_model/sensor_grid.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -0,0 +1,634 @@
+/*!
+Sensor grid forward model
+*/
+
+use numeric_literals::replace_float_literals;
+use nalgebra::base::{
+    DMatrix,
+    DVector
+};
+use std::iter::Zip;
+use std::ops::RangeFrom;
+
+pub use alg_tools::linops::*;
+use alg_tools::norms::{
+    L1, Linfinity, L2, Norm
+};
+use alg_tools::bisection_tree::*;
+use alg_tools::mapping::{
+    RealMapping,
+    DifferentiableMapping
+};
+use alg_tools::lingrid::*;
+use alg_tools::iter::{MapX, Mappable};
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::tabledump::write_csv;
+use alg_tools::error::DynError;
+use alg_tools::maputil::map2;
+use alg_tools::instance::Instance;
+
+use crate::types::*;
+use crate::measures::{DiscreteMeasure, Radon};
+use crate::seminorms::{
+    ConvolutionOp,
+    SimpleConvolutionKernel,
+};
+use crate::kernels::{
+    Convolution,
+    AutoConvolution,
+    BoundedBy,
+};
+use crate::types::L2Squared;
+use crate::transport::TransportLipschitz;
+use crate::preadjoint_helper::PreadjointHelper;
+use super::{
+    ForwardModel,
+    LipschitzValues,
+    AdjointProductBoundedBy
+};
+use crate::frank_wolfe::FindimQuadraticModel;
+
+type RNDM<F, const N : usize> = DiscreteMeasure<Loc<F,N>, F>;
+
+pub type ShiftedSensor<F, S, P, const N : usize> = Shift<Convolution<S, P>, F, N>;
+
+/// Trait for physical convolution models. Has blanket implementation for all cases.
+pub trait Spread<F : Float, const N : usize>
+: 'static + Clone + Support<F, N> + RealMapping<F, N> + Bounded<F> {}
+
+impl<F, T, const N : usize> Spread<F, N> for T
+where F : Float,
+      T : 'static + Clone + Support<F, N> + Bounded<F> + RealMapping<F, N> {}
+
+/// Trait for compactly supported sensors. Has blanket implementation for all cases.
+pub trait Sensor<F : Float, const N : usize> : Spread<F, N> + Norm<F, L1> + Norm<F, Linfinity> {}
+
+impl<F, T, const N : usize> Sensor<F, N> for T
+where F : Float,
+      T : Spread<F, N> + Norm<F, L1> + Norm<F, Linfinity> {}
+
+
+pub trait SensorGridBT<F, S, P, const N : usize> :
+Clone + BTImpl<F, N, Data=usize, Agg=Bounds<F>>
+where F : Float,
+      S : Sensor<F, N>,
+      P : Spread<F, N> {}
+
+impl<F, S, P, T, const N : usize>
+SensorGridBT<F, S, P, N>
+for T
+where T : Clone + BTImpl<F, N, Data=usize, Agg=Bounds<F>>,
+      F : Float,
+      S : Sensor<F, N>,
+      P : Spread<F, N> {}
+
+// We need type alias bounds to access associated types
+#[allow(type_alias_bounds)]
+pub type SensorGridBTFN<F, S, P, BT : SensorGridBT<F, S, P, N>, const N : usize>
+= BTFN<F, SensorGridSupportGenerator<F, S, P, N>, BT, N>;
+
+/// Sensor grid forward model
+#[derive(Clone)]
+pub struct SensorGrid<F, S, P, BT, const N : usize>
+where F : Float,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      BT : SensorGridBT<F, S, P, N>, {
+    domain : Cube<F, N>,
+    sensor_count : [usize; N],
+    sensor : S,
+    spread : P,
+    base_sensor : Convolution<S, P>,
+    bt : BT,
+}
+
+impl<F, S, P, BT, const N : usize> SensorGrid<F, S, P, BT, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N> + LocalAnalysis<F, BT::Agg, N>,
+      /*ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>*/ {
+
+    /// Create a new sensor grid.
+    ///
+    /// The parameter `depth` indicates the search depth of the created [`BT`]s
+    /// for the adjoint values.
+    pub fn new(
+        domain : Cube<F, N>,
+        sensor_count : [usize; N],
+        sensor : S,
+        spread : P,
+        depth : BT::Depth
+    ) -> Self {
+        let base_sensor = Convolution(sensor.clone(), spread.clone());
+        let bt = BT::new(domain, depth);
+        let mut sensorgrid = SensorGrid {
+            domain,
+            sensor_count,
+            sensor,
+            spread,
+            base_sensor,
+            bt,
+        };
+
+        for (x, id) in sensorgrid.grid().into_iter().zip(0usize..) {
+            let s = sensorgrid.shifted_sensor(x);
+            sensorgrid.bt.insert(id, &s);
+        }
+
+        sensorgrid
+    }
+}
+
+
+impl<F, S, P, BT, const N : usize> SensorGrid<F, S, P, BT, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N> {
+      
+    /// Return the grid of sensor locations.
+    pub fn grid(&self) -> LinGrid<F, N> {
+        lingrid_centered(&self.domain, &self.sensor_count)
+    }
+
+    /// Returns the number of sensors (number of grid points)
+    pub fn n_sensors(&self) -> usize {
+        self.sensor_count.iter().product()
+    }
+
+    /// Constructs a sensor shifted by `x`.
+    #[inline]
+    fn shifted_sensor(&self, x : Loc<F, N>) -> ShiftedSensor<F, S, P, N> {
+        self.base_sensor.clone().shift(x)
+    }
+
+    #[inline]
+    fn _zero_observable(&self) -> DVector<F> {
+        DVector::zeros(self.n_sensors())
+    }
+
+    /// Returns the maximum number of overlapping sensors $N_\psi$.
+    pub fn max_overlapping(&self) -> F {
+        let w = self.base_sensor.support_hint().width();
+        let d = map2(self.domain.width(), &self.sensor_count, |wi, &i| wi/F::cast_from(i));
+        w.iter()
+         .zip(d.iter())
+         .map(|(&wi, &di)| (wi/di).ceil())
+         .reduce(F::mul)
+         .unwrap()
+    }
+}
+
+impl<F, S, P, BT, const N : usize> Mapping<RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
+where
+    F : Float,
+    BT : SensorGridBT<F, S, P, N>,
+    S : Sensor<F, N>,
+    P : Spread<F, N>,
+    Convolution<S, P> : Spread<F, N>,
+    //ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
+{
+
+    type Codomain =  DVector<F>;
+
+    #[inline]
+    fn apply<I : Instance<RNDM<F, N>>>(&self, μ : I) -> DVector<F> {
+        let mut y = self._zero_observable();
+        self.apply_add(&mut y, μ);
+        y
+    }
+}
+
+
+impl<F, S, P, BT, const N : usize> Linear<RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
+where
+    F : Float,
+    BT : SensorGridBT<F, S, P, N>,
+    S : Sensor<F, N>,
+    P : Spread<F, N>,
+    Convolution<S, P> : Spread<F, N>,
+    //ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>
+{ }
+
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, S, P, BT, const N : usize> GEMV<F, RNDM<F, N>, DVector<F>> for SensorGrid<F, S, P, BT, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      //ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>
+{
+
+    fn gemv<I : Instance<RNDM<F, N>>>(
+        &self, y : &mut DVector<F>, α : F, μ : I, β : F
+    ) {
+        let grid = self.grid();
+        if β == 0.0 {
+            y.fill(0.0)
+        } else if β != 1.0 {
+            *y *= β; // Need to multiply first, as we have to be able to add to y.
+        }
+        if α == 1.0 {
+            self.apply_add(y, μ)
+        } else {
+            for δ in μ.ref_instance() {
+                for &d in self.bt.iter_at(&δ.x) {
+                    let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
+                    y[d] += sensor.apply(&δ.x) * (α * δ.α);
+                }
+            }
+        }
+    }
+
+    fn apply_add<I : Instance<RNDM<F, N>>>(
+        &self, y : &mut DVector<F>, μ : I
+    ) {
+        let grid = self.grid();
+        for δ in μ.ref_instance() {
+            for &d in self.bt.iter_at(&δ.x) {
+                let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
+                y[d] += sensor.apply(&δ.x) * δ.α;
+            }
+        }
+    }
+
+}
+
+
+impl<F, S, P, BT, const N : usize>
+BoundedLinear<RNDM<F, N>, Radon, L2, F>
+for SensorGrid<F, S, P, BT, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N, Agg=Bounds<F>>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
+
+    /// An estimate on the operator norm in $𝕃(ℳ(Ω); ℝ^n)$ with $ℳ(Ω)$ equipped
+    /// with the Radon norm, and $ℝ^n$ with the Euclidean norm.
+    fn opnorm_bound(&self, _ : Radon, _ : L2) -> F {
+        // With {x_i}_{i=1}^n the grid centres and φ the kernel, we have
+        // |Aμ|_2 = sup_{|z|_2 ≤ 1} ⟨z,Αμ⟩ = sup_{|z|_2 ≤ 1} ⟨A^*z|μ⟩
+        // ≤ sup_{|z|_2 ≤ 1} |A^*z|_∞ |μ|_ℳ
+        // = sup_{|z|_2 ≤ 1} |∑ φ(· - x_i)z_i|_∞ |μ|_ℳ
+        // ≤ sup_{|z|_2 ≤ 1} |φ|_∞ ∑ |z_i| |μ|_ℳ
+        // ≤ sup_{|z|_2 ≤ 1} |φ|_∞ √n |z|_2 |μ|_ℳ
+        // = |φ|_∞ √n |μ|_ℳ.
+        // Hence
+        let n = F::cast_from(self.n_sensors());
+        self.base_sensor.bounds().uniform() * n.sqrt()
+    }
+}
+
+type SensorGridPreadjoint<'a, A, F, const N : usize> = PreadjointHelper<'a, A, RNDM<F,N>>;
+
+
+impl<F, S, P, BT, const N : usize>
+Preadjointable<RNDM<F, N>, DVector<F>>
+for SensorGrid<F, S, P, BT, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N> + LocalAnalysis<F, BT::Agg, N>,
+      /*ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
+      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N>*/ {
+    type PreadjointCodomain = BTFN<F, SensorGridSupportGenerator<F, S, P, N>, BT, N>;
+    type Preadjoint<'a> = SensorGridPreadjoint<'a, Self, F, N> where Self : 'a;
+
+    fn preadjoint(&self) -> Self::Preadjoint<'_> {
+        PreadjointHelper::new(self)
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F, S, P, BT, const N : usize> LipschitzValues
+for SensorGridPreadjoint<'a, SensorGrid<F, S, P, BT, N>, F, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N> + Lipschitz<L2, FloatType=F> + DifferentiableMapping<Loc<F,N>> + LocalAnalysis<F, BT::Agg, N>,
+      for<'b> <Convolution<S, P> as DifferentiableMapping<Loc<F,N>>>::Differential<'b> : Lipschitz<L2, FloatType=F>,
+      /*ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
+      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N>*/ {
+    
+    type FloatType = F;
+
+    fn value_unit_lipschitz_factor(&self) -> Option<F> {
+        // The Lipschitz factor of the sensors has to be scaled by the square root of twice
+        // the number of overlapping sensors at a single point, as Lipschitz estimates involve
+        // two points.
+        let fw = self.forward_op;
+        let n = fw.max_overlapping();
+        fw.base_sensor.lipschitz_factor(L2).map(|l| (2.0 * n).sqrt() * l)
+    }
+
+    fn value_diff_unit_lipschitz_factor(&self) -> Option<F> {
+        // The Lipschitz factor of the sensors has to be scaled by the square root of twice
+        // the number of overlapping sensors at a single point, as Lipschitz estimates involve
+        // two points.
+        let fw = self.forward_op;
+        let n = fw.max_overlapping();
+        fw.base_sensor.diff_ref().lipschitz_factor(L2).map(|l| (2.0 * n).sqrt() * l)
+    }
+}
+
+#[derive(Clone,Debug)]
+pub struct SensorGridSupportGenerator<F, S, P, const N : usize>
+where F : Float,
+      S : Sensor<F, N>,
+      P : Spread<F, N> {
+    base_sensor : Convolution<S, P>,
+    grid : LinGrid<F, N>,
+    weights : DVector<F>
+}
+
+impl<F, S, P, const N : usize> SensorGridSupportGenerator<F, S, P, N>
+where F : Float,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N> {
+
+    #[inline]
+    fn construct_sensor(&self, id : usize, w : F) -> Weighted<ShiftedSensor<F, S, P, N>, F> {
+        let x = self.grid.entry_linear_unchecked(id);
+        self.base_sensor.clone().shift(x).weigh(w)
+    }
+
+    #[inline]
+    fn construct_sensor_and_id<'a>(&'a self, (id, w) : (usize, &'a F))
+    -> (usize, Weighted<ShiftedSensor<F, S, P, N>, F>) {
+        (id.into(), self.construct_sensor(id, *w))
+    }
+}
+
+impl<F, S, P, const N : usize> SupportGenerator<F, N>
+for SensorGridSupportGenerator<F, S, P, N>
+where F : Float,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N> {
+    type Id = usize;
+    type SupportType = Weighted<ShiftedSensor<F, S, P, N>, F>;
+    type AllDataIter<'a> = MapX<'a, Zip<RangeFrom<usize>,
+                                        std::slice::Iter<'a, F>>,
+                                Self,
+                                (Self::Id, Self::SupportType)>
+                           where Self : 'a;
+
+    #[inline]
+    fn support_for(&self, d : Self::Id) -> Self::SupportType {
+        self.construct_sensor(d, self.weights[d])
+    }
+
+    #[inline]
+    fn support_count(&self) -> usize {
+        self.weights.len()
+    }
+
+    #[inline]
+    fn all_data(&self) -> Self::AllDataIter<'_> {
+        (0..).zip(self.weights.as_slice().iter()).mapX(self, Self::construct_sensor_and_id)
+    }
+}
+
+impl<F, S, P, BT, const N : usize> ForwardModel<DiscreteMeasure<Loc<F, N>, F>, F>
+for SensorGrid<F, S, P, BT, N>
+where F : Float + ToNalgebraRealField<MixedType=F> + nalgebra::RealField,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N> + LocalAnalysis<F, BT::Agg, N>,
+      /*ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
+      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N>*/ {
+    type Observable = DVector<F>;
+
+    fn write_observable(&self, b : &Self::Observable, prefix : String) -> DynError {
+        let it = self.grid().into_iter().zip(b.iter()).map(|(x, &v)| (x, v));
+        write_csv(it, prefix + ".txt")
+    }
+
+    #[inline]
+    fn zero_observable(&self) -> Self::Observable {
+        self._zero_observable()
+    }
+}
+
+impl<F, S, P, BT, const N : usize> FindimQuadraticModel<Loc<F, N>, F>
+for SensorGrid<F, S, P, BT, N>
+where F : Float + ToNalgebraRealField<MixedType=F> + nalgebra::RealField,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N> + LocalAnalysis<F, BT::Agg, N>,
+      /*ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
+      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N>*/ {
+
+    fn findim_quadratic_model(
+        &self,
+        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        b : &Self::Observable
+    ) -> (DMatrix<F::MixedType>, DVector<F::MixedType>) {
+        assert_eq!(b.len(), self.n_sensors());
+        let mut mA = DMatrix::zeros(self.n_sensors(), μ.len());
+        let grid = self.grid();
+        for (mut mAcol, δ) in mA.column_iter_mut().zip(μ.iter_spikes()) {
+            for &d in self.bt.iter_at(&δ.x) {
+                let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
+                mAcol[d] += sensor.apply(&δ.x);
+            }
+        }
+        let mAt = mA.transpose();
+        (&mAt * mA, &mAt * b)
+    }
+}
+
+/// Implements the calculation a factor $L$ such that $A_*A ≤ L 𝒟$ for $A$ the forward model
+/// and $𝒟$ a seminorm of suitable form.
+///
+/// **This assumes (but does not check) that the sensors are not overlapping.**
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, BT, S, P, K, const N : usize>
+AdjointProductBoundedBy<RNDM<F, N>, ConvolutionOp<F, K, BT, N>>
+for SensorGrid<F, S, P, BT, N>
+where F : Float + nalgebra::RealField + ToNalgebraRealField,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      K : SimpleConvolutionKernel<F, N>,
+      AutoConvolution<P> : BoundedBy<F, K> {
+
+    type FloatType = F;
+
+    fn adjoint_product_bound(&self, seminorm : &ConvolutionOp<F, K, BT, N>) -> Option<F> {
+        // Sensors should not take on negative values to allow
+        // A_*A to be upper bounded by a simple convolution of `spread`.
+        if self.sensor.bounds().lower() < 0.0 {
+            return None
+        }
+
+        // Calculate the factor $L_1$ such that $ℱ[ψ * ψ] ≤ L_1 ℱ[ρ]$ for $ψ$ the base spread
+        // and $ρ$ the kernel of the seminorm.
+        let l1 = AutoConvolution(self.spread.clone()).bounding_factor(seminorm.kernel())?;
+
+        // Calculate the factor for transitioning from $A_*A$ to `AutoConvolution<P>`, where A
+        // consists of several `Convolution<S, P>` for the physical model `P` and the sensor `S`.
+        let l0 = self.sensor.norm(Linfinity) * self.sensor.norm(L1);
+
+        // The final transition factor is:
+        Some(l0 * l1)
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, BT, S, P, const N : usize> TransportLipschitz<L2Squared>
+for SensorGrid<F, S, P, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N> + Lipschitz<L2, FloatType = F> {
+    type FloatType = F;
+
+    fn transport_lipschitz_factor(&self, L2Squared : L2Squared) -> Self::FloatType {
+        // We estimate the factor by N_ψL^2, where L is the 2-norm Lipschitz factor of
+        // the base sensor (sensor * base_spread), and N_ψ the maximum overlap.
+        // The factor two comes from Lipschitz estimates having two possible
+        // points of overlap.
+        let l = self.base_sensor.lipschitz_factor(L2).unwrap();
+        2.0 * self.max_overlapping() * l.powi(2)
+    }
+}
+
+
+macro_rules! make_sensorgridsupportgenerator_scalarop_rhs {
+    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
+        impl<F, S, P, const N : usize>
+        std::ops::$trait_assign<F>
+        for SensorGridSupportGenerator<F, S, P, N>
+        where F : Float,
+              S : Sensor<F, N>,
+              P : Spread<F, N>,
+              Convolution<S, P> : Spread<F, N> {
+            fn $fn_assign(&mut self, t : F) {
+                self.weights.$fn_assign(t);
+            }
+        }
+
+        impl<F, S, P, const N : usize>
+        std::ops::$trait<F>
+        for SensorGridSupportGenerator<F, S, P, N>
+        where F : Float,
+              S : Sensor<F, N>,
+              P : Spread<F, N>,
+              Convolution<S, P> : Spread<F, N> {
+            type Output = SensorGridSupportGenerator<F, S, P, N>;
+            fn $fn(mut self, t : F) -> Self::Output {
+                std::ops::$trait_assign::$fn_assign(&mut self.weights, t);
+                self
+            }
+        }
+
+        impl<'a, F, S, P, const N : usize>
+        std::ops::$trait<F>
+        for &'a SensorGridSupportGenerator<F, S, P, N>
+        where F : Float,
+              S : Sensor<F, N>,
+              P : Spread<F, N>,
+              Convolution<S, P> : Spread<F, N> {
+            type Output = SensorGridSupportGenerator<F, S, P, N>;
+            fn $fn(self, t : F) -> Self::Output {
+                SensorGridSupportGenerator{
+                    base_sensor : self.base_sensor.clone(),
+                    grid : self.grid,
+                    weights : (&self.weights).$fn(t)
+                }
+            }
+        }
+    }
+}
+
+make_sensorgridsupportgenerator_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
+make_sensorgridsupportgenerator_scalarop_rhs!(Div, div, DivAssign, div_assign);
+
+macro_rules! make_sensorgridsupportgenerator_unaryop {
+    ($trait:ident, $fn:ident) => {
+        impl<F, S, P, const N : usize>
+        std::ops::$trait
+        for SensorGridSupportGenerator<F, S, P, N>
+        where F : Float,
+              S : Sensor<F, N>,
+              P : Spread<F, N>,
+              Convolution<S, P> : Spread<F, N> {
+            type Output = SensorGridSupportGenerator<F, S, P, N>;
+            fn $fn(mut self) -> Self::Output {
+                self.weights = self.weights.$fn();
+                self
+            }
+        }
+
+        impl<'a, F, S, P, const N : usize>
+        std::ops::$trait
+        for &'a SensorGridSupportGenerator<F, S, P, N>
+        where F : Float,
+              S : Sensor<F, N>,
+              P : Spread<F, N>,
+              Convolution<S, P> : Spread<F, N> {
+            type Output = SensorGridSupportGenerator<F, S, P, N>;
+            fn $fn(self) -> Self::Output {
+                SensorGridSupportGenerator{
+                    base_sensor : self.base_sensor.clone(),
+                    grid : self.grid,
+                    weights : (&self.weights).$fn()
+                }
+            }
+        }
+    }
+}
+
+make_sensorgridsupportgenerator_unaryop!(Neg, neg);
+
+impl<'a, F, S, P, BT, const N : usize> Mapping<DVector<F>>
+for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F,N>>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+      //ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
+      /*Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N>*/ {
+
+    type Codomain = SensorGridBTFN<F, S, P, BT, N>;
+
+    fn apply<I : Instance<DVector<F>>>(&self, x : I) -> Self::Codomain {
+        let fwd = &self.forward_op;
+        let generator = SensorGridSupportGenerator{
+            base_sensor : fwd.base_sensor.clone(),
+            grid : fwd.grid(),
+            weights : x.own()
+        };
+        BTFN::new_refresh(&fwd.bt, generator)
+    }
+}
+
+impl<'a, F, S, P, BT, const N : usize> Linear<DVector<F>>
+for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F,N>>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+      /*ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
+      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N>*/ {
+
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/forward_pdps.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -0,0 +1,268 @@
+/*!
+Solver for the point source localisation problem using a
+primal-dual proximal splitting with a forward step.
+*/
+
+use numeric_literals::replace_float_literals;
+use serde::{Serialize, Deserialize};
+
+use alg_tools::iterate::AlgIteratorFactory;
+use alg_tools::euclidean::Euclidean;
+use alg_tools::sets::Cube;
+use alg_tools::loc::Loc;
+use alg_tools::mapping::{Mapping, Instance};
+use alg_tools::norms::Norm;
+use alg_tools::direct_product::Pair;
+use alg_tools::bisection_tree::{
+    BTFN,
+    PreBTFN,
+    Bounds,
+    BTNodeLookup,
+    BTNode,
+    BTSearch,
+    P2Minimise,
+    SupportGenerator,
+    LocalAnalysis,
+    //Bounded,
+};
+use alg_tools::mapping::RealMapping;
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::linops::{
+    BoundedLinear, AXPY, GEMV, Adjointable, IdOp,
+};
+use alg_tools::convex::{Conjugable, Prox};
+use alg_tools::norms::{L2, Linfinity, PairNorm};
+
+use crate::types::*;
+use crate::measures::{DiscreteMeasure, Radon, RNDM};
+use crate::measures::merging::SpikeMerging;
+use crate::forward_model::{
+    ForwardModel,
+    AdjointProductPairBoundedBy,
+};
+use crate::seminorms::DiscreteMeasureOp;
+use crate::plot::{
+    SeqPlotter,
+    Plotting,
+    PlotLookup
+};
+use crate::fb::*;
+use crate::regularisation::RegTerm;
+use crate::dataterm::calculate_residual;
+
+/// Settings for [`pointsource_forward_pdps_pair`].
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[serde(default)]
+pub struct ForwardPDPSConfig<F : Float> {
+    /// Primal step length scaling.
+    pub τ0 : F,
+    /// Primal step length scaling.
+    pub σp0 : F,
+    /// Dual step length scaling.
+    pub σd0 : F,
+    /// Generic parameters
+    pub insertion : FBGenericConfig<F>,
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> Default for ForwardPDPSConfig<F> {
+    fn default() -> Self {
+        let τ0 = 0.99;
+        ForwardPDPSConfig {
+            τ0,
+            σd0 : 0.1,
+            σp0 : 0.99,
+            insertion : Default::default()
+        }
+    }
+}
+
+type MeasureZ<F, Z, const N : usize> = Pair<RNDM<F, N>, Z>;
+
+/// Iteratively solve the pointsource localisation with an additional variable
+/// using primal-dual proximal splitting with a forward step.
+#[replace_float_literals(F::cast_from(literal))]
+pub fn pointsource_forward_pdps_pair<
+    'a, F, I, A, GA, 𝒟, BTA, BT𝒟, G𝒟, S, K, Reg, Z, R, Y, /*KOpM, */ KOpZ, H, const N : usize
+>(
+    opA : &'a A,
+    b : &A::Observable,
+    reg : Reg,
+    op𝒟 : &'a 𝒟,
+    config : &ForwardPDPSConfig<F>,
+    iterator : I,
+    mut plotter : SeqPlotter<F, N>,
+    //opKμ : KOpM,
+    opKz : &KOpZ,
+    fnR : &R,
+    fnH : &H,
+    mut z : Z,
+    mut y : Y,
+) -> MeasureZ<F, Z, N>
+where
+    F : Float + ToNalgebraRealField,
+    I : AlgIteratorFactory<IterInfo<F, N>>,
+    for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable> + Instance<A::Observable>,
+    GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
+    A : ForwardModel<
+            MeasureZ<F, Z, N>,
+            F,
+            PairNorm<Radon, L2, L2>,
+            PreadjointCodomain = Pair<BTFN<F, GA, BTA, N>, Z>,
+        >
+        + AdjointProductPairBoundedBy<MeasureZ<F, Z, N>, 𝒟, IdOp<Z>, FloatType=F>,
+    BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+    G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
+    𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>,
+                                        Codomain = BTFN<F, G𝒟, BT𝒟, N>>,
+    BT𝒟 : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+    S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+    K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+    BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
+    Cube<F, N>: P2Minimise<Loc<F, N>, F>,
+    PlotLookup : Plotting<N>,
+    RNDM<F, N> : SpikeMerging<F>,
+    Reg : RegTerm<F, N>,
+    KOpZ : BoundedLinear<Z, L2, L2, F, Codomain=Y>
+        + GEMV<F, Z>
+        + Adjointable<Z, Y, AdjointCodomain = Z>,
+    for<'b> KOpZ::Adjoint<'b> : GEMV<F, Y>,
+    Y : AXPY<F> + Euclidean<F, Output=Y> + Clone + ClosedAdd,
+    for<'b> &'b Y : Instance<Y>,
+    Z : AXPY<F, Owned=Z> + Euclidean<F, Output=Z> + Clone + Norm<F, L2>,
+    for<'b> &'b Z : Instance<Z>,
+    R : Prox<Z, Codomain=F>,
+    H : Conjugable<Y, F, Codomain=F>,
+    for<'b> H::Conjugate<'b> : Prox<Y>,
+{
+
+    // Check parameters
+    assert!(config.τ0 > 0.0 &&
+            config.τ0 < 1.0 &&
+            config.σp0 > 0.0 &&
+            config.σp0 < 1.0 &&
+            config.σd0 > 0.0 &&
+            config.σp0 * config.σd0 <= 1.0,
+            "Invalid step length parameters");
+
+    // Initialise iterates
+    let mut μ = DiscreteMeasure::new();
+    let mut residual = calculate_residual(Pair(&μ, &z), opA, b);
+
+    // Set up parameters
+    let op𝒟norm = op𝒟.opnorm_bound(Radon, Linfinity);
+    let bigM = 0.0; //opKμ.adjoint_product_bound(&op𝒟).unwrap().sqrt();
+    let nKz = opKz.opnorm_bound(L2, L2);
+    let opIdZ = IdOp::new();
+    let (l, l_z) = opA.adjoint_product_pair_bound(&op𝒟, &opIdZ).unwrap();
+    // We need to satisfy
+    //
+    //     τσ_dM(1-σ_p L_z)/(1 - τ L) + [σ_p L_z + σ_pσ_d‖K_z‖^2] < 1
+    //                                  ^^^^^^^^^^^^^^^^^^^^^^^^^
+    // with 1 > σ_p L_z and 1 > τ L.
+    //
+    // To do so, we first solve σ_p and σ_d from standard PDPS step length condition
+    // ^^^^^ < 1. Then we solve τ from the rest.
+    let σ_d = config.σd0 / nKz;
+    let σ_p = config.σp0 / (l_z + config.σd0 * nKz);
+    // Observe that = 1 - ^^^^^^^^^^^^^^^^^^^^^ = 1 - σ_{p,0}
+    // We get the condition τσ_d M (1-σ_p L_z) < (1-σ_{p,0})*(1-τ L)
+    // ⟺ τ [ σ_d M (1-σ_p L_z) + (1-σ_{p,0}) L ] < (1-σ_{p,0})
+    let φ = 1.0 - config.σp0;
+    let a = 1.0 - σ_p * l_z;
+    let τ = config.τ0 * φ / ( σ_d * bigM * a + φ * l );
+    // Acceleration is not currently supported
+    // let γ = dataterm.factor_of_strong_convexity();
+    let ω = 1.0;
+
+    // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled
+    // by τ compared to the conditional gradient approach.
+    let tolerance = config.insertion.tolerance * τ * reg.tolerance_scaling();
+    let mut ε = tolerance.initial();
+
+    let starH = fnH.conjugate();
+
+    // Statistics
+    let full_stats = |residual : &A::Observable, μ : &RNDM<F, N>, z : &Z, ε, stats| IterInfo {
+        value : residual.norm2_squared_div2() + fnR.apply(z)
+                + reg.apply(μ) + fnH.apply(/* opKμ.apply(μ) + */ opKz.apply(z)),
+        n_spikes : μ.len(),
+        ε,
+        // postprocessing: config.insertion.postprocessing.then(|| μ.clone()),
+        .. stats
+    };
+    let mut stats = IterInfo::new();
+
+    // Run the algorithm
+    for state in iterator.iter_init(|| full_stats(&residual, &μ, &z, ε, stats.clone())) {
+        // Calculate initial transport
+        let Pair(τv, τz) = opA.preadjoint().apply(residual * τ);
+        let z_base = z.clone();
+        let μ_base = μ.clone();
+
+        // Construct μ^{k+1} by solving finite-dimensional subproblems and insert new spikes.
+        let (d, _within_tolerances) = insert_and_reweigh(
+            &mut μ, &τv, &μ_base, None,
+            op𝒟, op𝒟norm,
+            τ, ε, &config.insertion,
+            &reg, &state, &mut stats,
+        );
+
+        // // Merge spikes.
+        // // This expects the prune below to prune γ.
+        // // TODO: This may not work correctly in all cases.
+        // let ins = &config.insertion;
+        // if ins.merge_now(&state) {
+        //     if let SpikeMergingMethod::None = ins.merging {
+        //     } else {
+        //         stats.merged += μ.merge_spikes(ins.merging, |μ_candidate| {
+        //             let ν = μ_candidate.sub_matching(&γ1)-&μ_base_minus_γ0;
+        //             let mut d = &τv̆ + op𝒟.preapply(ν);
+        //             reg.verify_merge_candidate(&mut d, μ_candidate, τ, ε, ins)
+        //         });
+        //     }
+        // }
+
+        // Prune spikes with zero weight.
+        stats.pruned += prune_with_stats(&mut μ);
+
+        // Do z variable primal update
+        z.axpy(-σ_p/τ, τz, 1.0); // TODO: simplify nasty factors
+        opKz.adjoint().gemv(&mut z, -σ_p, &y, 1.0);
+        z = fnR.prox(σ_p, z);
+        // Do dual update
+        // opKμ.gemv(&mut y, σ_d*(1.0 + ω), &μ, 1.0);    // y = y + σ_d K[(1+ω)(μ,z)^{k+1}]
+        opKz.gemv(&mut y, σ_d*(1.0 + ω), &z, 1.0);
+        // opKμ.gemv(&mut y, -σ_d*ω, μ_base, 1.0);// y = y + σ_d K[(1+ω)(μ,z)^{k+1} - ω (μ,z)^k]-b
+        opKz.gemv(&mut y, -σ_d*ω, z_base, 1.0);// y = y + σ_d K[(1+ω)(μ,z)^{k+1} - ω (μ,z)^k]-b
+        y = starH.prox(σ_d, y);
+
+        // Update residual
+        residual = calculate_residual(Pair(&μ, &z), opA, b);
+
+        // Update step length parameters
+        // let ω = pdpsconfig.acceleration.accelerate(&mut τ, &mut σ, γ);
+
+        // Give statistics if requested
+        let iter = state.iteration();
+        stats.this_iters += 1;
+
+        state.if_verbose(|| {
+            plotter.plot_spikes(iter, Some(&d), Some(&τv), &μ);
+            full_stats(&residual, &μ, &z, ε, std::mem::replace(&mut stats, IterInfo::new()))
+        });
+
+        // Update main tolerance for next iteration
+        ε = tolerance.update(ε, iter);
+    }
+
+    let fit = |μ̃ : &RNDM<F, N>| {
+        (opA.apply(Pair(μ̃, &z))-b).norm2_squared_div2()
+        //+ fnR.apply(z) + reg.apply(μ)
+        + fnH.apply(/* opKμ.apply(&μ̃) + */ opKz.apply(&z))
+    };
+
+    μ.merge_spikes_fitness(config.insertion.merging, fit, |&v| v);
+    μ.prune();
+    Pair(μ, z)
+}
--- a/src/fourier.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/fourier.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -3,14 +3,14 @@
 */
 
 use alg_tools::types::{Num, Float};
-use alg_tools::mapping::{RealMapping, Mapping};
+use alg_tools::mapping::{RealMapping, Mapping, Space};
 use alg_tools::bisection_tree::Weighted;
 use alg_tools::loc::Loc;
 
 /// Trait for Fourier transforms. When F is a non-complex number, the transform
 /// also has to be non-complex, i.e., the function itself symmetric.
 pub trait Fourier<F : Num> : Mapping<Self::Domain, Codomain=F> {
-    type Domain;
+    type Domain : Space;
     type Transformed : Mapping<Self::Domain, Codomain=F>;
 
     fn fourier(&self) -> Self::Transformed;
--- a/src/frank_wolfe.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/frank_wolfe.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -14,18 +14,18 @@
 */
 
 use numeric_literals::replace_float_literals;
+use nalgebra::{DMatrix, DVector};
 use serde::{Serialize, Deserialize};
 //use colored::Colorize;
 
 use alg_tools::iterate::{
     AlgIteratorFactory,
-    AlgIteratorState,
     AlgIteratorOptions,
     ValueIteratorFactory,
 };
 use alg_tools::euclidean::Euclidean;
 use alg_tools::norms::Norm;
-use alg_tools::linops::Apply;
+use alg_tools::linops::Mapping;
 use alg_tools::sets::Cube;
 use alg_tools::loc::Loc;
 use alg_tools::bisection_tree::{
@@ -40,9 +40,11 @@
 };
 use alg_tools::mapping::RealMapping;
 use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::norms::L2;
 
 use crate::types::*;
 use crate::measures::{
+    RNDM,
     DiscreteMeasure,
     DeltaMeasure,
     Radon,
@@ -71,7 +73,7 @@
     RegTerm
 };
 
-/// Settings for [`pointsource_fw`].
+/// Settings for [`pointsource_fw_reg`].
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
 #[serde(default)]
 pub struct FWConfig<F : Float> {
@@ -111,10 +113,20 @@
     }
 }
 
-/// Helper struct for pre-initialising the finite-dimensional subproblems solver
-/// [`prepare_optimise_weights`].
-///
-/// The pre-initialisation is done by [`prepare_optimise_weights`].
+pub trait FindimQuadraticModel<Domain, F> : ForwardModel<DiscreteMeasure<Domain, F>, F>
+where
+    F : Float + ToNalgebraRealField,
+    Domain : Clone + PartialEq,
+{
+    /// Return A_*A and A_* b
+    fn findim_quadratic_model(
+        &self,
+        μ : &DiscreteMeasure<Domain, F>,
+        b : &Self::Observable
+    ) -> (DMatrix<F::MixedType>, DVector<F::MixedType>);
+}
+
+/// Helper struct for pre-initialising the finite-dimensional subproblem solver.
 pub struct FindimData<F : Float> {
     /// ‖A‖^2
     opAnorm_squared : F,
@@ -125,7 +137,7 @@
 /// Trait for finite dimensional weight optimisation.
 pub trait WeightOptim<
     F : Float + ToNalgebraRealField,
-    A : ForwardModel<Loc<F, N>, F>,
+    A : ForwardModel<RNDM<F, N>, F>,
     I : AlgIteratorFactory<F>,
     const N : usize
 > {
@@ -154,7 +166,7 @@
     /// Returns the number of iterations taken by the method configured in `inner`.
     fn optimise_weights<'a>(
         &self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        μ : &mut RNDM<F, N>,
         opA : &'a A,
         b : &A::Observable,
         findim_data : &FindimData<F>,
@@ -166,12 +178,12 @@
 /// Trait for regularisation terms supported by [`pointsource_fw_reg`].
 pub trait RegTermFW<
     F : Float + ToNalgebraRealField,
-    A : ForwardModel<Loc<F, N>, F>,
+    A : ForwardModel<RNDM<F, N>, F>,
     I : AlgIteratorFactory<F>,
     const N : usize
 > : RegTerm<F, N>
     + WeightOptim<F, A, I, N>
-    + for<'a> Apply<&'a DiscreteMeasure<Loc<F, N>, F>, Output = F> {
+    + Mapping<RNDM<F, N>, Codomain = F> {
 
     /// With $g = A\_\*(Aμ-b)$, returns $(x, g(x))$ for $x$ a new point to be inserted
     /// into $μ$, as determined by the regulariser.
@@ -188,7 +200,7 @@
     /// Insert point `ξ` into `μ` for the relaxed algorithm from Bredies–Pikkarainen.
     fn relaxed_insert<'a>(
         &self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        μ : &mut RNDM<F, N>,
         g : &A::PreadjointCodomain,
         opA : &'a A,
         ξ : Loc<F, N>,
@@ -201,18 +213,18 @@
 impl<F : Float + ToNalgebraRealField, A, I, const N : usize> WeightOptim<F, A, I, N>
 for RadonRegTerm<F>
 where I : AlgIteratorFactory<F>,
-      A : ForwardModel<Loc<F, N>, F> {
+      A : FindimQuadraticModel<Loc<F, N>, F>  {
 
     fn prepare_optimise_weights(&self, opA : &A, b : &A::Observable) -> FindimData<F> {
         FindimData{
-            opAnorm_squared : opA.opnorm_bound().powi(2),
+            opAnorm_squared : opA.opnorm_bound(Radon, L2).powi(2),
             m0 : b.norm2_squared() / (2.0 * self.α()),
         }
     }
 
     fn optimise_weights<'a>(
         &self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        μ : &mut RNDM<F, N>,
         opA : &'a A,
         b : &A::Observable,
         findim_data : &FindimData<F>,
@@ -245,12 +257,19 @@
 #[replace_float_literals(F::cast_from(literal))]
 impl<F : Float + ToNalgebraRealField, A, I, S, GA, BTA, const N : usize> RegTermFW<F, A, I, N>
 for RadonRegTerm<F>
-where Cube<F, N> : P2Minimise<Loc<F, N>, F>,
-      I : AlgIteratorFactory<F>,
-      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
-      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>> {
+where
+    Cube<F, N> : P2Minimise<Loc<F, N>, F>,
+    I : AlgIteratorFactory<F>,
+    S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+    GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
+    A : FindimQuadraticModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
+    BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+    // FIXME: the following *should not* be needed, they are already implied
+    RNDM<F, N> : Mapping<A::PreadjointCodomain, Codomain = F>,
+    DeltaMeasure<Loc<F, N>, F> : Mapping<A::PreadjointCodomain, Codomain = F>,
+    //A : Mapping<RNDM<F, N>, Codomain = A::Observable>,
+    //A : Mapping<DeltaMeasure<Loc<F, N>, F>, Codomain = A::Observable>,
+{
 
     fn find_insertion(
         &self,
@@ -269,7 +288,7 @@
 
     fn relaxed_insert<'a>(
         &self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        μ : &mut RNDM<F, N>,
         g : &A::PreadjointCodomain,
         opA : &'a A,
         ξ : Loc<F, N>,
@@ -282,7 +301,7 @@
         let v = if v_ξ.abs() <= α { 0.0 } else { m0 / α * v_ξ };
         let δ = DeltaMeasure { x : ξ, α : v };
         let dp = μ.apply(g) - δ.apply(g);
-        let d = opA.apply(&*μ) - opA.apply(&δ);
+        let d = opA.apply(&*μ) - opA.apply(δ);
         let r = d.norm2_squared();
         let s = if r == 0.0 {
             1.0
@@ -298,18 +317,18 @@
 impl<F : Float + ToNalgebraRealField, A, I, const N : usize> WeightOptim<F, A, I, N>
 for NonnegRadonRegTerm<F>
 where I : AlgIteratorFactory<F>,
-      A : ForwardModel<Loc<F, N>, F> {
+      A : FindimQuadraticModel<Loc<F, N>, F> {
 
     fn prepare_optimise_weights(&self, opA : &A, b : &A::Observable) -> FindimData<F> {
         FindimData{
-            opAnorm_squared : opA.opnorm_bound().powi(2),
+            opAnorm_squared : opA.opnorm_bound(Radon, L2).powi(2),
             m0 : b.norm2_squared() / (2.0 * self.α()),
         }
     }
 
     fn optimise_weights<'a>(
         &self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        μ : &mut RNDM<F, N>,
         opA : &'a A,
         b : &A::Observable,
         findim_data : &FindimData<F>,
@@ -342,12 +361,17 @@
 #[replace_float_literals(F::cast_from(literal))]
 impl<F : Float + ToNalgebraRealField, A, I, S, GA, BTA, const N : usize> RegTermFW<F, A, I, N>
 for NonnegRadonRegTerm<F>
-where Cube<F, N> : P2Minimise<Loc<F, N>, F>,
-      I : AlgIteratorFactory<F>,
-      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
-      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>> {
+where
+    Cube<F, N> : P2Minimise<Loc<F, N>, F>,
+    I : AlgIteratorFactory<F>,
+    S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+    GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
+    A : FindimQuadraticModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
+    BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+    // FIXME: the following *should not* be needed, they are already implied
+    RNDM<F, N> : Mapping<A::PreadjointCodomain, Codomain = F>,
+    DeltaMeasure<Loc<F, N>, F> : Mapping<A::PreadjointCodomain, Codomain = F>,
+{
 
     fn find_insertion(
         &self,
@@ -361,7 +385,7 @@
 
     fn relaxed_insert<'a>(
         &self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        μ : &mut RNDM<F, N>,
         g : &A::PreadjointCodomain,
         opA : &'a A,
         ξ : Loc<F, N>,
@@ -409,20 +433,18 @@
     config : &FWConfig<F>,
     iterator : I,
     mut plotter : SeqPlotter<F, N>,
-) -> DiscreteMeasure<Loc<F, N>, F>
+) -> RNDM<F, N>
 where F : Float + ToNalgebraRealField,
       I : AlgIteratorFactory<IterInfo<F, N>>,
       for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
-                                  //+ std::ops::Mul<F, Output=A::Observable>,  <-- FIXME: compiler overflow
-      A::Observable : std::ops::MulAssign<F>,
       GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
+      A : ForwardModel<RNDM<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
       BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
       S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
       BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
       Cube<F, N>: P2Minimise<Loc<F, N>, F>,
       PlotLookup : Plotting<N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
+      RNDM<F, N> : SpikeMerging<F>,
       Reg : RegTermFW<F, A, ValueIteratorFactory<F, AlgIteratorOptions>, N> {
 
     // Set up parameters
@@ -438,26 +460,24 @@
     let mut μ = DiscreteMeasure::new();
     let mut residual = -b;
 
-    let mut inner_iters = 0;
-    let mut this_iters = 0;
-    let mut pruned = 0;
-    let mut merged = 0;
+    // Statistics
+    let full_stats = |residual : &A::Observable,
+                      ν : &RNDM<F, N>,
+                      ε, stats| IterInfo {
+        value : residual.norm2_squared_div2() + reg.apply(ν),
+        n_spikes : ν.len(),
+        ε,
+        .. stats
+    };
+    let mut stats = IterInfo::new();
 
     // Run the algorithm
-    iterator.iterate(|state| {
-        // Update tolerance
+    for state in iterator.iter_init(|| full_stats(&residual, &μ, ε, stats.clone())) {
         let inner_tolerance = ε * config.inner.tolerance_mult;
         let refinement_tolerance = ε * config.refinement.tolerance_mult;
-        let ε_prev = ε;
-        ε = tolerance.update(ε, state.iteration());
 
         // Calculate smooth part of surrogate model.
-        //
-        // Using `std::mem::replace` here is not ideal, and expects that `empty_observable`
-        // has no significant overhead. For some reosn Rust doesn't allow us simply moving
-        // the residual and replacing it below before the end of this closure.
-        let r = std::mem::replace(&mut residual, opA.empty_observable());
-        let mut g = -preadjA.apply(r);
+        let mut g = preadjA.apply(residual * (-1.0));
 
         // Find absolute value maximising point
         let (ξ, v_ξ) = reg.find_insertion(&mut g, refinement_tolerance,
@@ -467,60 +487,46 @@
             FWVariant::FullyCorrective => {
                 // No point in optimising the weight here: the finite-dimensional algorithm is fast.
                 μ += DeltaMeasure { x : ξ, α : 0.0 };
+                stats.inserted += 1;
                 config.inner.iterator_options.stop_target(inner_tolerance)
             },
             FWVariant::Relaxed => {
                 // Perform a relaxed initialisation of μ
                 reg.relaxed_insert(&mut μ, &g, opA, ξ, v_ξ, &findim_data);
+                stats.inserted += 1;
                 // The stop_target is only needed for the type system.
                 AlgIteratorOptions{ max_iter : 1, .. config.inner.iterator_options}.stop_target(0.0)
             }
         };
 
-        inner_iters += reg.optimise_weights(&mut μ, opA, b, &findim_data, &config.inner, inner_it);
+        stats.inner_iters += reg.optimise_weights(&mut μ, opA, b, &findim_data,
+                                                  &config.inner, inner_it);
    
         // Merge spikes and update residual for next step and `if_verbose` below.
         let (r, count) = μ.merge_spikes_fitness(config.merging,
                                                 |μ̃| opA.apply(μ̃) - b,
                                                 A::Observable::norm2_squared);
         residual = r;
-        merged += count;
-
+        stats.merged += count;
 
         // Prune points with zero mass
         let n_before_prune = μ.len();
         μ.prune();
         debug_assert!(μ.len() <= n_before_prune);
-        pruned += n_before_prune - μ.len();
+        stats.pruned += n_before_prune - μ.len();
 
-        this_iters +=1;
+        stats.this_iters += 1;
+        let iter = state.iteration();
 
-        // Give function value if needed
+        // Give statistics if needed
         state.if_verbose(|| {
-            plotter.plot_spikes(
-                format!("iter {} start", state.iteration()), &g,
-                "".to_string(), None::<&A::PreadjointCodomain>,
-                None, &μ
-            );
-            let res = IterInfo {
-                value : residual.norm2_squared_div2() + reg.apply(&μ),
-                n_spikes : μ.len(),
-                inner_iters,
-                this_iters,
-                merged,
-                pruned,
-                ε : ε_prev,
-                postprocessing : None,
-                untransported_fraction : None,
-                transport_error : None,
-            };
-            inner_iters = 0;
-            this_iters = 0;
-            merged = 0;
-            pruned = 0;
-            res
-        })
-    });
+            plotter.plot_spikes(iter, Some(&g), Option::<&S>::None, &μ);
+            full_stats(&residual, &μ, ε, std::mem::replace(&mut stats, IterInfo::new()))
+        });
+
+        // Update tolerance
+        ε = tolerance.update(ε, iter);
+    }
 
     // Return final iterate
     μ
--- a/src/kernels.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/kernels.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -24,4 +24,7 @@
 pub use ball_indicator::*;
 mod hat_convolution;
 pub use hat_convolution::*;
+mod linear;
+pub use linear::*;
 
+
--- a/src/kernels/ball_indicator.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/kernels/ball_indicator.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -1,6 +1,6 @@
 
 //! Implementation of the indicator function of a ball with respect to various norms.
-use float_extras::f64::{tgamma as gamma};
+use float_extras::f64::tgamma as gamma;
 use numeric_literals::replace_float_literals;
 use serde::Serialize;
 use alg_tools::types::*;
@@ -14,10 +14,16 @@
     LocalAnalysis,
     GlobalAnalysis,
 };
-use alg_tools::mapping::Apply;
+use alg_tools::mapping::{
+    Mapping,
+    Differential,
+    DifferentiableImpl,
+};
+use alg_tools::instance::Instance;
+use alg_tools::euclidean::StaticEuclidean;
 use alg_tools::maputil::array_init;
 use alg_tools::coefficients::factorial;
-
+use crate::types::*;
 use super::base::*;
 
 /// Representation of the indicator of the ball $𝔹_q = \\{ x ∈ ℝ^N \mid \\|x\\|\_q ≤ r \\}$,
@@ -36,14 +42,17 @@
 
 #[replace_float_literals(C::Type::cast_from(literal))]
 impl<'a, F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
-Apply<&'a Loc<C::Type, N>>
+Mapping<Loc<C::Type, N>>
 for BallIndicator<C, Exponent, N>
-where Loc<F, N> : Norm<F, Exponent> {
-    type Output = C::Type;
+where
+    Loc<F, N> : Norm<F, Exponent>
+{
+    type Codomain = C::Type;
+
     #[inline]
-    fn apply(&self, x : &'a Loc<C::Type, N>) -> Self::Output {
+    fn apply<I : Instance<Loc<C::Type, N>>>(&self, x : I) -> Self::Codomain {
         let r = self.r.value();
-        let n = x.norm(self.exponent);
+        let n = x.eval(|x| x.norm(self.exponent));
         if n <= r {
             1.0
         } else {
@@ -52,14 +61,79 @@
     }
 }
 
+impl<'a, F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
+DifferentiableImpl<Loc<C::Type, N>>
+for BallIndicator<C, Exponent, N>
+where
+    C : Constant,
+     Loc<F, N> : Norm<F, Exponent>
+{
+    type Derivative = Loc<C::Type, N>;
+
+    #[inline]
+    fn differential_impl<I : Instance<Loc<C::Type, N>>>(&self, _x : I) -> Self::Derivative {
+        Self::Derivative::origin()
+    }
+}
+
 impl<F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
-Apply<Loc<C::Type, N>>
+Lipschitz<L2>
 for BallIndicator<C, Exponent, N>
-where Loc<F, N> : Norm<F, Exponent> {
-    type Output = C::Type;
-    #[inline]
-    fn apply(&self, x : Loc<C::Type, N>) -> Self::Output {
-        self.apply(&x)
+where C : Constant,
+      Loc<F, N> : Norm<F, Exponent> {
+    type FloatType = C::Type;
+
+    fn lipschitz_factor(&self, _l2 : L2) -> Option<C::Type> {
+        None
+    }
+}
+
+impl<'b, F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
+Lipschitz<L2>
+for Differential<'b, Loc<F, N>, BallIndicator<C, Exponent, N>>
+where C : Constant,
+      Loc<F, N> : Norm<F, Exponent> {
+    type FloatType = C::Type;
+
+    fn lipschitz_factor(&self, _l2 : L2) -> Option<C::Type> {
+        None
+    }
+}
+
+impl<'a, 'b, F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
+Lipschitz<L2>
+for Differential<'b, Loc<F, N>, &'a BallIndicator<C, Exponent, N>>
+where C : Constant,
+      Loc<F, N> : Norm<F, Exponent> {
+    type FloatType = C::Type;
+
+    fn lipschitz_factor(&self, _l2 : L2) -> Option<C::Type> {
+        None
+    }
+}
+
+
+impl<'b, F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
+NormBounded<L2>
+for Differential<'b, Loc<F, N>, BallIndicator<C, Exponent, N>>
+where C : Constant,
+      Loc<F, N> : Norm<F, Exponent> {
+    type FloatType = C::Type;
+
+    fn norm_bound(&self, _l2 : L2) -> C::Type {
+        F::INFINITY
+    }
+}
+
+impl<'a, 'b, F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
+NormBounded<L2>
+for Differential<'b, Loc<F, N>, &'a BallIndicator<C, Exponent, N>>
+where C : Constant,
+      Loc<F, N> : Norm<F, Exponent> {
+    type FloatType = C::Type;
+
+    fn norm_bound(&self, _l2 : L2) -> C::Type {
+        F::INFINITY
     }
 }
 
@@ -188,32 +262,21 @@
 
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<'a, F : Float, R, const N : usize> Apply<&'a Loc<F, N>>
+impl<'a, F : Float, R, const N : usize> Mapping<Loc<F, N>>
 for AutoConvolution<CubeIndicator<R, N>>
 where R : Constant<Type=F> {
-    type Output = F;
+    type Codomain = F;
 
     #[inline]
-    fn apply(&self, y : &'a Loc<F, N>) -> F {
+    fn apply<I : Instance<Loc<F, N>>>(&self, y : I) -> F {
         let two_r = 2.0 * self.0.r.value();
         // This is just a product of one-dimensional versions
-        y.iter().map(|&x| {
+        y.cow().iter().map(|&x| {
             0.0.max(two_r - x.abs())
         }).product()
     }
 }
 
-impl<F : Float, R, const N : usize> Apply<Loc<F, N>>
-for AutoConvolution<CubeIndicator<R, N>>
-where R : Constant<Type=F> {
-    type Output = F;
-
-    #[inline]
-    fn apply(&self, y : Loc<F, N>) -> F {
-        self.apply(&y)
-    }
-}
-
 #[replace_float_literals(F::cast_from(literal))]
 impl<F : Float, R, const N : usize> Support<F, N>
 for AutoConvolution<CubeIndicator<R, N>>
--- a/src/kernels/base.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/kernels/base.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -14,16 +14,22 @@
     GlobalAnalysis,
     Bounded,
 };
-use alg_tools::mapping::{Apply, Differentiable};
+use alg_tools::mapping::{
+    Mapping,
+    DifferentiableImpl,
+    DifferentiableMapping,
+    Differential,
+};
+use alg_tools::instance::{Instance, Space};
 use alg_tools::maputil::{array_init, map2, map1_indexed};
 use alg_tools::sets::SetOrd;
 
 use crate::fourier::Fourier;
-use crate::types::Lipschitz;
+use crate::types::*;
 
 /// Representation of the product of two kernels.
 ///
-/// The kernels typically implement [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
+/// The kernels typically implement [`Support`] and [`Mapping`].
 ///
 /// The implementation [`Support`] only uses the [`Support::support_hint`] of the first parameter!
 #[derive(Copy,Clone,Serialize,Debug)]
@@ -34,59 +40,94 @@
     pub B
 );
 
-impl<A, B, F : Float, const N : usize> Apply<Loc<F, N>>
+impl<A, B, F : Float, const N : usize> Mapping<Loc<F, N>>
 for SupportProductFirst<A, B>
-where A : for<'a> Apply<&'a Loc<F, N>, Output=F>,
-      B : for<'a> Apply<&'a Loc<F, N>, Output=F> {
-    type Output = F;
+where
+    A : Mapping<Loc<F, N>, Codomain = F>,
+    B : Mapping<Loc<F, N>, Codomain = F>,
+{
+    type Codomain = F;
+
     #[inline]
-    fn apply(&self, x : Loc<F, N>) -> Self::Output {
-        self.0.apply(&x) * self.1.apply(&x)
+    fn apply<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Codomain {
+        self.0.apply(x.ref_instance()) * self.1.apply(x)
     }
 }
 
-impl<'a, A, B, F : Float, const N : usize> Apply<&'a Loc<F, N>>
+impl<A, B, F : Float, const N : usize> DifferentiableImpl<Loc<F, N>>
 for SupportProductFirst<A, B>
-where A : Apply<&'a Loc<F, N>, Output=F>,
-      B : Apply<&'a Loc<F, N>, Output=F> {
-    type Output = F;
+where
+    A : DifferentiableMapping<
+        Loc<F, N>,
+        DerivativeDomain=Loc<F, N>,
+        Codomain = F
+    >,
+    B : DifferentiableMapping<
+        Loc<F, N>,
+        DerivativeDomain=Loc<F, N>,
+        Codomain = F,
+    >
+{
+    type Derivative = Loc<F, N>;
+
     #[inline]
-    fn apply(&self, x : &'a Loc<F, N>) -> Self::Output {
-        self.0.apply(x) * self.1.apply(x)
+    fn differential_impl<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Derivative {
+        let xr = x.ref_instance();
+        self.0.differential(xr) * self.1.apply(xr) + self.1.differential(xr) * self.0.apply(x)
     }
 }
 
-impl<A, B, F : Float, const N : usize> Differentiable<Loc<F, N>>
+impl<A, B, M : Copy, F : Float> Lipschitz<M>
 for SupportProductFirst<A, B>
-where A : for<'a> Apply<&'a Loc<F, N>, Output=F>
-          + for<'a> Differentiable<&'a Loc<F, N>, Output=Loc<F, N>>,
-      B : for<'a> Apply<&'a Loc<F, N>, Output=F>
-          + for<'a> Differentiable<&'a Loc<F, N>, Output=Loc<F, N>> {
-    type Output = Loc<F, N>;
+where A : Lipschitz<M, FloatType = F> + Bounded<F>,
+      B : Lipschitz<M, FloatType = F> + Bounded<F> {
+    type FloatType = F;
     #[inline]
-    fn differential(&self, x : Loc<F, N>) -> Self::Output {
-        self.0.differential(&x) * self.1.apply(&x) + self.1.differential(&x) * self.0.apply(&x)
+    fn lipschitz_factor(&self, m : M) -> Option<F> {
+        // f(x)g(x) - f(y)g(y) = f(x)[g(x)-g(y)] - [f(y)-f(x)]g(y)
+        let &SupportProductFirst(ref f, ref g) = self;
+        f.lipschitz_factor(m).map(|l| l * g.bounds().uniform())
+         .zip(g.lipschitz_factor(m).map(|l| l * f.bounds().uniform()))
+         .map(|(a, b)| a + b)
     }
 }
 
-impl<'a, A, B, F : Float, const N : usize> Differentiable<&'a Loc<F, N>>
-for SupportProductFirst<A, B>
-where A : Apply<&'a Loc<F, N>, Output=F>
-          + Differentiable<&'a Loc<F, N>, Output=Loc<F, N>>,
-      B : Apply<&'a Loc<F, N>, Output=F>
-          + Differentiable<&'a Loc<F, N>, Output=Loc<F, N>> {
-    type Output = Loc<F, N>;
+impl<'a, A, B, M : Copy, Domain, F : Float> Lipschitz<M>
+for Differential<'a, Domain, SupportProductFirst<A, B>>
+where
+    Domain : Space,
+    A : Clone + DifferentiableMapping<Domain> + Lipschitz<M, FloatType = F> + Bounded<F>,
+    B : Clone + DifferentiableMapping<Domain> + Lipschitz<M, FloatType = F> + Bounded<F>,
+    SupportProductFirst<A, B> :  DifferentiableMapping<Domain>,
+    for<'b> A::Differential<'b> : Lipschitz<M, FloatType = F> + NormBounded<L2, FloatType=F>,
+    for<'b> B::Differential<'b> : Lipschitz<M, FloatType = F> + NormBounded<L2, FloatType=F>
+{
+    type FloatType = F;
     #[inline]
-    fn differential(&self, x : &'a Loc<F, N>) -> Self::Output {
-        self.0.differential(&x) * self.1.apply(&x) + self.1.differential(&x) * self.0.apply(&x)
+    fn lipschitz_factor(&self, m : M) -> Option<F> {
+        // ∇[gf] = f∇g + g∇f
+        // ⟹ ∇[gf](x) - ∇[gf](y) = f(x)∇g(x) + g(x)∇f(x) - f(y)∇g(y) + g(y)∇f(y)
+        //                        = f(x)[∇g(x)-∇g(y)] + g(x)∇f(x) - [f(y)-f(x)]∇g(y) + g(y)∇f(y)
+        //                        = f(x)[∇g(x)-∇g(y)] + g(x)[∇f(x)-∇f(y)]
+        //                          - [f(y)-f(x)]∇g(y) - [g(y)-g(x)]∇f(y)
+        let &SupportProductFirst(ref f, ref g) = self.base_fn();
+        let (df, dg) = (f.diff_ref(), g.diff_ref());
+        [
+            df.lipschitz_factor(m).map(|l| l * g.bounds().uniform()),
+            dg.lipschitz_factor(m).map(|l| l * f.bounds().uniform()),
+            f.lipschitz_factor(m).map(|l| l * dg.norm_bound(L2)),
+            g.lipschitz_factor(m).map(|l| l * df.norm_bound(L2))
+        ].into_iter().sum()
     }
 }
 
 
 impl<'a, A, B, F : Float, const N : usize> Support<F, N>
 for SupportProductFirst<A, B>
-where A : Support<F, N>,
-      B : Support<F, N> {
+where
+    A : Support<F, N>,
+    B : Support<F, N>
+{
     #[inline]
     fn support_hint(&self) -> Cube<F, N> {
         self.0.support_hint()
@@ -125,7 +166,7 @@
 
 /// Representation of the sum of two kernels
 ///
-/// The kernels typically implement [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
+/// The kernels typically implement [`Support`] and [`Mapping`].
 ///
 /// The implementation [`Support`] only uses the [`Support::support_hint`] of the first parameter!
 #[derive(Copy,Clone,Serialize,Debug)]
@@ -136,55 +177,48 @@
     pub B
 );
 
-impl<'a, A, B, F : Float, const N : usize> Apply<&'a Loc<F, N>>
+impl<'a, A, B, F : Float, const N : usize> Mapping<Loc<F, N>>
 for SupportSum<A, B>
-where A : Apply<&'a Loc<F, N>, Output=F>,
-      B : Apply<&'a Loc<F, N>, Output=F> {
-    type Output = F;
+where
+    A : Mapping<Loc<F, N>, Codomain = F>,
+    B : Mapping<Loc<F, N>, Codomain = F>,
+{
+    type Codomain = F;
+
     #[inline]
-    fn apply(&self, x : &'a Loc<F, N>) -> Self::Output {
-        self.0.apply(x) + self.1.apply(x)
-    }
-}
-
-impl<A, B, F : Float, const N : usize> Apply<Loc<F, N>>
-for SupportSum<A, B>
-where A : for<'a> Apply<&'a Loc<F, N>, Output=F>,
-      B : for<'a> Apply<&'a Loc<F, N>, Output=F> {
-    type Output = F;
-    #[inline]
-    fn apply(&self, x : Loc<F, N>) -> Self::Output {
-        self.0.apply(&x) + self.1.apply(&x)
+    fn apply<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Codomain {
+        self.0.apply(x.ref_instance()) + self.1.apply(x)
     }
 }
 
-impl<'a, A, B, F : Float, const N : usize> Differentiable<&'a Loc<F, N>>
+impl<'a, A, B, F : Float, const N : usize> DifferentiableImpl<Loc<F, N>>
 for SupportSum<A, B>
-where A : Differentiable<&'a Loc<F, N>, Output=Loc<F, N>>,
-      B : Differentiable<&'a Loc<F, N>, Output=Loc<F, N>> {
-    type Output = Loc<F, N>;
+where
+    A : DifferentiableMapping<
+        Loc<F, N>,
+        DerivativeDomain = Loc<F, N>
+    >,
+    B : DifferentiableMapping<
+        Loc<F, N>,
+        DerivativeDomain = Loc<F, N>,
+    >
+{
+
+    type Derivative = Loc<F, N>;
+
     #[inline]
-    fn differential(&self, x : &'a Loc<F, N>) -> Self::Output {
-        self.0.differential(x) + self.1.differential(x)
+    fn differential_impl<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Derivative {
+        self.0.differential(x.ref_instance()) + self.1.differential(x)
     }
 }
 
-impl<A, B, F : Float, const N : usize> Differentiable<Loc<F, N>>
-for SupportSum<A, B>
-where A : for<'a> Differentiable<&'a Loc<F, N>, Output=Loc<F, N>>,
-      B : for<'a> Differentiable<&'a Loc<F, N>, Output=Loc<F, N>> {
-    type Output = Loc<F, N>;
-    #[inline]
-    fn differential(&self, x : Loc<F, N>) -> Self::Output {
-        self.0.differential(&x) + self.1.differential(&x)
-    }
-}
 
 impl<'a, A, B, F : Float, const N : usize> Support<F, N>
 for SupportSum<A, B>
 where A : Support<F, N>,
       B : Support<F, N>,
       Cube<F, N> : SetOrd {
+
     #[inline]
     fn support_hint(&self) -> Cube<F, N> {
         self.0.support_hint().common(&self.1.support_hint())
@@ -237,10 +271,29 @@
     }
 }
 
+impl<'b, F : Float, M : Copy, A, B, Domain> Lipschitz<M>
+for Differential<'b, Domain, SupportSum<A, B>>
+where
+    Domain : Space,
+    A : Clone + DifferentiableMapping<Domain, Codomain=F>,
+    B : Clone + DifferentiableMapping<Domain, Codomain=F>,
+    SupportSum<A, B> : DifferentiableMapping<Domain, Codomain=F>,
+    for<'a> A :: Differential<'a> : Lipschitz<M, FloatType = F>,
+    for<'a> B :: Differential<'a> : Lipschitz<M, FloatType = F>
+{
+    type FloatType = F;
+
+    fn lipschitz_factor(&self, m : M) -> Option<F> {
+        let base = self.base_fn();
+        base.0.diff_ref().lipschitz_factor(m)
+            .zip(base.1.diff_ref().lipschitz_factor(m))
+            .map(|(a, b)| a + b)
+    }
+}
 
 /// Representation of the convolution of two kernels.
 ///
-/// The kernels typically implement [`Support`]s and [`Mapping`][alg_tools::mapping::Mapping].
+/// The kernels typically implement [`Support`]s and [`Mapping`].
 //
 /// Trait implementations have to be on a case-by-case basis.
 #[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
@@ -252,18 +305,45 @@
 );
 
 impl<F : Float, M, A, B> Lipschitz<M> for Convolution<A, B>
-where A : Bounded<F> ,
+where A : Norm<F, L1> ,
       B : Lipschitz<M, FloatType = F> {
     type FloatType = F;
 
     fn lipschitz_factor(&self, m : M) -> Option<F> {
-        self.1.lipschitz_factor(m).map(|l| l * self.0.bounds().uniform())
+        // For [f * g](x) = ∫ f(x-y)g(y) dy we have
+        // [f * g](x) - [f * g](z) = ∫ [f(x-y)-f(z-y)]g(y) dy.
+        // Hence |[f * g](x) - [f * g](z)| ≤ ∫ |f(x-y)-f(z-y)| |g(y)| dy.
+        //                                 ≤ L|x-z| ∫ |g(y)| dy,
+        // where L is the Lipschitz factor of f.
+        self.1.lipschitz_factor(m).map(|l| l * self.0.norm(L1))
+    }
+}
+
+impl<'b, F : Float, M, A, B, Domain> Lipschitz<M>
+for Differential<'b, Domain, Convolution<A, B>>
+where
+    Domain : Space,
+    A : Clone + Norm<F, L1> ,
+    Convolution<A, B> : DifferentiableMapping<Domain, Codomain=F>,
+    B : Clone + DifferentiableMapping<Domain, Codomain=F>,
+    for<'a> B :: Differential<'a> : Lipschitz<M, FloatType = F>
+{
+    type FloatType = F;
+
+    fn lipschitz_factor(&self, m : M) -> Option<F> {
+        // For [f * g](x) = ∫ f(x-y)g(y) dy we have
+        // ∇[f * g](x) - ∇[f * g](z) = ∫ [∇f(x-y)-∇f(z-y)]g(y) dy.
+        // Hence |∇[f * g](x) - ∇[f * g](z)| ≤ ∫ |∇f(x-y)-∇f(z-y)| |g(y)| dy.
+        //                                 ≤ L|x-z| ∫ |g(y)| dy,
+        // where L is the Lipschitz factor of ∇f.
+        let base = self.base_fn();
+        base.1.diff_ref().lipschitz_factor(m).map(|l| l * base.0.norm(L1))
     }
 }
 
 /// Representation of the autoconvolution of a kernel.
 ///
-/// The kernel typically implements [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
+/// The kernel typically implements [`Support`] and [`Mapping`].
 ///
 /// Trait implementations have to be on a case-by-case basis.
 #[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
@@ -273,11 +353,27 @@
 );
 
 impl<F : Float, M, C> Lipschitz<M> for AutoConvolution<C>
-where C : Lipschitz<M, FloatType = F> + Bounded<F> {
+where C : Lipschitz<M, FloatType = F> + Norm<F, L1> {
     type FloatType = F;
 
     fn lipschitz_factor(&self, m : M) -> Option<F> {
-        self.0.lipschitz_factor(m).map(|l| l * self.0.bounds().uniform())
+        self.0.lipschitz_factor(m).map(|l| l * self.0.norm(L1))
+    }
+}
+
+impl<'b, F : Float, M, C, Domain> Lipschitz<M>
+for Differential<'b, Domain, AutoConvolution<C>>
+where
+    Domain : Space,
+    C : Clone + Norm<F, L1> + DifferentiableMapping<Domain, Codomain=F>,
+    AutoConvolution<C> : DifferentiableMapping<Domain, Codomain=F>,
+    for<'a> C :: Differential<'a> : Lipschitz<M, FloatType = F>
+{
+    type FloatType = F;
+
+    fn lipschitz_factor(&self, m : M) -> Option<F> {
+        let base = self.base_fn();
+        base.0.diff_ref().lipschitz_factor(m).map(|l| l * base.0.norm(L1))
     }
 }
 
@@ -285,42 +381,47 @@
 /// Representation a multi-dimensional product of a one-dimensional kernel.
 ///
 /// For $G: ℝ → ℝ$, this is the function $F(x\_1, …, x\_n) := \prod_{i=1}^n G(x\_i)$.
-/// The kernel $G$ typically implements [`Support`] and [`Mapping`][alg_tools::mapping::Mapping]
+/// The kernel $G$ typically implements [`Support`] and [`Mapping`]
 /// on [`Loc<F, 1>`]. Then the product implements them on [`Loc<F, N>`].
 #[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
+#[allow(dead_code)]
 struct UniformProduct<G, const N : usize>(
     /// The one-dimensional kernel
     G
 );
 
-impl<'a, G, F : Float, const N : usize> Apply<&'a Loc<F, N>>
+impl<'a, G, F : Float, const N : usize> Mapping<Loc<F, N>>
 for UniformProduct<G, N>
-where G : Apply<Loc<F, 1>, Output=F> {
-    type Output = F;
+where
+    G : Mapping<Loc<F, 1>, Codomain = F>
+{
+    type Codomain = F;
+
     #[inline]
-    fn apply(&self, x : &'a Loc<F, N>) -> F {
-        x.iter().map(|&y| self.0.apply(Loc([y]))).product()
+    fn apply<I : Instance<Loc<F, N>>>(&self, x : I) -> F {
+        x.cow().iter().map(|&y| self.0.apply(Loc([y]))).product()
     }
 }
 
-impl<G, F : Float, const N : usize> Apply<Loc<F, N>>
+
+
+impl<'a, G, F : Float, const N : usize> DifferentiableImpl<Loc<F, N>>
 for UniformProduct<G, N>
-where G : Apply<Loc<F, 1>, Output=F> {
-    type Output = F;
+where
+    G : DifferentiableMapping<
+        Loc<F, 1>,
+        DerivativeDomain = F,
+        Codomain = F,
+    >
+{
+    type Derivative = Loc<F, N>;
+
     #[inline]
-    fn apply(&self, x : Loc<F, N>) -> F {
-        x.into_iter().map(|y| self.0.apply(Loc([y]))).product()
-    }
-}
-
-impl<'a, G, F : Float, const N : usize> Differentiable<&'a Loc<F, N>>
-for UniformProduct<G, N>
-where G : Apply<Loc<F, 1>, Output=F> + Differentiable<Loc<F, 1>, Output=F> {
-    type Output = Loc<F, N>;
-    #[inline]
-    fn differential(&self, x : &'a Loc<F, N>) -> Loc<F, N> {
-        let vs = x.map(|y| self.0.apply(Loc([y])));
-        product_differential(x, &vs, |y| self.0.differential(Loc([y])))
+    fn differential_impl<I : Instance<Loc<F, N>>>(&self, x0 : I) -> Loc<F, N> {
+        x0.eval(|x| {
+            let vs = x.map(|y| self.0.apply(Loc([y])));
+            product_differential(x, &vs, |y| self.0.differential(Loc([y])))
+        })
     }
 }
 
@@ -342,13 +443,39 @@
     }).into()
 }
 
-impl<G, F : Float, const N : usize> Differentiable<Loc<F, N>>
-for UniformProduct<G, N>
-where G : Apply<Loc<F, 1>, Output=F> + Differentiable<Loc<F, 1>, Output=F> {
-    type Output = Loc<F, N>;
-    #[inline]
-    fn differential(&self, x : Loc<F, N>) -> Loc<F, N> {
-        self.differential(&x)
+/// Helper function to calculate the Lipschitz factor of $∇f$ for $f(x)=∏_{i=1}^N g(x_i)$.
+///
+/// The parameter `bound` is a bound on $|g|_∞$, `lip` is a Lipschitz factor for $g$,
+/// `dbound` is a bound on $|∇g|_∞$, and `dlip` a Lipschitz factor for $∇g$.
+#[inline]
+pub(crate) fn product_differential_lipschitz_factor<F : Float, const N : usize>(
+    bound : F,
+    lip : F,
+    dbound : F,
+    dlip : F
+) -> F {
+    // For arbitrary ψ(x) = ∏_{i=1}^n ψ_i(x_i), we have
+    // ψ(x) - ψ(y) = ∑_i [ψ_i(x_i)-ψ_i(y_i)] ∏_{j ≠ i} ψ_j(x_j)
+    // by a simple recursive argument. In particular, if ψ_i=g for all i, j, we have
+    // |ψ(x) - ψ(y)| ≤ ∑_i L_g M_g^{n-1}|x-y|, where L_g is the Lipschitz factor of g, and
+    // M_g a bound on it.
+    //
+    // We also have in the general case ∇ψ(x) = ∑_i ∇ψ_i(x_i) ∏_{j ≠ i} ψ_j(x_j), whence
+    // using the previous formula for each i with f_i=∇ψ_i and f_j=ψ_j for j ≠ i, we get
+    //  ∇ψ(x) - ∇ψ(y) = ∑_i[ ∇ψ_i(x_i)∏_{j ≠ i} ψ_j(x_j) - ∇ψ_i(y_i)∏_{j ≠ i} ψ_j(y_j)]
+    //                = ∑_i[ [∇ψ_i(x_i) - ∇ψ_i(y_i)] ∏_{j ≠ i}ψ_j(x_j)
+    //                       + [∑_{k ≠ i} [ψ_k(x_k) - ψ_k(y_k)] ∏_{j ≠ i, k}ψ_j(y_j)]∇ψ_i(y_i)].
+    // With $ψ_i=g$ for all $i$, it follows that
+    // |∇ψ(x) - ∇ψ(y)| ≤ ∑_i L_{∇g} M_g^{n-1} + ∑_{k ≠ i} L_g M_g^{n-2} M_{∇g}
+    //                 = n [L_{∇g} M_g^{n-1} + (n-1) L_g M_g^{n-2} M_{∇g}].
+    //                 = n M_g^{n-2}[L_{∇g} M_g + (n-1) L_g M_{∇g}].
+    if N >= 2 {
+        F::cast_from(N) * bound.powi((N-2) as i32)
+                        * (dlip * bound  + F::cast_from(N-1) * lip * dbound)
+    } else if N==1 {
+        dlip
+    } else {
+        panic!("Invalid dimension")
     }
 }
 
--- a/src/kernels/gaussian.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/kernels/gaussian.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -17,10 +17,15 @@
     Weighted,
     Bounded,
 };
-use alg_tools::mapping::{Apply, Differentiable};
+use alg_tools::mapping::{
+    Mapping,
+    Instance,
+    Differential,
+    DifferentiableImpl,
+};
 use alg_tools::maputil::array_init;
 
-use crate::types::Lipschitz;
+use crate::types::*;
 use crate::fourier::Fourier;
 use super::base::*;
 use super::ball_indicator::CubeIndicator;
@@ -59,63 +64,108 @@
 
 
 #[replace_float_literals(S::Type::cast_from(literal))]
-impl<'a, S, const N : usize> Apply<&'a Loc<S::Type, N>> for Gaussian<S, N>
-where S : Constant {
-    type Output = S::Type;
+impl<'a, S, const N : usize> Mapping<Loc<S::Type, N>> for Gaussian<S, N>
+where
+    S : Constant
+{
+    type Codomain = S::Type;
+
     // This is not normalised to neither to have value 1 at zero or integral 1
     // (unless the cut-off ε=0).
     #[inline]
-    fn apply(&self, x : &'a Loc<S::Type, N>) -> Self::Output {
-        let d_squared = x.norm2_squared();
+    fn apply<I : Instance<Loc<S::Type, N>>>(&self, x : I) -> Self::Codomain {
+        let d_squared = x.eval(|x| x.norm2_squared());
         let σ2 = self.variance.value();
         let scale = self.scale();
         (-d_squared / (2.0 * σ2)).exp() / scale
     }
 }
 
-impl<S, const N : usize> Apply<Loc<S::Type, N>> for Gaussian<S, N>
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<'a, S, const N : usize> DifferentiableImpl<Loc<S::Type, N>> for Gaussian<S, N>
 where S : Constant {
-    type Output = S::Type;
+    type Derivative = Loc<S::Type, N>;
+
     #[inline]
-    fn apply(&self, x : Loc<S::Type, N>) -> Self::Output {
-        self.apply(&x)
+    fn differential_impl<I : Instance<Loc<S::Type, N>>>(&self, x0 : I) -> Self::Derivative {
+        let x = x0.cow();
+        let f = -self.apply(&*x) / self.variance.value();
+        *x * f
     }
 }
 
-#[replace_float_literals(S::Type::cast_from(literal))]
-impl<'a, S, const N : usize> Differentiable<&'a Loc<S::Type, N>> for Gaussian<S, N>
-where S : Constant {
-    type Output = Loc<S::Type, N>;
-    #[inline]
-    fn differential(&self, x : &'a Loc<S::Type, N>) -> Self::Output {
-        x * (self.apply(x) / self.variance.value())
-    }
-}
 
-impl<S, const N : usize> Differentiable<Loc<S::Type, N>> for Gaussian<S, N>
-where S : Constant {
-    type Output = Loc<S::Type, N>;
-    // This is not normalised to neither to have value 1 at zero or integral 1
-    // (unless the cut-off ε=0).
-    #[inline]
-    fn differential(&self, x : Loc<S::Type, N>) -> Self::Output {
-        x * (self.apply(&x) / self.variance.value())
-    }
-}
+// To calculate the the Lipschitz factors, we consider
+// f(t)    = e^{-t²/2}
+// f'(t)   = -t f(t)       which has max at t=1 by f''(t)=0
+// f''(t)  = (t²-1)f(t)    which has max at t=√3 by f'''(t)=0
+// f'''(t) = -(t³-3t)
+// So f has the Lipschitz factor L=f'(1), and f' has the Lipschitz factor L'=f''(√3).
+//
+// Now g(x) = Cf(‖x‖/σ) for a scaling factor C is the Gaussian.
+// Thus ‖g(x)-g(y)‖ = C‖f(‖x‖/σ)-f(‖y‖/σ)‖ ≤ (C/σ)L‖x-y‖,
+// so g has the Lipschitz factor (C/σ)f'(1) = (C/σ)exp(-0.5).
+//
+// Also ∇g(x)= Cx/(σ‖x‖)f'(‖x‖/σ)       (*)
+//            = -(C/σ²)xf(‖x‖/σ)
+//            = -C/σ (x/σ) f(‖x/σ‖)
+// ∇²g(x) = -(C/σ)[Id/σ f(‖x‖/σ) + x ⊗ x/(σ²‖x‖) f'(‖x‖/σ)]
+//        = (C/σ²)[-Id + x ⊗ x/σ²]f(‖x‖/σ).
+// Thus ‖∇²g(x)‖ = (C/σ²)‖-Id + x ⊗ x/σ²‖f(‖x‖/σ), where
+// ‖-Id + x ⊗ x/σ²‖ = ‖[-Id + x ⊗ x/σ²](x/‖x‖)‖ = |-1 + ‖x²/σ^2‖|.
+// This means that  ‖∇²g(x)‖ = (C/σ²)|f''(‖x‖/σ)|, which is maximised with ‖x‖/σ=√3.
+// Hence the Lipschitz factor of ∇g is (C/σ²)f''(√3) = (C/σ²)2e^{-3/2}.
 
 #[replace_float_literals(S::Type::cast_from(literal))]
 impl<S, const N : usize> Lipschitz<L2> for Gaussian<S, N>
 where S : Constant {
     type FloatType = S::Type;
     fn lipschitz_factor(&self, L2 : L2) -> Option<Self::FloatType> {
-        // f(x)=f_1(‖x‖_2/σ) * √(2π) / √(2πσ)^N, where f_1 is one-dimensional Gaussian with
-        // variance 1. The Lipschitz factor of f_1 is e^{-1/2}/√(2π), see, e.g.,
-        // https://math.stackexchange.com/questions/3630967/is-the-gaussian-density-lipschitz-continuous
-        // Thus the Lipschitz factor we want is e^{-1/2} / (√(2πσ)^N * σ).
         Some((-0.5).exp() / (self.scale() * self.variance.value().sqrt()))
     }
 }
 
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<'a, S : Constant, const N : usize> Lipschitz<L2>
+for Differential<'a, Loc<S::Type, N>, Gaussian<S, N>> {
+    type FloatType = S::Type;
+    
+    fn lipschitz_factor(&self, _l2 : L2) -> Option<S::Type> {
+        let g = self.base_fn();
+        let σ2 = g.variance.value();
+        let scale = g.scale();
+        Some(2.0*(-3.0/2.0).exp()/(σ2*scale))
+    }
+}
+
+// From above, norm bounds on the differential can be calculated as achieved
+// for f' at t=1, i.e., the bound is |f'(1)|.
+// For g then |C/σ f'(1)|.
+// It follows that the norm bounds on the differential are just the Lipschitz
+// factors of the undifferentiated function, given how the latter is calculated above.
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<'b, S : Constant, const N : usize> NormBounded<L2>
+for Differential<'b, Loc<S::Type, N>, Gaussian<S, N>> {
+    type FloatType = S::Type;
+    
+    fn norm_bound(&self, _l2 : L2) -> S::Type {
+        self.base_fn().lipschitz_factor(L2).unwrap()
+    }
+}
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<'b, 'a, S : Constant, const N : usize> NormBounded<L2>
+for Differential<'b, Loc<S::Type, N>, &'a Gaussian<S, N>> {
+    type FloatType = S::Type;
+    
+    fn norm_bound(&self, _l2 : L2) -> S::Type {
+        self.base_fn().lipschitz_factor(L2).unwrap()
+    }
+}
+
+
 #[replace_float_literals(S::Type::cast_from(literal))]
 impl<'a, S, const N : usize> Gaussian<S, N>
 where S : Constant {
@@ -204,16 +254,16 @@
 /// This implements $g := χ\_{[-b, b]^n} \* (f χ\_{[-a, a]^n})$ where $a,b>0$ and $f$ is
 /// a gaussian kernel on $ℝ^n$. For an expression for $g$, see Lemma 3.9 in the manuscript.
 #[replace_float_literals(F::cast_from(literal))]
-impl<'a, F : Float, R, C, S, const N : usize> Apply<&'a Loc<F, N>>
+impl<'a, F : Float, R, C, S, const N : usize> Mapping<Loc<F, N>>
 for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
 where R : Constant<Type=F>,
       C : Constant<Type=F>,
       S : Constant<Type=F> {
 
-    type Output = F;
+    type Codomain = F;
 
     #[inline]
-    fn apply(&self, y : &'a Loc<F, N>) -> F {
+    fn apply<I : Instance<Loc<F, N>>>(&self, y : I) -> F {
         let Convolution(ref ind,
                         SupportProductFirst(ref cut,
                                             ref gaussian)) = self;
@@ -224,7 +274,7 @@
         let c = 0.5; // 1/(σ√(2π) * σ√(π/2) = 1/2
         
         // This is just a product of one-dimensional versions
-        y.product_map(|x| {
+        y.cow().product_map(|x| {
             let c1 = -(a.min(b + x)); //(-a).max(-x-b);
             let c2 = a.min(b - x);
             if c1 >= c2 {
@@ -239,43 +289,31 @@
     }
 }
 
-impl<F : Float, R, C, S, const N : usize> Apply<Loc<F, N>>
+/// This implements the differential of $g := χ\_{[-b, b]^n} \* (f χ\_{[-a, a]^n})$ where $a,b>0$
+/// and $f$ is a gaussian kernel on $ℝ^n$. For an expression for the value of $g$, from which the
+/// derivative readily arises (at points of differentiability), see Lemma 3.9 in the manuscript.
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, R, C, S, const N : usize> DifferentiableImpl<Loc<F, N>>
 for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
 where R : Constant<Type=F>,
       C : Constant<Type=F>,
       S : Constant<Type=F> {
 
-    type Output = F;
-
-    #[inline]
-    fn apply(&self, y : Loc<F, N>) -> F {
-        self.apply(&y)
-    }
-}
+    type Derivative = Loc<F, N>;
 
-/// This implements the differential of $g := χ\_{[-b, b]^n} \* (f χ\_{[-a, a]^n})$ where $a,b>0$
-/// and $f$ is a gaussian kernel on $ℝ^n$. For an expression for the value of $g$, from which the
-/// derivative readily arises (at points of differentiability), see Lemma 3.9 in the manuscript.
-#[replace_float_literals(F::cast_from(literal))]
-impl<'a, F : Float, R, C, S, const N : usize> Differentiable<&'a Loc<F, N>>
-for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
-where R : Constant<Type=F>,
-      C : Constant<Type=F>,
-      S : Constant<Type=F> {
-
-    type Output = Loc<F, N>;
-
+    /// Although implemented, this function is not differentiable.
     #[inline]
-    fn differential(&self, y : &'a Loc<F, N>) -> Loc<F, N> {
+    fn differential_impl<I : Instance<Loc<F, N>>>(&self, y0 : I) -> Loc<F, N> {
         let Convolution(ref ind,
                         SupportProductFirst(ref cut,
                                             ref gaussian)) = self;
+        let y = y0.cow();
         let a = cut.r.value();
         let b = ind.r.value();
         let σ = gaussian.variance.value().sqrt();
         let t = F::SQRT_2 * σ;
         let c = 0.5; // 1/(σ√(2π) * σ√(π/2) = 1/2
-        let c_div_t = c / t;
+        let c_mul_erf_scale_div_t = c * F::FRAC_2_SQRT_PI / t;
         
         // Calculate the values for all component functions of the
         // product. This is just the loop from apply above.
@@ -292,35 +330,31 @@
             }
         });
         // This computes the gradient for each coordinate
-        product_differential(y, &unscaled_vs, |x| {
+        product_differential(&*y, &unscaled_vs, |x| {
             let c1 = -(a.min(b + x)); //(-a).max(-x-b);
             let c2 = a.min(b - x);
             if c1 >= c2 {
                 0.0
             } else {
-                // erf'(z) = (2/√π)*exp(-z^2), and we get extra factor -1/(√2*σ) = -1/t
-                // from the chain rule (the minus comes from inside c_1 or c_2).
-                let de1 = (-(c1/t).powi(2)).exp();
-                let de2 = (-(c2/t).powi(2)).exp();
-                c_div_t * (de1 - de2)
+                // erf'(z) = (2/√π)*exp(-z^2), and we get an extra factor ±1/(√2*σ) = ±1/t
+                // from the chain rule (the minus comes from inside c_1 or c_2, and changes the
+                // order of de2 and de1 in the final calculation).
+                let de1 = if b + x < a {
+                    (-((b+x)/t).powi(2)).exp()
+                } else {
+                    0.0
+                };
+                let de2 = if b - x < a {
+                    (-((b-x)/t).powi(2)).exp()
+                } else {
+                    0.0
+                };
+                c_mul_erf_scale_div_t * (de1 - de2)
             }
         })
     }
 }
 
-impl<F : Float, R, C, S, const N : usize> Differentiable<Loc<F, N>>
-for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
-where R : Constant<Type=F>,
-      C : Constant<Type=F>,
-      S : Constant<Type=F> {
-
-    type Output = Loc<F, N>;
-
-    #[inline]
-    fn differential(&self, y : Loc<F, N>) -> Loc<F, N> {
-        self.differential(&y)
-    }
-}
 
 #[replace_float_literals(F::cast_from(literal))]
 impl<'a, F : Float, R, C, S, const N : usize> Lipschitz<L1>
@@ -378,6 +412,7 @@
     }
 }
 
+/*
 impl<'a, F : Float, R, C, S, const N : usize> Lipschitz<L2>
 for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
 where R : Constant<Type=F>,
@@ -389,6 +424,7 @@
         self.lipschitz_factor(L1).map(|l1| l1 * <S::Type>::cast_from(N).sqrt())
     }
 }
+*/
 
 impl<F : Float, R, C, S, const N : usize>
 Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
--- a/src/kernels/hat.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/kernels/hat.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -14,8 +14,9 @@
     GlobalAnalysis,
     Bounded,
 };
-use alg_tools::mapping::Apply;
-use alg_tools::maputil::{array_init};
+use alg_tools::mapping::{Mapping, Instance};
+use alg_tools::maputil::array_init;
+use crate::types::Lipschitz;
 
 /// Representation of the hat function $f(x)=1-\\|x\\|\_1/ε$ of `width` $ε$ on $ℝ^N$.
 #[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
@@ -25,26 +26,17 @@
 }
 
 #[replace_float_literals(C::Type::cast_from(literal))]
-impl<'a, C : Constant, const N : usize> Apply<&'a Loc<C::Type, N>> for Hat<C, N> {
-    type Output = C::Type;
+impl<'a, C : Constant, const N : usize> Mapping<Loc<C::Type, N>> for Hat<C, N> {
+    type Codomain = C::Type;
+
     #[inline]
-    fn apply(&self, x : &'a Loc<C::Type, N>) -> Self::Output {
+    fn apply<I : Instance<Loc<C::Type, N>>>(&self, x : I) -> Self::Codomain {
         let ε = self.width.value();
-        0.0.max(1.0-x.norm(L1)/ε)
+        0.0.max(1.0-x.cow().norm(L1)/ε)
     }
 }
 
 #[replace_float_literals(C::Type::cast_from(literal))]
-impl<C : Constant, const N : usize> Apply<Loc<C::Type, N>> for Hat<C, N> {
-    type Output = C::Type;
-    #[inline]
-    fn apply(&self, x : Loc<C::Type, N>) -> Self::Output {
-        self.apply(&x)
-    }
-}
-
-
-#[replace_float_literals(C::Type::cast_from(literal))]
 impl<'a, C : Constant, const N : usize> Support<C::Type, N> for Hat<C, N> {
     #[inline]
     fn support_hint(&self) -> Cube<C::Type,N> {
@@ -94,6 +86,26 @@
     }
 }
 
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize> Lipschitz<L1> for Hat<C, N> {
+    type FloatType = C::Type;
+
+    fn lipschitz_factor(&self, _l1 : L1) -> Option<C::Type> {
+        Some(1.0/self.width.value())
+    }
+}
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize> Lipschitz<L2> for Hat<C, N> {
+    type FloatType = C::Type;
+
+    fn lipschitz_factor(&self, _l2 : L2) -> Option<C::Type> {
+        self.lipschitz_factor(L1).map(|l1|
+            <L2 as Dominated<C::Type, L1, Loc<C::Type,N>>>::from_norm(&L2, l1, L1)
+        )
+    }
+}
+
 impl<'a, C : Constant, const N : usize>
 LocalAnalysis<C::Type, Bounds<C::Type>, N>
 for Hat<C, N> {
--- a/src/kernels/hat_convolution.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/kernels/hat_convolution.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -14,7 +14,12 @@
     GlobalAnalysis,
     Bounded,
 };
-use alg_tools::mapping::{Apply, Differentiable};
+use alg_tools::mapping::{
+    Mapping,
+    Instance,
+    DifferentiableImpl,
+    Differential,
+};
 use alg_tools::maputil::array_init;
 
 use crate::types::Lipschitz;
@@ -39,6 +44,31 @@
 ///         -\frac{2}{3} (y-1)^3 & \frac{1}{2}\leq y<1. \\\\
 ///     \end{cases}
 /// $$
+// Hence
+// $$
+//     (h\*h)'(y) =
+//     \begin{cases}
+//         2 (y+1)^2 & -1<y\leq -\frac{1}{2}, \\\\
+//         -6 y^2-4 y & -\frac{1}{2}<y\leq 0, \\\\
+//         6 y^2-4 y & 0<y<\frac{1}{2}, \\\\
+//         -2 (y-1)^2 & \frac{1}{2}\leq y<1. \\\\
+//     \end{cases}
+// $$
+// as well as
+// $$
+//     (h\*h)''(y) =
+//     \begin{cases}
+//         4 (y+1) & -1<y\leq -\frac{1}{2}, \\\\
+//         -12 y-4 & -\frac{1}{2}<y\leq 0, \\\\
+//         12 y-4 & 0<y<\frac{1}{2}, \\\\
+//         -4 (y-1) & \frac{1}{2}\leq y<1. \\\\
+//     \end{cases}
+// $$
+// This is maximised at y=±1/2 with value 2, and minimised at y=0 with value -4.
+// Now observe that
+// $$
+//     [∇f(x\_1, …, x\_n)]_j = \frac{4}{σ} (h\*h)'(x\_j/σ) \prod\_{i ≠ j} \frac{4}{σ} (h\*h)(x\_i/σ)
+// $$
 #[derive(Copy,Clone,Debug,Serialize,Eq)]
 pub struct HatConv<S : Constant, const N : usize> {
     /// The parameter $σ$ of the kernel.
@@ -61,27 +91,19 @@
     }
 }
 
-impl<'a, S, const N : usize> Apply<&'a Loc<S::Type, N>> for HatConv<S, N>
+impl<'a, S, const N : usize> Mapping<Loc<S::Type, N>> for HatConv<S, N>
 where S : Constant {
-    type Output = S::Type;
+    type Codomain = S::Type;
+
     #[inline]
-    fn apply(&self, y : &'a Loc<S::Type, N>) -> Self::Output {
+    fn apply<I : Instance<Loc<S::Type, N>>>(&self, y : I) -> Self::Codomain {
         let σ = self.radius();
-        y.product_map(|x| {
+        y.cow().product_map(|x| {
             self.value_1d_σ1(x  / σ) / σ
         })
     }
 }
 
-impl<'a, S, const N : usize> Apply<Loc<S::Type, N>> for HatConv<S, N>
-where S : Constant {
-    type Output = S::Type;
-    #[inline]
-    fn apply(&self, y : Loc<S::Type, N>) -> Self::Output {
-        self.apply(&y)
-    }
-}
-
 #[replace_float_literals(S::Type::cast_from(literal))]
 impl<S, const N : usize> Lipschitz<L1> for HatConv<S, N>
 where S : Constant {
@@ -95,7 +117,7 @@
         // = ∑_{j=1}^N [ψ_j(x_j)-ψ_j(y_j)]∏_{i > j} ψ_i(x_i) ∏_{i < j} ψ_i(y_i)
         // Thus
         // |∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i)|
-        // ≤ ∑_{j=1}^N |ψ_j(x_j)-ψ_j(y_j)| ∏_{j ≠ i} \max_i |ψ_i|
+        // ≤ ∑_{j=1}^N |ψ_j(x_j)-ψ_j(y_j)| ∏_{i ≠ j} \max_i |ψ_i|
         let σ = self.radius();
         let l1d = self.lipschitz_1d_σ1() / (σ*σ);
         let m1d = self.value_1d_σ1(0.0) / σ;
@@ -113,31 +135,45 @@
 }
 
 
-impl<'a, S, const N : usize> Differentiable<&'a Loc<S::Type, N>> for HatConv<S, N>
+impl<'a, S, const N : usize> DifferentiableImpl<Loc<S::Type, N>> for HatConv<S, N>
 where S : Constant {
-    type Output = Loc<S::Type, N>;
+    type Derivative = Loc<S::Type, N>;
+
     #[inline]
-    fn differential(&self, y : &'a Loc<S::Type, N>) -> Self::Output {
+    fn differential_impl<I : Instance<Loc<S::Type, N>>>(&self, y0 : I) -> Self::Derivative {
+        let y = y0.cow();
         let σ = self.radius();
         let σ2 = σ * σ;
         let vs = y.map(|x| {
             self.value_1d_σ1(x  / σ) / σ
         });
-        product_differential(y, &vs, |x| {
+        product_differential(&*y, &vs, |x| {
             self.diff_1d_σ1(x  / σ) / σ2
         })
     }
 }
 
-impl<'a, S, const N : usize> Differentiable<Loc<S::Type, N>> for HatConv<S, N>
-where S : Constant {
-    type Output = Loc<S::Type, N>;
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<'a, F : Float, S, const N : usize> Lipschitz<L2>
+for Differential<'a, Loc<F, N>, HatConv<S, N>>
+where S : Constant<Type=F> {
+    type FloatType = F;
+
     #[inline]
-    fn differential(&self, y : Loc<S::Type, N>) -> Self::Output {
-        self.differential(&y)
+    fn lipschitz_factor(&self, _l2 : L2) -> Option<F> {
+        let h = self.base_fn();
+        let σ = h.radius();
+        Some(product_differential_lipschitz_factor::<F, N>(
+            h.value_1d_σ1(0.0) / σ,
+            h.lipschitz_1d_σ1() / (σ*σ),
+            h.maxabsdiff_1d_σ1() / (σ*σ),
+            h.lipschitz_diff_1d_σ1() / (σ*σ),
+        ))
     }
 }
 
+
 #[replace_float_literals(S::Type::cast_from(literal))]
 impl<'a, F : Float, S, const N : usize> HatConv<S, N>
 where S : Constant<Type=F> {
@@ -173,6 +209,34 @@
         // Maximal absolute differential achieved at ±0.5 by diff_1d_σ1 analysis
         2.0
     }
+
+    /// Computes the maximum absolute differential of the kernel for $n=1$ with $σ=1$.
+    #[inline]
+    fn maxabsdiff_1d_σ1(&self) -> F {
+        // Maximal absolute differential achieved at ±0.5 by diff_1d_σ1 analysis
+        2.0
+    }
+
+    /// Computes the second differential of the kernel for $n=1$ with $σ=1$.
+    #[inline]
+    #[allow(dead_code)]
+    fn diff2_1d_σ1(&self, x : F) -> F {
+        let y = x.abs();
+        if y >= 1.0 {
+            0.0
+        } else if y > 0.5 {
+            - 16.0 * (y - 1.0)
+        } else /* 0 ≤ y ≤ 0.5 */ {
+            48.0 * y - 16.0
+        }
+    }
+
+    /// Computes a Lipschitz factor for the differential of the kernel for $n=1$ with $σ=1$.
+    #[inline]
+    fn lipschitz_diff_1d_σ1(&self) -> F {
+        // Maximal absolute second differential achieved at 0 by diff2_1d_σ1 analysis
+        16.0
+    }
 }
 
 impl<'a, S, const N : usize> Support<S::Type, N> for HatConv<S, N>
@@ -235,21 +299,21 @@
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<'a, F : Float, R, C, const N : usize> Apply<&'a Loc<F, N>>
+impl<'a, F : Float, R, C, const N : usize> Mapping<Loc<F, N>>
 for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
 where R : Constant<Type=F>,
       C : Constant<Type=F> {
 
-    type Output = F;
+    type Codomain = F;
 
     #[inline]
-    fn apply(&self, y : &'a Loc<F, N>) -> F {
+    fn apply<I : Instance<Loc<F, N>>>(&self, y : I) -> F {
         let Convolution(ref ind, ref hatconv) = self;
         let β = ind.r.value();
         let σ = hatconv.radius();
 
         // This is just a product of one-dimensional versions
-        y.product_map(|x| {
+        y.cow().product_map(|x| {
             // With $u_σ(x) = u_1(x/σ)/σ$ the normalised hat convolution
             // we have
             // $$
@@ -264,29 +328,17 @@
     }
 }
 
-impl<'a, F : Float, R, C, const N : usize> Apply<Loc<F, N>>
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, R, C, const N : usize> DifferentiableImpl<Loc<F, N>>
 for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
 where R : Constant<Type=F>,
       C : Constant<Type=F> {
 
-    type Output = F;
+    type Derivative = Loc<F, N>;
 
     #[inline]
-    fn apply(&self, y : Loc<F, N>) -> F {
-        self.apply(&y)
-    }
-}
-
-#[replace_float_literals(F::cast_from(literal))]
-impl<'a, F : Float, R, C, const N : usize> Differentiable<&'a Loc<F, N>>
-for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
-where R : Constant<Type=F>,
-      C : Constant<Type=F> {
-
-    type Output = Loc<F, N>;
-
-    #[inline]
-    fn differential(&self, y : &'a Loc<F, N>) -> Loc<F, N> {
+    fn differential_impl<I : Instance<Loc<F, N>>>(&self, y0 : I) -> Loc<F, N> {
+        let y = y0.cow();
         let Convolution(ref ind, ref hatconv) = self;
         let β = ind.r.value();
         let σ = hatconv.radius();
@@ -295,24 +347,12 @@
         let vs = y.map(|x| {
             self.value_1d_σ1(x / σ, β / σ)
         });
-        product_differential(y, &vs, |x| {
+        product_differential(&*y, &vs, |x| {
             self.diff_1d_σ1(x  / σ, β / σ) / σ2
         })
     }
 }
 
-impl<'a, F : Float, R, C, const N : usize> Differentiable<Loc<F, N>>
-for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
-where R : Constant<Type=F>,
-      C : Constant<Type=F> {
-
-    type Output = Loc<F, N>;
-
-    #[inline]
-    fn differential(&self, y : Loc<F, N>) -> Loc<F, N> {
-        self.differential(&y)
-    }
-}
 
 /// Integrate $f$, whose support is $[c, d]$, on $[a, b]$.
 /// If $b > d$, add $g()$ to the result.
@@ -415,6 +455,22 @@
     }
 }
 
+/*
+impl<'a, F : Float, R, C, const N : usize> Lipschitz<L2>
+for Differential<Loc<F, N>, Convolution<CubeIndicator<R, N>, HatConv<C, N>>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+
+    type FloatType = F;
+
+    #[inline]
+    fn lipschitz_factor(&self, _l2 : L2) -> Option<F> {
+        dbg!("unimplemented");
+        None
+    }
+}
+*/
+
 impl<F : Float, R, C, const N : usize>
 Convolution<CubeIndicator<R, N>, HatConv<C, N>>
 where R : Constant<Type=F>,
@@ -556,7 +612,7 @@
 #[cfg(test)]
 mod tests {
     use alg_tools::lingrid::linspace;
-    use alg_tools::mapping::Apply;
+    use alg_tools::mapping::Mapping;
     use alg_tools::norms::Linfinity;
     use alg_tools::loc::Loc;
     use crate::kernels::{BallIndicator, CubeIndicator, Convolution};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/kernels/linear.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -0,0 +1,94 @@
+//! Implementation of the linear function
+
+use numeric_literals::replace_float_literals;
+use serde::Serialize;
+use alg_tools::types::*;
+use alg_tools::norms::*;
+use alg_tools::loc::Loc;
+use alg_tools::sets::Cube;
+use alg_tools::bisection_tree::{
+    Support,
+    Bounds,
+    LocalAnalysis,
+    GlobalAnalysis,
+    Bounded,
+};
+use alg_tools::mapping::{Mapping, Instance};
+use alg_tools::maputil::array_init;
+use alg_tools::euclidean::Euclidean;
+
+/// Representation of the linear functional $f(x) = ⟨v, x⟩$ on $ℝ^N$.
+#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
+pub struct Linear<F : Float, const N : usize> {
+    /// The coefficient vector $v$ of the linear functional.
+    pub v : Loc<F, N>,
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float, const N : usize> Mapping<Loc<F, N>> for Linear<F, N> {
+    type Codomain = F;
+
+    #[inline]
+    fn apply<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Codomain {
+        x.eval(|x| self.v.dot(x))
+    }
+}
+
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, const N : usize> Support<F, N> for Linear<F, N> {
+    #[inline]
+    fn support_hint(&self) -> Cube<F,N> {
+        array_init(|| [F::NEG_INFINITY, F::INFINITY]).into()
+    }
+
+    #[inline]
+    fn in_support(&self, _x : &Loc<F,N>) -> bool {
+        true
+    }
+    
+    /*fn fully_in_support(&self, _cube : &Cube<F,N>) -> bool {
+        todo!("Not implemented, but not used at the moment")
+    }*/
+
+    #[inline]
+    fn bisection_hint(&self, _cube : &Cube<F,N>) -> [Option<F>; N] {
+        [None; N]
+    }
+}
+
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, const N : usize>
+GlobalAnalysis<F, Bounds<F>>
+for Linear<F, N> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<F> {
+        Bounds(F::NEG_INFINITY, F::INFINITY)
+    }
+}
+
+impl<'a, F : Float, const N : usize>
+LocalAnalysis<F, Bounds<F>, N>
+for Linear<F, N> {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+        let (lower, upper) = cube.iter_corners()
+                                 .map(|x| self.apply(x))
+                                 .fold((F::INFINITY, F::NEG_INFINITY), |(lower, upper), v| {
+                                      (lower.min(v), upper.max(v))
+                                 });
+        Bounds(lower, upper)
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, const N : usize>
+Norm<F, Linfinity>
+for Linear<F, N> {
+    #[inline]
+    fn norm(&self, _ : Linfinity) -> F {
+        self.bounds().upper()
+    }
+}
+
--- a/src/kernels/mollifier.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/kernels/mollifier.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -2,7 +2,7 @@
 //! Implementation of the standard mollifier
 
 use rgsl::hypergeometric::hyperg_U;
-use float_extras::f64::{tgamma as gamma};
+use float_extras::f64::tgamma as gamma;
 use numeric_literals::replace_float_literals;
 use serde::Serialize;
 use alg_tools::types::*;
@@ -17,7 +17,7 @@
     LocalAnalysis,
     GlobalAnalysis
 };
-use alg_tools::mapping::Apply;
+use alg_tools::mapping::{Mapping, Instance};
 use alg_tools::maputil::array_init;
 
 /// Reresentation of the (unnormalised) standard mollifier.
@@ -36,13 +36,14 @@
 }
 
 #[replace_float_literals(C::Type::cast_from(literal))]
-impl<'a, C : Constant, const N : usize> Apply<&'a Loc<C::Type, N>> for Mollifier<C, N> {
-    type Output = C::Type;
+impl<C : Constant, const N : usize> Mapping<Loc<C::Type, N>> for Mollifier<C, N> {
+    type Codomain = C::Type;
+
     #[inline]
-    fn apply(&self, x : &'a Loc<C::Type, N>) -> Self::Output {
+    fn apply<I : Instance<Loc<C::Type, N>>>(&self, x : I) -> Self::Codomain {
         let ε = self.width.value();
         let ε2 = ε*ε;
-        let n2 = x.norm2_squared();
+        let n2 = x.eval(|x| x.norm2_squared());
         if n2 < ε2 {
             (n2 / (n2 - ε2)).exp()
         } else {
@@ -51,13 +52,6 @@
     }
 }
 
-impl<C : Constant, const N : usize> Apply<Loc<C::Type, N>> for Mollifier<C, N> {
-    type Output = C::Type;
-    #[inline]
-    fn apply(&self, x : Loc<C::Type, N>) -> Self::Output {
-        self.apply(&x)
-    }
-}
 
 impl<'a, C : Constant, const N : usize> Support<C::Type, N> for Mollifier<C, N> {
     #[inline]
--- a/src/main.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/main.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -31,6 +31,7 @@
 pub mod seminorms;
 pub mod transport;
 pub mod forward_model;
+pub mod preadjoint_helper;
 pub mod plot;
 pub mod subproblem;
 pub mod tolerance;
@@ -39,6 +40,8 @@
 pub mod fb;
 pub mod radon_fb;
 pub mod sliding_fb;
+pub mod sliding_pdps;
+pub mod forward_pdps;
 pub mod frank_wolfe;
 pub mod pdps;
 pub mod run;
@@ -166,6 +169,13 @@
     tau0 : Option<F>,
 
     #[arg(long, requires = "algorithm")]
+    /// Second primal step length parameter override for SlidingPDPS.
+    ///
+    /// Only use if running just a single algorithm, as different algorithms have different
+    /// regularisation parameters.
+    sigmap0 : Option<F>,
+
+    #[arg(long, requires = "algorithm")]
     /// Dual step length parameter override for --algorithm.
     ///
     /// Only use if running just a single algorithm, as different algorithms have different
@@ -173,7 +183,7 @@
     sigma0 : Option<F>,
 
     #[arg(long)]
-    /// Normalised transport step length for sliding_fb.
+    /// Normalised transport step length for sliding methods.
     theta0 : Option<F>,
 
     #[arg(long)]
@@ -184,15 +194,23 @@
     /// Transport toleranced wrt. ∇v
     transport_tolerance_dv : Option<F>,
 
+    #[arg(long)]
+    /// Transport adaptation factor. Must be in (0, 1).
+    transport_adaptation : Option<F>,
+
+    #[arg(long)]
+    /// Minimal step length parameter for sliding methods.
+    tau0_min : Option<F>,
+
     #[arg(value_enum, long)]
     /// PDPS acceleration, when available.
     acceleration : Option<pdps::Acceleration>,
 
-    #[arg(long)]
-    /// Perform postprocess weight optimisation for saved iterations
-    ///
-    /// Only affects FB, FISTA, and PDPS.
-    postprocessing : Option<bool>,
+    // #[arg(long)]
+    // /// Perform postprocess weight optimisation for saved iterations
+    // ///
+    // /// Only affects FB, FISTA, and PDPS.
+    // postprocessing : Option<bool>,
 
     #[arg(value_name = "n", long)]
     /// Merging frequency, if merging enabled (every n iterations)
@@ -246,9 +264,14 @@
     for experiment_shorthand in cli.experiments.iter().unique() {
         let experiment = experiment_shorthand.get_experiment(&cli.experiment_overrides).unwrap();
         let mut algs : Vec<Named<AlgorithmConfig<float>>>
-            = cli.algorithm.iter()
-                            .map(|alg| experiment.algorithm_defaults(*alg, &cli.algoritm_overrides))
-                            .collect();
+            = cli.algorithm
+                 .iter()
+                 .map(|alg| alg.to_named(
+                    experiment.algorithm_defaults(*alg)
+                              .unwrap_or_else(|| alg.default_config())
+                              .cli_override(&cli.algoritm_overrides)
+                 ))
+                 .collect();
         for filename in cli.saved_algorithm.iter() {
             let f = std::fs::File::open(filename).unwrap();
             let alg = serde_json::from_reader(f).unwrap();
--- a/src/measures.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/measures.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -7,3 +7,4 @@
 mod discrete;
 pub use discrete::*;
 pub mod merging;
+
--- a/src/measures/base.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/measures/base.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -16,3 +16,6 @@
     type Domain;
 }
 
+/// Decomposition of measures
+pub struct MeasureDecomp;
+
--- a/src/measures/delta.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/measures/delta.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -7,8 +7,9 @@
 use crate::types::*;
 use std::ops::{Div, Mul, DivAssign, MulAssign, Neg};
 use serde::ser::{Serialize, Serializer, SerializeStruct};
-use alg_tools::norms::{Norm, Dist};
-use alg_tools::linops::{Apply, Linear};
+use alg_tools::norms::Norm;
+use alg_tools::linops::{Mapping, Linear};
+use alg_tools::instance::{Instance, Space};
 
 /// Representation of a delta measure.
 ///
@@ -50,43 +51,50 @@
 }
 
 
-impl<Domain : PartialEq, F : Float> Measure<F> for DeltaMeasure<Domain, F> {
+impl<Domain, F : Float> Measure<F> for DeltaMeasure<Domain, F> {
     type Domain = Domain;
 }
 
-impl<Domain : PartialEq, F : Float> Norm<F, Radon> for DeltaMeasure<Domain, F> {
+impl<Domain, F : Float> Norm<F, Radon> for DeltaMeasure<Domain, F> {
     #[inline]
     fn norm(&self, _ : Radon) -> F {
         self.α.abs()
     }
 }
 
-impl<Domain : PartialEq, F : Float> Dist<F, Radon> for DeltaMeasure<Domain, F> {
+// impl<Domain : PartialEq, F : Float> Dist<F, Radon> for DeltaMeasure<Domain, F> {
+//     #[inline]
+//     fn dist(&self, other : &Self, _ : Radon) -> F {
+//         if self.x == other. x {
+//             (self.α - other.α).abs()
+//         } else {
+//             self.α.abs() + other.α.abs()
+//         }
+//     }
+// }
+
+impl<Domain, G, F : Num> Mapping<G> for DeltaMeasure<Domain, F>
+where
+    Domain : Space,
+    G::Codomain : Mul<F, Output=G::Codomain>,
+    G : Mapping<Domain> + Clone + Space,
+    for<'b> &'b Domain : Instance<Domain>,
+{
+    type Codomain = G::Codomain;
+
     #[inline]
-    fn dist(&self, other : &Self, _ : Radon) -> F {
-        if self.x == other. x {
-            (self.α - other.α).abs()
-        } else {
-            self.α.abs() + other.α.abs()
-        }
+    fn apply<I : Instance<G>>(&self, g : I) -> Self::Codomain {
+        g.eval(|g̃| g̃.apply(&self.x) * self.α)
     }
 }
 
-impl<'b, Domain, G, F : Num, V : Mul<F, Output=V>> Apply<G> for DeltaMeasure<Domain, F>
-where G: for<'a> Apply<&'a Domain, Output = V>,
-      V : Mul<F> {
-    type Output = V;
-
-    #[inline]
-    fn apply(&self, g : G) -> Self::Output {
-        g.apply(&self.x) * self.α
-    }
-}
-
-impl<Domain, G, F : Num, V : Mul<F, Output=V>> Linear<G> for DeltaMeasure<Domain, F>
-where G: for<'a> Apply<&'a Domain, Output = V> {
-    type Codomain = V;
-}
+impl<Domain, G, F : Num> Linear<G> for DeltaMeasure<Domain, F>
+where
+    Domain : Space,
+    G::Codomain : Mul<F, Output=G::Codomain>,
+    G : Mapping<Domain> + Clone + Space,
+    for<'b> &'b Domain : Instance<Domain>,
+{ }
 
 // /// Partial blanket implementation of [`DeltaMeasure`] as a linear functional of [`Mapping`]s.
 // /// A full blanket implementation is not possible due to annoying Rust limitations: only [`Apply`]
@@ -141,12 +149,13 @@
     }
 }
 
-/*impl<F : Num> From<(F, F)> for DeltaMeasure<Loc<F, 1>, F> {
+impl<'a, Domain : Clone, F : Num> From<&'a DeltaMeasure<Domain, F>> for DeltaMeasure<Domain, F> {
     #[inline]
-    fn from((x, α) : (F, F)) -> Self {
-        DeltaMeasure{x: Loc([x]), α: α}
+    fn from(d : &'a DeltaMeasure<Domain, F>) -> Self {
+        d.clone()
     }
-}*/
+}
+
 
 impl<Domain, F : Num> DeltaMeasure<Domain, F> {
     /// Set the mass of the spike.
@@ -186,6 +195,26 @@
     }
 }
 
+impl<Domain, F : Num> IntoIterator for DeltaMeasure<Domain, F> {
+    type Item =  Self;
+    type IntoIter =  std::iter::Once<Self>;
+
+    #[inline]
+    fn into_iter(self) -> Self::IntoIter {
+        std::iter::once(self)
+    }
+}
+
+impl<'a, Domain, F : Num> IntoIterator for &'a DeltaMeasure<Domain, F> {
+    type Item =  Self;
+    type IntoIter =  std::iter::Once<Self>;
+
+    #[inline]
+    fn into_iter(self) -> Self::IntoIter {
+        std::iter::once(self)
+    }
+}
+
 
 macro_rules! make_delta_scalarop_rhs {
     ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
--- a/src/measures/discrete.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/measures/discrete.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -11,9 +11,11 @@
 
 use alg_tools::norms::Norm;
 use alg_tools::tabledump::TableDump;
-use alg_tools::linops::{Apply, Linear};
+use alg_tools::linops::{Mapping, Linear};
 use alg_tools::iter::{MapF,Mappable};
 use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::collection::Collection;
+use alg_tools::instance::{Instance, Decomposition, MyCow, EitherDecomp, Space};
 
 use crate::types::*;
 use super::base::*;
@@ -29,6 +31,8 @@
     pub(super) spikes : Vec<DeltaMeasure<Domain, F>>,
 }
 
+pub type RNDM<F, const N : usize> = DiscreteMeasure<Loc<F, N>, F>;
+
 /// Iterator over the [`DeltaMeasure`] spikes of a [`DiscreteMeasure`].
 pub type SpikeIter<'a, Domain, F> = std::slice::Iter<'a, DeltaMeasure<Domain, F>>;
 
@@ -109,6 +113,13 @@
         self.spikes.iter_mut().zip(iter).for_each(|(δ, α)| δ.set_mass(α));
     }
 
+    /// Update the locations of all the spikes to those produced by an iterator.
+    #[inline]
+    pub fn set_locations<'a, I : Iterator<Item=&'a Domain>>(&mut self, iter : I) 
+    where Domain : 'static + Clone {
+        self.spikes.iter_mut().zip(iter.cloned()).for_each(|(δ, α)| δ.set_location(α));
+    }
+
     // /// Map the masses of all the spikes using a function and an iterator
     // #[inline]
     // pub fn zipmap_masses<
@@ -190,7 +201,7 @@
 
 impl<Domain, F : Num> IntoIterator for DiscreteMeasure<Domain, F> {
     type Item =  DeltaMeasure<Domain, F>;
-    type IntoIter =  <Vec<DeltaMeasure<Domain, F>> as IntoIterator>::IntoIter;
+    type IntoIter = std::vec::IntoIter<DeltaMeasure<Domain, F>>;
 
     #[inline]
     fn into_iter(self) -> Self::IntoIter {
@@ -198,6 +209,60 @@
     }
 }
 
+impl<'a, Domain, F : Num> IntoIterator for &'a DiscreteMeasure<Domain, F> {
+    type Item =  &'a DeltaMeasure<Domain, F>;
+    type IntoIter =  SpikeIter<'a, Domain, F>;
+
+    #[inline]
+    fn into_iter(self) -> Self::IntoIter {
+        self.spikes.iter()
+    }
+}
+
+impl<Domain, F : Num> Sum<DeltaMeasure<Domain, F>> for DiscreteMeasure<Domain, F>  {
+    // Required method
+    fn sum<I>(iter: I) -> Self
+    where
+        I : Iterator<Item = DeltaMeasure<Domain, F>>
+    {
+        Self::from_iter(iter)
+    }
+}
+
+impl<'a, Domain : Clone, F : Num> Sum<&'a DeltaMeasure<Domain, F>>
+    for DiscreteMeasure<Domain, F>
+{
+    // Required method
+    fn sum<I>(iter: I) -> Self
+    where
+        I : Iterator<Item = &'a DeltaMeasure<Domain, F>>
+    {
+        Self::from_iter(iter.cloned())
+    }
+}
+
+impl<Domain, F : Num> Sum<DiscreteMeasure<Domain, F>> for DiscreteMeasure<Domain, F>  {
+    // Required method
+    fn sum<I>(iter: I) -> Self
+    where
+        I : Iterator<Item = DiscreteMeasure<Domain, F>>
+    {
+        Self::from_iter(iter.map(|μ| μ.into_iter()).flatten())
+    }
+}
+
+impl<'a, Domain : Clone, F : Num> Sum<&'a DiscreteMeasure<Domain, F>>
+    for DiscreteMeasure<Domain, F>
+{
+    // Required method
+    fn sum<I>(iter: I) -> Self
+    where
+        I : Iterator<Item = &'a DiscreteMeasure<Domain, F>>
+    {
+        Self::from_iter(iter.map(|μ| μ.iter_spikes()).flatten().cloned())
+    }
+}
+
 impl<Domain : Clone, F : Float> DiscreteMeasure<Domain, F> {
     /// Computes `μ1 ← θ * μ1 - ζ * μ2`, pruning entries where both `μ1` (`self`) and `μ2` have
     // zero weight. `μ2` will contain copy of pruned original `μ1` without arithmetic performed.
@@ -312,6 +377,45 @@
     }
 }
 
+impl<Domain, F : Num> From<Vec<DeltaMeasure<Domain, F>>>
+for DiscreteMeasure<Domain, F> {
+    #[inline]
+    fn from(spikes : Vec<DeltaMeasure<Domain, F>>) -> Self {
+        DiscreteMeasure{ spikes }
+    }
+}
+
+impl<'a, Domain, F : Num, D> From<&'a [D]>
+for DiscreteMeasure<Domain, F>
+where &'a D : Into<DeltaMeasure<Domain, F>> {
+    #[inline]
+    fn from(list : &'a [D]) -> Self {
+        list.into_iter().map(|d| d.into()).collect()
+    }
+}
+
+
+impl<Domain, F : Num> From<DeltaMeasure<Domain, F>>
+for DiscreteMeasure<Domain, F> {
+    #[inline]
+    fn from(δ : DeltaMeasure<Domain, F>) -> Self {
+        DiscreteMeasure{
+            spikes : vec!(δ)
+        }
+    }
+}
+
+impl<'a, Domain : Clone, F : Num> From<&'a DeltaMeasure<Domain, F>>
+for DiscreteMeasure<Domain, F> {
+    #[inline]
+    fn from(δ : &'a DeltaMeasure<Domain, F>) -> Self {
+        DiscreteMeasure{
+            spikes : vec!(δ.clone())
+        }
+    }
+}
+
+
 impl<Domain, F : Num, D : Into<DeltaMeasure<Domain, F>>> FromIterator<D>
 for DiscreteMeasure<Domain, F> {
     #[inline]
@@ -371,19 +475,28 @@
     }
 }
 
-impl<Domain, G, F : Num, Y : Sum + Mul<F, Output=Y>> Apply<G> for DiscreteMeasure<Domain, F>
-where G: for<'a> Apply<&'a Domain, Output = Y> {
-    type Output = Y;
+impl<Domain, G, F : Num> Mapping<G> for DiscreteMeasure<Domain, F>
+where
+    Domain : Space,
+    G::Codomain : Sum + Mul<F, Output=G::Codomain>,
+    G : Mapping<Domain, Codomain=F> + Clone + Space,
+    for<'b> &'b Domain : Instance<Domain>,
+{
+    type Codomain = G::Codomain;
+
     #[inline]
-    fn apply(&self, g : G) -> Y {
-        self.spikes.iter().map(|m| g.apply(&m.x) * m.α).sum()
+    fn apply<I : Instance<G>>(&self, g : I) -> Self::Codomain {
+        g.eval(|g| self.spikes.iter().map(|m| g.apply(&m.x) * m.α).sum())
     }
 }
 
-impl<Domain, G, F : Num, Y : Sum + Mul<F, Output=Y>> Linear<G> for DiscreteMeasure<Domain, F>
-where G : for<'a> Apply<&'a Domain, Output = Y> {
-    type Codomain = Y;
-}
+impl<Domain, G, F : Num> Linear<G> for DiscreteMeasure<Domain, F>
+where
+    Domain : Space,
+    G::Codomain : Sum + Mul<F, Output=G::Codomain>,
+    G : Mapping<Domain, Codomain=F> + Clone + Space,
+    for<'b> &'b Domain : Instance<Domain>,
+{ }
 
 
 /// Helper trait for constructing arithmetic operations for combinations
@@ -391,6 +504,7 @@
 trait Lift<F : Num, Domain> {
     type Producer : Iterator<Item=DeltaMeasure<Domain, F>>;
 
+    #[allow(dead_code)]
     /// Lifts `self` into a [`DiscreteMeasure`].
     fn lift(self) -> DiscreteMeasure<Domain, F>;
 
@@ -687,3 +801,217 @@
 
 make_discrete_scalarop_lhs!(Mul, mul; f32 f64 i8 i16 i32 i64 isize u8 u16 u32 u64 usize);
 make_discrete_scalarop_lhs!(Div, div; f32 f64 i8 i16 i32 i64 isize u8 u16 u32 u64 usize);
+
+impl<F : Num, Domain> Collection for DiscreteMeasure<Domain, F> {
+    type Element = DeltaMeasure<Domain, F>;
+    type RefsIter<'a> = std::slice::Iter<'a, Self::Element> where Self : 'a;
+
+    #[inline]
+    fn iter_refs(&self) -> Self::RefsIter<'_> {
+        self.iter_spikes()
+    }
+}
+
+impl<Domain : Clone, F : Num> Space for DiscreteMeasure<Domain, F> {
+    type Decomp = MeasureDecomp;
+}
+
+pub type SpikeSlice<'b, Domain, F> = &'b [DeltaMeasure<Domain, F>];
+
+pub type EitherSlice<'b, Domain, F> = EitherDecomp<
+    Vec<DeltaMeasure<Domain, F>>,
+    SpikeSlice<'b, Domain, F>
+>;
+
+impl<F : Num, Domain : Clone> Decomposition<DiscreteMeasure<Domain, F>> for MeasureDecomp {
+    type Decomposition<'b> = EitherSlice<'b, Domain, F> where DiscreteMeasure<Domain, F> : 'b;
+    type Reference<'b> = SpikeSlice<'b, Domain, F> where DiscreteMeasure<Domain, F> : 'b;
+
+    /// Lift the lightweight reference type into a full decomposition type.
+    fn lift<'b>(r : Self::Reference<'b>) -> Self::Decomposition<'b> {
+        EitherDecomp::Borrowed(r)
+    }
+}
+
+impl<F : Num, Domain : Clone> Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
+for DiscreteMeasure<Domain, F>
+{
+    fn decompose<'b>(self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Decomposition<'b>
+    where Self : 'b, DiscreteMeasure<Domain, F> : 'b {
+        EitherDecomp::Owned(self.spikes)
+    }
+  
+    fn ref_instance(&self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Reference<'_>
+    {
+        self.spikes.as_slice()
+    }
+
+    fn cow<'b>(self) -> MyCow<'b, DiscreteMeasure<Domain, F>> where Self : 'b {
+        MyCow::Owned(self)
+    }
+
+    fn own(self) -> DiscreteMeasure<Domain, F> {
+        self
+    }
+}
+
+impl<'a, F : Num, Domain : Clone> Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
+for &'a DiscreteMeasure<Domain, F>
+{
+    fn decompose<'b>(self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Decomposition<'b>
+    where Self : 'b, DiscreteMeasure<Domain, F> : 'b {
+        EitherDecomp::Borrowed(self.spikes.as_slice())
+    }
+  
+    fn ref_instance(&self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Reference<'_>
+    {
+        self.spikes.as_slice()
+    }
+
+    fn cow<'b>(self) -> MyCow<'b, DiscreteMeasure<Domain, F>> where Self : 'b {
+        MyCow::Borrowed(self)
+    }
+
+    fn own(self) -> DiscreteMeasure<Domain, F> {
+        self.clone()
+    }
+}
+
+impl<'a, F : Num, Domain : Clone> Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
+for EitherSlice<'a, Domain, F>
+{
+    fn decompose<'b>(self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Decomposition<'b>
+    where Self : 'b, DiscreteMeasure<Domain, F> : 'b {
+        self
+    }
+  
+    fn ref_instance(&self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Reference<'_>
+    {
+        match self {
+            EitherDecomp::Owned(v) => v.as_slice(),
+            EitherDecomp::Borrowed(s) => s,
+        }
+    }
+
+    fn own(self) -> DiscreteMeasure<Domain, F> {
+        match self {
+            EitherDecomp::Owned(v) => v.into(),
+            EitherDecomp::Borrowed(s) => s.into(),
+        }
+    }
+}
+
+impl<'a, F : Num, Domain : Clone> Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
+for &'a EitherSlice<'a, Domain, F>
+{
+    fn decompose<'b>(self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Decomposition<'b>
+    where Self : 'b, DiscreteMeasure<Domain, F> : 'b {
+        match self {
+            EitherDecomp::Owned(v) => EitherDecomp::Borrowed(v.as_slice()),
+            EitherDecomp::Borrowed(s) => EitherDecomp::Borrowed(s),
+        }
+    }
+  
+    fn ref_instance(&self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Reference<'_>
+    {
+        match self {
+            EitherDecomp::Owned(v) => v.as_slice(),
+            EitherDecomp::Borrowed(s) => s,
+        }
+    }
+
+    fn own(self) -> DiscreteMeasure<Domain, F> {
+        match self {
+            EitherDecomp::Owned(v) => v.as_slice(),
+            EitherDecomp::Borrowed(s) => s
+        }.into()
+    }
+}
+
+impl<'a, F : Num, Domain : Clone> Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
+for SpikeSlice<'a, Domain, F>
+{
+    fn decompose<'b>(self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Decomposition<'b>
+    where Self : 'b, DiscreteMeasure<Domain, F> : 'b {
+        EitherDecomp::Borrowed(self)
+    }
+  
+    fn ref_instance(&self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Reference<'_>
+    {
+        self
+    }
+
+    fn own(self) -> DiscreteMeasure<Domain, F> {
+        self.into()
+    }
+}
+
+impl<'a, F : Num, Domain : Clone> Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
+for &'a SpikeSlice<'a, Domain, F>
+{
+    fn decompose<'b>(self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Decomposition<'b>
+    where Self : 'b, DiscreteMeasure<Domain, F> : 'b {
+        EitherDecomp::Borrowed(*self)
+    }
+  
+    fn ref_instance(&self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Reference<'_>
+    {
+        *self
+    }
+
+    fn own(self) -> DiscreteMeasure<Domain, F> {
+        (*self).into()
+    }
+}
+
+impl<F : Num, Domain : Clone > Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
+for DeltaMeasure<Domain, F>
+{
+    fn decompose<'b>(self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Decomposition<'b>
+    where Self : 'b, DiscreteMeasure<Domain, F> : 'b {
+        EitherDecomp::Owned(vec![self])
+    }
+  
+    fn ref_instance(&self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Reference<'_>
+    {
+        std::slice::from_ref(self)
+    }
+
+    fn own(self) -> DiscreteMeasure<Domain, F> {
+        self.into()
+    }
+}
+
+impl<'a, F : Num, Domain : Clone> Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
+for &'a DeltaMeasure<Domain, F>
+{
+    fn decompose<'b>(self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Decomposition<'b>
+    where Self : 'b, DiscreteMeasure<Domain, F> : 'b {
+        EitherDecomp::Borrowed(std::slice::from_ref(self))
+    }
+  
+    fn ref_instance(&self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Reference<'_>
+    {
+        std::slice::from_ref(*self)
+    }
+
+    fn own(self) -> DiscreteMeasure<Domain, F> {
+        self.into()
+    }
+}
--- a/src/pdps.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/pdps.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -6,8 +6,7 @@
  * Valkonen T. - _Proximal methods for point source localisation_,
    [arXiv:2212.02991](https://arxiv.org/abs/2212.02991).
 
-The main routine is [`pointsource_pdps`]. It is based on specilisatinn of
-[`generic_pointsource_fb_reg`] through relevant [`FBSpecialisation`] implementations.
+The main routine is [`pointsource_pdps_reg`].
 Both norm-2-squared and norm-1 data terms are supported. That is, implemented are solvers for
 <div>
 $$
@@ -37,10 +36,6 @@
 For $F_0(y)=\frac{1}{2}\|y\|_2^2$ the second part reads $y = Aμ -b$.
 For $F_0(y)=\|y\|_1$ the second part reads $y ∈ ∂\|·\|_1(Aμ - b)$.
 </p>
-
-Based on zero initialisation for $μ$, we use the [`Subdifferentiable`] trait to make an
-initialisation corresponding to the second part of the optimality conditions.
-In the algorithm itself, standard proximal steps are taking with respect to $F\_0^* + ⟨b, ·⟩$.
 */
 
 use numeric_literals::replace_float_literals;
@@ -48,13 +43,10 @@
 use nalgebra::DVector;
 use clap::ValueEnum;
 
-use alg_tools::iterate::{
-    AlgIteratorFactory,
-    AlgIteratorState,
-};
+use alg_tools::iterate::AlgIteratorFactory;
 use alg_tools::loc::Loc;
 use alg_tools::euclidean::Euclidean;
-use alg_tools::linops::Apply;
+use alg_tools::linops::Mapping;
 use alg_tools::norms::{
     Linfinity,
     Projection,
@@ -69,14 +61,17 @@
     SupportGenerator,
     LocalAnalysis,
 };
-use alg_tools::mapping::RealMapping;
+use alg_tools::mapping::{RealMapping, Instance};
 use alg_tools::nalgebra_support::ToNalgebraRealField;
 use alg_tools::linops::AXPY;
 
 use crate::types::*;
-use crate::measures::DiscreteMeasure;
+use crate::measures::{DiscreteMeasure, RNDM, Radon};
 use crate::measures::merging::SpikeMerging;
-use crate::forward_model::ForwardModel;
+use crate::forward_model::{
+    AdjointProductBoundedBy,
+    ForwardModel
+};
 use crate::seminorms::DiscreteMeasureOp;
 use crate::plot::{
     SeqPlotter,
@@ -87,7 +82,7 @@
     FBGenericConfig,
     insert_and_reweigh,
     postprocess,
-    prune_and_maybe_simple_merge
+    prune_with_stats
 };
 use crate::regularisation::RegTerm;
 use crate::dataterm::{
@@ -110,7 +105,30 @@
     Full
 }
 
-/// Settings for [`pointsource_pdps`].
+#[replace_float_literals(F::cast_from(literal))]
+impl Acceleration {
+    /// PDPS parameter acceleration. Updates τ and σ and returns ω.
+    /// This uses dual strong convexity, not primal.
+    fn accelerate<F : Float>(self, τ : &mut F, σ : &mut F, γ : F) -> F {
+        match self {
+            Acceleration::None => 1.0,
+            Acceleration::Partial => {
+                let ω = 1.0 / (1.0 + γ * (*σ)).sqrt();
+                *σ *= ω;
+                *τ /= ω;
+                ω
+            },
+            Acceleration::Full => {
+                let ω = 1.0 / (1.0 + 2.0 * γ * (*σ)).sqrt();
+                *σ *= ω;
+                *τ /= ω;
+                ω
+            },
+        }
+    }
+}
+
+/// Settings for [`pointsource_pdps_reg`].
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
 #[serde(default)]
 pub struct PDPSConfig<F : Float> {
@@ -155,9 +173,13 @@
 
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float, V :  Euclidean<F> + AXPY<F>, const N : usize>
-PDPSDataTerm<F, V, N>
-for L2Squared {
+impl<F, V, const N : usize> PDPSDataTerm<F, V, N>
+for L2Squared
+where
+    F : Float,
+    V :  Euclidean<F> + AXPY<F>,
+    for<'b> &'b V : Instance<V>,
+{
     fn some_subdifferential(&self, x : V) -> V { x }
 
     fn factor_of_strong_convexity(&self) -> F {
@@ -166,7 +188,7 @@
 
     #[inline]
     fn dual_update(&self, y : &mut V, y_prev : &V, σ : F) {
-        y.axpy(1.0 / (1.0 + σ), &y_prev, σ / (1.0 + σ));
+        y.axpy(1.0 / (1.0 + σ), y_prev, σ / (1.0 + σ));
     }
 }
 
@@ -210,16 +232,13 @@
     iterator : I,
     mut plotter : SeqPlotter<F, N>,
     dataterm : D,
-) -> DiscreteMeasure<Loc<F, N>, F>
+) -> RNDM<F, N>
 where F : Float + ToNalgebraRealField,
       I : AlgIteratorFactory<IterInfo<F, N>>,
-      for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>
-                                  + std::ops::Add<A::Observable, Output=A::Observable>,
-                                  //+ std::ops::Mul<F, Output=A::Observable>, // <-- FIXME: compiler overflow
-      A::Observable : std::ops::MulAssign<F>,
+      for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable> + Instance<A::Observable>,
       GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
-          + Lipschitz<&'a 𝒟, FloatType=F>,
+      A : ForwardModel<RNDM<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
+          + AdjointProductBoundedBy<RNDM<F, N>, 𝒟, FloatType=F>,
       BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
       G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
       𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
@@ -228,14 +247,20 @@
       K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
       BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
       PlotLookup : Plotting<N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
+      RNDM<F, N> : SpikeMerging<F>,
       D : PDPSDataTerm<F, A::Observable, N>,
       Reg : RegTerm<F, N> {
 
+    // Check parameters
+    assert!(pdpsconfig.τ0 > 0.0 &&
+            pdpsconfig.σ0 > 0.0 &&
+            pdpsconfig.τ0 * pdpsconfig.σ0 <= 1.0,
+            "Invalid step length parameters");
+
     // Set up parameters
     let config = &pdpsconfig.generic;
-    let op𝒟norm = op𝒟.opnorm_bound();
-    let l = opA.lipschitz_factor(&op𝒟).unwrap().sqrt();
+    let op𝒟norm = op𝒟.opnorm_bound(Radon, Linfinity);
+    let l = opA.adjoint_product_bound(&op𝒟).unwrap().sqrt();
     let mut τ = pdpsconfig.τ0 / l;
     let mut σ = pdpsconfig.σ0 / l;
     let γ = dataterm.factor_of_strong_convexity();
@@ -249,53 +274,42 @@
     let mut μ = DiscreteMeasure::new();
     let mut y = dataterm.some_subdifferential(-b);
     let mut y_prev = y.clone();
+    let full_stats = |μ : &RNDM<F, N>, ε, stats| IterInfo {
+        value : dataterm.calculate_fit_op(μ, opA, b) + reg.apply(μ),
+        n_spikes : μ.len(),
+        ε,
+        // postprocessing: config.postprocessing.then(|| μ.clone()),
+        .. stats
+    };
     let mut stats = IterInfo::new();
 
     // Run the algorithm
-    iterator.iterate(|state| {
+    for state in iterator.iter_init(|| full_stats(&μ, ε, stats.clone())) {
         // Calculate smooth part of surrogate model.
-        // Using `std::mem::replace` here is not ideal, and expects that `empty_observable`
-        // has no significant overhead. For some reosn Rust doesn't allow us simply moving
-        // the residual and replacing it below before the end of this closure.
-        y *= -τ;
-        let r = std::mem::replace(&mut y, opA.empty_observable());
-        let minus_τv = opA.preadjoint().apply(r);
+        let τv = opA.preadjoint().apply(y * τ);
 
         // Save current base point
         let μ_base = μ.clone();
         
         // Insert and reweigh
-        let (d, within_tolerances) = insert_and_reweigh(
-            &mut μ, &minus_τv, &μ_base, None,
+        let (d, _within_tolerances) = insert_and_reweigh(
+            &mut μ, &τv, &μ_base, None,
             op𝒟, op𝒟norm,
             τ, ε,
-            config, &reg, state, &mut stats
+            config, &reg, &state, &mut stats
         );
 
         // Prune and possibly merge spikes
-        prune_and_maybe_simple_merge(
-            &mut μ, &minus_τv, &μ_base,
-            op𝒟,
-            τ, ε,
-            config, &reg, state, &mut stats
-        );
+        if config.merge_now(&state) {
+            stats.merged += μ.merge_spikes(config.merging, |μ_candidate| {
+                let mut d = &τv + op𝒟.preapply(μ_candidate.sub_matching(&μ_base));
+                reg.verify_merge_candidate(&mut d, μ_candidate, τ, ε, &config)
+            });
+        }
+        stats.pruned += prune_with_stats(&mut μ);
 
         // Update step length parameters
-        let ω = match pdpsconfig.acceleration {
-            Acceleration::None => 1.0,
-            Acceleration::Partial => {
-                let ω = 1.0 / (1.0 + γ * σ).sqrt();
-                σ = σ * ω;
-                τ = τ / ω;
-                ω
-            },
-            Acceleration::Full => {
-                let ω = 1.0 / (1.0 + 2.0 * γ * σ).sqrt();
-                σ = σ * ω;
-                τ = τ / ω;
-                ω
-            },
-        };
+        let ω = pdpsconfig.acceleration.accelerate(&mut τ, &mut σ, γ);
 
         // Do dual update
         y = b.clone();                          // y = b
@@ -304,32 +318,17 @@
         dataterm.dual_update(&mut y, &y_prev, σ);
         y_prev.copy_from(&y);
 
-        // Update main tolerance for next iteration
-        let ε_prev = ε;
-        ε = tolerance.update(ε, state.iteration());
+        // Give statistics if requested
+        let iter = state.iteration();
         stats.this_iters += 1;
 
-        // Give function value if needed
         state.if_verbose(|| {
-            // Plot if so requested
-            plotter.plot_spikes(
-                format!("iter {} end; {}", state.iteration(), within_tolerances), &d,
-                "start".to_string(), Some(&minus_τv),
-                reg.target_bounds(τ, ε_prev), &μ,
-            );
-            // Calculate mean inner iterations and reset relevant counters.
-            // Return the statistics
-            let res = IterInfo {
-                value : dataterm.calculate_fit_op(&μ, opA, b) + reg.apply(&μ),
-                n_spikes : μ.len(),
-                ε : ε_prev,
-                postprocessing: config.postprocessing.then(|| μ.clone()),
-                .. stats
-            };
-            stats = IterInfo::new();
-            res
-        })
-    });
+            plotter.plot_spikes(iter, Some(&d), Some(&τv), &μ);
+            full_stats(&μ, ε, std::mem::replace(&mut stats, IterInfo::new()))
+        });
+
+        ε = tolerance.update(ε, iter);
+    }
 
     postprocess(μ, config, dataterm, opA, b)
 }
--- a/src/plot.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/plot.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -1,33 +1,14 @@
 //! Plotting helper utilities
 
 use numeric_literals::replace_float_literals;
-use std::io::Write;
-use image::{
-    ImageFormat,
-    ImageBuffer,
-    Rgb
-};
-use itertools::izip;
-use colorbrewer::Palette as CbPalette;
-
+use serde::Serialize;
 use alg_tools::types::*;
 use alg_tools::lingrid::LinGrid;
-use alg_tools::mapping::{
-    RealMapping,
-    DifferentiableRealMapping,
-    SliceCodomain
-};
+use alg_tools::mapping::RealMapping;
 use alg_tools::loc::Loc;
-use alg_tools::bisection_tree::Bounds;
-use alg_tools::maputil::map4;
 use alg_tools::tabledump::write_csv;
 use crate::measures::*;
 
-/// Default RGB ramp from [`colorbrewer`].
-///
-/// This is a tuple of parameters to [`colorbrewer::get_color_ramp`].
-const RAMP : (CbPalette, u32) = (CbPalette::RdBu, 11);
-
 /// Helper trait for implementing dimension-dependent plotting routines.
 pub trait Plotting<const N : usize> {
     /// Plot several mappings and a discrete measure into a file.
@@ -36,13 +17,10 @@
         T1 : RealMapping<F, N>,
         T2 : RealMapping<F, N>
     > (
-        g_explanation : String,
-        g : &T1,
-        ω_explanation : String,
+        g : Option<&T1>,
         ω : Option<&T2>,
         grid : LinGrid<F, N>,
-        bnd : Option<Bounds<F>>,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        μ : &RNDM<F, N>,
         filename : String,
     );
 
@@ -54,98 +32,57 @@
         g : &T1,
         grid : LinGrid<F, N>,
         filename : String,
-        explanation : String
     );
-
-    /// Plot a differentiable mapping into several file, sampling values on a given grid.
-    fn plot_into_file_diff<
-        F : Float,
-        T1 : DifferentiableRealMapping<F, N>,
-    > (
-        g : &T1,
-        grid : LinGrid<F, N>,
-        base_filename : String,
-        explanation : String
-    ) {
-        Self::plot_into_file(g, grid, base_filename.clone(), explanation.clone());
-        let d = g.diff_ref();
-        for i in 0..N {
-            Self::plot_into_file(&d.slice_codomain_ref(i),
-                                 grid,
-                                 format!("{base_filename}_diff{i}"),
-                                 format!("{explanation} differential, dimension {i}"));
-        }
-    }
 }
 
 /// Helper type for looking up a [`Plotting`] based on dimension.
 pub struct PlotLookup;
 
+#[derive(Serialize)]
+struct CSVHelper1<F : Float> {
+    x : F,
+    f : F,
+}
+
+#[derive(Serialize)]
+struct CSVHelper1_2<F : Float>{
+    x : F,
+    g : Option<F>,
+    omega : Option<F>
+}
+
+#[derive(Serialize)]
+struct CSVSpike1<F : Float> {
+    x : F,
+    alpha : F,
+}
+
 impl Plotting<1> for PlotLookup {
     fn plot_into_file_spikes<
         F : Float,
         T1 : RealMapping<F, 1>,
         T2 : RealMapping<F, 1>
     > (
-        g_explanation : String,
-        g : &T1,
-        ω_explanation : String,
+        g0 : Option<&T1>,
         ω0 : Option<&T2>,
         grid : LinGrid<F, 1>,
-        bnd0 : Option<Bounds<F>>,
         μ : &DiscreteMeasure<Loc<F, 1>, F>,
         filename : String,
     ) {
-        let start = grid.start[0].as_();
-        let end = grid.end[0].as_();
-        let m = μ.iter_masses().fold(F::ZERO, |m, α| m.max(α));
-        let s = μ.iter_masses().fold(F::ZERO, |m, α| m.add(α));
-        let mut spike_scale = F::ONE;
-
-        let mut plotter = poloto::plot(
-            "f", "x",
-            format!("f(x); spike max={:.4}, n={}, ∑={:.4}", m, μ.len(), s)
-        ).move_into();
-
-        if let Some(ω) = ω0 {
-            let graph_ω = grid.into_iter().map(|x@Loc([x0]) : Loc<F, 1>| {
-                [x0.as_(), ω.apply(&x).as_()]
-            });
-            plotter.line(ω_explanation.as_str(), graph_ω.clone());
-            // let csv_f = format!("{}.txt", filename);
-            // write_csv(graph_ω, csv_f).expect("CSV save error");
-        }
-
-        let graph_g = grid.into_iter().map(|x@Loc([x0]) : Loc<F, 1>| {
-            [x0.as_(), g.apply(&x).as_()]
+        let data = grid.into_iter().map(|p@Loc([x]) : Loc<F, 1>| CSVHelper1_2 {
+            x,
+            g : g0.map(|g| g.apply(&p)),
+            omega : ω0.map(|ω| ω.apply(&p))
         });
-        plotter.line(g_explanation.as_str(), graph_g.clone());
-        // let csv_f = format!("{}.txt", filename);
-        // write_csv(graph_g, csv_f).expect("CSV save error");
-
-        bnd0.map(|bnd| {
-            let upperb = bnd.upper().as_();
-            let lowerb =  bnd.lower().as_();
-            let upper : [[f64; 2]; 2] = [[start, upperb], [end, upperb]];
-            let lower = [[start, lowerb], [end, lowerb]];
-            spike_scale *= bnd.upper();
+        let csv_f = format!("{}_functions.csv", filename);
+        write_csv(data, csv_f).expect("CSV save error");
 
-            plotter.line("upper bound", upper)
-                   .line("lower bound", lower)
-                   .ymarker(lowerb)
-                   .ymarker(upperb);
+        let spikes = μ.iter_spikes().map(|δ| {
+            let Loc([x]) = δ.x;
+            CSVSpike1 { x, alpha : δ.α }
         });
-
-        for &DeltaMeasure{ α, x : Loc([x]) } in μ.iter_spikes() {
-            let spike = [[x.as_(), 0.0], [x.as_(), (α/m * spike_scale).as_()]];
-            plotter.line("", spike);
-        }
-
-        let svg = format!("{}", poloto::disp(|a| poloto::simple_theme(a, plotter)));
-
-        std::fs::File::create(filename + ".svg").and_then(|mut file|
-            file.write_all(svg.as_bytes())
-        ).expect("SVG save error");
+        let csv_f = format!("{}_spikes.csv", filename);
+        write_csv(spikes, csv_f).expect("CSV save error");
     }
 
     fn plot_into_file<
@@ -155,150 +92,37 @@
         g : &T1,
         grid : LinGrid<F, 1>,
         filename : String,
-        explanation : String
     ) {
-        let graph_g = grid.into_iter().map(|x@Loc([x0]) : Loc<F, 1>| {
-            [x0.as_(), g.apply(&x).as_()]
+        let data = grid.into_iter().map(|p@Loc([x]) : Loc<F, 1>| CSVHelper1 {
+            x,
+            f : g.apply(&p),
         });
-
-        let plotter: poloto::Plotter<'_, float, float> = poloto::plot("f", "x", "f(x)")
-            .line(explanation.as_str(), graph_g.clone())
-            .move_into();
-
-        let svg = format!("{}", poloto::disp(|a| poloto::simple_theme(a, plotter)));
-
-        let svg_f = format!("{}.svg", filename);
-        std::fs::File::create(svg_f).and_then(|mut file|
-            file.write_all(svg.as_bytes())
-        ).expect("SVG save error");
-
         let csv_f = format!("{}.txt", filename);
-        write_csv(graph_g, csv_f).expect("CSV save error");
+        write_csv(data, csv_f).expect("CSV save error");
     }
 
 }
 
-/// Convert $[0, 1] ∈ F$ to $\\\{0, …, M\\\} ∈ F$ where $M=$`F::RANGE_MAX`.
-#[inline]
-fn scale_uint<F, U>(v : F) -> U
-where F : Float + CastFrom<U> + num_traits::cast::AsPrimitive<U>,
-      U : Unsigned {
-    (v*F::cast_from(U::RANGE_MAX)).as_()
-}
-
-/// Convert $[a, b] ∈ F$ to $\\\{0, …, M\\\} ∈ F$ where $M=$`F::RANGE_MAX`.
-#[replace_float_literals(F::cast_from(literal))]
-#[inline]
-fn scale_range_uint<F, U>(v : F, &Bounds(a, b) : &Bounds<F>) -> U
-where F : Float + CastFrom<U> + num_traits::cast::AsPrimitive<U>,
-      U : Unsigned {
-    debug_assert!(a < b);
-    scale_uint(((v - a)/(b - a)).max(0.0).min(1.0))
-}
-
-
-/// Sample a mapping on a grid.
-///
-/// Returns a vector of values as well as upper and lower bounds of the values.
-fn rawdata_and_range<F, T>(grid : &LinGrid<F, 2>, g :&T) -> (Vec<F>, Bounds<F>)
-where F : Float,
-      T : RealMapping<F, 2> {
-    let rawdata : Vec<F> = grid.into_iter().map(|x| g.apply(&x)).collect();
-    let range = rawdata.iter()
-                        .map(|&v| Bounds(v, v))
-                        .reduce(|b1, b2| b1.common(&b2))
-                        .unwrap();
-    (rawdata, range)
+#[derive(Serialize)]
+struct CSVHelper2<F : Float> {
+    x : F,
+    y : F,
+    f : F,
 }
 
-/*fn to_range<'a, F, U>(rawdata : &'a Vec<F>,  range : &'a Bounds<F>)
--> std::iter::Map<std::slice::Iter<'a, F>, impl FnMut(&'a F) -> U>
-where F : Float + CastFrom<U> + num_traits::cast::AsPrimitive<U>,
-      U : Unsigned {
-    rawdata.iter().map(move |&v| scale_range_uint(v, range))
-}*/
-
-/// Convert a scalar value to an RGB triplet.
-///
-/// Converts the value `v` supposed to be within the range `[a, b]` to an rgb value according
-/// to the given `ramp` of equally-spaced rgb interpolation points.
-#[replace_float_literals(F::cast_from(literal))]
-fn one_to_ramp<F, U>(
-    &Bounds(a, b) : &Bounds<F>,
-    ramp : &Vec<Loc<F, 3>>,
-    v : F,
-) -> Rgb<U>
-where F : Float + CastFrom<U> + num_traits::cast::AsPrimitive<U>,
-      U : Unsigned {
-
-    let n = ramp.len() - 1;
-    let m = F::cast_from(U::RANGE_MAX);
-    let ramprange = move |v : F| {let m : usize = v.as_(); m.min(n).max(0) };
-
-    let w = F::cast_from(n) * (v - a) / (b - a);  // convert [0, 1] to [0, n]
-    let (l, u) = (w.floor(), w.ceil());           // Find closest integers
-    let (rl, ru) = (ramprange(l), ramprange(u));
-    let (cl, cu) = (ramp[rl], ramp[ru]);          // Get corresponding colours
-    let λ = match rl==ru {                        // Interpolation factor
-        true => 0.0,
-        false => (u - w) / (u - l),
-    };
-    let Loc(rgb) = cl * λ + cu * (1.0 - λ);       // Interpolate
-
-    Rgb(rgb.map(|v| (v * m).round().min(m).max(0.0).as_()))
+#[derive(Serialize)]
+struct CSVHelper2_2<F : Float>{
+    x : F,
+    y : F,
+    g : Option<F>,
+    omega : Option<F>
 }
 
-/// Convert a an iterator over scalar values to an iterator over RGB triplets.
-///
-/// The conversion is that performed by [`one_to_ramp`].
-#[replace_float_literals(F::cast_from(literal))]
-fn to_ramp<'a, F, U, I>(
-    bounds : &'a Bounds<F>,
-    ramp : &'a Vec<Loc<F, 3>>,
-    iter : I,
-) -> std::iter::Map<I, impl FnMut(F) -> Rgb<U> + 'a>
-where F : Float + CastFrom<U> + num_traits::cast::AsPrimitive<U>,
-      U : Unsigned,
-      I : Iterator<Item = F> + 'a {
-    iter.map(move |v| one_to_ramp(bounds, ramp, v))
-}
-
-/// Convert a [`colorbrewer`] sepcification to a ramp of rgb triplets.
-fn get_ramp<F : Float>((palette, nb) : (CbPalette, u32)) -> Vec<Loc<F, 3>> {
-    let m = F::cast_from(u8::MAX);
-    colorbrewer::get_color_ramp(palette, nb)
-                 .expect("Invalid colorbrewer ramp")
-                 .into_iter()
-                 .map(|rgb::RGB{r, g, b}| {
-                    [r, g, b].map(|c| F::cast_from(c) / m).into()
-                 }).collect()
-}
-
-/// Perform hue shifting of an RGB value.
-///
-// The hue `ω` is in radians.
-#[replace_float_literals(F::cast_from(literal))]
-fn hueshift<F, U>(ω : F, Rgb([r_in, g_in, b_in]) : Rgb<U>) -> Rgb<U>
-where F : Float + CastFrom<U>,
-      U : Unsigned {
-    let m = F::cast_from(U::RANGE_MAX);
-    let r = F::cast_from(r_in) / m;
-    let g = F::cast_from(g_in) / m;
-    let b = F::cast_from(b_in) / m;
-    let u = ω.cos();
-    let w = ω.sin();
-
-    let nr = (0.299 + 0.701*u + 0.168*w) * r
-              + (0.587 - 0.587*u + 0.330*w) * g
-              + (0.114 - 0.114*u - 0.497*w) * b;
-    let ng = (0.299 - 0.299*u - 0.328*w) * r
-              + (0.587 + 0.413*u + 0.035*w) * g
-              + (0.114 - 0.114*u + 0.292*w) *b;
-    let nb = (0.299 - 0.3*u + 1.25*w) * r
-              + (0.587 - 0.588*u - 1.05*w) * g
-              + (0.114 + 0.886*u - 0.203*w) * b;
-
-    Rgb([nr, ng, nb].map(scale_uint))
+#[derive(Serialize)]
+struct CSVSpike2<F : Float> {
+    x : F,
+    y : F,
+    alpha : F,
 }
 
 
@@ -309,55 +133,27 @@
         T1 : RealMapping<F, 2>,
         T2 : RealMapping<F, 2>
     > (
-        _g_explanation : String,
-        g : &T1,
-        _ω_explanation : String,
+        g0 : Option<&T1>,
         ω0 : Option<&T2>,
         grid : LinGrid<F, 2>,
-        _bnd0 : Option<Bounds<F>>,
         μ : &DiscreteMeasure<Loc<F, 2>, F>,
         filename : String,
     ) {
-        let [w, h] = grid.count;
-        let (rawdata_g, range_g) = rawdata_and_range(&grid, g);
-        let (rawdata_ω, range) = match ω0 {
-            Some(ω) => {
-                let (rawdata_ω, range_ω) = rawdata_and_range(&grid, ω);
-                (rawdata_ω, range_g.common(&range_ω))
-            },
-            None => {
-                let mut zeros = Vec::new();
-                zeros.resize(rawdata_g.len(), 0.0);
-                (zeros, range_g)
-            }
-        };
-        let ramp = get_ramp(RAMP);
-        let base_im_iter = to_ramp::<F, u16, _>(&range_g, &ramp, rawdata_g.iter().cloned());
-        let im_iter = izip!(base_im_iter, rawdata_g.iter(), rawdata_ω.iter())
-            .map(|(rgb, &v, &w)| {
-                hueshift(2.0 * F::PI * (v - w).abs() / range.upper(), rgb)
-            });
-        let mut img = ImageBuffer::new(w as u32, h as u32);
-        img.pixels_mut()
-           .zip(im_iter)
-           .for_each(|(p, v)| *p = v);
+        let data = grid.into_iter().map(|p@Loc([x, y]) : Loc<F, 2>| CSVHelper2_2 {
+            x,
+            y,
+            g : g0.map(|g| g.apply(&p)),
+            omega : ω0.map(|ω| ω.apply(&p))
+        });
+        let csv_f = format!("{}_functions.csv", filename);
+        write_csv(data, csv_f).expect("CSV save error");
 
-        // Add spikes
-        let m = μ.iter_masses().fold(F::ZERO, |m, α| m.max(α));
-        let μ_range = Bounds(F::ZERO, m);
-        for &DeltaMeasure{ ref x, α } in μ.iter_spikes() {
-            let [a, b] = map4(x, &grid.start, &grid.end, &grid.count, |&ξ, &a, &b, &n| {
-                ((ξ-a)/(b-a)*F::cast_from(n)).as_()
-            });
-            if a < w.as_() && b < h.as_() {
-                let sc : u16 = scale_range_uint(α, &μ_range);
-                // TODO: use max of points that map to this pixel.
-                img[(a, b)] = Rgb([u16::MAX, u16::MAX, sc/2]);
-            }
-        }
-
-        img.save_with_format(filename + ".png", ImageFormat::Png)
-           .expect("Image save error");
+        let spikes = μ.iter_spikes().map(|δ| {
+            let Loc([x, y]) = δ.x;
+            CSVSpike2 { x, y, alpha : δ.α }
+        });
+        let csv_f = format!("{}_spikes.csv", filename);
+        write_csv(spikes, csv_f).expect("CSV save error");
     }
 
     fn plot_into_file<
@@ -367,22 +163,14 @@
         g : &T1,
         grid : LinGrid<F, 2>,
         filename : String,
-        _explanation : String
     ) {
-        let [w, h] = grid.count;
-        let (rawdata, range) = rawdata_and_range(&grid, g);
-        let ramp = get_ramp(RAMP);
-        let im_iter = to_ramp::<F, u16, _>(&range, &ramp, rawdata.iter().cloned());
-        let mut img = ImageBuffer::new(w as u32, h as u32);
-        img.pixels_mut()
-           .zip(im_iter)
-           .for_each(|(p, v)| *p = v);
-        img.save_with_format(filename.clone() + ".png", ImageFormat::Png)
-           .expect("Image save error");
-        
-        let csv_iter = grid.into_iter().zip(rawdata.iter()).map(|(Loc(x), &v)| (x, v));
-        let csv_f = filename + ".txt";
-        write_csv(csv_iter, csv_f).expect("CSV save error");
+        let data = grid.into_iter().map(|p@Loc([x, y]) : Loc<F, 2>| CSVHelper2 {
+            x,
+            y,
+            f : g.apply(&p),
+        });
+        let csv_f = format!("{}.txt", filename);
+        write_csv(data, csv_f).expect("CSV save error");
     }
 
 }
@@ -410,12 +198,10 @@
     /// This calls [`PlotLookup::plot_into_file_spikes`] with a sequentially numbered file name.
     pub fn plot_spikes<T1, T2>(
         &mut self,
-        g_explanation : String,
-        g : &T1,
-        ω_explanation : String,
+        iter : usize,
+        g : Option<&T1>,
         ω : Option<&T2>,
-        tol : Option<Bounds<F>>,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        μ : &RNDM<F, N>,
     ) where T1 : RealMapping<F, N>,
             T2 : RealMapping<F, N>
     {
@@ -424,12 +210,11 @@
         }
         if self.plot_count < self.max_plots {
             PlotLookup::plot_into_file_spikes(
-                g_explanation, g,
-                ω_explanation, ω,
+                g,
+                ω,
                 self.grid,
-                tol,
                 μ,
-                format!("{}out{:03}", self.prefix, self.plot_count)
+                format!("{}out{:03}", self.prefix, iter)
             );
             self.plot_count += 1;
         }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/preadjoint_helper.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -0,0 +1,55 @@
+/*!
+Preadjoint construction helper
+*/
+
+use std::marker::PhantomData;
+use alg_tools::types::*;
+pub use alg_tools::linops::*;
+use alg_tools::norms::{Norm, HasDualExponent};
+
+/// Helper structure for constructing preadjoints of `S` where `S : Linear<X>`.
+/// [`Linear`] needs to be implemented for each instance, but [`Adjointable`]
+/// and [`BoundedLinear`] have blanket implementations.
+#[derive(Clone,Debug)]
+pub struct PreadjointHelper<'a, S : 'a, X> {
+    pub forward_op : &'a S,
+    _domain : PhantomData<X>
+}
+
+impl<'a, S : 'a, X> PreadjointHelper<'a, S, X> {
+    pub fn new(forward_op : &'a S) -> Self {
+        PreadjointHelper { forward_op, _domain: PhantomData }
+    }
+}
+
+impl<'a, X, Ypre, S> Adjointable<Ypre, X>
+for PreadjointHelper<'a, S, X>
+where
+    X : Space,
+    Ypre : Space,
+    Self : Linear<Ypre>,
+    S : Clone + Linear<X>
+{
+    type AdjointCodomain = S::Codomain;
+    type Adjoint<'b> = S where Self : 'b;
+
+    fn adjoint(&self) -> Self::Adjoint<'_> {
+        self.forward_op.clone()
+    }
+}
+
+impl<'a, F, X, Ypre, ExpXpre, ExpYpre, S> BoundedLinear<Ypre, ExpYpre, ExpXpre, F>
+for PreadjointHelper<'a, S, X>
+where
+    ExpXpre : HasDualExponent,
+    ExpYpre : HasDualExponent,
+    F : Float,
+    X : Space + Norm<F, ExpXpre::DualExp>,
+    Ypre : Space + Norm<F, ExpYpre>,
+    Self : Linear<Ypre>,
+    S : 'a + Clone + BoundedLinear<X, ExpXpre::DualExp, ExpYpre::DualExp, F>
+{
+    fn opnorm_bound(&self, expy : ExpYpre, expx : ExpXpre) -> F {
+        self.forward_op.opnorm_bound(expx.dual_exponent(), expy.dual_exponent())
+    }
+}
--- a/src/radon_fb.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/radon_fb.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -11,10 +11,11 @@
 
 use alg_tools::iterate::{
     AlgIteratorFactory,
-    AlgIteratorState,
+    AlgIteratorIteration,
+    AlgIterator
 };
 use alg_tools::euclidean::Euclidean;
-use alg_tools::linops::Apply;
+use alg_tools::linops::Mapping;
 use alg_tools::sets::Cube;
 use alg_tools::loc::Loc;
 use alg_tools::bisection_tree::{
@@ -29,11 +30,14 @@
 };
 use alg_tools::mapping::RealMapping;
 use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::norms::L2;
 
 use crate::types::*;
 use crate::measures::{
+    RNDM,
     DiscreteMeasure,
     DeltaMeasure,
+    Radon,
 };
 use crate::measures::merging::{
     SpikeMergingMethod,
@@ -54,10 +58,11 @@
 
 use crate::fb::{
     FBGenericConfig,
-    postprocess
+    postprocess,
+    prune_with_stats
 };
 
-/// Settings for [`pointsource_fb_reg`].
+/// Settings for [`pointsource_radon_fb_reg`].
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
 #[serde(default)]
 pub struct RadonFBConfig<F : Float> {
@@ -79,17 +84,17 @@
 
 #[replace_float_literals(F::cast_from(literal))]
 pub(crate) fn insert_and_reweigh<
-    'a, F, GA, BTA, S, Reg, State, const N : usize
+    'a, F, GA, BTA, S, Reg, I, const N : usize
 >(
-    μ : &mut DiscreteMeasure<Loc<F, N>, F>,
-    minus_τv : &mut BTFN<F, GA, BTA, N>,
-    μ_base : &mut DiscreteMeasure<Loc<F, N>, F>,
-    _ν_delta: Option<&DiscreteMeasure<Loc<F, N>, F>>,
+    μ : &mut RNDM<F, N>,
+    τv : &mut BTFN<F, GA, BTA, N>,
+    μ_base : &mut RNDM<F, N>,
+    //_ν_delta: Option<&RNDM<F, N>>,
     τ : F,
     ε : F,
     config : &FBGenericConfig<F>,
     reg : &Reg,
-    _state : &State,
+    _state : &AlgIteratorIteration<I>,
     stats : &mut IterInfo<F, N>,
 )
 where F : Float + ToNalgebraRealField,
@@ -97,18 +102,20 @@
       BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
       S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
       BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
+      RNDM<F, N> : SpikeMerging<F>,
       Reg : RegTerm<F, N>,
-      State : AlgIteratorState {
+      I : AlgIterator {
 
     'i_and_w: for i in 0..=1 {
         // Optimise weights
         if μ.len() > 0 {
             // Form finite-dimensional subproblem. The subproblem references to the original μ^k
             // from the beginning of the iteration are all contained in the immutable c and g.
+            // TODO: observe negation of -τv after switch from minus_τv: finite-dimensional
+            // problems have not yet been updated to sign change.
             let g̃ = DVector::from_iterator(μ.len(),
                                            μ.iter_locations()
-                                            .map(|ζ| F::to_nalgebra_mixed(minus_τv.apply(ζ))));
+                                            .map(|ζ| - F::to_nalgebra_mixed(τv.apply(ζ))));
             let mut x = μ.masses_dvector();
             let y = μ_base.masses_dvector();
 
@@ -122,7 +129,7 @@
         if i>0 {
             // Simple debugging test to see if more inserts would be needed. Doesn't seem so.
             //let n = μ.dist_matching(μ_base);
-            //println!("{:?}", reg.find_tolerance_violation_slack(minus_τv, τ, ε, false, config, n));
+            //println!("{:?}", reg.find_tolerance_violation_slack(τv, τ, ε, false, config, n));
             break 'i_and_w
         }
         
@@ -132,69 +139,23 @@
         // Find a spike to insert, if needed.
         // This only check the overall tolerances, not tolerances on support of μ-μ_base or μ,
         // which are supposed to have been guaranteed by the finite-dimensional weight optimisation.
-        match reg.find_tolerance_violation_slack(minus_τv, τ, ε, false, config, n) {
+        match reg.find_tolerance_violation_slack(τv, τ, ε, false, config, n) {
             None => { break 'i_and_w },
             Some((ξ, _v_ξ, _in_bounds)) => {
                 // Weight is found out by running the finite-dimensional optimisation algorithm
                 // above
                 *μ += DeltaMeasure { x : ξ, α : 0.0 };
                 *μ_base += DeltaMeasure { x : ξ, α : 0.0 };
+                stats.inserted += 1;
             }
         };
     }
 }
 
-#[replace_float_literals(F::cast_from(literal))]
-pub(crate) fn prune_and_maybe_simple_merge<
-    'a, F, GA, BTA, S, Reg, State, const N : usize
->(
-    μ : &mut DiscreteMeasure<Loc<F, N>, F>,
-    minus_τv : &mut BTFN<F, GA, BTA, N>,
-    μ_base : &DiscreteMeasure<Loc<F, N>, F>,
-    τ : F,
-    ε : F,
-    config : &FBGenericConfig<F>,
-    reg : &Reg,
-    state : &State,
-    stats : &mut IterInfo<F, N>,
-)
-where F : Float + ToNalgebraRealField,
-      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
-      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
-      Reg : RegTerm<F, N>,
-      State : AlgIteratorState {
-
-    assert!(μ_base.len() <= μ.len());
-
-    if state.iteration() % config.merge_every == 0 {
-        stats.merged += μ.merge_spikes(config.merging, |μ_candidate| {
-            // Important: μ_candidate's new points are afterwards,
-            // and do not conflict with μ_base.
-            // TODO: could simplify to requiring μ_base instead of μ_radon.
-            // but may complicate with sliding base's exgtra points that need to be
-            // after μ_candidate's extra points.
-            // TODO: doesn't seem to work, maybe need to merge μ_base as well?
-            // Although that doesn't seem to make sense.
-            let μ_radon = μ_candidate.sub_matching(μ_base);
-            reg.verify_merge_candidate_radonsq(minus_τv, μ_candidate, τ, ε, &config, &μ_radon)
-            //let n = μ_candidate.dist_matching(μ_base);
-            //reg.find_tolerance_violation_slack(minus_τv, τ, ε, false, config, n).is_none()
-        });
-    }
-
-    let n_before_prune = μ.len();
-    μ.prune();
-    debug_assert!(μ.len() <= n_before_prune);
-    stats.pruned += n_before_prune - μ.len();
-}
-
 
 /// Iteratively solve the pointsource localisation problem using simplified forward-backward splitting.
 ///
-/// The settings in `config` have their [respective documentation](FBConfig). `opA` is the
+/// The settings in `config` have their [respective documentation][RadonFBConfig]. `opA` is the
 /// forward operator $A$, $b$ the observable, and $\lambda$ the regularisation weight.
 /// Finally, the `iterator` is an outer loop verbosity and iteration count control
 /// as documented in [`alg_tools::iterate`].
@@ -219,20 +180,17 @@
     fbconfig : &RadonFBConfig<F>,
     iterator : I,
     mut _plotter : SeqPlotter<F, N>,
-) -> DiscreteMeasure<Loc<F, N>, F>
+) -> RNDM<F, N>
 where F : Float + ToNalgebraRealField,
       I : AlgIteratorFactory<IterInfo<F, N>>,
       for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
-                                  //+ std::ops::Mul<F, Output=A::Observable>,  <-- FIXME: compiler overflow
-      A::Observable : std::ops::MulAssign<F>,
       GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
+      A : ForwardModel<RNDM<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
       BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
       S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
       BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
       Cube<F, N>: P2Minimise<Loc<F, N>, F>,
-      PlotLookup : Plotting<N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
+      RNDM<F, N> : SpikeMerging<F>,
       Reg : RegTerm<F, N> {
 
     // Set up parameters
@@ -240,7 +198,7 @@
     // We need L such that the descent inequality F(ν) - F(μ) - ⟨F'(μ),ν-μ⟩ ≤ (L/2)‖ν-μ‖²_ℳ ∀ ν,μ
     // holds. Since the left hand side expands as (1/2)‖A(ν-μ)‖₂², this is to say, we need L such
     // that ‖Aμ‖₂² ≤ L ‖μ‖²_ℳ ∀ μ. Thus `opnorm_bound` gives the square root of L.
-    let τ = fbconfig.τ0/opA.opnorm_bound().powi(2);
+    let τ = fbconfig.τ0/opA.opnorm_bound(Radon, L2).powi(2);
     // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled
     // by τ compared to the conditional gradient approach.
     let tolerance = config.tolerance * τ * reg.tolerance_scaling();
@@ -249,71 +207,74 @@
     // Initialise iterates
     let mut μ = DiscreteMeasure::new();
     let mut residual = -b;
+    
+    // Statistics
+    let full_stats = |residual : &A::Observable,
+                      μ : &RNDM<F, N>,
+                      ε, stats| IterInfo {
+        value : residual.norm2_squared_div2() + reg.apply(μ),
+        n_spikes : μ.len(),
+        ε,
+        // postprocessing: config.postprocessing.then(|| μ.clone()),
+        .. stats
+    };
     let mut stats = IterInfo::new();
 
     // Run the algorithm
-    iterator.iterate(|state| {
+    for state in iterator.iter_init(|| full_stats(&residual, &μ, ε, stats.clone())) {
         // Calculate smooth part of surrogate model.
-        // Using `std::mem::replace` here is not ideal, and expects that `empty_observable`
-        // has no significant overhead. For some reosn Rust doesn't allow us simply moving
-        // the residual and replacing it below before the end of this closure.
-        residual *= -τ;
-        let r = std::mem::replace(&mut residual, opA.empty_observable());
-        let mut minus_τv = opA.preadjoint().apply(r);
+        let mut τv = opA.preadjoint().apply(residual * τ);
 
         // Save current base point
         let mut μ_base = μ.clone();
             
         // Insert and reweigh
         insert_and_reweigh(
-            &mut μ, &mut minus_τv, &mut μ_base, None,
+            &mut μ, &mut τv, &mut μ_base, //None,
             τ, ε,
-            config, &reg, state, &mut stats
+            config, &reg, &state, &mut stats
         );
 
         // Prune and possibly merge spikes
-        prune_and_maybe_simple_merge(
-            &mut μ, &mut minus_τv, &μ_base,
-            τ, ε,
-            config, &reg, state, &mut stats
-        );
+        assert!(μ_base.len() <= μ.len());
+        if config.merge_now(&state) {
+            stats.merged += μ.merge_spikes(config.merging, |μ_candidate| {
+                // Important: μ_candidate's new points are afterwards,
+                // and do not conflict with μ_base.
+                // TODO: could simplify to requiring μ_base instead of μ_radon.
+                // but may complicate with sliding base's extra points that need to be
+                // after μ_candidate's extra points.
+                // TODO: doesn't seem to work, maybe need to merge μ_base as well?
+                // Although that doesn't seem to make sense.
+                let μ_radon = μ_candidate.sub_matching(&μ_base);
+                reg.verify_merge_candidate_radonsq(&mut τv, μ_candidate, τ, ε, &config, &μ_radon)
+                //let n = μ_candidate.dist_matching(μ_base);
+                //reg.find_tolerance_violation_slack(τv, τ, ε, false, config, n).is_none()
+            });
+        }
+        stats.pruned += prune_with_stats(&mut μ);
 
         // Update residual
         residual = calculate_residual(&μ, opA, b);
 
-        // Update main tolerance for next iteration
-        let ε_prev = ε;
-        ε = tolerance.update(ε, state.iteration());
+        let iter = state.iteration();
         stats.this_iters += 1;
 
-        // Give function value if needed
+        // Give statistics if needed
         state.if_verbose(|| {
-            // Plot if so requested
-            // plotter.plot_spikes(
-            //     format!("iter {} end;", state.iteration()), &d,
-            //     "start".to_string(), Some(&minus_τv),
-            //     reg.target_bounds(τ, ε_prev), &μ,
-            // );
-            // Calculate mean inner iterations and reset relevant counters.
-            // Return the statistics
-            let res = IterInfo {
-                value : residual.norm2_squared_div2() + reg.apply(&μ),
-                n_spikes : μ.len(),
-                ε : ε_prev,
-                postprocessing: config.postprocessing.then(|| μ.clone()),
-                .. stats
-            };
-            stats = IterInfo::new();
-            res
-        })
-    });
+            full_stats(&residual, &μ, ε, std::mem::replace(&mut stats, IterInfo::new()))
+        });
+
+        // Update main tolerance for next iteration
+        ε = tolerance.update(ε, iter);
+    }
 
     postprocess(μ, config, L2Squared, opA, b)
 }
 
 /// Iteratively solve the pointsource localisation problem using simplified inertial forward-backward splitting.
 ///
-/// The settings in `config` have their [respective documentation](FBConfig). `opA` is the
+/// The settings in `config` have their [respective documentation][RadonFBConfig]. `opA` is the
 /// forward operator $A$, $b$ the observable, and $\lambda$ the regularisation weight.
 /// Finally, the `iterator` is an outer loop verbosity and iteration count control
 /// as documented in [`alg_tools::iterate`].
@@ -337,21 +298,19 @@
     reg : Reg,
     fbconfig : &RadonFBConfig<F>,
     iterator : I,
-    mut _plotter : SeqPlotter<F, N>,
-) -> DiscreteMeasure<Loc<F, N>, F>
+    mut plotter : SeqPlotter<F, N>,
+) -> RNDM<F, N>
 where F : Float + ToNalgebraRealField,
       I : AlgIteratorFactory<IterInfo<F, N>>,
       for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
-                                  //+ std::ops::Mul<F, Output=A::Observable>,  <-- FIXME: compiler overflow
-      A::Observable : std::ops::MulAssign<F>,
       GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
+      A : ForwardModel<RNDM<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
       BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
       S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
       BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
       Cube<F, N>: P2Minimise<Loc<F, N>, F>,
       PlotLookup : Plotting<N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
+      RNDM<F, N> : SpikeMerging<F>,
       Reg : RegTerm<F, N> {
 
     // Set up parameters
@@ -359,7 +318,7 @@
     // We need L such that the descent inequality F(ν) - F(μ) - ⟨F'(μ),ν-μ⟩ ≤ (L/2)‖ν-μ‖²_ℳ ∀ ν,μ
     // holds. Since the left hand side expands as (1/2)‖A(ν-μ)‖₂², this is to say, we need L such
     // that ‖Aμ‖₂² ≤ L ‖μ‖²_ℳ ∀ μ. Thus `opnorm_bound` gives the square root of L.
-    let τ = fbconfig.τ0/opA.opnorm_bound().powi(2);
+    let τ = fbconfig.τ0/opA.opnorm_bound(Radon, L2).powi(2);
     let mut λ = 1.0;
     // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled
     // by τ compared to the conditional gradient approach.
@@ -370,31 +329,35 @@
     let mut μ = DiscreteMeasure::new();
     let mut μ_prev = DiscreteMeasure::new();
     let mut residual = -b;
+    let mut warned_merging = false;
+
+    // Statistics
+    let full_stats = |ν : &RNDM<F, N>, ε, stats| IterInfo {
+        value : L2Squared.calculate_fit_op(ν, opA, b) + reg.apply(ν),
+        n_spikes : ν.len(),
+        ε,
+        // postprocessing: config.postprocessing.then(|| ν.clone()),
+        .. stats
+    };
     let mut stats = IterInfo::new();
-    let mut warned_merging = false;
 
     // Run the algorithm
-    iterator.iterate(|state| {
+    for state in iterator.iter_init(|| full_stats(&μ, ε, stats.clone())) {
         // Calculate smooth part of surrogate model.
-        // Using `std::mem::replace` here is not ideal, and expects that `empty_observable`
-        // has no significant overhead. For some reosn Rust doesn't allow us simply moving
-        // the residual and replacing it below before the end of this closure.
-        residual *= -τ;
-        let r = std::mem::replace(&mut residual, opA.empty_observable());
-        let mut minus_τv = opA.preadjoint().apply(r);
+        let mut τv = opA.preadjoint().apply(residual * τ);
 
         // Save current base point
         let mut μ_base = μ.clone();
             
         // Insert new spikes and reweigh
         insert_and_reweigh(
-            &mut μ, &mut minus_τv, &mut μ_base, None,
+            &mut μ, &mut τv, &mut μ_base, //None,
             τ, ε,
-            config, &reg, state, &mut stats
+            config, &reg, &state, &mut stats
         );
 
         // (Do not) merge spikes.
-        if state.iteration() % config.merge_every == 0 {
+        if config.merge_now(&state) {
             match config.merging {
                 SpikeMergingMethod::None => { },
                 _ => if !warned_merging {
@@ -423,33 +386,19 @@
 
         // Update residual
         residual = calculate_residual(&μ, opA, b);
-
-        // Update main tolerance for next iteration
-        let ε_prev = ε;
-        ε = tolerance.update(ε, state.iteration());
+
+        let iter = state.iteration();
         stats.this_iters += 1;
 
-        // Give function value if needed
+        // Give statistics if needed
         state.if_verbose(|| {
-            // Plot if so requested
-            // plotter.plot_spikes(
-            //     format!("iter {} end;", state.iteration()), &d,
-            //     "start".to_string(), Some(&minus_τv),
-            //     reg.target_bounds(τ, ε_prev), &μ_prev,
-            // );
-            // Calculate mean inner iterations and reset relevant counters.
-            // Return the statistics
-            let res = IterInfo {
-                value : L2Squared.calculate_fit_op(&μ_prev, opA, b) + reg.apply(&μ_prev),
-                n_spikes : μ_prev.len(),
-                ε : ε_prev,
-                postprocessing: config.postprocessing.then(|| μ_prev.clone()),
-                .. stats
-            };
-            stats = IterInfo::new();
-            res
-        })
-    });
+            plotter.plot_spikes(iter, Option::<&S>::None, Some(&τv), &μ_prev);
+            full_stats(&μ_prev, ε, std::mem::replace(&mut stats, IterInfo::new()))
+        });
+
+        // Update main tolerance for next iteration
+        ε = tolerance.update(ε, iter);
+    }
 
     postprocess(μ_prev, config, L2Squared, opA, b)
 }
--- a/src/regularisation.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/regularisation.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -5,13 +5,14 @@
 use numeric_literals::replace_float_literals;
 use serde::{Serialize, Deserialize};
 use alg_tools::norms::Norm;
-use alg_tools::linops::Apply;
+use alg_tools::linops::Mapping;
+use alg_tools::instance::Instance;
 use alg_tools::loc::Loc;
 use crate::types::*;
 use crate::measures::{
-    DiscreteMeasure,
+    RNDM,
     DeltaMeasure,
-    Radon
+    Radon,
 };
 use crate::fb::FBGenericConfig;
 #[allow(unused_imports)] // Used by documentation.
@@ -21,7 +22,6 @@
 
 use nalgebra::{DVector, DMatrix};
 use alg_tools::nalgebra_support::ToNalgebraRealField;
-use alg_tools::mapping::Mapping;
 use alg_tools::bisection_tree::{
     BTFN,
     Bounds,
@@ -56,12 +56,13 @@
     }
 }
 
-impl<'a, F : Float, const N : usize> Apply<&'a DiscreteMeasure<Loc<F, N>, F>>
+impl<'a, F : Float, const N : usize> Mapping<RNDM<F, N>>
 for NonnegRadonRegTerm<F> {
-    type Output = F;
+    type Codomain = F;
     
-    fn apply(&self, μ : &'a DiscreteMeasure<Loc<F, N>, F>) -> F {
-        self.α() * μ.norm(Radon)
+    fn apply<I>(&self, μ : I) -> F
+    where I : Instance<RNDM<F, N>> {
+        self.α() * μ.eval(|x| x.norm(Radon))
     }
 }
 
@@ -81,12 +82,13 @@
     }
 }
 
-impl<'a, F : Float, const N : usize> Apply<&'a DiscreteMeasure<Loc<F, N>, F>>
+impl<'a, F : Float, const N : usize> Mapping<RNDM<F, N>>
 for RadonRegTerm<F> {
-    type Output = F;
+    type Codomain = F;
     
-    fn apply(&self, μ : &'a DiscreteMeasure<Loc<F, N>, F>) -> F {
-        self.α() * μ.norm(Radon)
+    fn apply<I>(&self, μ : I) -> F
+    where I : Instance<RNDM<F, N>> {
+        self.α() * μ.eval(|x| x.norm(Radon))
     }
 }
 
@@ -99,11 +101,12 @@
     NonnegRadon(F),
 }
 
-impl<'a, F : Float, const N : usize> Apply<&'a DiscreteMeasure<Loc<F, N>, F>>
+impl<'a, F : Float, const N : usize> Mapping<RNDM<F, N>>
 for Regularisation<F> {
-    type Output = F;
-    
-    fn apply(&self, μ : &'a DiscreteMeasure<Loc<F, N>, F>) -> F {
+    type Codomain = F;
+
+    fn apply<I>(&self, μ : I) -> F
+    where I : Instance<RNDM<F, N>> {
         match *self {
             Self::Radon(α) => RadonRegTerm(α).apply(μ),
             Self::NonnegRadon(α) => NonnegRadonRegTerm(α).apply(μ),
@@ -111,9 +114,9 @@
     }
 }
 
-/// Abstraction of regularisation terms for [`generic_pointsource_fb_reg`].
+/// Abstraction of regularisation terms.
 pub trait RegTerm<F : Float + ToNalgebraRealField, const N : usize>
-: for<'a> Apply<&'a DiscreteMeasure<Loc<F, N>, F>, Output = F> {
+: Mapping<RNDM<F, N>, Codomain = F> {
     /// Approximately solve the problem
     /// <div>$$
     ///     \min_{x ∈ ℝ^n} \frac{1}{2} x^⊤Ax - g^⊤ x + τ G(x)
@@ -171,8 +174,7 @@
     ) -> Option<(Loc<F, N>, F, bool)>
     where BT : BTSearch<F, N, Agg=Bounds<F>>,
           G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N> {
+          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N> {
         self.find_tolerance_violation_slack(d, τ, ε, skip_by_rough_check, config, F::ZERO)
     }
 
@@ -199,8 +201,7 @@
     ) -> Option<(Loc<F, N>, F, bool)>
     where BT : BTSearch<F, N, Agg=Bounds<F>>,
           G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N>;
+          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N>;
 
 
     /// Verify that `d` is in bounds `ε` for a merge candidate `μ`
@@ -209,36 +210,34 @@
     fn verify_merge_candidate<G, BT>(
         &self,
         d : &mut BTFN<F, G, BT, N>,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        μ : &RNDM<F, N>,
         τ : F,
         ε : F,
         config : &FBGenericConfig<F>,
     ) -> bool
     where BT : BTSearch<F, N, Agg=Bounds<F>>,
           G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N>;
+          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N>;
 
     /// Verify that `d` is in bounds `ε` for a merge candidate `μ`
     ///
     /// This version is s used for Radon-norm squared proximal term in [`crate::radon_fb`].
-    /// The [`DiscreteMeasure`]s `μ` and `radon_μ` are supposed to have same coordinates at
-    /// same agreeing indices.
+    /// The [measures][crate::measures::DiscreteMeasure] `μ` and `radon_μ` are supposed to have
+    /// same coordinates at same agreeing indices.
     ///
     /// `ε` is the current main tolerance and `τ` a scaling factor for the regulariser.
     fn verify_merge_candidate_radonsq<G, BT>(
         &self,
         d : &mut BTFN<F, G, BT, N>,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        μ : &RNDM<F, N>,
         τ : F,
         ε : F,
         config : &FBGenericConfig<F>,
-        radon_μ :&DiscreteMeasure<Loc<F, N>, F>,
+        radon_μ :&RNDM<F, N>,
     ) -> bool
     where BT : BTSearch<F, N, Agg=Bounds<F>>,
           G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N>;
+          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N>;
 
 
     /// TODO: document this
@@ -258,7 +257,7 @@
     fn goodness<G, BT>(
         &self,
         d : &mut BTFN<F, G, BT, N>,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        μ : &RNDM<F, N>,
         y : &Loc<F, N>,
         z : &Loc<F, N>,
         τ : F,
@@ -267,8 +266,7 @@
     ) -> F
     where BT : BTSearch<F, N, Agg=Bounds<F>>,
           G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N>;
+          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N>;
 
     /// Convert bound on the regulariser to a bond on the Radon norm
     fn radon_norm_bound(&self, b : F) -> F;
@@ -323,69 +321,66 @@
     ) -> Option<(Loc<F, N>, F, bool)>
     where BT : BTSearch<F, N, Agg=Bounds<F>>,
           G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N> {
+          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N> {
         let τα = τ * self.α();
-        let keep_below = τα + slack + ε;
-        let maximise_above = τα + slack + ε * config.insertion_cutoff_factor;
+        let keep_above = -τα - slack - ε;
+        let minimise_below = -τα - slack - ε * config.insertion_cutoff_factor;
         let refinement_tolerance = ε * config.refinement.tolerance_mult;
 
         // If preliminary check indicates that we are in bounds, and if it otherwise matches
         // the insertion strategy, skip insertion.
-        if skip_by_rough_check && d.bounds().upper() <= keep_below {
+        if skip_by_rough_check && d.bounds().lower() >= keep_above {
             None
         } else {
-            // If the rough check didn't indicate no insertion needed, find maximising point.
-            d.maximise_above(maximise_above, refinement_tolerance, config.refinement.max_steps)
-             .map(|(ξ, v_ξ)| (ξ, v_ξ, v_ξ <= keep_below))
+            // If the rough check didn't indicate no insertion needed, find minimising point.
+            d.minimise_below(minimise_below, refinement_tolerance, config.refinement.max_steps)
+             .map(|(ξ, v_ξ)| (ξ, v_ξ, v_ξ >= keep_above))
         }
     }
 
     fn verify_merge_candidate<G, BT>(
         &self,
         d : &mut BTFN<F, G, BT, N>,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        μ : &RNDM<F, N>,
         τ : F,
         ε : F,
         config : &FBGenericConfig<F>,
     ) -> bool
     where BT : BTSearch<F, N, Agg=Bounds<F>>,
           G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N> {
+          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N> {
         let τα = τ * self.α();
         let refinement_tolerance = ε * config.refinement.tolerance_mult;
         let merge_tolerance = config.merge_tolerance_mult * ε;
-        let keep_below = τα + merge_tolerance;
-        let keep_supp_above = τα - merge_tolerance;
+        let keep_above = -τα - merge_tolerance;
+        let keep_supp_below = -τα + merge_tolerance;
         let bnd = d.bounds();
 
         return (
-            bnd.lower() >= keep_supp_above
+            bnd.upper() <= keep_supp_below
             ||
             μ.iter_spikes().all(|&DeltaMeasure{ α, ref x }| {
-                (α == 0.0) || d.apply(x) >= keep_supp_above
+                (α == 0.0) || d.apply(x) <= keep_supp_below
             })
          ) && (
-            bnd.upper() <= keep_below
+            bnd.lower() >= keep_above
             ||
-            d.has_upper_bound(keep_below, refinement_tolerance, config.refinement.max_steps)
+            d.has_lower_bound(keep_above, refinement_tolerance, config.refinement.max_steps)
         )
     }
 
     fn verify_merge_candidate_radonsq<G, BT>(
         &self,
         d : &mut BTFN<F, G, BT, N>,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        μ : &RNDM<F, N>,
         τ : F,
         ε : F,
         config : &FBGenericConfig<F>,
-        radon_μ :&DiscreteMeasure<Loc<F, N>, F>,
+        radon_μ :&RNDM<F, N>,
     ) -> bool
     where BT : BTSearch<F, N, Agg=Bounds<F>>,
           G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N> {
+          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N> {
         let τα = τ * self.α();
         let refinement_tolerance = ε * config.refinement.tolerance_mult;
         let merge_tolerance = config.merge_tolerance_mult * ε;
@@ -395,7 +390,8 @@
         return {
             μ.both_matching(radon_μ)
              .all(|(α, rα, x)| {
-                let v = d.apply(x);
+                let v = -d.apply(x); // TODO: observe ad hoc negation here, after minus_τv 
+                                     // switch to τv.
                 let (l1, u1) = match α.partial_cmp(&0.0).unwrap_or(Equal) {
                     Greater => (τα, τα),
                     _ => (F::NEG_INFINITY, τα),
@@ -410,10 +406,10 @@
                 (l1 + l2 - merge_tolerance <= v) && (v <= u1 + u2 + merge_tolerance)
             })
          } && {
-            let keep_below = τα + slack + merge_tolerance;
-            bnd.upper() <= keep_below
+            let keep_above = -τα - slack - merge_tolerance;
+            bnd.lower() >= keep_above
             ||
-            d.has_upper_bound(keep_below, refinement_tolerance, config.refinement.max_steps)
+            d.has_lower_bound(keep_above, refinement_tolerance, config.refinement.max_steps)
          }
     }
 
@@ -435,7 +431,7 @@
     fn goodness<G, BT>(
         &self,
         d : &mut BTFN<F, G, BT, N>,
-        _μ : &DiscreteMeasure<Loc<F, N>, F>,
+        _μ : &RNDM<F, N>,
         y : &Loc<F, N>,
         z : &Loc<F, N>,
         τ : F,
@@ -444,8 +440,7 @@
     ) -> F
     where BT : BTSearch<F, N, Agg=Bounds<F>>,
           G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N> {
+          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N> {
         let w = |x| 1.0.min((ε + d.apply(x))/(τ * self.α()));
         w(z) - w(y)
     }
@@ -500,8 +495,7 @@
     ) -> Option<(Loc<F, N>, F, bool)>
     where BT : BTSearch<F, N, Agg=Bounds<F>>,
           G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N> {
+          G::SupportType : Mapping<Loc<F, N>, Codomain=F> + LocalAnalysis<F, Bounds<F>, N> {
         let τα = τ * self.α();
         let keep_below = τα + slack + ε;
         let keep_above = -(τα + slack) - ε;
@@ -538,15 +532,14 @@
     fn verify_merge_candidate<G, BT>(
         &self,
         d : &mut BTFN<F, G, BT, N>,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        μ : &RNDM<F, N>,
         τ : F,
         ε : F,
         config : &FBGenericConfig<F>,
     ) -> bool
     where BT : BTSearch<F, N, Agg=Bounds<F>>,
           G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N> {
+          G::SupportType : Mapping<Loc<F, N>,Codomain=F> + LocalAnalysis<F, Bounds<F>, N> {
         let τα = τ * self.α();
         let refinement_tolerance = ε * config.refinement.tolerance_mult;
         let merge_tolerance = config.merge_tolerance_mult * ε;
@@ -582,16 +575,15 @@
     fn verify_merge_candidate_radonsq<G, BT>(
         &self,
         d : &mut BTFN<F, G, BT, N>,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        μ : &RNDM<F, N>,
         τ : F,
         ε : F,
         config : &FBGenericConfig<F>,
-        radon_μ : &DiscreteMeasure<Loc<F, N>, F>,
+        radon_μ : &RNDM<F, N>,
     ) -> bool
     where BT : BTSearch<F, N, Agg=Bounds<F>>,
           G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N> {
+          G::SupportType : Mapping<Loc<F, N>,Codomain=F> + LocalAnalysis<F, Bounds<F>, N> {
         let τα = τ * self.α();
         let refinement_tolerance = ε * config.refinement.tolerance_mult;
         let merge_tolerance = config.merge_tolerance_mult * ε;
@@ -647,7 +639,7 @@
     fn goodness<G, BT>(
         &self,
         d : &mut BTFN<F, G, BT, N>,
-        _μ : &DiscreteMeasure<Loc<F, N>, F>,
+        _μ : &RNDM<F, N>,
         y : &Loc<F, N>,
         z : &Loc<F, N>,
         τ : F,
--- a/src/run.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/run.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -26,25 +26,46 @@
     AlgIteratorOptions,
     Verbose,
     AlgIteratorFactory,
+    LoggingIteratorFactory,
+    TimingIteratorFactory,
+    BasicAlgIteratorFactory,
 };
 use alg_tools::logger::Logger;
-use alg_tools::error::DynError;
+use alg_tools::error::{
+    DynError,
+    DynResult,
+};
 use alg_tools::tabledump::TableDump;
 use alg_tools::sets::Cube;
 use alg_tools::mapping::{
     RealMapping,
-    DifferentiableRealMapping
+    DifferentiableMapping,
+    DifferentiableRealMapping,
+    Instance
 };
 use alg_tools::nalgebra_support::ToNalgebraRealField;
 use alg_tools::euclidean::Euclidean;
-use alg_tools::lingrid::lingrid;
+use alg_tools::lingrid::{lingrid, LinSpace};
 use alg_tools::sets::SetOrd;
+use alg_tools::linops::{RowOp, IdOp /*, ZeroOp*/};
+use alg_tools::discrete_gradient::{Grad, ForwardNeumann};
+use alg_tools::convex::Zero;
+use alg_tools::maputil::map3;
+use alg_tools::direct_product::Pair;
 
 use crate::kernels::*;
 use crate::types::*;
 use crate::measures::*;
 use crate::measures::merging::SpikeMerging;
 use crate::forward_model::*;
+use crate::forward_model::sensor_grid::{
+    SensorGrid,
+    SensorGridBT,
+    //SensorGridBTFN,
+    Sensor,
+    Spread,
+};
+
 use crate::fb::{
     FBConfig,
     FBGenericConfig,
@@ -58,8 +79,17 @@
 };
 use crate::sliding_fb::{
     SlidingFBConfig,
+    TransportConfig,
     pointsource_sliding_fb_reg
 };
+use crate::sliding_pdps::{
+    SlidingPDPSConfig,
+    pointsource_sliding_pdps_pair
+};
+use crate::forward_pdps::{
+    ForwardPDPSConfig,
+    pointsource_forward_pdps_pair
+};
 use crate::pdps::{
     PDPSConfig,
     pointsource_pdps_reg,
@@ -68,9 +98,9 @@
     FWConfig,
     FWVariant,
     pointsource_fw_reg,
-    WeightOptim,
+    //WeightOptim,
 };
-use crate::subproblem::InnerSettings;
+//use crate::subproblem::InnerSettings;
 use crate::seminorms::*;
 use crate::plot::*;
 use crate::{AlgorithmOverrides, CommandLineArgs};
@@ -82,9 +112,11 @@
 };
 use crate::dataterm::{
     L1,
-    L2Squared
+    L2Squared,
 };
-use alg_tools::norms::L2;
+use alg_tools::norms::{L2, NormExponent};
+use alg_tools::operator_arithmetic::Weighted;
+use anyhow::anyhow;
 
 /// Available algorithms and their configurations
 #[derive(Copy, Clone, Debug, Serialize, Deserialize)]
@@ -96,6 +128,8 @@
     RadonFB(RadonFBConfig<F>),
     RadonFISTA(RadonFBConfig<F>),
     SlidingFB(SlidingFBConfig<F>),
+    ForwardPDPS(ForwardPDPSConfig<F>),
+    SlidingPDPS(SlidingPDPSConfig<F>),
 }
 
 fn unpack_tolerance<F : Float>(v : &Vec<F>) -> Tolerance<F> {
@@ -119,6 +153,15 @@
                 .. g
             }
         };
+        let override_transport = |g : TransportConfig<F>| {
+            TransportConfig {
+                θ0 : cli.theta0.unwrap_or(g.θ0),
+                tolerance_ω: cli.transport_tolerance_omega.unwrap_or(g.tolerance_ω),
+                tolerance_dv: cli.transport_tolerance_dv.unwrap_or(g.tolerance_dv),
+                adaptation: cli.transport_adaptation.unwrap_or(g.adaptation),
+                .. g
+            }
+        };
 
         use AlgorithmConfig::*;
         match self {
@@ -156,17 +199,32 @@
             }),
             SlidingFB(sfb) => SlidingFB(SlidingFBConfig {
                 τ0 : cli.tau0.unwrap_or(sfb.τ0),
-                θ0 : cli.theta0.unwrap_or(sfb.θ0),
-                transport_tolerance_ω: cli.transport_tolerance_omega.unwrap_or(sfb.transport_tolerance_ω),
-                transport_tolerance_dv: cli.transport_tolerance_dv.unwrap_or(sfb.transport_tolerance_dv),
+                transport : override_transport(sfb.transport),
                 insertion : override_fb_generic(sfb.insertion),
                 .. sfb
             }),
+            SlidingPDPS(spdps) => SlidingPDPS(SlidingPDPSConfig {
+                τ0 : cli.tau0.unwrap_or(spdps.τ0),
+                σp0 : cli.sigmap0.unwrap_or(spdps.σp0),
+                σd0 : cli.sigma0.unwrap_or(spdps.σd0),
+                //acceleration : cli.acceleration.unwrap_or(pdps.acceleration),
+                transport : override_transport(spdps.transport),
+                insertion : override_fb_generic(spdps.insertion),
+                .. spdps
+            }),
+            ForwardPDPS(fpdps) => ForwardPDPS(ForwardPDPSConfig {
+                τ0 : cli.tau0.unwrap_or(fpdps.τ0),
+                σp0 : cli.sigmap0.unwrap_or(fpdps.σp0),
+                σd0 : cli.sigma0.unwrap_or(fpdps.σd0),
+                //acceleration : cli.acceleration.unwrap_or(pdps.acceleration),
+                insertion : override_fb_generic(fpdps.insertion),
+                .. fpdps
+            }),
         }
     }
 }
 
-/// Helper struct for tagging and [`AlgorithmConfig`] or [`Experiment`] with a name.
+/// Helper struct for tagging an [`AlgorithmConfig`] or [`ExperimentV2`] with a name.
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct Named<Data> {
     pub name : String,
@@ -198,9 +256,15 @@
     /// The RadonFISTA inertial forward-backward method
     #[clap(name = "radon_fista")]
     RadonFISTA,
-    /// The Sliding FB method
+    /// The sliding FB method
     #[clap(name = "sliding_fb", alias = "sfb")]
     SlidingFB,
+    /// The sliding PDPS method
+    #[clap(name = "sliding_pdps", alias = "spdps")]
+    SlidingPDPS,
+    /// The PDPS method with a forward step for the smooth function
+    #[clap(name = "forward_pdps", alias = "fpdps")]
+    ForwardPDPS,
 }
 
 impl DefaultAlgorithm {
@@ -219,6 +283,8 @@
             RadonFB => AlgorithmConfig::RadonFB(Default::default()),
             RadonFISTA => AlgorithmConfig::RadonFISTA(Default::default()),
             SlidingFB => AlgorithmConfig::SlidingFB(Default::default()),
+            SlidingPDPS => AlgorithmConfig::SlidingPDPS(Default::default()),
+            ForwardPDPS => AlgorithmConfig::ForwardPDPS(Default::default()),
         }
     }
 
@@ -278,7 +344,8 @@
     iter : usize,
     cpu_time : f64,
     value : F,
-    post_value : F,
+    relative_value : F,
+    //post_value : F,
     n_spikes : usize,
     inner_iters : usize,
     merged : usize,
@@ -355,7 +422,7 @@
     /// Kernel $ρ$ of $𝒟$.
     pub kernel : K,
     /// True point sources
-    pub μ_hat : DiscreteMeasure<Loc<F, N>, F>,
+    pub μ_hat : RNDM<F, N>,
     /// Regularisation term and parameter
     pub regularisation : Regularisation<F>,
     /// For plotting : how wide should the kernels be plotted
@@ -367,6 +434,24 @@
     pub algorithm_defaults : HashMap<DefaultAlgorithm, AlgorithmConfig<F>>,
 }
 
+#[derive(Debug, Clone, Serialize)]
+pub struct ExperimentBiased<F, NoiseDistr, S, K, P, B, const N : usize>
+where F : Float,
+      [usize; N] : Serialize,
+      NoiseDistr : Distribution<F>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      K : SimpleConvolutionKernel<F, N>,
+      B : Mapping<Loc<F, N>, Codomain = F> + Serialize + std::fmt::Debug,
+{
+    /// Basic setup
+    pub base : ExperimentV2<F, NoiseDistr, S, K, P, N>,
+    /// Weight of TV term
+    pub λ : F,
+    /// Bias function
+    pub bias : B,
+}
+
 /// Trait for runnable experiments
 pub trait RunnableExperiment<F : ClapFloat> {
     /// Run all algorithms provided, or default algorithms if none provided, on the experiment.
@@ -374,51 +459,180 @@
               algs : Option<Vec<Named<AlgorithmConfig<F>>>>) -> DynError;
 
     /// Return algorithm default config
-    fn algorithm_defaults(&self, alg : DefaultAlgorithm, cli : &AlgorithmOverrides<F>)
-    -> Named<AlgorithmConfig<F>>;
+    fn algorithm_defaults(&self, alg : DefaultAlgorithm) -> Option<AlgorithmConfig<F>>;
+}
+
+/// Helper function to print experiment start message and save setup.
+/// Returns saving prefix.
+fn start_experiment<E, S>(
+    experiment : &Named<E>,
+    cli : &CommandLineArgs,
+    stats : S,
+) -> DynResult<String>
+where
+    E : Serialize + std::fmt::Debug,
+    S : Serialize,
+{
+    let Named { name : experiment_name, data } = experiment;
+
+    println!("{}\n{}",
+                format!("Performing experiment {}…", experiment_name).cyan(),
+                format!("{:?}", data).bright_black());
+
+    // Set up output directory
+    let prefix = format!("{}/{}/", cli.outdir, experiment_name);
+
+    // Save experiment configuration and statistics
+    let mkname_e = |t| format!("{prefix}{t}.json", prefix = prefix, t = t);
+    std::fs::create_dir_all(&prefix)?;
+    write_json(mkname_e("experiment"), experiment)?;
+    write_json(mkname_e("config"), cli)?;
+    write_json(mkname_e("stats"), &stats)?;
+
+    Ok(prefix)
+}
+
+/// Error codes for running an algorithm on an experiment.
+enum RunError {
+    /// Algorithm not implemented for this experiment
+    NotImplemented,
 }
 
-// *** macro boilerplate ***
-macro_rules! impl_experiment {
-($type:ident, $reg_field:ident, $reg_convert:path) => {
-// *** macro ***
+use RunError::*;
+
+type DoRunAllIt<'a, F, const N : usize> = LoggingIteratorFactory<
+    'a,
+    Timed<IterInfo<F, N>>,
+    TimingIteratorFactory<BasicAlgIteratorFactory<IterInfo<F, N>>>
+>;
+
+/// Helper function to run all algorithms on an experiment.
+fn do_runall<F : Float, Z, const N : usize>(
+    experiment_name : &String,
+    prefix : &String,
+    cli : &CommandLineArgs,
+    algorithms : Vec<Named<AlgorithmConfig<F>>>,
+    plotgrid : LinSpace<Loc<F, N>, [usize; N]>,
+    mut save_extra : impl FnMut(String, Z) -> DynError,
+    mut do_alg : impl FnMut(
+        &AlgorithmConfig<F>,
+        DoRunAllIt<F, N>,
+        SeqPlotter<F, N>,
+        String,
+    ) -> Result<(RNDM<F, N>, Z), RunError>,
+) ->  DynError
+where
+    PlotLookup : Plotting<N>,
+{
+    let mut logs = Vec::new();
+
+    let iterator_options = AlgIteratorOptions{
+            max_iter : cli.max_iter,
+            verbose_iter : cli.verbose_iter
+                                .map_or(Verbose::Logarithmic(10),
+                                        |n| Verbose::Every(n)),
+            quiet : cli.quiet,
+    };
+
+    // Run the algorithm(s)
+    for named @ Named { name : alg_name, data : alg } in algorithms.iter() {
+        let this_prefix = format!("{}{}/", prefix, alg_name);
+
+        // Create Logger and IteratorFactory
+        let mut logger = Logger::new();
+        let iterator = iterator_options.instantiate()
+                                        .timed()
+                                        .into_log(&mut logger);
+
+        let running = if !cli.quiet {
+            format!("{}\n{}\n{}\n",
+                    format!("Running {} on experiment {}…", alg_name, experiment_name).cyan(),
+                    format!("{:?}", iterator_options).bright_black(),
+                    format!("{:?}", alg).bright_black())
+        } else {
+            "".to_string()
+        };
+        //
+        // The following is for postprocessing, which has been disabled anyway.
+        //
+        // let reg : Box<dyn WeightOptim<_, _, _, N>> = match regularisation {
+        //     Regularisation::Radon(α) => Box::new(RadonRegTerm(α)),
+        //     Regularisation::NonnegRadon(α) => Box::new(NonnegRadonRegTerm(α)),
+        // };
+        //let findim_data = reg.prepare_optimise_weights(&opA, &b);
+        //let inner_config : InnerSettings<F> = Default::default();
+        //let inner_it = inner_config.iterator_options;
+
+        // Create plotter and directory if needed.
+        let plot_count = if cli.plot >= PlotLevel::Iter { 2000 } else { 0 };
+        let plotter = SeqPlotter::new(this_prefix, plot_count, plotgrid.clone());
+
+        let start = Instant::now();
+        let start_cpu = ProcessTime::now();
+
+        let (μ, z) = match do_alg(alg, iterator, plotter, running) {
+            Ok(μ) => μ,
+            Err(RunError::NotImplemented) => {
+                let msg = format!("Algorithm “{alg_name}” not implemented for {experiment_name}. \
+                                   Skipping.").red();
+                eprintln!("{}", msg);
+                continue
+            }
+        };
+
+        let elapsed = start.elapsed().as_secs_f64();
+        let cpu_time = start_cpu.elapsed().as_secs_f64();
+
+        println!("{}", format!("Elapsed {elapsed}s (CPU time {cpu_time}s)… ").yellow());
+
+        // Save results
+        println!("{}", "Saving results …".green());
+
+        let mkname = |t| format!("{prefix}{alg_name}_{t}");
+
+        write_json(mkname("config.json"), &named)?;
+        write_json(mkname("stats.json"), &AlgorithmStats { cpu_time, elapsed })?;
+        μ.write_csv(mkname("reco.txt"))?;
+        save_extra(mkname(""), z)?;
+        //logger.write_csv(mkname("log.txt"))?;
+        logs.push((mkname("log.txt"), logger));
+            }
+
+    save_logs(logs)
+}
+
+#[replace_float_literals(F::cast_from(literal))]
 impl<F, NoiseDistr, S, K, P, const N : usize> RunnableExperiment<F> for
-Named<$type<F, NoiseDistr, S, K, P, N>>
-where F : ClapFloat + nalgebra::RealField + ToNalgebraRealField<MixedType=F>,
-      [usize; N] : Serialize,
-      S : Sensor<F, N> + Copy + Serialize + std::fmt::Debug,
-      P : Spread<F, N> + Copy + Serialize + std::fmt::Debug,
-      Convolution<S, P>: Spread<F, N> + Bounded<F> + LocalAnalysis<F, Bounds<F>, N> + Copy
-                         // TODO: shold not have differentiability as a requirement, but
-                         // decide availability of sliding based on it.
-                         //+ for<'b> Differentiable<&'b Loc<F, N>, Output = Loc<F, N>>,
-                         // TODO: very weird that rust only compiles with Differentiable
-                         // instead of the above one on references, which is required by
-                         // poitsource_sliding_fb_reg.
-                         + DifferentiableRealMapping<F, N>
-                         + Lipschitz<L2, FloatType=F>,
-      // <DefaultSG<F, S, P, N> as ForwardModel<Loc<F, N>, F>::PreadjointCodomain : for<'b> Differentiable<&'b Loc<F, N>, Output = Loc<F, N>>,
-      AutoConvolution<P> : BoundedBy<F, K>,
-      K : SimpleConvolutionKernel<F, N>
-          + LocalAnalysis<F, Bounds<F>, N>
-          + Copy + Serialize + std::fmt::Debug,
-      Cube<F, N>: P2Minimise<Loc<F, N>, F> + SetOrd,
-      PlotLookup : Plotting<N>,
-      DefaultBT<F, N> : SensorGridBT<F, S, P, N, Depth=DynamicDepth> + BTSearch<F, N>,
-      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
-      NoiseDistr : Distribution<F> + Serialize + std::fmt::Debug {
+Named<ExperimentV2<F, NoiseDistr, S, K, P, N>>
+where
+    F : ClapFloat + nalgebra::RealField + ToNalgebraRealField<MixedType=F>,
+    [usize; N] : Serialize,
+    S : Sensor<F, N> + Copy + Serialize + std::fmt::Debug,
+    P : Spread<F, N> + Copy + Serialize + std::fmt::Debug,
+    Convolution<S, P>: Spread<F, N> + Bounded<F> + LocalAnalysis<F, Bounds<F>, N> + Copy
+                        // TODO: should not have differentiability as a requirement, but
+                        // decide availability of sliding based on it.
+                        //+ for<'b> Differentiable<&'b Loc<F, N>, Output = Loc<F, N>>,
+                        // TODO: very weird that rust only compiles with Differentiable
+                        // instead of the above one on references, which is required by
+                        // pointsource_sliding_fb_reg.
+                        + DifferentiableRealMapping<F, N>
+                        + Lipschitz<L2, FloatType=F>,
+    for<'b> <Convolution<S, P> as DifferentiableMapping<Loc<F,N>>>::Differential<'b> : Lipschitz<L2, FloatType=F>, // TODO: should not be required generally, only for sliding_fb.
+    AutoConvolution<P> : BoundedBy<F, K>,
+    K : SimpleConvolutionKernel<F, N>
+        + LocalAnalysis<F, Bounds<F>, N>
+        + Copy + Serialize + std::fmt::Debug,
+    Cube<F, N>: P2Minimise<Loc<F, N>, F> + SetOrd,
+    PlotLookup : Plotting<N>,
+    DefaultBT<F, N> : SensorGridBT<F, S, P, N, Depth=DynamicDepth> + BTSearch<F, N>,
+    BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
+    RNDM<F, N> : SpikeMerging<F>,
+    NoiseDistr : Distribution<F> + Serialize + std::fmt::Debug
+{
 
-    fn algorithm_defaults(&self, alg : DefaultAlgorithm, cli : &AlgorithmOverrides<F>)
-    -> Named<AlgorithmConfig<F>> {
-        alg.to_named(
-            self.data
-                .algorithm_defaults
-                .get(&alg)
-                .map_or_else(|| alg.default_config(),
-                            |config| config.clone())
-                .cli_override(cli)
-        )
+    fn algorithm_defaults(&self, alg : DefaultAlgorithm) -> Option<AlgorithmConfig<F>> {
+        self.data.algorithm_defaults.get(&alg).cloned()
     }
 
     fn runall(&self, cli : &CommandLineArgs,
@@ -426,30 +640,15 @@
         // Get experiment configuration
         let &Named {
             name : ref experiment_name,
-            data : $type {
+            data : ExperimentV2 {
                 domain, sensor_count, ref noise_distr, sensor, spread, kernel,
-                ref μ_hat, /*regularisation,*/ kernel_plot_width, dataterm, noise_seed,
+                ref μ_hat, regularisation, kernel_plot_width, dataterm, noise_seed,
                 ..
             }
         } = self;
-        let regularisation = $reg_convert(self.data.$reg_field);
-
-        println!("{}\n{}",
-                 format!("Performing experiment {}…", experiment_name).cyan(),
-                 format!("{:?}", &self.data).bright_black());
-
-        // Set up output directory
-        let prefix = format!("{}/{}/", cli.outdir, self.name);
 
         // Set up algorithms
-        let iterator_options = AlgIteratorOptions{
-                max_iter : cli.max_iter,
-                verbose_iter : cli.verbose_iter
-                                  .map_or(Verbose::Logarithmic(10),
-                                          |n| Verbose::Every(n)),
-                quiet : cli.quiet,
-        };
-        let algorithms = match (algs, self.data.dataterm) {
+        let algorithms = match (algs, dataterm) {
             (Some(algs), _) => algs,
             (None, DataTerm::L2Squared) => vec![DefaultAlgorithm::FB.get_named()],
             (None, DataTerm::L1) => vec![DefaultAlgorithm::PDPS.get_named()],
@@ -464,281 +663,404 @@
         let mut rng = StdRng::seed_from_u64(noise_seed);
 
         // Generate the data and calculate SSNR statistic
-        let b_hat = opA.apply(μ_hat);
+        let b_hat : DVector<_> = opA.apply(μ_hat);
         let noise = DVector::from_distribution(b_hat.len(), &noise_distr, &mut rng);
         let b = &b_hat + &noise;
         // Need to wrap calc_ssnr into a function to hide ultra-lame nalgebra::RealField
         // overloading log10 and conflicting with standard NumTraits one.
         let stats = ExperimentStats::new(&b, &noise);
 
-        // Save experiment configuration and statistics
-        let mkname_e = |t| format!("{prefix}{t}.json", prefix = prefix, t = t);
-        std::fs::create_dir_all(&prefix)?;
-        write_json(mkname_e("experiment"), self)?;
-        write_json(mkname_e("config"), cli)?;
-        write_json(mkname_e("stats"), &stats)?;
+        let prefix = start_experiment(&self, cli, stats)?;
 
         plotall(cli, &prefix, &domain, &sensor, &kernel, &spread,
                 &μ_hat, &op𝒟, &opA, &b_hat, &b, kernel_plot_width)?;
 
-        // Run the algorithm(s)
-        for named @ Named { name : alg_name, data : alg } in algorithms.iter() {
-            let this_prefix = format!("{}{}/", prefix, alg_name);
+        let plotgrid = lingrid(&domain, &[if N==1 { 1000 } else { 100 }; N]);
+
+        let save_extra = |_, ()| Ok(());
 
-            let running = || if !cli.quiet {
-                println!("{}\n{}\n{}",
-                        format!("Running {} on experiment {}…", alg_name, experiment_name).cyan(),
-                        format!("{:?}", iterator_options).bright_black(),
-                        format!("{:?}", alg).bright_black());
-            };
-            let not_implemented = || {
-                let msg = format!("Algorithm “{alg_name}” not implemented for \
-                                   dataterm {dataterm:?} and regularisation {regularisation:?}. \
-                                   Skipping.").red();
-                eprintln!("{}", msg);
-            };
-            // Create Logger and IteratorFactory
-            let mut logger = Logger::new();
-            let reg : Box<dyn WeightOptim<_, _, _, N>> = match regularisation {
-                Regularisation::Radon(α) => Box::new(RadonRegTerm(α)),
-                Regularisation::NonnegRadon(α) => Box::new(NonnegRadonRegTerm(α)),
-            };
-            let findim_data = reg.prepare_optimise_weights(&opA, &b);
-            let inner_config : InnerSettings<F> = Default::default();
-            let inner_it = inner_config.iterator_options;
-            let logmap = |iter, Timed { cpu_time, data }| {
-                let IterInfo {
-                    value,
-                    n_spikes,
-                    inner_iters,
-                    merged,
-                    pruned,
-                    postprocessing,
-                    this_iters,
-                    ..
-                } = data;
-                let post_value = match (postprocessing, dataterm) {
-                    (Some(mut μ), DataTerm::L2Squared) => {
-                        // Comparison postprocessing is only implemented for the case handled
-                        // by the FW variants.
-                        reg.optimise_weights(
-                            &mut μ, &opA, &b, &findim_data, &inner_config,
-                            inner_it
-                        );
-                        dataterm.value_at_residual(opA.apply(&μ) - &b)
-                            + regularisation.apply(&μ)
-                    },
-                    _ => value,
-                };
-                CSVLog {
-                    iter,
-                    value,
-                    post_value,
-                    n_spikes,
-                    cpu_time : cpu_time.as_secs_f64(),
-                    inner_iters,
-                    merged,
-                    pruned,
-                    this_iters
-                }
-            };
-            let iterator = iterator_options.instantiate()
-                                           .timed()
-                                           .mapped(logmap)
-                                           .into_log(&mut logger);
-            let plotgrid = lingrid(&domain, &[if N==1 { 1000 } else { 100 }; N]);
-
-            // Create plotter and directory if needed.
-            let plot_count = if cli.plot >= PlotLevel::Iter { 2000 } else { 0 };
-            let plotter = SeqPlotter::new(this_prefix, plot_count, plotgrid);
-
-            // Run the algorithm
-            let start = Instant::now();
-            let start_cpu = ProcessTime::now();
+        do_runall(experiment_name, &prefix, cli, algorithms, plotgrid, save_extra,
+            |alg, iterator, plotter, running|
+        {
             let μ = match alg {
                 AlgorithmConfig::FB(ref algconfig) => {
                     match (regularisation, dataterm) {
-                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => {
-                            running();
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
                             pointsource_fb_reg(
                                 &opA, &b, NonnegRadonRegTerm(α), &op𝒟, algconfig,
                                 iterator, plotter
                             )
-                        },
-                        (Regularisation::Radon(α), DataTerm::L2Squared) => {
-                            running();
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
                             pointsource_fb_reg(
                                 &opA, &b, RadonRegTerm(α), &op𝒟, algconfig,
                                 iterator, plotter
                             )
-                        },
-                        _ => {
-                            not_implemented();
-                            continue
-                        }
+                        }),
+                        _ => Err(NotImplemented)
                     }
                 },
                 AlgorithmConfig::FISTA(ref algconfig) => {
                     match (regularisation, dataterm) {
-                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => {
-                            running();
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
                             pointsource_fista_reg(
                                 &opA, &b, NonnegRadonRegTerm(α), &op𝒟, algconfig,
                                 iterator, plotter
                             )
-                        },
-                        (Regularisation::Radon(α), DataTerm::L2Squared) => {
-                            running();
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
                             pointsource_fista_reg(
                                 &opA, &b, RadonRegTerm(α), &op𝒟, algconfig,
                                 iterator, plotter
                             )
-                        },
-                        _ => {
-                            not_implemented();
-                            continue
-                        }
+                        }),
+                        _ => Err(NotImplemented),
                     }
                 },
                 AlgorithmConfig::RadonFB(ref algconfig) => {
                     match (regularisation, dataterm) {
-                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => {
-                            running();
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
                             pointsource_radon_fb_reg(
                                 &opA, &b, NonnegRadonRegTerm(α), algconfig,
                                 iterator, plotter
                             )
-                        },
-                        (Regularisation::Radon(α), DataTerm::L2Squared) => {
-                            running();
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
                             pointsource_radon_fb_reg(
                                 &opA, &b, RadonRegTerm(α), algconfig,
                                 iterator, plotter
                             )
-                        },
-                        _ => {
-                            not_implemented();
-                            continue
-                        }
+                        }),
+                        _ => Err(NotImplemented),
                     }
                 },
                 AlgorithmConfig::RadonFISTA(ref algconfig) => {
                     match (regularisation, dataterm) {
-                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => {
-                            running();
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
                             pointsource_radon_fista_reg(
                                 &opA, &b, NonnegRadonRegTerm(α), algconfig,
                                 iterator, plotter
                             )
-                        },
-                        (Regularisation::Radon(α), DataTerm::L2Squared) => {
-                            running();
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
                             pointsource_radon_fista_reg(
                                 &opA, &b, RadonRegTerm(α), algconfig,
                                 iterator, plotter
                             )
-                        },
-                        _ => {
-                            not_implemented();
-                            continue
-                        }
+                        }),
+                        _ => Err(NotImplemented),
                     }
                 },
                 AlgorithmConfig::SlidingFB(ref algconfig) => {
                     match (regularisation, dataterm) {
-                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => {
-                            running();
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
                             pointsource_sliding_fb_reg(
                                 &opA, &b, NonnegRadonRegTerm(α), &op𝒟, algconfig,
                                 iterator, plotter
                             )
-                        },
-                        (Regularisation::Radon(α), DataTerm::L2Squared) => {
-                            running();
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
                             pointsource_sliding_fb_reg(
                                 &opA, &b, RadonRegTerm(α), &op𝒟, algconfig,
                                 iterator, plotter
                             )
-                        },
-                        _ => {
-                            not_implemented();
-                            continue
-                        }
+                        }),
+                        _ => Err(NotImplemented),
                     }
                 },
                 AlgorithmConfig::PDPS(ref algconfig) => {
-                    running();
+                    print!("{running}");
                     match (regularisation, dataterm) {
-                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => {
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => Ok({
                             pointsource_pdps_reg(
                                 &opA, &b, NonnegRadonRegTerm(α), &op𝒟, algconfig,
                                 iterator, plotter, L2Squared
                             )
-                        },
-                        (Regularisation::Radon(α),DataTerm::L2Squared) => {
+                        }),
+                        (Regularisation::Radon(α),DataTerm::L2Squared) => Ok({
                             pointsource_pdps_reg(
                                 &opA, &b, RadonRegTerm(α), &op𝒟, algconfig,
                                 iterator, plotter, L2Squared
                             )
-                        },
-                        (Regularisation::NonnegRadon(α), DataTerm::L1) => {
+                        }),
+                        (Regularisation::NonnegRadon(α), DataTerm::L1) => Ok({
                             pointsource_pdps_reg(
                                 &opA, &b, NonnegRadonRegTerm(α), &op𝒟, algconfig,
                                 iterator, plotter, L1
                             )
-                        },
-                        (Regularisation::Radon(α), DataTerm::L1) => {
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L1) => Ok({
                             pointsource_pdps_reg(
                                 &opA, &b, RadonRegTerm(α), &op𝒟, algconfig,
                                 iterator, plotter, L1
                             )
-                        },
+                        }),
                     }
                 },
                 AlgorithmConfig::FW(ref algconfig) => {
                     match (regularisation, dataterm) {
-                        (Regularisation::Radon(α), DataTerm::L2Squared) => {
-                            running();
+                        (Regularisation::Radon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
                             pointsource_fw_reg(&opA, &b, RadonRegTerm(α),
                                                algconfig, iterator, plotter)
-                        },
-                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => {
-                            running();
+                        }),
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
                             pointsource_fw_reg(&opA, &b, NonnegRadonRegTerm(α),
                                                algconfig, iterator, plotter)
-                        },
-                        _ => {
-                            not_implemented();
-                            continue
-                        }
+                        }),
+                        _ => Err(NotImplemented),
                     }
-                }
-            };
-
-            let elapsed = start.elapsed().as_secs_f64();
-            let cpu_time = start_cpu.elapsed().as_secs_f64();
-
-            println!("{}", format!("Elapsed {elapsed}s (CPU time {cpu_time}s)… ").yellow());
-
-            // Save results
-            println!("{}", "Saving results…".green());
-
-            let mkname = |t| format!("{prefix}{alg_name}_{t}");
-
-            write_json(mkname("config.json"), &named)?;
-            write_json(mkname("stats.json"), &AlgorithmStats { cpu_time, elapsed })?;
-            μ.write_csv(mkname("reco.txt"))?;
-            logger.write_csv(mkname("log.txt"))?;
-        }
-
-        Ok(())
+                },
+                _ => Err(NotImplemented),
+            }?;
+            Ok((μ, ()))
+        })
     }
 }
-// *** macro end boiler plate ***
-}}
-// *** actual code ***
+
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, NoiseDistr, S, K, P, B, const N : usize> RunnableExperiment<F> for
+Named<ExperimentBiased<F, NoiseDistr, S, K, P, B, N>>
+where
+    F : ClapFloat + nalgebra::RealField + ToNalgebraRealField<MixedType=F>,
+    [usize; N] : Serialize,
+    S : Sensor<F, N> + Copy + Serialize + std::fmt::Debug,
+    P : Spread<F, N> + Copy + Serialize + std::fmt::Debug,
+    Convolution<S, P>: Spread<F, N> + Bounded<F> + LocalAnalysis<F, Bounds<F>, N> + Copy
+                        // TODO: should not have differentiability as a requirement, but
+                        // decide availability of sliding based on it.
+                        //+ for<'b> Differentiable<&'b Loc<F, N>, Output = Loc<F, N>>,
+                        // TODO: very weird that rust only compiles with Differentiable
+                        // instead of the above one on references, which is required by
+                        // pointsource_sliding_fb_reg.
+                        + DifferentiableRealMapping<F, N>
+                        + Lipschitz<L2, FloatType=F>,
+    for<'b> <Convolution<S, P> as DifferentiableMapping<Loc<F,N>>>::Differential<'b> : Lipschitz<L2, FloatType=F>, // TODO: should not be required generally, only for sliding_fb.
+    AutoConvolution<P> : BoundedBy<F, K>,
+    K : SimpleConvolutionKernel<F, N>
+        + LocalAnalysis<F, Bounds<F>, N>
+        + Copy + Serialize + std::fmt::Debug,
+    Cube<F, N>: P2Minimise<Loc<F, N>, F> + SetOrd,
+    PlotLookup : Plotting<N>,
+    DefaultBT<F, N> : SensorGridBT<F, S, P, N, Depth=DynamicDepth> + BTSearch<F, N>,
+    BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
+    RNDM<F, N> : SpikeMerging<F>,
+    NoiseDistr : Distribution<F> + Serialize + std::fmt::Debug,
+    B : Mapping<Loc<F, N>, Codomain = F> + Serialize + std::fmt::Debug,
+{
+
+    fn algorithm_defaults(&self, alg : DefaultAlgorithm) -> Option<AlgorithmConfig<F>> {
+        self.data.base.algorithm_defaults.get(&alg).cloned()
+    }
+
+    fn runall(&self, cli : &CommandLineArgs,
+              algs : Option<Vec<Named<AlgorithmConfig<F>>>>) -> DynError {
+        // Get experiment configuration
+        let &Named {
+            name : ref experiment_name,
+            data : ExperimentBiased {
+                λ,
+                ref bias,
+                base : ExperimentV2 {
+                    domain, sensor_count, ref noise_distr, sensor, spread, kernel,
+                    ref μ_hat, regularisation, kernel_plot_width, dataterm, noise_seed,
+                    ..
+                }
+            }
+        } = self;
+
+        // Set up algorithms
+        let algorithms = match (algs, dataterm) {
+            (Some(algs), _) => algs,
+            _ => vec![DefaultAlgorithm::SlidingPDPS.get_named()],
+        };
+
+        // Set up operators
+        let depth = DynamicDepth(8);
+        let opA = DefaultSG::new(domain, sensor_count, sensor, spread, depth);
+        let op𝒟 = DefaultSeminormOp::new(depth, domain, kernel);
+        let opAext = RowOp(opA.clone(), IdOp::new());
+        let fnR = Zero::new();
+        let h = map3(domain.span_start(), domain.span_end(), sensor_count,
+                     |a, b, n| (b-a)/F::cast_from(n))
+                    .into_iter()
+                    .reduce(NumTraitsFloat::max)
+                    .unwrap();
+        let z = DVector::zeros(sensor_count.iter().product());
+        let opKz = Grad::new_for(&z, h, sensor_count, ForwardNeumann).unwrap();
+        let y = opKz.apply(&z);
+        let fnH = Weighted{ base_fn : L1.as_mapping(), weight : λ};  // TODO: L_{2,1}
+        // let zero_y = y.clone();
+        // let zeroBTFN = opA.preadjoint().apply(&zero_y);
+        // let opKμ = ZeroOp::new(&zero_y, zeroBTFN);
+
+        // Set up random number generator.
+        let mut rng = StdRng::seed_from_u64(noise_seed);
+
+        // Generate the data and calculate SSNR statistic
+        let bias_vec = DVector::from_vec(opA.grid()
+                                            .into_iter()
+                                            .map(|v| bias.apply(v))
+                                            .collect::<Vec<F>>());
+        let b_hat : DVector<_> = opA.apply(μ_hat) + &bias_vec;
+        let noise = DVector::from_distribution(b_hat.len(), &noise_distr, &mut rng);
+        let b = &b_hat + &noise;
+        // Need to wrap calc_ssnr into a function to hide ultra-lame nalgebra::RealField
+        // overloading log10 and conflicting with standard NumTraits one.
+        let stats = ExperimentStats::new(&b, &noise);
+
+        let prefix = start_experiment(&self, cli, stats)?;
+
+        plotall(cli, &prefix, &domain, &sensor, &kernel, &spread,
+                &μ_hat, &op𝒟, &opA, &b_hat, &b, kernel_plot_width)?;
+
+        opA.write_observable(&bias_vec, format!("{prefix}bias"))?;
+
+        let plotgrid = lingrid(&domain, &[if N==1 { 1000 } else { 100 }; N]);
+
+        let save_extra = |prefix, z| opA.write_observable(&z, format!("{prefix}z"));
 
-impl_experiment!(ExperimentV2, regularisation, std::convert::identity);
+        // Run the algorithms
+        do_runall(experiment_name, &prefix, cli, algorithms, plotgrid, save_extra,
+            |alg, iterator, plotter, running|
+        {
+            let Pair(μ, z) = match alg {
+                AlgorithmConfig::ForwardPDPS(ref algconfig) => {
+                    match (regularisation, dataterm) {
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
+                            pointsource_forward_pdps_pair(
+                                &opAext, &b, NonnegRadonRegTerm(α), &op𝒟, algconfig,
+                                iterator, plotter,
+                                /* opKμ, */ &opKz, &fnR, &fnH, z.clone(), y.clone(),
+                            )
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
+                            pointsource_forward_pdps_pair(
+                                &opAext, &b, RadonRegTerm(α), &op𝒟, algconfig,
+                                iterator, plotter,
+                                /* opKμ, */ &opKz, &fnR, &fnH, z.clone(), y.clone(),
+                            )
+                        }),
+                        _ => Err(NotImplemented)
+                    }
+                },
+                AlgorithmConfig::SlidingPDPS(ref algconfig) => {
+                    match (regularisation, dataterm) {
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
+                            pointsource_sliding_pdps_pair(
+                                &opAext, &b, NonnegRadonRegTerm(α), &op𝒟, algconfig,
+                                iterator, plotter,
+                                /* opKμ, */ &opKz, &fnR, &fnH, z.clone(), y.clone(),
+                            )
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
+                            pointsource_sliding_pdps_pair(
+                                &opAext, &b, RadonRegTerm(α), &op𝒟, algconfig,
+                                iterator, plotter,
+                                /* opKμ, */ &opKz, &fnR, &fnH, z.clone(), y.clone(),
+                            )
+                        }),
+                        _ => Err(NotImplemented)
+                    }
+                },
+                _ => Err(NotImplemented)
+            }?;
+            Ok((μ, z))
+        })
+    }
+}
+
+
+/// Calculate the initial and minimum objective values over all the `logs`, then save the
+/// processed logs into the corresponding file names given as the first elements of the tuples.
+fn save_logs<F : Float, const N : usize>(
+    logs : Vec<(String, Logger<Timed<IterInfo<F, N>>>)>
+) -> DynError {
+    // Process logs for relative values
+    println!("{}", "Processing logs…");
+
+
+    // Find minimum value and initial value within a single log
+    let proc_single_log = |log : &Logger<Timed<IterInfo<F, N>>>| {
+        let d = log.data();
+        let mi = d.iter()
+                  .map(|i| i.data.value)
+                  .reduce(NumTraitsFloat::min);
+        d.first()
+            .map(|i| i.data.value)
+            .zip(mi)
+    };
+
+    // Find minimum and maximum value over all logs
+    let (v_ini, v_min) = logs.iter()
+                             .filter_map(|&(_, ref log)| proc_single_log(log))
+                             .reduce(|(i1, m1), (i2, m2)| (i1.max(i2), m1.min(m2)))
+                             .ok_or(anyhow!("No algorithms found"))?;
+
+    let logmap = |Timed { cpu_time, iter, data }| {
+        let IterInfo {
+            value,
+            n_spikes,
+            inner_iters,
+            merged,
+            pruned,
+            //postprocessing,
+            this_iters,
+            ..
+        } = data;
+        // let post_value = match (postprocessing, dataterm) {
+        //     (Some(mut μ), DataTerm::L2Squared) => {
+        //         // Comparison postprocessing is only implemented for the case handled
+        //         // by the FW variants.
+        //         reg.optimise_weights(
+        //             &mut μ, &opA, &b, &findim_data, &inner_config,
+        //             inner_it
+        //         );
+        //         dataterm.value_at_residual(opA.apply(&μ) - &b)
+        //             + regularisation.apply(&μ)
+        //     },
+        //     _ => value,
+        // };
+        let relative_value = (value - v_min)/(v_ini - v_min);
+        CSVLog {
+            iter,
+            value,
+            relative_value,
+            //post_value,
+            n_spikes,
+            cpu_time : cpu_time.as_secs_f64(),
+            inner_iters,
+            merged,
+            pruned,
+            this_iters
+        }
+    };
+
+    println!("{}", "Saving logs …".green());
+
+    for (name, logger) in logs {
+        logger.map(logmap).write_csv(name)?;
+    }
+
+    Ok(())
+}
+
 
 /// Plot experiment setup
 #[replace_float_literals(F::cast_from(literal))]
@@ -749,7 +1071,7 @@
     sensor : &Sensor,
     kernel : &Kernel,
     spread : &Spread,
-    μ_hat : &DiscreteMeasure<Loc<F, N>, F>,
+    μ_hat : &RNDM<F, N>,
     op𝒟 : &𝒟,
     opA : &A,
     b_hat : &A::Observable,
@@ -761,10 +1083,10 @@
       Spread : RealMapping<F, N> + Support<F, N> + Clone,
       Kernel : RealMapping<F, N> + Support<F, N>,
       Convolution<Sensor, Spread> : DifferentiableRealMapping<F, N> + Support<F, N>,
-      //Differential<Loc<F, N>, Convolution<Sensor, Spread>> : RealVectorField<F, N, N>,
       𝒟 : DiscreteMeasureOp<Loc<F, N>, F>,
       𝒟::Codomain : RealMapping<F, N>,
-      A : ForwardModel<Loc<F, N>, F>,
+      A : ForwardModel<RNDM<F, N>, F>,
+      for<'a> &'a A::Observable : Instance<A::Observable>,
       A::PreadjointCodomain : DifferentiableRealMapping<F, N> + Bounded<F>,
       PlotLookup : Plotting<N>,
       Cube<F, N> : SetOrd {
@@ -776,38 +1098,35 @@
     let base = Convolution(sensor.clone(), spread.clone());
 
     let resolution = if N==1 { 100 } else { 40 };
-    let pfx = |n| format!("{}{}", prefix, n);
+    let pfx = |n| format!("{prefix}{n}");
     let plotgrid = lingrid(&[[-kernel_plot_width, kernel_plot_width]; N].into(), &[resolution; N]);
 
-    PlotLookup::plot_into_file(sensor, plotgrid, pfx("sensor"), "sensor".to_string());
-    PlotLookup::plot_into_file(kernel, plotgrid, pfx("kernel"), "kernel".to_string());
-    PlotLookup::plot_into_file(spread, plotgrid, pfx("spread"), "spread".to_string());
-    PlotLookup::plot_into_file_diff(&base, plotgrid, pfx("base_sensor"), "base_sensor".to_string());
+    PlotLookup::plot_into_file(sensor, plotgrid, pfx("sensor"));
+    PlotLookup::plot_into_file(kernel, plotgrid, pfx("kernel"));
+    PlotLookup::plot_into_file(spread, plotgrid, pfx("spread"));
+    PlotLookup::plot_into_file(&base, plotgrid, pfx("base_sensor"));
 
     let plotgrid2 = lingrid(&domain, &[resolution; N]);
 
     let ω_hat = op𝒟.apply(μ_hat);
     let noise =  opA.preadjoint().apply(opA.apply(μ_hat) - b);
-    PlotLookup::plot_into_file(&ω_hat, plotgrid2, pfx("omega_hat"), "ω̂".to_string());
-    PlotLookup::plot_into_file(&noise, plotgrid2, pfx("omega_noise"),
-                               "noise Aᵀ(Aμ̂ - b)".to_string());
+    PlotLookup::plot_into_file(&ω_hat, plotgrid2, pfx("omega_hat"));
+    PlotLookup::plot_into_file(&noise, plotgrid2, pfx("omega_noise"));
 
     let preadj_b =  opA.preadjoint().apply(b);
     let preadj_b_hat =  opA.preadjoint().apply(b_hat);
     //let bounds = preadj_b.bounds().common(&preadj_b_hat.bounds());
     PlotLookup::plot_into_file_spikes(
-        "Aᵀb".to_string(), &preadj_b,
-        "Aᵀb̂".to_string(), Some(&preadj_b_hat),
-        plotgrid2, None, &μ_hat,
+        Some(&preadj_b),
+        Some(&preadj_b_hat),
+        plotgrid2,
+        &μ_hat,
         pfx("omega_b")
     );
-    PlotLookup::plot_into_file_diff(&preadj_b, plotgrid2, pfx("preadj_b"),
-                                    "preadj_b".to_string());
-    PlotLookup::plot_into_file_diff(&preadj_b_hat, plotgrid2, pfx("preadj_b_hat"),
-                                    "preadj_b_hat".to_string());
+    PlotLookup::plot_into_file(&preadj_b, plotgrid2, pfx("preadj_b"));
+    PlotLookup::plot_into_file(&preadj_b_hat, plotgrid2, pfx("preadj_b_hat"));
 
     // Save true solution and observables
-    let pfx = |n| format!("{}{}", prefix, n);
     μ_hat.write_csv(pfx("orig.txt"))?;
     opA.write_observable(&b_hat, pfx("b_hat"))?;
     opA.write_observable(&b, pfx("b_noisy"))
--- a/src/seminorms.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/seminorms.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -12,9 +12,11 @@
 use alg_tools::bisection_tree::*;
 use alg_tools::mapping::RealMapping;
 use alg_tools::iter::{Mappable, FilterMapX};
-use alg_tools::linops::{Apply, Linear, BoundedLinear};
+use alg_tools::linops::{Mapping, Linear, BoundedLinear};
+use alg_tools::instance::Instance;
 use alg_tools::nalgebra_support::ToNalgebraRealField;
-use crate::measures::{DiscreteMeasure, DeltaMeasure, SpikeIter};
+use alg_tools::norms::Linfinity;
+use crate::measures::{DiscreteMeasure, DeltaMeasure, SpikeIter, Radon, RNDM};
 use nalgebra::DMatrix;
 use std::marker::PhantomData;
 use itertools::Itertools;
@@ -22,9 +24,12 @@
 /// Abstraction for operators $𝒟 ∈ 𝕃(𝒵(Ω); C_c(Ω))$.
 ///
 /// Here $𝒵(Ω) ⊂ ℳ(Ω)$ is the space of sums of delta measures, presented by [`DiscreteMeasure`].
-pub trait DiscreteMeasureOp<Domain, F> : BoundedLinear<DiscreteMeasure<Domain, F>, FloatType=F>
-where F : Float + ToNalgebraRealField,
-      Domain : 'static {
+pub trait DiscreteMeasureOp<Domain, F>
+    : BoundedLinear<DiscreteMeasure<Domain, F>, Radon, Linfinity, F>
+where
+    F : Float + ToNalgebraRealField,
+    Domain : 'static + Clone + PartialEq,
+{
     /// The output type of [`Self::preapply`].
     type PreCodomain;
 
@@ -38,7 +43,7 @@
     fn findim_matrix<'a, I>(&self, points : I) -> DMatrix<F::MixedType>
     where I : ExactSizeIterator<Item=&'a Domain> + Clone;
 
-    /// [`Apply::apply`] that typically returns an uninitialised [`PreBTFN`]
+    /// [`Mapping`] that typically returns an uninitialised [`PreBTFN`]
     /// instead of a full [`BTFN`].
     fn preapply(&self, μ : DiscreteMeasure<Domain, F>) -> Self::PreCodomain;
 }
@@ -73,7 +78,7 @@
 pub struct ConvolutionSupportGenerator<F : Float, K, const N : usize>
 where K : SimpleConvolutionKernel<F, N> {
     kernel : K,
-    centres : DiscreteMeasure<Loc<F, N>, F>,
+    centres : RNDM<F, N>,
 }
 
 impl<F : Float, K, const N : usize> ConvolutionSupportGenerator<F, K, N>
@@ -130,9 +135,9 @@
 where F : Float + ToNalgebraRealField,
       BT : BTImpl<F, N, Data=usize>,
       K : SimpleConvolutionKernel<F, N> {
-    /// Depth of the [`BT`] bisection tree for the outputs [`Apply::apply`].
+    /// Depth of the [`BT`] bisection tree for the outputs [`Mapping::apply`].
     depth : BT::Depth,
-    /// Domain of the [`BT`] bisection tree for the outputs [`Apply::apply`].
+    /// Domain of the [`BT`] bisection tree for the outputs [`Mapping::apply`].
     domain : Cube<F, N>,
     /// The convolution kernel
     kernel : K,
@@ -146,7 +151,7 @@
 
     /// Creates a new convolution operator $𝒟$ with `kernel` on `domain`.
     ///
-    /// The output of [`Apply::apply`] is a [`BT`] of given `depth`.
+    /// The output of [`Mapping::apply`] is a [`BT`] of given `depth`.
     pub fn new(depth : BT::Depth, domain : Cube<F, N>, kernel : K) -> Self {
         ConvolutionOp {
             depth : depth,
@@ -157,7 +162,7 @@
     }
 
     /// Returns the support generator for this convolution operator.
-    fn support_generator(&self, μ : DiscreteMeasure<Loc<F, N>, F>)
+    fn support_generator(&self, μ : RNDM<F, N>)
     -> ConvolutionSupportGenerator<F, K, N> {
 
         // TODO: can we avoid cloning μ?
@@ -173,94 +178,43 @@
     }
 }
 
-impl<F, K, BT, const N : usize> Apply<DiscreteMeasure<Loc<F, N>, F>>
+impl<F, K, BT, const N : usize> Mapping<RNDM<F, N>>
 for ConvolutionOp<F, K, BT, N>
-where F : Float + ToNalgebraRealField,
-      BT : BTImpl<F, N, Data=usize>,
-      K : SimpleConvolutionKernel<F, N>,
-      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+where
+    F : Float + ToNalgebraRealField,
+    BT : BTImpl<F, N, Data=usize>,
+    K : SimpleConvolutionKernel<F, N>,
+    Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N>
+{
 
-    type Output = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;
+    type Codomain = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;
 
-    fn apply(&self, μ : DiscreteMeasure<Loc<F, N>, F>) -> Self::Output {
-        let g = self.support_generator(μ);
+    fn apply<I>(&self, μ : I) -> Self::Codomain
+    where I : Instance<RNDM<F, N>> {
+        let g = self.support_generator(μ.own());
         BTFN::construct(self.domain.clone(), self.depth, g)
     }
 }
 
-impl<'a, F, K, BT, const N : usize> Apply<&'a DiscreteMeasure<Loc<F, N>, F>>
+/// [`ConvolutionOp`]s as linear operators over [`DiscreteMeasure`]s.
+impl<F, K, BT, const N : usize> Linear<RNDM<F, N>>
+for ConvolutionOp<F, K, BT, N>
+where
+    F : Float + ToNalgebraRealField,
+    BT : BTImpl<F, N, Data=usize>,
+    K : SimpleConvolutionKernel<F, N>,
+    Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N>
+{ }
+
+impl<F, K, BT, const N : usize>
+BoundedLinear<RNDM<F, N>, Radon, Linfinity, F>
 for ConvolutionOp<F, K, BT, N>
 where F : Float + ToNalgebraRealField,
       BT : BTImpl<F, N, Data=usize>,
       K : SimpleConvolutionKernel<F, N>,
       Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
 
-    type Output = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;
-
-    fn apply(&self, μ : &'a DiscreteMeasure<Loc<F, N>, F>) -> Self::Output {
-        self.apply(μ.clone())
-    }
-}
-
-/// [`ConvolutionOp`]s as linear operators over [`DiscreteMeasure`]s.
-impl<F, K, BT, const N : usize> Linear<DiscreteMeasure<Loc<F, N>, F>>
-for ConvolutionOp<F, K, BT, N>
-where F : Float + ToNalgebraRealField,
-      BT : BTImpl<F, N, Data=usize>,
-      K : SimpleConvolutionKernel<F, N>,
-      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
-    type Codomain = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;
-}
-
-impl<F, K, BT, const N : usize> Apply<DeltaMeasure<Loc<F, N>, F>>
-for ConvolutionOp<F, K, BT, N>
-where F : Float + ToNalgebraRealField,
-      BT : BTImpl<F, N, Data=usize>,
-      K : SimpleConvolutionKernel<F, N> {
-
-    type Output = Weighted<Shift<K, F, N>, F>;
-
-    #[inline]
-    fn apply(&self, δ : DeltaMeasure<Loc<F, N>, F>) -> Self::Output {
-        self.kernel.clone().shift(δ.x).weigh(δ.α)
-    }
-}
-
-impl<'a, F, K, BT, const N : usize> Apply<&'a DeltaMeasure<Loc<F, N>, F>>
-for ConvolutionOp<F, K, BT, N>
-where F : Float + ToNalgebraRealField,
-      BT : BTImpl<F, N, Data=usize>,
-      K : SimpleConvolutionKernel<F, N> {
-
-    type Output = Weighted<Shift<K, F, N>, F>;
-
-    #[inline]
-    fn apply(&self, δ : &'a DeltaMeasure<Loc<F, N>, F>) -> Self::Output {
-        self.kernel.clone().shift(δ.x).weigh(δ.α)
-    }
-}
-
-/// [`ConvolutionOp`]s as linear operators over [`DeltaMeasure`]s.
-///
-/// The codomain is different from the implementation for [`DiscreteMeasure`].
-impl<F, K, BT, const N : usize> Linear<DeltaMeasure<Loc<F, N>, F>>
-for ConvolutionOp<F, K, BT, N>
-where F : Float + ToNalgebraRealField,
-      BT : BTImpl<F, N, Data=usize>,
-      K : SimpleConvolutionKernel<F, N> {
-    type Codomain = Weighted<Shift<K, F, N>, F>;
-}
-
-impl<F, K, BT, const N : usize> BoundedLinear<DiscreteMeasure<Loc<F, N>, F>>
-for ConvolutionOp<F, K, BT, N>
-where F : Float + ToNalgebraRealField,
-      BT : BTImpl<F, N, Data=usize>,
-      K : SimpleConvolutionKernel<F, N>,
-      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
-
-    type FloatType = F;
-
-    fn opnorm_bound(&self) -> F {
+    fn opnorm_bound(&self, _ : Radon, _ : Linfinity) -> F {
         // With μ = ∑_i α_i δ_{x_i}, we have
         // |𝒟μ|_∞
         // = sup_z |∑_i α_i φ(z - x_i)|
@@ -292,10 +246,10 @@
         DMatrix::from_iterator(n, n, values)
     }
 
-    /// A version of [`Apply::apply`] that does not instantiate the [`BTFN`] codomain with
+    /// A version of [`Mapping::apply`] that does not instantiate the [`BTFN`] codomain with
     /// a bisection tree, instead returning a [`PreBTFN`]. This can improve performance when
     /// the output is to be added as the right-hand-side operand to a proper BTFN.
-    fn preapply(&self, μ : DiscreteMeasure<Loc<F, N>, F>) -> Self::PreCodomain {
+    fn preapply(&self, μ : RNDM<F, N>) -> Self::PreCodomain {
         BTFN::new_pre(self.support_generator(μ))
     }
 }
--- a/src/sliding_fb.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/sliding_fb.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -10,15 +10,12 @@
 use itertools::izip;
 use std::iter::Iterator;
 
-use alg_tools::iterate::{
-    AlgIteratorFactory,
-    AlgIteratorState
-};
+use alg_tools::iterate::AlgIteratorFactory;
 use alg_tools::euclidean::Euclidean;
 use alg_tools::sets::Cube;
 use alg_tools::loc::Loc;
-use alg_tools::mapping::{Apply, Differentiable};
-use alg_tools::norms::{Norm, L2};
+use alg_tools::mapping::{Mapping, DifferentiableMapping, Instance};
+use alg_tools::norms::Norm;
 use alg_tools::bisection_tree::{
     BTFN,
     PreBTFN,
@@ -33,14 +30,19 @@
 };
 use alg_tools::mapping::RealMapping;
 use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::norms::{L2, Linfinity};
 
 use crate::types::*;
-use crate::measures::{DeltaMeasure, DiscreteMeasure, Radon};
+use crate::measures::{DiscreteMeasure, Radon, RNDM};
 use crate::measures::merging::{
-    //SpikeMergingMethod,
+    SpikeMergingMethod,
     SpikeMerging,
 };
-use crate::forward_model::ForwardModel;
+use crate::forward_model::{
+    ForwardModel,
+    AdjointProductBoundedBy,
+    LipschitzValues,
+};
 use crate::seminorms::DiscreteMeasureOp;
 //use crate::tolerance::Tolerance;
 use crate::plot::{
@@ -56,7 +58,44 @@
     calculate_residual,
     calculate_residual2,
 };
-use crate::transport::TransportLipschitz;
+//use crate::transport::TransportLipschitz;
+
+/// Transport settings for [`pointsource_sliding_fb_reg`].
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[serde(default)]
+pub struct TransportConfig<F : Float> {
+    /// Transport step length $θ$ normalised to $(0, 1)$.
+    pub θ0 : F,
+    /// Factor in $(0, 1)$ for decreasing transport to adapt to tolerance.
+    pub adaptation : F,
+    /// Transport tolerance wrt. ω
+    pub tolerance_ω : F,
+    /// Transport tolerance wrt. ∇v
+    pub tolerance_dv : F,
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl <F : Float> TransportConfig<F> {
+    /// Check that the parameters are ok. Panics if not.
+    pub fn check(&self) {
+        assert!(self.θ0 > 0.0);
+        assert!(0.0 < self.adaptation && self.adaptation < 1.0);
+        assert!(self.tolerance_dv > 0.0);
+        assert!(self.tolerance_ω > 0.0);
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> Default for TransportConfig<F> {
+    fn default() -> Self {
+        TransportConfig {
+            θ0 : 0.01,
+            adaptation : 0.9,
+            tolerance_ω : 1000.0, // TODO: no idea what this should be
+            tolerance_dv : 1000.0, // TODO: no idea what this should be
+        }
+    }
+}
 
 /// Settings for [`pointsource_sliding_fb_reg`].
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
@@ -64,15 +103,8 @@
 pub struct SlidingFBConfig<F : Float> {
     /// Step length scaling
     pub τ0 : F,
-    /// Transport step length $θ$ normalised to $(0, 1)$.
-    pub θ0 : F,
-    /// Maximum transport mass scaling.
-    // /// The maximum transported mass is this factor times $\norm{b}^2/(2α)$.
-    // pub max_transport_scale : F,
-    /// Transport tolerance wrt. ω
-    pub transport_tolerance_ω : F,
-    /// Transport tolerance wrt. ∇v
-    pub transport_tolerance_dv : F,
+    /// Transport parameters
+    pub transport : TransportConfig<F>,
     /// Generic parameters
     pub insertion : FBGenericConfig<F>,
 }
@@ -82,38 +114,243 @@
     fn default() -> Self {
         SlidingFBConfig {
             τ0 : 0.99,
-            θ0 : 0.99,
-            //max_transport_scale : 10.0,
-            transport_tolerance_ω : 1.0, // TODO: no idea what this should be
-            transport_tolerance_dv : 1.0, // TODO: no idea what this should be
+            transport : Default::default(),
             insertion : Default::default()
         }
     }
 }
 
-/// Scale each |γ|_i ≠ 0 by q_i=q̄/g(γ_i)
+/// Internal type of adaptive transport step length calculation
+pub(crate) enum TransportStepLength<F : Float, G : Fn(F, F) -> F> {
+    /// Fixed, known step length
+    Fixed(F),
+    /// Adaptive step length, only wrt. maximum transport.
+    /// Content of `l` depends on use case, while `g` calculates the step length from `l`.
+    AdaptiveMax{ l : F, max_transport : F, g : G },
+    /// Adaptive step length.
+    /// Content of `l` depends on use case, while `g` calculates the step length from `l`.
+    FullyAdaptive{ l : F, max_transport : F, g : G },
+}
+
+/// Construction and a priori transport adaptation.
 #[replace_float_literals(F::cast_from(literal))]
-fn scale_down<'a, I, F, G, const N : usize>(
-    iter : I,
-    q̄ : F,
-    mut g : G
-) where F : Float,
-        I : Iterator<Item = &'a mut DeltaMeasure<Loc<F,N>, F>>,
-        G : FnMut(&DeltaMeasure<Loc<F,N>, F>) -> F {
-    iter.for_each(|δ| {
-        if δ.α != 0.0 {
-            let b = g(δ);
-            if b * δ.α > 0.0 {
-                δ.α *= q̄/b;
+pub(crate) fn initial_transport<F, G, D, Observable, const N : usize>(
+    γ1 : &mut RNDM<F, N>,
+    μ : &mut RNDM<F, N>,
+    opAapply : impl Fn(&RNDM<F, N>) -> Observable,
+    ε : F,
+    τ : F,
+    θ_or_adaptive : &mut TransportStepLength<F, G>,
+    opAnorm : F,
+    v : D,
+    tconfig : &TransportConfig<F>
+) -> (Vec<F>, RNDM<F, N>)
+where
+    F : Float + ToNalgebraRealField,
+    G : Fn(F, F) -> F,
+    Observable : Euclidean<F, Output=Observable>,
+    for<'a> &'a Observable : Instance<Observable>,
+    //for<'b> A::Preadjoint<'b> : LipschitzValues<FloatType=F>,
+    D : DifferentiableMapping<Loc<F, N>, DerivativeDomain=Loc<F, N>>,
+{
+
+    use TransportStepLength::*;
+
+    // Save current base point and shift μ to new positions. Idea is that
+    //  μ_base(_masses) = μ^k (vector of masses)
+    //  μ_base_minus_γ0 = μ^k - π_♯^0γ^{k+1}
+    //  γ1 = π_♯^1γ^{k+1}
+    //  μ = μ^{k+1}
+    let μ_base_masses : Vec<F> = μ.iter_masses().collect();
+    let mut μ_base_minus_γ0 = μ.clone(); // Weights will be set in the loop below.
+    // Construct μ^{k+1} and π_♯^1γ^{k+1} initial candidates
+    //let mut sum_norm_dv = 0.0;
+    let γ_prev_len = γ1.len();
+    assert!(μ.len() >= γ_prev_len);
+    γ1.extend(μ[γ_prev_len..].iter().cloned());
+
+    // Calculate initial transport and step length.
+    // First calculate initial transported weights
+    for (δ, ρ) in izip!(μ.iter_spikes(), γ1.iter_spikes_mut()) {
+        // If old transport has opposing sign, the new transport will be none.
+        ρ.α = if (ρ.α > 0.0 && δ.α < 0.0) || (ρ.α < 0.0 && δ.α > 0.0) {
+            0.0
+        } else {
+            δ.α
+        };
+    };
+
+    // A priori transport adaptation based on bounding 2 ‖A‖ ‖A(γ₁-γ₀)‖‖γ‖ by scaling γ.
+    // 1. Calculate transport rays.
+    //    If the Lipschitz factor of the values v=∇F(μ) are not known, estimate it.
+    match *θ_or_adaptive {
+        Fixed(θ) => {
+            let θτ = τ * θ;
+            for (δ, ρ) in izip!(μ.iter_spikes(), γ1.iter_spikes_mut()) {
+                ρ.x = δ.x - v.differential(&δ.x) * (ρ.α.signum() * θτ);
+            }
+        },
+        AdaptiveMax{ l : ℓ_v, ref mut max_transport, g : ref calculate_θ } => {
+            *max_transport = max_transport.max(γ1.norm(Radon));
+            let θτ = τ * calculate_θ(ℓ_v, *max_transport);
+            for (δ, ρ) in izip!(μ.iter_spikes(), γ1.iter_spikes_mut()) {
+                ρ.x = δ.x - v.differential(&δ.x) * (ρ.α.signum() * θτ);
+            }
+        },
+        FullyAdaptive{ l : ref mut adaptive_ℓ_v, ref mut max_transport, g : ref calculate_θ } => {
+            *max_transport = max_transport.max(γ1.norm(Radon));
+            let mut θ = calculate_θ(*adaptive_ℓ_v, *max_transport);
+            loop {
+                let θτ = τ * θ;
+                for (δ, ρ) in izip!(μ.iter_spikes(), γ1.iter_spikes_mut()) {
+                    let dv_x = v.differential(&δ.x);
+                    ρ.x = δ.x - &dv_x * (ρ.α.signum() * θτ);
+                    // Estimate Lipschitz factor of ∇v
+                    let this_ℓ_v = (dv_x - v.differential(&ρ.x)).norm2();
+                    *adaptive_ℓ_v = adaptive_ℓ_v.max(this_ℓ_v);
+                }
+                let new_θ = calculate_θ(*adaptive_ℓ_v / tconfig.adaptation, *max_transport);
+                if new_θ <= θ {
+                    break
+                }
+                θ = new_θ;
             }
         }
-    });
+    }
+
+    // 2. Adjust transport mass, if needed.
+    // This tries to remove the smallest transport masses first.
+    if true {
+        // Alternative 1 : subtract same amount from all transport rays until reaching zero
+        loop {
+            let nr =γ1.norm(Radon);
+            let n = τ * 2.0 * opAnorm * (opAapply(&*γ1)-opAapply(&*μ)).norm2();
+            if n <= 0.0 || nr <= 0.0 {
+                break
+            }
+            let reduction_needed = nr - (ε * tconfig.tolerance_dv / n);
+            if reduction_needed <= 0.0 {
+                break
+            }
+            let (min_nonzero, n_nonzero) = γ1.iter_masses()
+                                            .map(|α| α.abs())
+                                            .filter(|α| *α > F::EPSILON)
+                                            .fold((F::INFINITY, 0), |(a, n), b| (a.min(b), n+1));
+            assert!(n_nonzero > 0);
+            // Reduction that can be done in all nonzero spikes simultaneously
+            let h = (reduction_needed / F::cast_from(n_nonzero)).min(min_nonzero);
+            for (δ, ρ) in izip!(μ.iter_spikes_mut(), γ1.iter_spikes_mut()) {
+                ρ.α = ρ.α.signum() * (ρ.α.abs() - h).max(0.0);
+                δ.α = ρ.α;
+            }
+            if min_nonzero * F::cast_from(n_nonzero) >= reduction_needed {
+                break
+            }
+        }
+    } else {
+        // Alternative 2: first reduce transport rays with greater effect based on differential.
+        // This is an inefficient quick-and-dirty implementation.
+        loop {
+            let nr = γ1.norm(Radon);
+            let a = opAapply(&*γ1)-opAapply(&*μ);
+            let na = a.norm2();
+            let n = τ * 2.0 * opAnorm * na;
+            if n <= 0.0 || nr <= 0.0 {
+                break
+            }
+            let reduction_needed = nr - (ε * tconfig.tolerance_dv / n);
+            if reduction_needed <= 0.0 {
+                break
+            }
+            let mut max_d = 0.0;
+            let mut max_d_ind = 0;
+            for (δ, ρ, i) in izip!(μ.iter_spikes_mut(), γ1.iter_spikes(), 0..) {
+                // Calculate differential of  ‖A(γ₁-γ₀)‖‖γ‖  wrt. each spike
+                let s = δ.α.signum();
+                // TODO: this is a very inefficient implementation due to the limitations
+                // of the closure parameters.
+                let δ1 = DiscreteMeasure::from([(ρ.x, s)]);
+                let δ2 = DiscreteMeasure::from([(δ.x, s)]);
+                let a_part = opAapply(&δ1)-opAapply(&δ2);
+                let d = a.dot(&a_part)/na * nr + 2.0 * na;
+                if d > max_d {
+                    max_d = d;
+                    max_d_ind = i;
+                }
+            }
+            // Just set the mass to zero for the transport ray with the greatest differential
+            assert!(max_d > 0.0);
+            γ1[max_d_ind].α = 0.0;
+            μ[max_d_ind].α = 0.0;
+        }
+    }
+
+    // Set initial guess for μ=μ^{k+1}.
+    for (δ, ρ, &β) in izip!(μ.iter_spikes_mut(), γ1.iter_spikes(), μ_base_masses.iter()) {
+        if ρ.α.abs() > F::EPSILON {
+            δ.x = ρ.x;
+            //δ.α = ρ.α; // already set above
+        } else {
+            δ.α = β;
+        }
+    }
+    // Calculate μ^k-π_♯^0γ^{k+1} and v̆ = A_*(A[μ_transported + μ_transported_base]-b)
+    μ_base_minus_γ0.set_masses(μ_base_masses.iter().zip(γ1.iter_masses())
+                                                   .map(|(&a,b)| a - b));
+    (μ_base_masses, μ_base_minus_γ0)
+}
+
+/// A posteriori transport adaptation.
+#[replace_float_literals(F::cast_from(literal))]
+pub(crate) fn aposteriori_transport<F, const N : usize>(
+    γ1 : &mut RNDM<F, N>,
+    μ : &mut RNDM<F, N>,
+    μ_base_minus_γ0 : &mut RNDM<F, N>,
+    μ_base_masses : &Vec<F>,
+    ε : F,
+    tconfig : &TransportConfig<F>
+) -> bool
+where F : Float + ToNalgebraRealField {
+
+    // 1. If π_♯^1γ^{k+1} = γ1 has non-zero mass at some point y, but μ = μ^{k+1} does not,
+    // then the ansatz ∇w̃_x(y) = w^{k+1}(y) may not be satisfied. So set the mass of γ1
+    // at that point to zero, and retry.
+    let mut all_ok = true;
+    for (α_μ, α_γ1) in izip!(μ.iter_masses(), γ1.iter_masses_mut()) {
+        if α_μ == 0.0 && *α_γ1 != 0.0 {
+            all_ok = false;
+            *α_γ1 = 0.0;
+        }
+    }
+
+    // 2. Through bounding ∫ B_ω(y, z) dλ(x, y, z).
+    //    through the estimate ≤ C ‖Δ‖‖γ^{k+1}‖ for Δ := μ^{k+1}-μ^k-(π_♯^1-π_♯^0)γ^{k+1},
+    //    which holds for some C if the convolution kernel in 𝒟 has Lipschitz gradient.
+    let nγ = γ1.norm(Radon);
+    let nΔ = μ_base_minus_γ0.norm(Radon) + μ.dist_matching(&γ1);
+    let t = ε * tconfig.tolerance_ω;
+    if nγ*nΔ > t {
+        // Since t/(nγ*nΔ)<1, and the constant tconfig.adaptation < 1,
+        // this will guarantee that eventually ‖γ‖ decreases sufficiently that we
+        // will not enter here.
+        *γ1 *= tconfig.adaptation * t / ( nγ * nΔ );
+        all_ok = false
+    }
+
+    if !all_ok {
+        // Update weights for μ_base_minus_γ0 = μ^k - π_♯^0γ^{k+1}
+        μ_base_minus_γ0.set_masses(μ_base_masses.iter().zip(γ1.iter_masses())
+                                                        .map(|(&a,b)| a - b));
+
+    }
+
+    all_ok
 }
 
 /// Iteratively solve the pointsource localisation problem using sliding forward-backward
 /// splitting
 ///
-/// The parametrisatio is as for [`pointsource_fb_reg`].
+/// The parametrisation is as for [`pointsource_fb_reg`].
 /// Inertia is currently not supported.
 #[replace_float_literals(F::cast_from(literal))]
 pub fn pointsource_sliding_fb_reg<'a, F, I, A, GA, 𝒟, BTA, BT𝒟, G𝒟, S, K, Reg, const N : usize>(
@@ -121,203 +358,113 @@
     b : &A::Observable,
     reg : Reg,
     op𝒟 : &'a 𝒟,
-    sfbconfig : &SlidingFBConfig<F>,
+    config : &SlidingFBConfig<F>,
     iterator : I,
     mut plotter : SeqPlotter<F, N>,
-) -> DiscreteMeasure<Loc<F, N>, F>
+) -> RNDM<F, N>
 where F : Float + ToNalgebraRealField,
       I : AlgIteratorFactory<IterInfo<F, N>>,
-      for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
-                                  //+ std::ops::Mul<F, Output=A::Observable>,  <-- FIXME: compiler overflow
-      A::Observable : std::ops::MulAssign<F>,
-      A::PreadjointCodomain : for<'b> Differentiable<&'b Loc<F, N>, Output=Loc<F, N>>,
+      for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable> + Instance<A::Observable>,
+      for<'b> A::Preadjoint<'b> : LipschitzValues<FloatType=F>,
+      A::PreadjointCodomain : DifferentiableMapping<
+        Loc<F, N>, DerivativeDomain=Loc<F, N>, Codomain=F
+      >,
       GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
-          + Lipschitz<&'a 𝒟, FloatType=F> + TransportLipschitz<L2Squared, FloatType=F>,
+      A : ForwardModel<RNDM<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
+          + AdjointProductBoundedBy<RNDM<F, N>, 𝒟, FloatType=F>,
+          //+ TransportLipschitz<L2Squared, FloatType=F>,
       BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
       G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
       𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>,
                                           Codomain = BTFN<F, G𝒟, BT𝒟, N>>,
       BT𝒟 : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
       S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>
-         + Differentiable<Loc<F, N>, Output=Loc<F,N>>,
+         + DifferentiableMapping<Loc<F, N>, DerivativeDomain=Loc<F,N>>,
       K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-         //+ Differentiable<Loc<F, N>, Output=Loc<F,N>>,
+         //+ Differentiable<Loc<F, N>, Derivative=Loc<F,N>>,
       BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
       Cube<F, N>: P2Minimise<Loc<F, N>, F>,
       PlotLookup : Plotting<N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
+      RNDM<F, N> : SpikeMerging<F>,
       Reg : SlidingRegTerm<F, N> {
 
-    assert!(sfbconfig.τ0 > 0.0 &&
-            sfbconfig.θ0 > 0.0);
-
-    // Set up parameters
-    let config = &sfbconfig.insertion;
-    let op𝒟norm = op𝒟.opnorm_bound();
-    //let max_transport = sfbconfig.max_transport_scale
-    //                    * reg.radon_norm_bound(b.norm2_squared() / 2.0);
-    //let tlip = opA.transport_lipschitz_factor(L2Squared) * max_transport;
-    //let ℓ = 0.0;
-    let θ = sfbconfig.θ0; // (ℓ + tlip);
-    let τ = sfbconfig.τ0/opA.lipschitz_factor(&op𝒟).unwrap();
-    // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled
-    // by τ compared to the conditional gradient approach.
-    let tolerance = config.tolerance * τ * reg.tolerance_scaling();
-    let mut ε = tolerance.initial();
+    // Check parameters
+    assert!(config.τ0 > 0.0, "Invalid step length parameter");
+    config.transport.check();
 
     // Initialise iterates
     let mut μ = DiscreteMeasure::new();
     let mut γ1 = DiscreteMeasure::new();
-    let mut residual = -b;
+    let mut residual = -b; // Has to equal $Aμ-b$.
+
+    // Set up parameters
+    let op𝒟norm = op𝒟.opnorm_bound(Radon, Linfinity);
+    let opAnorm = opA.opnorm_bound(Radon, L2);
+    //let max_transport = config.max_transport.scale
+    //                    * reg.radon_norm_bound(b.norm2_squared() / 2.0);
+    //let ℓ = opA.transport.lipschitz_factor(L2Squared) * max_transport;
+    let ℓ = 0.0;
+    let τ = config.τ0 / opA.adjoint_product_bound(&op𝒟).unwrap();
+    let calculate_θ = |ℓ_v, _| config.transport.θ0 / (τ*(ℓ + ℓ_v));
+    let mut θ_or_adaptive = match opA.preadjoint().value_diff_unit_lipschitz_factor() {
+        // We only estimate w (the uniform Lipschitz factor of v), if we also estimate ℓ_v
+        // (the uniform Lipschitz factor of ∇v).
+        // We assume that the residual is decreasing.
+        Some(ℓ_v0) => TransportStepLength::Fixed(calculate_θ(ℓ_v0 * residual.norm2(), 0.0)),
+        None => TransportStepLength::FullyAdaptive {
+            l : 0.0,
+            max_transport : 0.0,
+            g : calculate_θ
+        },
+    };
+    // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled
+    // by τ compared to the conditional gradient approach.
+    let tolerance = config.insertion.tolerance * τ * reg.tolerance_scaling();
+    let mut ε = tolerance.initial();
+
+    // Statistics
+    let full_stats = |residual : &A::Observable,
+                      μ : &RNDM<F, N>,
+                      ε, stats| IterInfo {
+        value : residual.norm2_squared_div2() + reg.apply(μ),
+        n_spikes : μ.len(),
+        ε,
+        // postprocessing: config.insertion.postprocessing.then(|| μ.clone()),
+        .. stats
+    };
     let mut stats = IterInfo::new();
 
     // Run the algorithm
-    iterator.iterate(|state| {
-        // Calculate smooth part of surrogate model.
-        // Using `std::mem::replace` here is not ideal, and expects that `empty_observable`
-        // has no significant overhead. For some reosn Rust doesn't allow us simply moving
-        // the residual and replacing it below before the end of this closure.
-        let r = std::mem::replace(&mut residual, opA.empty_observable());
-        let v = opA.preadjoint().apply(r);
-
-        // Save current base point and shift μ to new positions. Idea is that
-        //  μ_base(_masses) = μ^k (vector of masses)
-        //  μ_base_minus_γ0 = μ^k - π_♯^0γ^{k+1}
-        //  γ1 = π_♯^1γ^{k+1}
-        //  μ = μ^{k+1}
-        let μ_base_masses : Vec<F> = μ.iter_masses().collect();
-        let mut μ_base_minus_γ0 = μ.clone(); // Weights will be set in the loop below.
-        // Construct μ^{k+1} and π_♯^1γ^{k+1} initial candidates
-        let mut sum_norm_dv_times_γinit = 0.0;
-        let mut sum_abs_γinit = 0.0;
-        //let mut sum_norm_dv = 0.0;
-        let γ_prev_len = γ1.len();
-        assert!(μ.len() >= γ_prev_len);
-        γ1.extend(μ[γ_prev_len..].iter().cloned());
-        for (δ, ρ) in izip!(μ.iter_spikes_mut(), γ1.iter_spikes_mut()) {
-            let d_v_x = v.differential(&δ.x);
-            // If old transport has opposing sign, the new transport will be none.
-            ρ.α = if (ρ.α > 0.0 && δ.α < 0.0) || (ρ.α < 0.0 && δ.α > 0.0) {
-                0.0
-            } else {
-                δ.α
-            };
-            δ.x -= d_v_x * (θ * δ.α.signum()); // This is δ.α.signum() when δ.α ≠ 0.
-            ρ.x = δ.x;
-            let nrm = d_v_x.norm(L2);
-            let a = ρ.α.abs();
-            let v = nrm * a;
-            if v > 0.0 {
-                sum_norm_dv_times_γinit += v;
-                sum_abs_γinit += a;
-            }
-        }
-
-        // A priori transport adaptation based on bounding ∫ ⟨∇v(x), z-y⟩ dλ(x, y, z).
-        // This is just one option, there are many.
-        let t = ε * sfbconfig.transport_tolerance_dv;
-        if sum_norm_dv_times_γinit > t {
-            // Scale each |γ|_i by q_i=q̄/‖vx‖_i such that ∑_i |γ|_i q_i ‖vx‖_i = t
-            // TODO: store the closure values above?
-            scale_down(γ1.iter_spikes_mut(),
-                       t / sum_abs_γinit,
-                       |δ| v.differential(&δ.x).norm(L2));
-        }
-        //println!("|γ| = {}, |μ| = {}", γ1.norm(crate::measures::Radon), μ.norm(crate::measures::Radon));
+    for state in iterator.iter_init(|| full_stats(&residual, &μ, ε, stats.clone())) {
+        // Calculate initial transport
+        let v = opA.preadjoint().apply(residual);
+        let (μ_base_masses, mut μ_base_minus_γ0) = initial_transport(
+            &mut γ1, &mut μ, |ν| opA.apply(ν),
+            ε, τ, &mut θ_or_adaptive, opAnorm,
+            v, &config.transport,
+        );
 
         // Solve finite-dimensional subproblem several times until the dual variable for the
         // regularisation term conforms to the assumptions made for the transport above.
-        let (d, within_tolerances) = 'adapt_transport: loop {
-            // Update weights for μ_base_minus_γ0 = μ^k - π_♯^0γ^{k+1}
-            for (δ_γ1, δ_μ_base_minus_γ0, &α_μ_base) in izip!(γ1.iter_spikes(),
-                                                              μ_base_minus_γ0.iter_spikes_mut(),
-                                                              μ_base_masses.iter()) {
-                δ_μ_base_minus_γ0.set_mass(α_μ_base - δ_γ1.get_mass());
-            }
-
-            // Calculate transported_minus_τv = -τA_*(A[μ_transported + μ_transported_base]-b)
+        let (d, _within_tolerances, τv̆) = 'adapt_transport: loop {
+            // Calculate τv̆ = τA_*(A[μ_transported + μ_transported_base]-b)
             let residual_μ̆ = calculate_residual2(&γ1, &μ_base_minus_γ0, opA, b);
-            let transported_minus_τv̆ = opA.preadjoint().apply(residual_μ̆ * (-τ));
+            let τv̆ = opA.preadjoint().apply(residual_μ̆ * τ);
 
             // Construct μ^{k+1} by solving finite-dimensional subproblems and insert new spikes.
             let (d, within_tolerances) = insert_and_reweigh(
-                &mut μ, &transported_minus_τv̆, &γ1, Some(&μ_base_minus_γ0),
+                &mut μ, &τv̆, &γ1, Some(&μ_base_minus_γ0),
                 op𝒟, op𝒟norm,
-                τ, ε,
-                config,
-                &reg, state, &mut stats,
+                τ, ε, &config.insertion,
+                &reg, &state, &mut stats,
             );
 
-            // A posteriori transport adaptation based on bounding (1/τ)∫ ω(z) - ω(y) dλ(x, y, z).
-            let all_ok = if false { // Basic check
-                // If π_♯^1γ^{k+1} = γ1 has non-zero mass at some point y, but μ = μ^{k+1} does not,
-                // then the ansatz ∇w̃_x(y) = w^{k+1}(y) may not be satisfied. So set the mass of γ1
-                // at that point to zero, and retry.
-                let mut all_ok = true;
-                for (α_μ, α_γ1) in izip!(μ.iter_masses(), γ1.iter_masses_mut()) {
-                    if α_μ == 0.0 && *α_γ1 != 0.0 {
-                        all_ok = false;
-                        *α_γ1 = 0.0;
-                    }
-                }
-                all_ok
-            } else {
-                // TODO: Could maybe optimise, as this is also formed in insert_and_reweigh above.
-                let mut minus_ω = op𝒟.apply(γ1.sub_matching(&μ) + &μ_base_minus_γ0);
-
-                // let vpos = γ1.iter_spikes()
-                //              .filter(|δ| δ.α > 0.0)
-                //              .map(|δ| minus_ω.apply(&δ.x))
-                //              .reduce(F::max)
-                //              .and_then(|threshold| {
-                //                 minus_ω.minimise_below(threshold,
-                //                                         ε * config.refinement.tolerance_mult,
-                //                                         config.refinement.max_steps)
-                //                        .map(|(_z, minus_ω_z)| minus_ω_z)
-                //              });
-
-                // let vneg = γ1.iter_spikes()
-                //              .filter(|δ| δ.α < 0.0)
-                //              .map(|δ| minus_ω.apply(&δ.x))
-                //              .reduce(F::min)
-                //              .and_then(|threshold| {
-                //                 minus_ω.maximise_above(threshold,
-                //                                         ε * config.refinement.tolerance_mult,
-                //                                         config.refinement.max_steps)
-                //                        .map(|(_z, minus_ω_z)| minus_ω_z)
-                //              });
-                let (_, vpos) = minus_ω.minimise(ε * config.refinement.tolerance_mult,
-                                                 config.refinement.max_steps);
-                let (_, vneg) = minus_ω.maximise(ε * config.refinement.tolerance_mult,
-                                                 config.refinement.max_steps);
-            
-                let t = τ * ε * sfbconfig.transport_tolerance_ω;
-                let val = |δ : &DeltaMeasure<Loc<F, N>, F>| {
-                    δ.α * (minus_ω.apply(&δ.x) - if δ.α >= 0.0 { vpos } else { vneg })
-                    // match if δ.α >= 0.0 { vpos } else { vneg } {
-                    //     None => 0.0,
-                    //     Some(v) => δ.α * (minus_ω.apply(&δ.x) - v)
-                    // }
-                };
-                // Calculate positive/bad (rp) values under the integral.
-                // Also store sum of masses for the positive entries.
-                let (rp, w) = γ1.iter_spikes().fold((0.0, 0.0), |(p, w), δ| {
-                    let v = val(δ);
-                    if v <= 0.0 { (p, w) } else { (p + v, w + δ.α.abs()) }
-                });
-
-                if rp > t {
-                    // TODO: store v above?
-                    scale_down(γ1.iter_spikes_mut(), t / w, val);
-                    false
-                } else {
-                    true
-                }
-            };
-
-            if all_ok {
-                break 'adapt_transport (d, within_tolerances)
+            // A posteriori transport adaptation.
+            if aposteriori_transport(
+                &mut γ1, &mut μ, &mut μ_base_minus_γ0, &μ_base_masses,
+                ε, &config.transport
+            ) {
+                break 'adapt_transport (d, within_tolerances, τv̆)
             }
         };
 
@@ -330,10 +477,24 @@
         stats.transport_error = Some({
             assert_eq!(μ_base_masses.len(), γ1.len());
             let (a, b) = stats.transport_error.unwrap_or((0.0, 0.0));
-            let err = izip!(μ.iter_masses(), γ1.iter_masses()).map(|(v,w)| (v-w).abs()).sum();
-            (a + err, b + γ1.norm(Radon))
+            (a + μ.dist_matching(&γ1), b + γ1.norm(Radon))
         });
 
+        // Merge spikes.
+        // This expects the prune below to prune γ.
+        // TODO: This may not work correctly in all cases.
+        let ins = &config.insertion;
+        if ins.merge_now(&state) {
+            if let SpikeMergingMethod::None = ins.merging {
+            } else {
+                stats.merged += μ.merge_spikes(ins.merging, |μ_candidate| {
+                    let ν = μ_candidate.sub_matching(&γ1)-&μ_base_minus_γ0;
+                    let mut d = &τv̆ + op𝒟.preapply(ν);
+                    reg.verify_merge_candidate(&mut d, μ_candidate, τ, ε, ins)
+                });
+            }
+        }
+
         // Prune spikes with zero weight. To maintain correct ordering between μ and γ1, also the
         // latter needs to be pruned when μ is.
         // TODO: This could do with a two-vector Vec::retain to avoid copies.
@@ -341,40 +502,25 @@
         if μ_new.len() != μ.len() {
             let mut μ_iter = μ.iter_spikes();
             γ1.prune_by(|_| μ_iter.next().unwrap().α != F::ZERO);
+            stats.pruned += μ.len() - μ_new.len();
             μ = μ_new;
         }
 
-        // TODO: how to merge?
-
         // Update residual
         residual = calculate_residual(&μ, opA, b);
 
-        // Update main tolerance for next iteration
-        let ε_prev = ε;
-        ε = tolerance.update(ε, state.iteration());
+        let iter = state.iteration();
         stats.this_iters += 1;
 
-        // Give function value if needed
+        // Give statistics if requested
         state.if_verbose(|| {
-            // Plot if so requested
-            plotter.plot_spikes(
-                format!("iter {} end; {}", state.iteration(), within_tolerances), &d,
-                "start".to_string(), None::<&A::PreadjointCodomain>, // TODO: Should be Some(&((-τ) * v)), but not implemented
-                reg.target_bounds(τ, ε_prev), &μ,
-            );
-            // Calculate mean inner iterations and reset relevant counters.
-            // Return the statistics
-            let res = IterInfo {
-                value : residual.norm2_squared_div2() + reg.apply(&μ),
-                n_spikes : μ.len(),
-                ε : ε_prev,
-                postprocessing: config.postprocessing.then(|| μ.clone()),
-                .. stats
-            };
-            stats = IterInfo::new();
-            res
-        })
-    });
+            plotter.plot_spikes(iter, Some(&d), Some(&τv̆), &μ);
+            full_stats(&residual, &μ, ε, std::mem::replace(&mut stats, IterInfo::new()))
+        });
 
-    postprocess(μ, config, L2Squared, opA, b)
+        // Update main tolerance for next iteration
+        ε = tolerance.update(ε, iter);
+    }
+
+    postprocess(μ, &config.insertion, L2Squared, opA, b)
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/sliding_pdps.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -0,0 +1,384 @@
+/*!
+Solver for the point source localisation problem using a sliding
+primal-dual proximal splitting method.
+*/
+
+use numeric_literals::replace_float_literals;
+use serde::{Serialize, Deserialize};
+//use colored::Colorize;
+//use nalgebra::{DVector, DMatrix};
+use std::iter::Iterator;
+
+use alg_tools::iterate::AlgIteratorFactory;
+use alg_tools::euclidean::Euclidean;
+use alg_tools::sets::Cube;
+use alg_tools::loc::Loc;
+use alg_tools::mapping::{Mapping, DifferentiableRealMapping, Instance};
+use alg_tools::norms::Norm;
+use alg_tools::direct_product::Pair;
+use alg_tools::bisection_tree::{
+    BTFN,
+    PreBTFN,
+    Bounds,
+    BTNodeLookup,
+    BTNode,
+    BTSearch,
+    P2Minimise,
+    SupportGenerator,
+    LocalAnalysis,
+    //Bounded,
+};
+use alg_tools::mapping::RealMapping;
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::linops::{
+    BoundedLinear, AXPY, GEMV, Adjointable, IdOp,
+};
+use alg_tools::convex::{Conjugable, Prox};
+use alg_tools::norms::{L2, Linfinity, PairNorm};
+
+use crate::types::*;
+use crate::measures::{DiscreteMeasure, Radon, RNDM};
+use crate::measures::merging::SpikeMerging;
+use crate::forward_model::{
+    ForwardModel,
+    AdjointProductPairBoundedBy,
+    LipschitzValues,
+};
+// use crate::transport::TransportLipschitz;
+use crate::seminorms::DiscreteMeasureOp;
+//use crate::tolerance::Tolerance;
+use crate::plot::{
+    SeqPlotter,
+    Plotting,
+    PlotLookup
+};
+use crate::fb::*;
+use crate::regularisation::SlidingRegTerm;
+// use crate::dataterm::L2Squared;
+use crate::sliding_fb::{
+    TransportConfig,
+    TransportStepLength,
+    initial_transport,
+    aposteriori_transport,
+};
+use crate::dataterm::{calculate_residual, calculate_residual2};
+
+/// Settings for [`pointsource_sliding_pdps_pair`].
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[serde(default)]
+pub struct SlidingPDPSConfig<F : Float> {
+    /// Primal step length scaling.
+    pub τ0 : F,
+    /// Primal step length scaling for the additional variable z.
+    pub σp0 : F,
+    /// Dual step length scaling.
+    pub σd0 : F,
+    /// Transport parameters
+    pub transport : TransportConfig<F>,
+    /// Generic parameters
+    pub insertion : FBGenericConfig<F>,
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> Default for SlidingPDPSConfig<F> {
+    fn default() -> Self {
+        let τ0 = 0.99;
+        SlidingPDPSConfig {
+            τ0,
+            σd0 : 0.1,
+            σp0 : 0.99,
+            transport : Default::default(),
+            insertion : Default::default()
+        }
+    }
+}
+
+type MeasureZ<F, Z, const N : usize> = Pair<RNDM<F, N>, Z>;
+
+/// Iteratively solve the pointsource localisation with an additional variable
+/// using sliding primal-dual proximal splitting
+///
+/// The parametrisation is as for [`crate::forward_pdps::pointsource_forward_pdps_pair`].
+#[replace_float_literals(F::cast_from(literal))]
+pub fn pointsource_sliding_pdps_pair<
+    'a, F, I, A, GA, 𝒟, BTA, BT𝒟, G𝒟, S, K, Reg, Z, R, Y, /*KOpM, */ KOpZ, H, const N : usize
+>(
+    opA : &'a A,
+    b : &A::Observable,
+    reg : Reg,
+    op𝒟 : &'a 𝒟,
+    config : &SlidingPDPSConfig<F>,
+    iterator : I,
+    mut plotter : SeqPlotter<F, N>,
+    //opKμ : KOpM,
+    opKz : &KOpZ,
+    fnR : &R,
+    fnH : &H,
+    mut z : Z,
+    mut y : Y,
+) -> MeasureZ<F, Z, N>
+where
+    F : Float + ToNalgebraRealField,
+    I : AlgIteratorFactory<IterInfo<F, N>>,
+    for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable> + Instance<A::Observable>,
+    for<'b> A::Preadjoint<'b> : LipschitzValues<FloatType=F>,
+    BTFN<F, GA, BTA, N> : DifferentiableRealMapping<F, N>,
+    GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
+    A : ForwardModel<
+            MeasureZ<F, Z, N>,
+            F,
+            PairNorm<Radon, L2, L2>,
+            PreadjointCodomain = Pair<BTFN<F, GA, BTA, N>, Z>,
+        >
+        + AdjointProductPairBoundedBy<MeasureZ<F, Z, N>, 𝒟, IdOp<Z>, FloatType=F>,
+    BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+    G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
+    𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>,
+                                        Codomain = BTFN<F, G𝒟, BT𝒟, N>>,
+    BT𝒟 : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+    S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>
+        + DifferentiableRealMapping<F, N>,
+    K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+        //+ Differentiable<Loc<F, N>, Derivative=Loc<F,N>>,
+    BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
+    Cube<F, N>: P2Minimise<Loc<F, N>, F>,
+    PlotLookup : Plotting<N>,
+    RNDM<F, N> : SpikeMerging<F>,
+    Reg : SlidingRegTerm<F, N>,
+    // KOpM : Linear<RNDM<F, N>, Codomain=Y>
+    //     + GEMV<F, RNDM<F, N>>
+    //     + Preadjointable<
+    //         RNDM<F, N>, Y,
+    //         PreadjointCodomain = BTFN<F, GA, BTA, N>,
+    //     >
+    //     + TransportLipschitz<L2Squared, FloatType=F>
+    //     + AdjointProductBoundedBy<RNDM<F, N>, 𝒟, FloatType=F>,
+    // for<'b> KOpM::Preadjoint<'b> : GEMV<F, Y>,
+    // Since Z is Hilbert, we may just as well use adjoints for K_z.
+    KOpZ : BoundedLinear<Z, L2, L2, F, Codomain=Y>
+        + GEMV<F, Z>
+        + Adjointable<Z, Y, AdjointCodomain = Z>,
+    for<'b> KOpZ::Adjoint<'b> : GEMV<F, Y>,
+    Y : AXPY<F> + Euclidean<F, Output=Y> + Clone + ClosedAdd,
+    for<'b> &'b Y : Instance<Y>,
+    Z : AXPY<F, Owned=Z> + Euclidean<F, Output=Z> + Clone + Norm<F, L2>,
+    for<'b> &'b Z : Instance<Z>,
+    R : Prox<Z, Codomain=F>,
+    H : Conjugable<Y, F, Codomain=F>,
+    for<'b> H::Conjugate<'b> : Prox<Y>,
+{
+
+    // Check parameters
+    assert!(config.τ0 > 0.0 &&
+            config.τ0 < 1.0 &&
+            config.σp0 > 0.0 &&
+            config.σp0 < 1.0 &&
+            config.σd0 > 0.0 &&
+            config.σp0 * config.σd0 <= 1.0,
+            "Invalid step length parameters");
+    config.transport.check();
+
+    // Initialise iterates
+    let mut μ = DiscreteMeasure::new();
+    let mut γ1 = DiscreteMeasure::new();
+    let mut residual = calculate_residual(Pair(&μ, &z), opA, b);
+    let zero_z = z.similar_origin();
+
+    // Set up parameters
+    let op𝒟norm = op𝒟.opnorm_bound(Radon, Linfinity);
+    // TODO: maybe this PairNorm doesn't make sense here?
+    let opAnorm = opA.opnorm_bound(PairNorm(Radon, L2, L2), L2);
+    let bigθ = 0.0; //opKμ.transport_lipschitz_factor(L2Squared);
+    let bigM = 0.0; //opKμ.adjoint_product_bound(&op𝒟).unwrap().sqrt();
+    let nKz = opKz.opnorm_bound(L2, L2);
+    let ℓ = 0.0;
+    let opIdZ = IdOp::new();
+    let (l, l_z) = opA.adjoint_product_pair_bound(&op𝒟, &opIdZ).unwrap();
+    // We need to satisfy
+    //
+    //     τσ_dM(1-σ_p L_z)/(1 - τ L) + [σ_p L_z + σ_pσ_d‖K_z‖^2] < 1
+    //                                  ^^^^^^^^^^^^^^^^^^^^^^^^^
+    // with 1 > σ_p L_z and 1 > τ L.
+    //
+    // To do so, we first solve σ_p and σ_d from standard PDPS step length condition
+    // ^^^^^ < 1. Then we solve τ from the rest.
+    let σ_d = config.σd0 / nKz;
+    let σ_p = config.σp0 / (l_z + config.σd0 * nKz);
+    // Observe that = 1 - ^^^^^^^^^^^^^^^^^^^^^ = 1 - σ_{p,0}
+    // We get the condition τσ_d M (1-σ_p L_z) < (1-σ_{p,0})*(1-τ L)
+    // ⟺ τ [ σ_d M (1-σ_p L_z) + (1-σ_{p,0}) L ] < (1-σ_{p,0})
+    let φ = 1.0 - config.σp0;
+    let a = 1.0 - σ_p * l_z;
+    let τ = config.τ0 * φ / ( σ_d * bigM * a + φ * l );
+    let ψ = 1.0 - τ * l;
+    let β = σ_p * config.σd0 * nKz / a; // σ_p * σ_d * (nKz * nK_z) / a;
+    assert!(β < 1.0);
+    // Now we need κ‖K_μ(π_♯^1 - π_♯^0)γ‖^2 ≤ (1/θ - τ[ℓ_v + ℓ]) ∫ c_2 dγ for κ defined as:
+    let κ = σ_d * ψ / ((1.0 - β) * ψ - τ * σ_d * bigM);
+    //  The factor two in the manuscript disappears due to the definition of 𝚹 being
+    // for ‖x-y‖₂² instead of c_2(x, y)=‖x-y‖₂²/2.
+    let calculate_θ = |ℓ_v, max_transport| {
+        config.transport.θ0 / (τ*(ℓ + ℓ_v) + κ * bigθ * max_transport)
+    };
+    let mut θ_or_adaptive = match opA.preadjoint().value_diff_unit_lipschitz_factor() {
+        // We only estimate w (the uniform Lipschitz factor of v), if we also estimate ℓ_v
+        // (the uniform Lipschitz factor of ∇v).
+        // We assume that the residual is decreasing.
+        Some(ℓ_v0) => TransportStepLength::AdaptiveMax{
+            l: ℓ_v0 * b.norm2(),
+            max_transport : 0.0,
+            g : calculate_θ
+        },
+        None => TransportStepLength::FullyAdaptive{
+            l : 0.0,
+            max_transport : 0.0,
+            g : calculate_θ
+        },
+    };
+    // Acceleration is not currently supported
+    // let γ = dataterm.factor_of_strong_convexity();
+    let ω = 1.0;
+
+    // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled
+    // by τ compared to the conditional gradient approach.
+    let tolerance = config.insertion.tolerance * τ * reg.tolerance_scaling();
+    let mut ε = tolerance.initial();
+
+    let starH = fnH.conjugate();
+
+    // Statistics
+    let full_stats = |residual : &A::Observable, μ : &RNDM<F, N>, z : &Z, ε, stats| IterInfo {
+        value : residual.norm2_squared_div2() + fnR.apply(z)
+                + reg.apply(μ) + fnH.apply(/* opKμ.apply(μ) + */ opKz.apply(z)),
+        n_spikes : μ.len(),
+        ε,
+        // postprocessing: config.insertion.postprocessing.then(|| μ.clone()),
+        .. stats
+    };
+    let mut stats = IterInfo::new();
+
+    // Run the algorithm
+    for state in iterator.iter_init(|| full_stats(&residual, &μ, &z, ε, stats.clone())) {
+        // Calculate initial transport
+        let Pair(v, _) = opA.preadjoint().apply(&residual);
+        //opKμ.preadjoint().apply_add(&mut v, y);
+        let z_base = z.clone();
+        // We want to proceed as in Example 4.12 but with v and v̆ as in §5.
+        // With A(ν, z) = A_μ ν + A_z z, following Example 5.1, we have
+        // P_ℳ[F'(ν, z) + Ξ(ν, z, y)]= A_ν^*[A_ν ν + A_z z] + K_μ ν = A_ν^*A(ν, z) + K_μ ν,
+        // where A_ν^* becomes a multiplier.
+        // This is much easier with K_μ = 0, which is the only reason why we are enforcing it.
+        // TODO: Write a version of initial_transport that can deal with K_μ ≠ 0.
+ 
+        let (μ_base_masses, mut μ_base_minus_γ0) = initial_transport(
+            &mut γ1, &mut μ, |ν| opA.apply(Pair(ν, &z)),
+            ε, τ, &mut θ_or_adaptive, opAnorm,
+            v, &config.transport,
+        );
+
+        // Solve finite-dimensional subproblem several times until the dual variable for the
+        // regularisation term conforms to the assumptions made for the transport above.
+        let (d, _within_tolerances, Pair(τv̆, τz̆)) = 'adapt_transport: loop {
+            // Calculate τv̆ = τA_*(A[μ_transported + μ_transported_base]-b)
+            let residual_μ̆ = calculate_residual2(Pair(&γ1, &z),
+                                                 Pair(&μ_base_minus_γ0, &zero_z),
+                                                 opA, b);
+            let Pair(τv̆, τz) = opA.preadjoint().apply(residual_μ̆ * τ);
+            // opKμ.preadjoint().gemv(&mut τv̆, τ, y, 1.0);
+
+            // Construct μ^{k+1} by solving finite-dimensional subproblems and insert new spikes.
+            let (d, within_tolerances) = insert_and_reweigh(
+                &mut μ, &τv̆, &γ1, Some(&μ_base_minus_γ0),
+                op𝒟, op𝒟norm,
+                τ, ε, &config.insertion,
+                &reg, &state, &mut stats,
+            );
+
+            // A posteriori transport adaptation.
+            // TODO: this does not properly treat v^{k+1} - v̆^k that depends on z^{k+1}!
+            if aposteriori_transport(
+                &mut γ1, &mut μ, &mut μ_base_minus_γ0, &μ_base_masses,
+                ε, &config.transport
+            ) {
+                break 'adapt_transport (d, within_tolerances, Pair(τv̆, τz))
+            }
+        };
+
+        stats.untransported_fraction = Some({
+            assert_eq!(μ_base_masses.len(), γ1.len());
+            let (a, b) = stats.untransported_fraction.unwrap_or((0.0, 0.0));
+            let source = μ_base_masses.iter().map(|v| v.abs()).sum();
+            (a + μ_base_minus_γ0.norm(Radon), b + source)
+        });
+        stats.transport_error = Some({
+            assert_eq!(μ_base_masses.len(), γ1.len());
+            let (a, b) = stats.transport_error.unwrap_or((0.0, 0.0));
+            (a + μ.dist_matching(&γ1), b + γ1.norm(Radon))
+        });
+
+        // // Merge spikes.
+        // // This expects the prune below to prune γ.
+        // // TODO: This may not work correctly in all cases.
+        // let ins = &config.insertion;
+        // if ins.merge_now(&state) {
+        //     if let SpikeMergingMethod::None = ins.merging {
+        //     } else {
+        //         stats.merged += μ.merge_spikes(ins.merging, |μ_candidate| {
+        //             let ν = μ_candidate.sub_matching(&γ1)-&μ_base_minus_γ0;
+        //             let mut d = &τv̆ + op𝒟.preapply(ν);
+        //             reg.verify_merge_candidate(&mut d, μ_candidate, τ, ε, ins)
+        //         });
+        //     }
+        // }
+
+        // Prune spikes with zero weight. To maintain correct ordering between μ and γ1, also the
+        // latter needs to be pruned when μ is.
+        // TODO: This could do with a two-vector Vec::retain to avoid copies.
+        let μ_new = DiscreteMeasure::from_iter(μ.iter_spikes().filter(|δ| δ.α != F::ZERO).cloned());
+        if μ_new.len() != μ.len() {
+            let mut μ_iter = μ.iter_spikes();
+            γ1.prune_by(|_| μ_iter.next().unwrap().α != F::ZERO);
+            stats.pruned += μ.len() - μ_new.len();
+            μ = μ_new;
+        }
+
+        // Do z variable primal update
+        z.axpy(-σ_p/τ, τz̆, 1.0); // TODO: simplify nasty factors
+        opKz.adjoint().gemv(&mut z, -σ_p, &y, 1.0);
+        z = fnR.prox(σ_p, z);
+        // Do dual update
+        // opKμ.gemv(&mut y, σ_d*(1.0 + ω), &μ, 1.0);    // y = y + σ_d K[(1+ω)(μ,z)^{k+1}]
+        opKz.gemv(&mut y, σ_d*(1.0 + ω), &z, 1.0);
+        // opKμ.gemv(&mut y, -σ_d*ω, μ_base, 1.0);// y = y + σ_d K[(1+ω)(μ,z)^{k+1} - ω (μ,z)^k]-b
+        opKz.gemv(&mut y, -σ_d*ω, z_base, 1.0);// y = y + σ_d K[(1+ω)(μ,z)^{k+1} - ω (μ,z)^k]-b
+        y = starH.prox(σ_d, y);
+
+        // Update residual
+        residual = calculate_residual(Pair(&μ, &z), opA, b);
+
+        // Update step length parameters
+        // let ω = pdpsconfig.acceleration.accelerate(&mut τ, &mut σ, γ);
+
+        // Give statistics if requested
+        let iter = state.iteration();
+        stats.this_iters += 1;
+
+        state.if_verbose(|| {
+            plotter.plot_spikes(iter, Some(&d), Some(&τv̆), &μ);
+            full_stats(&residual, &μ, &z, ε, std::mem::replace(&mut stats, IterInfo::new()))
+        });
+
+        // Update main tolerance for next iteration
+        ε = tolerance.update(ε, iter);
+    }
+
+    let fit = |μ̃ : &RNDM<F, N>| {
+        (opA.apply(Pair(μ̃, &z))-b).norm2_squared_div2()
+        //+ fnR.apply(z) + reg.apply(μ)
+        + fnH.apply(/* opKμ.apply(&μ̃) + */ opKz.apply(&z))
+    };
+
+    μ.merge_spikes_fitness(config.insertion.merging, fit, |&v| v);
+    μ.prune();
+    Pair(μ, z)
+}
--- a/src/types.rs	Thu Aug 29 00:00:00 2024 -0500
+++ b/src/types.rs	Tue Dec 31 09:25:45 2024 -0500
@@ -4,7 +4,6 @@
 
 use colored::ColoredString;
 use serde::{Serialize, Deserialize};
-use clap::ValueEnum;
 use alg_tools::iterate::LogRepr;
 use alg_tools::euclidean::Euclidean;
 use alg_tools::norms::{Norm, L1};
@@ -13,7 +12,7 @@
 pub use alg_tools::loc::Loc;
 pub use alg_tools::sets::Cube;
 
-use crate::measures::DiscreteMeasure;
+// use crate::measures::DiscreteMeasure;
 
 /// [`Float`] with extra display and string conversion traits such that [`clap`] doesn't choke up.
 pub trait ClapFloat : Float
@@ -31,6 +30,8 @@
     pub n_spikes : usize,
     /// Number of iterations this statistic covers
     pub this_iters : usize,
+    /// Number of spikes inserted since last IterInfo statistic
+    pub inserted : usize,
     /// Number of spikes removed by merging since last IterInfo statistic
     pub merged : usize,
     /// Number of spikes removed by pruning since last IterInfo statistic
@@ -43,8 +44,8 @@
     pub transport_error : Option<(F, F)>,
     /// Current tolerance
     pub ε : F,
-    /// Solve fin.dim problem for this measure to get the optimal `value`.
-    pub postprocessing : Option<DiscreteMeasure<Loc<F, N>, F>>,
+    // /// Solve fin.dim problem for this measure to get the optimal `value`.
+    // pub postprocessing : Option<RNDM<F, N>>,
 }
 
 impl<F : Float, const N : usize>  IterInfo<F, N> {
@@ -55,10 +56,11 @@
             n_spikes : 0,
             this_iters : 0,
             merged : 0,
+            inserted : 0,
             pruned : 0,
             inner_iters : 0,
             ε : F::NAN,
-            postprocessing : None,
+            // postprocessing : None,
             untransported_fraction : None,
             transport_error : None,
         }
@@ -68,13 +70,14 @@
 #[replace_float_literals(F::cast_from(literal))]
 impl<F, const N : usize> LogRepr for IterInfo<F, N> where F : LogRepr + Float {
     fn logrepr(&self) -> ColoredString {
-        format!("{}\t| N = {}, ε = {:.8}, inner_iters_mean = {}, merged+pruned_mean = {}+{}{}{}",
+        format!("{}\t| N = {}, ε = {:.8}, 𝔼inner_it = {}, 𝔼ins/mer/pru = {}/{}/{}{}{}",
                 self.value.logrepr(),
                 self.n_spikes,
                 self.ε,
-                self.inner_iters as float / self.this_iters as float,
-                self.merged as float / self.this_iters as float,
-                self.pruned as float / self.this_iters as float,
+                self.inner_iters as float / self.this_iters.max(1) as float,
+                self.inserted as float / self.this_iters.max(1) as float,
+                self.merged as float / self.this_iters.max(1) as float,
+                self.pruned as float / self.this_iters.max(1) as float,
                 match self.untransported_fraction {
                     None => format!(""),
                     Some((a, b)) => if b > 0.0 {
@@ -117,7 +120,7 @@
 }
 
 /// Data term type
-#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug, ValueEnum)]
+#[derive(Clone, Copy, PartialEq, Serialize, Deserialize, Debug)]
 pub enum DataTerm {
     /// $\\|z\\|\_2^2/2$
     L2Squared,
@@ -140,10 +143,18 @@
 pub struct L2Squared;
 
-/// Trait for indicating that `Self` is Lipschitz with respect to the (semi)norm `D`.
+/// Trait for indicating that `Self` is Lipschitz with respect to the (semi)norm `M`.
-pub trait Lipschitz<D> {
+pub trait Lipschitz<M> {
     /// The type of floats
     type FloatType : Float;
 
-    /// Returns the Lipschitz factor of `self` with respect to the (semi)norm `D`.
+    /// Returns the Lipschitz factor of `self` with respect to the (semi)norm `M`.
-    fn lipschitz_factor(&self, seminorm : D) -> Option<Self::FloatType>;
+    fn lipschitz_factor(&self, seminorm : M) -> Option<Self::FloatType>;
 }
+
+/// Trait for norm-bounded functions.
+pub trait NormBounded<M> {
+    type FloatType : Float;
+
+    /// Returns a bound on the values of this function object in the `M`-norm.
+    fn norm_bound(&self, m : M) -> Self::FloatType;
+}

mercurial