Merge dev to default

author      Tuomo Valkonen <tuomov@iki.fi>
date        Mon, 17 Feb 2025 13:54:53 -0500
changeset   52:f0e8704d3f0e
parent      31:6105b5cd8d89 (current diff)
parent      51:0693cc9ba9f0 (diff)
child       53:92cae2e8f598

Merge dev to default

--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/.cargo/config.toml	Mon Feb 17 13:54:53 2025 -0500
@@ -0,0 +1,2 @@
+[target.'cfg(all(target_os = "macos"))']
+rustflags = ["-L", "/opt/homebrew/include"]
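A note on the new config file: Cargo applies target-specific `rustflags` like these to every compilation for the matching target, so on macOS the committed `.cargo/config.toml` has roughly the same effect as exporting the flag by hand before building, as in this sketch (assuming GSL installed via Homebrew under `/opt/homebrew`):
```console
RUSTFLAGS="-L /opt/homebrew/include" cargo build --release
```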
--- a/Cargo.lock	Tue Aug 01 10:25:09 2023 +0300
+++ b/Cargo.lock	Mon Feb 17 13:54:53 2025 -0500
@@ -1,12 +1,12 @@
 # This file is automatically @generated by Cargo.
 # It is not intended for manual editing.
-version = 3
+version = 4
 
 [[package]]
 name = "GSL"
-version = "6.0.0"
+version = "7.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c9becaf6d7d1ba36a457288e661fa6a0472e8328629276f45369eafcd48ef1ce"
+checksum = "db3943d5a15b5c46e991124abee6a1bc89c7c9ffb25dbb8aeb4eab926fd9b307"
 dependencies = [
  "GSL-sys",
  "paste",
@@ -23,24 +23,19 @@
 ]
 
 [[package]]
-name = "adler"
-version = "1.0.2"
+name = "aho-corasick"
+version = "1.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
-
-[[package]]
-name = "aho-corasick"
-version = "0.7.20"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
 dependencies = [
  "memchr",
 ]
 
 [[package]]
 name = "alg_tools"
-version = "0.1.0"
+version = "0.3.0"
 dependencies = [
+ "anyhow",
  "colored",
  "cpu-time",
  "csv",
@@ -52,10 +47,16 @@
  "rayon",
  "serde",
  "serde_json",
- "trait-set",
+ "simba",
 ]
 
 [[package]]
+name = "android-tzdata"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
+
+[[package]]
 name = "android_system_properties"
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -65,6 +66,61 @@
 ]
 
 [[package]]
+name = "anstream"
+version = "0.6.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b"
+dependencies = [
+ "anstyle",
+ "anstyle-parse",
+ "anstyle-query",
+ "anstyle-wincon",
+ "colorchoice",
+ "is_terminal_polyfill",
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle"
+version = "1.0.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
+
+[[package]]
+name = "anstyle-parse"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9"
+dependencies = [
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle-query"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c"
+dependencies = [
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "anstyle-wincon"
+version = "3.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125"
+dependencies = [
+ "anstyle",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.95"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04"
+
+[[package]]
 name = "approx"
 version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -74,69 +130,43 @@
 ]
 
 [[package]]
-name = "atty"
-version = "0.2.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
-dependencies = [
- "hermit-abi 0.1.19",
- "libc",
- "winapi",
-]
-
-[[package]]
 name = "autocfg"
 version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
 
 [[package]]
-name = "bit_field"
-version = "0.10.1"
+name = "base64"
+version = "0.22.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcb6dd1c2376d2e096796e234a70e17e94cc2d5d54ff8ce42b28cef1d0d359a4"
+checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
 
 [[package]]
 name = "bitflags"
-version = "1.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
-
-[[package]]
-name = "bstr"
-version = "0.2.17"
+version = "2.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223"
-dependencies = [
- "lazy_static",
- "memchr",
- "regex-automata",
- "serde",
-]
+checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635"
 
 [[package]]
 name = "bumpalo"
-version = "3.11.1"
+version = "3.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba"
+checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
 
 [[package]]
 name = "bytemuck"
-version = "1.12.3"
+version = "1.20.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aaa3a8d9a1ca92e282c96a32d6511b695d7d994d1d102ba85d279f9b2756947f"
-
-[[package]]
-name = "byteorder"
-version = "1.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
+checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a"
 
 [[package]]
 name = "cc"
-version = "1.0.77"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e9f73505338f7d905b19d18738976aae232eb46b8efc15554ffc56deb5d9ebe4"
+checksum = "1aeb932158bd710538c73702db6945cb68a8fb08c519e6e12706b94263b36db8"
+dependencies = [
+ "shlex",
+]
 
 [[package]]
 name = "cfg-if"
@@ -146,33 +176,39 @@
 
 [[package]]
 name = "chrono"
-version = "0.4.23"
+version = "0.4.39"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f"
+checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825"
 dependencies = [
+ "android-tzdata",
  "iana-time-zone",
  "js-sys",
- "num-integer",
  "num-traits",
  "serde",
- "time",
  "wasm-bindgen",
- "winapi",
+ "windows-targets 0.52.6",
 ]
 
 [[package]]
 name = "clap"
-version = "4.0.27"
+version = "4.5.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0acbd8d28a0a60d7108d7ae850af6ba34cf2d1257fc646980e5f97ce14275966"
+checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84"
 dependencies = [
- "bitflags",
+ "clap_builder",
  "clap_derive",
+]
+
+[[package]]
+name = "clap_builder"
+version = "4.5.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838"
+dependencies = [
+ "anstream",
+ "anstyle",
  "clap_lex",
- "is-terminal",
- "once_cell",
  "strsim",
- "termcolor",
  "terminal_size",
  "unicase",
  "unicode-width",
@@ -180,67 +216,43 @@
 
 [[package]]
 name = "clap_derive"
-version = "4.0.21"
+version = "4.5.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014"
+checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab"
 dependencies = [
  "heck",
- "proc-macro-error",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.93",
 ]
 
 [[package]]
 name = "clap_lex"
-version = "0.3.0"
+version = "0.7.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8"
-dependencies = [
- "os_str_bytes",
-]
+checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
 
 [[package]]
-name = "codespan-reporting"
-version = "0.11.1"
+name = "colorchoice"
+version = "1.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e"
-dependencies = [
- "termcolor",
- "unicode-width",
-]
-
-[[package]]
-name = "color_quant"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b"
-
-[[package]]
-name = "colorbrewer"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "525be5012d97bc222e124ded87f18601e6fbd24a406761bcb1664475663919a6"
-dependencies = [
- "rgb",
-]
+checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990"
 
 [[package]]
 name = "colored"
-version = "2.0.0"
+version = "2.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3616f750b84d8f0de8a58bda93e08e2a81ad3f523089b05f1dffecab48c6cbd"
+checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8"
 dependencies = [
- "atty",
  "lazy_static",
- "winapi",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
 name = "core-foundation-sys"
-version = "0.8.3"
+version = "0.8.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc"
+checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
 
 [[package]]
 name = "cpu-time"
@@ -253,179 +265,116 @@
 ]
 
 [[package]]
-name = "crc32fast"
-version = "1.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
-dependencies = [
- "cfg-if",
-]
-
-[[package]]
-name = "crossbeam-channel"
-version = "0.5.6"
+name = "crossbeam-deque"
+version = "0.8.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521"
+checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
 dependencies = [
- "cfg-if",
- "crossbeam-utils",
-]
-
-[[package]]
-name = "crossbeam-deque"
-version = "0.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc"
-dependencies = [
- "cfg-if",
  "crossbeam-epoch",
  "crossbeam-utils",
 ]
 
 [[package]]
 name = "crossbeam-epoch"
-version = "0.9.13"
+version = "0.9.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a"
+checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
 dependencies = [
- "autocfg",
- "cfg-if",
  "crossbeam-utils",
- "memoffset",
- "scopeguard",
 ]
 
 [[package]]
 name = "crossbeam-utils"
-version = "0.8.14"
+version = "0.8.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f"
-dependencies = [
- "cfg-if",
-]
-
-[[package]]
-name = "crunchy"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
+checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
 
 [[package]]
 name = "csv"
-version = "1.1.6"
+version = "1.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1"
+checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf"
 dependencies = [
- "bstr",
  "csv-core",
- "itoa 0.4.8",
+ "itoa",
  "ryu",
  "serde",
 ]
 
 [[package]]
 name = "csv-core"
-version = "0.1.10"
+version = "0.1.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90"
+checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70"
 dependencies = [
  "memchr",
 ]
 
 [[package]]
-name = "cxx"
-version = "1.0.82"
+name = "darling"
+version = "0.20.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4a41a86530d0fe7f5d9ea779916b7cadd2d4f9add748b99c2c029cbbdfaf453"
+checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989"
 dependencies = [
- "cc",
- "cxxbridge-flags",
- "cxxbridge-macro",
- "link-cplusplus",
+ "darling_core",
+ "darling_macro",
 ]
 
 [[package]]
-name = "cxx-build"
-version = "1.0.82"
+name = "darling_core"
+version = "0.20.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06416d667ff3e3ad2df1cd8cd8afae5da26cf9cec4d0825040f88b5ca659a2f0"
+checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5"
 dependencies = [
- "cc",
- "codespan-reporting",
- "once_cell",
+ "fnv",
+ "ident_case",
  "proc-macro2",
  "quote",
- "scratch",
- "syn",
+ "strsim",
+ "syn 2.0.93",
 ]
 
 [[package]]
-name = "cxxbridge-flags"
-version = "1.0.82"
+name = "darling_macro"
+version = "0.20.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "820a9a2af1669deeef27cb271f476ffd196a2c4b6731336011e0ba63e2c7cf71"
+checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806"
+dependencies = [
+ "darling_core",
+ "quote",
+ "syn 2.0.93",
+]
 
 [[package]]
-name = "cxxbridge-macro"
-version = "1.0.82"
+name = "deranged"
+version = "0.3.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a08a6e2fcc370a089ad3b4aaf54db3b1b4cee38ddabce5896b33eb693275f470"
+checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4"
 dependencies = [
- "proc-macro2",
- "quote",
- "syn",
+ "powerfmt",
+ "serde",
 ]
 
 [[package]]
 name = "either"
-version = "1.8.0"
+version = "1.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
+checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
+
+[[package]]
+name = "equivalent"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
 
 [[package]]
 name = "errno"
-version = "0.2.8"
+version = "0.3.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1"
+checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860"
 dependencies = [
- "errno-dragonfly",
- "libc",
- "winapi",
-]
-
-[[package]]
-name = "errno-dragonfly"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
-dependencies = [
- "cc",
  "libc",
-]
-
-[[package]]
-name = "exr"
-version = "1.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8eb5f255b5980bb0c8cf676b675d1a99be40f316881444f44e0462eaf5df5ded"
-dependencies = [
- "bit_field",
- "flume",
- "half",
- "lebe",
- "miniz_oxide",
- "smallvec",
- "threadpool",
-]
-
-[[package]]
-name = "flate2"
-version = "1.0.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841"
-dependencies = [
- "crc32fast",
- "miniz_oxide",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -438,193 +387,125 @@
 ]
 
 [[package]]
-name = "flume"
-version = "0.10.14"
+name = "fnv"
+version = "1.0.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577"
-dependencies = [
- "futures-core",
- "futures-sink",
- "nanorand",
- "pin-project",
- "spin",
-]
-
-[[package]]
-name = "futures-core"
-version = "0.3.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac"
-
-[[package]]
-name = "futures-sink"
-version = "0.3.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9"
+checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
 
 [[package]]
 name = "getrandom"
-version = "0.2.8"
+version = "0.2.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
+checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
 dependencies = [
  "cfg-if",
- "js-sys",
  "libc",
- "wasi 0.11.0+wasi-snapshot-preview1",
- "wasm-bindgen",
+ "wasi",
 ]
 
 [[package]]
-name = "gif"
-version = "0.11.4"
+name = "hashbrown"
+version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3edd93c6756b4dfaf2709eafcc345ba2636565295c198a9cfbf75fa5e3e00b06"
-dependencies = [
- "color_quant",
- "weezl",
-]
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
 
 [[package]]
-name = "half"
-version = "2.1.0"
+name = "hashbrown"
+version = "0.15.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ad6a9459c9c30b177b925162351f97e7d967c7ea8bab3b8352805327daf45554"
-dependencies = [
- "crunchy",
-]
+checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
 
 [[package]]
 name = "heck"
-version = "0.4.0"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"
+checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
 
 [[package]]
-name = "hermit-abi"
-version = "0.1.19"
+name = "hex"
+version = "0.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "hermit-abi"
-version = "0.2.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
-dependencies = [
- "libc",
-]
+checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
 
 [[package]]
 name = "iana-time-zone"
-version = "0.1.53"
+version = "0.1.61"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765"
+checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220"
 dependencies = [
  "android_system_properties",
  "core-foundation-sys",
  "iana-time-zone-haiku",
  "js-sys",
  "wasm-bindgen",
- "winapi",
+ "windows-core",
 ]
 
 [[package]]
 name = "iana-time-zone-haiku"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca"
-dependencies = [
- "cxx",
- "cxx-build",
-]
-
-[[package]]
-name = "image"
-version = "0.24.5"
+version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69b7ea949b537b0fd0af141fff8c77690f2ce96f4f41f042ccb6c69c6c965945"
+checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
 dependencies = [
- "bytemuck",
- "byteorder",
- "color_quant",
- "exr",
- "gif",
- "jpeg-decoder",
- "num-rational",
- "num-traits",
- "png",
- "scoped_threadpool",
- "tiff",
+ "cc",
 ]
 
 [[package]]
-name = "io-lifetimes"
-version = "0.7.5"
+name = "ident_case"
+version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "59ce5ef949d49ee85593fc4d3f3f95ad61657076395cbbce23e2121fc5542074"
+checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
 
 [[package]]
-name = "io-lifetimes"
-version = "1.0.2"
+name = "indexmap"
+version = "1.9.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e394faa0efb47f9f227f1cd89978f854542b318a6f64fa695489c9c993056656"
+checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
 dependencies = [
- "libc",
- "windows-sys",
+ "autocfg",
+ "hashbrown 0.12.3",
+ "serde",
 ]
 
 [[package]]
-name = "is-terminal"
-version = "0.4.0"
+name = "indexmap"
+version = "2.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aae5bc6e2eb41c9def29a3e0f1306382807764b9b53112030eff57435667352d"
+checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f"
 dependencies = [
- "hermit-abi 0.2.6",
- "io-lifetimes 1.0.2",
- "rustix 0.36.3",
- "windows-sys",
+ "equivalent",
+ "hashbrown 0.15.2",
+ "serde",
 ]
 
 [[package]]
+name = "is_terminal_polyfill"
+version = "1.70.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
+
+[[package]]
 name = "itertools"
-version = "0.10.5"
+version = "0.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
+checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
 dependencies = [
  "either",
 ]
 
 [[package]]
 name = "itoa"
-version = "0.4.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
-
-[[package]]
-name = "itoa"
-version = "1.0.4"
+version = "1.0.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc"
-
-[[package]]
-name = "jpeg-decoder"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc0000e42512c92e31c2252315bda326620a4e034105e900c98ec492fa077b3e"
-dependencies = [
- "rayon",
-]
+checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
 
 [[package]]
 name = "js-sys"
-version = "0.3.60"
+version = "0.3.76"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47"
+checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7"
 dependencies = [
+ "once_cell",
  "wasm-bindgen",
 ]
 
@@ -635,101 +516,50 @@
 checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
 
 [[package]]
-name = "lebe"
-version = "0.5.2"
+name = "libc"
+version = "0.2.149"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8"
-
-[[package]]
-name = "libc"
-version = "0.2.137"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89"
+checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b"
 
 [[package]]
 name = "libm"
-version = "0.2.6"
+version = "0.2.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb"
-
-[[package]]
-name = "link-cplusplus"
-version = "1.0.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369"
-dependencies = [
- "cc",
-]
+checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"
 
 [[package]]
 name = "linux-raw-sys"
-version = "0.0.46"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d"
-
-[[package]]
-name = "linux-raw-sys"
-version = "0.1.3"
+version = "0.4.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f9f08d8963a6c613f4b1a78f4f4a4dbfadf8e6545b2d72861731e4858b8b47f"
-
-[[package]]
-name = "lock_api"
-version = "0.4.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
-dependencies = [
- "autocfg",
- "scopeguard",
-]
+checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f"
 
 [[package]]
 name = "log"
-version = "0.4.17"
+version = "0.4.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
-dependencies = [
- "cfg-if",
-]
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
 
 [[package]]
 name = "matrixmultiply"
-version = "0.3.2"
+version = "0.3.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "add85d4dd35074e6fedc608f8c8f513a3548619a9024b751949ef0e8e45a4d84"
+checksum = "7574c1cf36da4798ab73da5b215bbf444f50718207754cb522201d78d1cd0ff2"
 dependencies = [
+ "autocfg",
  "rawpointer",
 ]
 
 [[package]]
 name = "memchr"
-version = "2.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
-
-[[package]]
-name = "memoffset"
-version = "0.7.1"
+version = "2.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4"
-dependencies = [
- "autocfg",
-]
-
-[[package]]
-name = "miniz_oxide"
-version = "0.6.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa"
-dependencies = [
- "adler",
-]
+checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167"
 
 [[package]]
 name = "nalgebra"
-version = "0.31.4"
+version = "0.33.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "20bd243ab3dbb395b39ee730402d2e5405e448c75133ec49cc977762c4cba3d1"
+checksum = "26aecdf64b707efd1310e3544d709c5c0ac61c13756046aaaba41be5c4f66a3b"
 dependencies = [
  "approx",
  "matrixmultiply",
@@ -744,29 +574,20 @@
 
 [[package]]
 name = "nalgebra-macros"
-version = "0.1.0"
+version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01fcc0b8149b4632adc89ac3b7b31a12fb6099a0317a4eb2ebff574ef7de7218"
+checksum = "254a5372af8fc138e36684761d3c0cdb758a4410e938babcff1c860ce14ddbfc"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
-]
-
-[[package]]
-name = "nanorand"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3"
-dependencies = [
- "getrandom",
+ "syn 2.0.93",
 ]
 
 [[package]]
 name = "num"
-version = "0.4.0"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606"
+checksum = "3135b08af27d103b0a51f2ae0f8632117b7b185ccf931445affa8df530576a41"
 dependencies = [
  "num-bigint",
  "num-complex",
@@ -778,39 +599,43 @@
 
 [[package]]
 name = "num-bigint"
-version = "0.4.3"
+version = "0.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f"
+checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9"
 dependencies = [
- "autocfg",
  "num-integer",
  "num-traits",
 ]
 
 [[package]]
 name = "num-complex"
-version = "0.4.2"
+version = "0.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19"
+checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495"
 dependencies = [
  "num-traits",
 ]
 
 [[package]]
-name = "num-integer"
-version = "0.1.45"
+name = "num-conv"
+version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
+checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
+
+[[package]]
+name = "num-integer"
+version = "0.1.46"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f"
 dependencies = [
- "autocfg",
  "num-traits",
 ]
 
 [[package]]
 name = "num-iter"
-version = "0.1.43"
+version = "0.1.45"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252"
+checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf"
 dependencies = [
  "autocfg",
  "num-integer",
@@ -831,124 +656,71 @@
 
 [[package]]
 name = "num-traits"
-version = "0.2.15"
+version = "0.2.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
 dependencies = [
  "autocfg",
  "libm",
 ]
 
 [[package]]
-name = "num_cpus"
-version = "1.14.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5"
-dependencies = [
- "hermit-abi 0.1.19",
- "libc",
-]
-
-[[package]]
 name = "numeric_literals"
 version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "095aa67b0b9f2081746998f4f17106bdb51d56dc8c211afca5531b92b83bf98a"
 dependencies = [
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
 [[package]]
 name = "once_cell"
-version = "1.16.0"
+version = "1.20.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860"
-
-[[package]]
-name = "os_str_bytes"
-version = "6.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee"
+checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"
 
 [[package]]
 name = "paste"
-version = "1.0.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1"
-
-[[package]]
-name = "pin-project"
-version = "1.0.12"
+version = "1.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc"
-dependencies = [
- "pin-project-internal",
-]
-
-[[package]]
-name = "pin-project-internal"
-version = "1.0.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
+checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c"
 
 [[package]]
 name = "pkg-config"
-version = "0.3.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160"
-
-[[package]]
-name = "png"
-version = "0.17.7"
+version = "0.3.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d708eaf860a19b19ce538740d2b4bdeeb8337fa53f7738455e706623ad5c638"
-dependencies = [
- "bitflags",
- "crc32fast",
- "flate2",
- "miniz_oxide",
-]
+checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964"
 
 [[package]]
 name = "pointsource_algs"
-version = "1.0.1"
+version = "2.0.0-pre"
 dependencies = [
  "GSL",
  "alg_tools",
+ "anyhow",
  "chrono",
  "clap",
- "colorbrewer",
  "colored",
  "cpu-time",
  "float_extras",
- "image",
  "itertools",
  "nalgebra",
  "num-traits",
  "numeric_literals",
- "poloto",
  "rand",
  "rand_distr",
  "regex",
- "rgb",
  "serde",
  "serde_json",
+ "serde_with",
 ]
 
 [[package]]
-name = "poloto"
-version = "3.13.1"
+name = "powerfmt"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2541c28c0622b297e342444bd8b1d87b02c8478dd3ed0ecc3eee47dc4d13282"
-dependencies = [
- "tagger",
-]
+checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
 
 [[package]]
 name = "ppv-lite86"
@@ -957,43 +729,19 @@
 checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
 
 [[package]]
-name = "proc-macro-error"
-version = "1.0.4"
+name = "proc-macro2"
+version = "1.0.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
-dependencies = [
- "proc-macro-error-attr",
- "proc-macro2",
- "quote",
- "syn",
- "version_check",
-]
-
-[[package]]
-name = "proc-macro-error-attr"
-version = "1.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
-dependencies = [
- "proc-macro2",
- "quote",
- "version_check",
-]
-
-[[package]]
-name = "proc-macro2"
-version = "1.0.47"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725"
+checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0"
 dependencies = [
  "unicode-ident",
 ]
 
 [[package]]
 name = "quote"
-version = "1.0.21"
+version = "1.0.38"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
+checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc"
 dependencies = [
  "proc-macro2",
 ]
@@ -1046,32 +794,41 @@
 
 [[package]]
 name = "rayon"
-version = "1.6.0"
+version = "1.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e060280438193c554f654141c9ea9417886713b7acd75974c85b18a69a88e0b"
+checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
 dependencies = [
- "crossbeam-deque",
  "either",
  "rayon-core",
 ]
 
 [[package]]
 name = "rayon-core"
-version = "1.10.1"
+version = "1.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3"
+checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
 dependencies = [
- "crossbeam-channel",
  "crossbeam-deque",
  "crossbeam-utils",
- "num_cpus",
 ]
 
 [[package]]
 name = "regex"
-version = "1.7.0"
+version = "1.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
+checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -1079,123 +836,111 @@
 ]
 
 [[package]]
-name = "regex-automata"
-version = "0.1.10"
+name = "regex-syntax"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
+checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
 
 [[package]]
-name = "regex-syntax"
-version = "0.6.28"
+name = "rustix"
+version = "0.38.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848"
+checksum = "745ecfa778e66b2b63c88a61cb36e0eea109e803b0b86bf9879fbc77c70e86ed"
+dependencies = [
+ "bitflags",
+ "errno",
+ "libc",
+ "linux-raw-sys",
+ "windows-sys 0.48.0",
+]
 
 [[package]]
-name = "rgb"
-version = "0.8.34"
+name = "ryu"
+version = "1.0.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3603b7d71ca82644f79b5a06d1220e9a58ede60bd32255f698cb1af8838b8db3"
+checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
+
+[[package]]
+name = "safe_arch"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f398075ce1e6a179b46f51bd88d0598b92b00d3551f1a2d4ac49e771b56ac354"
 dependencies = [
  "bytemuck",
 ]
 
 [[package]]
-name = "rustix"
-version = "0.35.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "727a1a6d65f786ec22df8a81ca3121107f235970dc1705ed681d3e6e8b9cd5f9"
-dependencies = [
- "bitflags",
- "errno",
- "io-lifetimes 0.7.5",
- "libc",
- "linux-raw-sys 0.0.46",
- "windows-sys",
-]
-
-[[package]]
-name = "rustix"
-version = "0.36.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b1fbb4dfc4eb1d390c02df47760bb19a84bb80b301ecc947ab5406394d8223e"
-dependencies = [
- "bitflags",
- "errno",
- "io-lifetimes 1.0.2",
- "libc",
- "linux-raw-sys 0.1.3",
- "windows-sys",
-]
-
-[[package]]
-name = "ryu"
-version = "1.0.11"
+name = "serde"
+version = "1.0.189"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
-
-[[package]]
-name = "safe_arch"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "794821e4ccb0d9f979512f9c1973480123f9bd62a90d74ab0f9426fcf8f4a529"
-dependencies = [
- "bytemuck",
-]
-
-[[package]]
-name = "scoped_threadpool"
-version = "0.1.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1d51f5df5af43ab3f1360b429fa5e0152ac5ce8c0bd6485cae490332e96846a8"
-
-[[package]]
-name = "scopeguard"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
-
-[[package]]
-name = "scratch"
-version = "1.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898"
-
-[[package]]
-name = "serde"
-version = "1.0.148"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc"
+checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.148"
+version = "1.0.189"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c"
+checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.93",
 ]
 
 [[package]]
 name = "serde_json"
-version = "1.0.89"
+version = "1.0.107"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db"
+checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65"
 dependencies = [
- "itoa 1.0.4",
+ "itoa",
  "ryu",
  "serde",
 ]
 
 [[package]]
+name = "serde_with"
+version = "3.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa"
+dependencies = [
+ "base64",
+ "chrono",
+ "hex",
+ "indexmap 1.9.3",
+ "indexmap 2.7.0",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "serde_with_macros",
+ "time",
+]
+
+[[package]]
+name = "serde_with_macros"
+version = "3.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e"
+dependencies = [
+ "darling",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.93",
+]
+
+[[package]]
+name = "shlex"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
+
+[[package]]
 name = "simba"
-version = "0.7.3"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f3fd720c48c53cace224ae62bef1bbff363a70c68c4802a78b5cc6159618176"
+checksum = "b3a386a501cd104797982c15ae17aafe8b9261315b5d07e3ec803f2ea26be0fa"
 dependencies = [
  "approx",
  "num-complex",
@@ -1205,31 +950,27 @@
 ]
 
 [[package]]
-name = "smallvec"
-version = "1.10.0"
+name = "strsim"
+version = "0.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
+checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
 
 [[package]]
-name = "spin"
-version = "0.9.4"
+name = "syn"
+version = "1.0.109"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
 dependencies = [
- "lock_api",
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
 ]
 
 [[package]]
-name = "strsim"
-version = "0.10.0"
+name = "syn"
+version = "2.0.93"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
-
-[[package]]
-name = "syn"
-version = "1.0.104"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce"
+checksum = "9c786062daee0d6db1132800e623df74274a0a87322d8e183338e01b3d98d058"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -1237,98 +978,78 @@
 ]
 
 [[package]]
-name = "tagger"
-version = "4.3.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6aaa6f5d645d1dae4cd0286e9f8bf15b75a31656348e5e106eb1a940abd34b63"
-
-[[package]]
-name = "termcolor"
-version = "1.1.3"
+name = "terminal_size"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755"
-dependencies = [
- "winapi-util",
-]
-
-[[package]]
-name = "terminal_size"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40ca90c434fd12083d1a6bdcbe9f92a14f96c8a1ba600ba451734ac334521f7a"
+checksum = "5352447f921fda68cf61b4101566c0bdb5104eff6804d0678e5227580ab6a4e9"
 dependencies = [
- "rustix 0.35.13",
- "windows-sys",
-]
-
-[[package]]
-name = "threadpool"
-version = "1.8.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa"
-dependencies = [
- "num_cpus",
-]
-
-[[package]]
-name = "tiff"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f17def29300a156c19ae30814710d9c63cd50288a49c6fd3a10ccfbe4cf886fd"
-dependencies = [
- "flate2",
- "jpeg-decoder",
- "weezl",
+ "rustix",
+ "windows-sys 0.59.0",
 ]
 
 [[package]]
 name = "time"
-version = "0.1.45"
+version = "0.3.37"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a"
+checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21"
 dependencies = [
- "libc",
- "wasi 0.10.0+wasi-snapshot-preview1",
- "winapi",
+ "deranged",
+ "itoa",
+ "num-conv",
+ "powerfmt",
+ "serde",
+ "time-core",
+ "time-macros",
 ]
 
 [[package]]
-name = "trait-set"
-version = "0.2.0"
+name = "time-core"
+version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "875c4c873cc824e362fa9a9419ffa59807244824275a44ad06fec9684fff08f2"
+checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
+
+[[package]]
+name = "time-macros"
+version = "0.2.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de"
 dependencies = [
- "proc-macro2",
- "quote",
- "syn",
+ "num-conv",
+ "time-core",
 ]
 
 [[package]]
 name = "typenum"
-version = "1.15.0"
+version = "1.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
+checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
 
 [[package]]
 name = "unicase"
-version = "2.6.0"
+version = "2.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6"
+checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89"
 dependencies = [
  "version_check",
 ]
 
 [[package]]
 name = "unicode-ident"
-version = "1.0.5"
+version = "1.0.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
 
 [[package]]
 name = "unicode-width"
-version = "0.1.10"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
+checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd"
+
+[[package]]
+name = "utf8parse"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
 
 [[package]]
 name = "version_check"
@@ -1338,46 +1059,40 @@
 
 [[package]]
 name = "wasi"
-version = "0.10.0+wasi-snapshot-preview1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
-
-[[package]]
-name = "wasi"
 version = "0.11.0+wasi-snapshot-preview1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
 
 [[package]]
 name = "wasm-bindgen"
-version = "0.2.83"
+version = "0.2.99"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268"
+checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396"
 dependencies = [
  "cfg-if",
+ "once_cell",
  "wasm-bindgen-macro",
 ]
 
 [[package]]
 name = "wasm-bindgen-backend"
-version = "0.2.83"
+version = "0.2.99"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142"
+checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79"
 dependencies = [
  "bumpalo",
  "log",
- "once_cell",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.93",
  "wasm-bindgen-shared",
 ]
 
 [[package]]
 name = "wasm-bindgen-macro"
-version = "0.2.83"
+version = "0.2.99"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810"
+checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe"
 dependencies = [
  "quote",
  "wasm-bindgen-macro-support",
@@ -1385,34 +1100,28 @@
 
 [[package]]
 name = "wasm-bindgen-macro-support"
-version = "0.2.83"
+version = "0.2.99"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c"
+checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.93",
  "wasm-bindgen-backend",
  "wasm-bindgen-shared",
 ]
 
 [[package]]
 name = "wasm-bindgen-shared"
-version = "0.2.83"
+version = "0.2.99"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f"
-
-[[package]]
-name = "weezl"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9193164d4de03a926d909d3bc7c30543cecb35400c02114792c2cae20d5e2dbb"
+checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6"
 
 [[package]]
 name = "wide"
-version = "0.7.5"
+version = "0.7.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae41ecad2489a1655c8ef8489444b0b113c0a0c795944a3572a0931cf7d2525c"
+checksum = "ebecebefc38ff1860b4bc47550bbfa63af5746061cf0d29fcd7fa63171602598"
 dependencies = [
  "bytemuck",
  "safe_arch",
@@ -1435,73 +1144,155 @@
 checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
 
 [[package]]
-name = "winapi-util"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
-dependencies = [
- "winapi",
-]
-
-[[package]]
 name = "winapi-x86_64-pc-windows-gnu"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
 
 [[package]]
+name = "windows-core"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9"
+dependencies = [
+ "windows-targets 0.52.6",
+]
+
+[[package]]
 name = "windows-sys"
-version = "0.42.0"
+version = "0.48.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7"
+checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
+dependencies = [
+ "windows-targets 0.48.5",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.59.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
 dependencies = [
- "windows_aarch64_gnullvm",
- "windows_aarch64_msvc",
- "windows_i686_gnu",
- "windows_i686_msvc",
- "windows_x86_64_gnu",
- "windows_x86_64_gnullvm",
- "windows_x86_64_msvc",
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
+dependencies = [
+ "windows_aarch64_gnullvm 0.48.5",
+ "windows_aarch64_msvc 0.48.5",
+ "windows_i686_gnu 0.48.5",
+ "windows_i686_msvc 0.48.5",
+ "windows_x86_64_gnu 0.48.5",
+ "windows_x86_64_gnullvm 0.48.5",
+ "windows_x86_64_msvc 0.48.5",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm 0.52.6",
+ "windows_aarch64_msvc 0.52.6",
+ "windows_i686_gnu 0.52.6",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc 0.52.6",
+ "windows_x86_64_gnu 0.52.6",
+ "windows_x86_64_gnullvm 0.52.6",
+ "windows_x86_64_msvc 0.52.6",
 ]
 
 [[package]]
 name = "windows_aarch64_gnullvm"
-version = "0.42.0"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
 
 [[package]]
 name = "windows_aarch64_msvc"
-version = "0.42.0"
+version = "0.52.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
 
 [[package]]
 name = "windows_i686_gnu"
-version = "0.42.0"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
 
 [[package]]
 name = "windows_i686_msvc"
-version = "0.42.0"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
 
 [[package]]
 name = "windows_x86_64_gnu"
-version = "0.42.0"
+version = "0.52.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
 
 [[package]]
 name = "windows_x86_64_gnullvm"
-version = "0.42.0"
+version = "0.52.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
 
 [[package]]
 name = "windows_x86_64_msvc"
-version = "0.42.0"
+version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5"
+checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
--- a/Cargo.toml	Tue Aug 01 10:25:09 2023 +0300
+++ b/Cargo.toml	Mon Feb 17 13:54:53 2025 -0500
@@ -1,8 +1,8 @@
 [package]
 name = "pointsource_algs"
-version = "1.0.1"
+version = "2.0.0-pre"
 edition = "2021"
-rust-version = "1.67"
+rust-version = "1.85"
 authors = ["Tuomo Valkonen <tuomov@iki.fi>"]
 description = "Algorithms for point source localisation"
 homepage = "https://tuomov.iki.fi/software/pointsource_algs/"
@@ -21,29 +21,32 @@
 ]
 categories = ["mathematics", "science", "computer-vision"]
 
+[dependencies.alg_tools]
+version = "~0.3.0-dev"
+path = "../alg_tools"
+default-features = false
+features = ["nightly"]
+
 [dependencies]
-alg_tools = { version = "~0.1.0", path = "../alg_tools", default-features = false }
 serde = { version = "1.0", features = ["derive"] }
 num-traits = { version = "~0.2.14", features = ["std"] }
 rand = "~0.8.5"
-colored = "~2.0.0"
+colored = "~2.1.0"
 rand_distr = "~0.4.3"
-nalgebra = { version = "~0.31.0", features = ["rand-no-std"] }
-itertools = "~0.10.3" 
+nalgebra = { version = "~0.33.0", features = ["rand-no-std"] }
+itertools = "~0.13.0"
 numeric_literals = "~0.2.0"
-poloto = "~3.13.1"
-GSL = "~6.0.0"
+GSL = "~7.0.0"
 float_extras = "~0.1.6"
-clap = { version = "~4.0.27", features = ["derive", "unicode", "wrap_help"] }
-image = "~0.24.3"
+clap = { version = "~4.5.0", features = ["derive", "unicode", "wrap_help"] }
 cpu-time = "~1.0.0"
-colorbrewer = "~0.2.0"
-rgb = "~0.8.33"
 serde_json = "~1.0.85"
 chrono = { version = "~0.4.23", features = ["alloc", "std", "serde"] }
+anyhow = "1.0.95"
+serde_with = { version = "3.11.0", features = ["macros"] }
 
 [build-dependencies]
-regex = "~1.7.0"
+regex = "~1.11.0"
 
 [profile.release]
 debug = true
--- a/README.md	Tue Aug 01 10:25:09 2023 +0300
+++ b/README.md	Mon Feb 17 13:54:53 2025 -0500
@@ -1,19 +1,19 @@
 
 # Proximal methods for point source localisation: the implementation
 
-This package contains [Rust] codes for the manuscript “_Proximal methods for
-point source localisation_” ([arXiv:2212.02991]) by Tuomo Valkonen
-⟨tuomov@iki.fi⟩. It concerns solution of problems of the type
+This package contains the [Rust] codes for the numerical experiments in the articles:
+* T. Valkonen, “_Proximal methods for point source localisation_”, Journal of
+  Nonsmooth Analysis and Optimization 4 (2023), 10433,
+  [doi:10.46298/jnsao-2023-10433] ([arXiv:2212.02991]).
+* T. Valkonen, “_Point source localisation with unbalanced optimal transport_”
+  (2025), submitted.
+
+It concerns the solution of problems of the type
 $$
     \min_{μ ∈ ℳ(Ω)}~ F(μ) + λ \|μ\|_{ℳ(Ω)} + δ_{≥ 0}(μ),
 $$
 where $F$ is a data term, and $ℳ(Ω)$ is the space of Radon measures on the
 (rectangular) domain $Ω ⊂ ℝ^n$. Implemented are $F(μ)=\frac12\|Aμ-b\|_2^2$ and
 $F(μ)=\|Aμ-b\|_1$ for the forward operator $A \in 𝕃(ℳ(Ω); ℝ^m)$ modelling a
-simple sensor grid. For the 2-norm-squared data term implemented are the
-algorithms μFB, μFISTA, and μPDPS from the aforementioned manuscript along with
-comparison relaxed and fully corrective conditional gradient methods from the
-literature. For the 1-norm data term only the μPDPS is applicable.
+simple sensor grid.
 
 ## Installation and usage
 
@@ -36,8 +36,8 @@
     brew install gsl
     ```
     For other operating systems, suggestions are available in the [rust-GSL]
-    crate documentation. On Windows, you will likely need to pass extra
-    `RUSTFLAGS` options to Cargo in the following steps to locate the library.
+    crate documentation. You may need to pass extra `RUSTFLAGS` options to
+    Cargo in the following steps to locate the library.
 
 4. Download [alg_tools] and unpack it under the same directory as this
    package.
@@ -49,38 +49,33 @@
   [rust-GSL]: https://docs.rs/GSL/6.0.0/rgsl/
   [Homebrew]: https://brew.sh
   [arXiv:2212.02991]: https://arxiv.org/abs/2212.02991
+  [doi:10.46298/jnsao-2023-10433]: http://doi.org/10.46298/jnsao-2023-10433
 
 ### Building and running the experiments
 
-To compile the code and run the experiments in the manuscript, use
+To compile and install the program, use
+```console
+cargo install --path=.
+```
+The first time you do this, several dependencies will be downloaded.
+Now you can run the default set of experiments with
+```console
+pointsource_algs -o results
+```
+The `-o results` option tells `pointsource_algs` to write results in the
+`results` directory. The option is required.
+
+Alternatively, you may build and run the program without installing with
 ```console
 cargo run --release -- -o results
 ```
-When doing this for the first time, several dependencies will be downloaded.
-The double-dash (`--`) separates the arguments of Cargo and this software,
-`pointsource_algs`. The `--release` option to Cargo is required for `rustc` to
-build optimised high performance code. Without that flag the performance will
-be significantly worse. The `-o results` option tells `pointsource_algs` to
-write results in the `results` directory. The option is required.
-
-Alternatively, you may build the executable with
-```console
-cargo build --release
-```
-and then run it with
-```
-target/release/pointsource_algs -o results
-```
+The double-dash (`--`) separates the options of the Cargo build system
+from those of `pointsource_algs`.
 
 ### Documentation
 
 Use the `--help` option to get an extensive listing of command line options to
-customise algorithm parameters and the experiments performed. As above with
-`-o`, if using `cargo` to run the executable, you have to pass any arguments
-to `pointsource_algs` after a double-dash:
-```console
-cargo run --release -- --help
-```
+customise algorithm parameters and the experiments performed.
 
 ## Internals
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/dataterm.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -0,0 +1,94 @@
+/*!
+Basic definitions for data terms
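+
+A sketch of intended use (`opA` and `b` are hypothetical stand-ins for a forward
+operator and an observable; [`L2Squared`] implements [`DataTerm`] below):
+
+```ignore
+let fit = L2Squared.calculate_fit_op(&μ, &opA, &b); // evaluates ½‖Aμ−b‖₂²
+```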
+*/
+
+use numeric_literals::replace_float_literals;
+
+use alg_tools::euclidean::Euclidean;
+use alg_tools::linops::GEMV;
+pub use alg_tools::norms::L1;
+use alg_tools::norms::Norm;
+use alg_tools::instance::{Instance, Space};
+
+use crate::types::*;
+pub use crate::types::L2Squared;
+use crate::measures::RNDM;
+
+/// Calculates the residual $Aμ-b$.
+#[replace_float_literals(F::cast_from(literal))]
+pub(crate) fn calculate_residual<
+    X : Space,
+    I : Instance<X>,
+    F : Float,
+    V : Euclidean<F> + Clone,
+    A : GEMV<F, X, Codomain = V>,
+>(
+    μ : I,
+    opA : &A,
+    b : &V
+) -> V {
+    let mut r = b.clone();
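+    // Assumed GEMV convention: r ← 1·(Aμ) + (−1)·r; with r initialised to b, this yields Aμ − b.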
+    opA.gemv(&mut r, 1.0, μ, -1.0);
+    r
+}
+
+/// Calculates the residual $A(μ+μ_δ)-b$, where $μ_δ$ is the perturbation `μ_delta`.
+#[replace_float_literals(F::cast_from(literal))]
+pub(crate) fn calculate_residual2<
+    F : Float,
+    X : Space,
+    I : Instance<X>,
+    J : Instance<X>,
+    V : Euclidean<F> + Clone,
+    A : GEMV<F, X, Codomain = V>,
+>(
+    μ : I,
+    μ_delta : J,
+    opA : &A,
+    b : &V
+) -> V {
+    let mut r = b.clone();
+    opA.gemv(&mut r, 1.0, μ, -1.0);
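+    // The second pass accumulates the perturbation: r ← 1·(Aμ_delta) + 1·r = A(μ + μ_delta) − b.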
+    opA.gemv(&mut r, 1.0, μ_delta, 1.0);
+    r
+}
+
+
+/// Trait for data terms
+#[replace_float_literals(F::cast_from(literal))]
+pub trait DataTerm<F : Float, V, const N : usize> {
+    /// Calculates $F(y)$, where $F$ is the data fidelity.
+    fn calculate_fit(&self, _residual : &V) -> F;
+
+    /// Calculates $F(Aμ-b)$, where $F$ is the data fidelity.
+    fn calculate_fit_op<I, A : GEMV<F, RNDM<F, N>, Codomain = V>>(
+        &self,
+        μ : I,
+        opA : &A,
+        b : &V
+    ) -> F
+    where
+        V : Euclidean<F> + Clone,
+        I : Instance<RNDM<F, N>>,
+    {
+        let r = calculate_residual(μ, opA, b);
+        self.calculate_fit(&r)
+    }
+}
+
+impl<F : Float, V : Euclidean<F>, const N : usize>
+DataTerm<F, V, N>
+for L2Squared {
+    fn calculate_fit(&self, residual : &V) -> F {
+        residual.norm2_squared_div2()
+    }
+}
+
+
+impl<F : Float, V : Euclidean<F> + Norm<F, L1>, const N : usize>
+DataTerm<F, V, N>
+for L1 {
+    fn calculate_fit(&self, residual : &V) -> F {
+        residual.norm(L1)
+    }
+}
--- a/src/experiments.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/experiments.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -13,21 +13,24 @@
 use alg_tools::error::DynResult;
 use alg_tools::norms::Linfinity;
 
-use crate::ExperimentOverrides;
+use crate::{ExperimentOverrides, AlgorithmOverrides};
 use crate::kernels::*;
-use crate::kernels::{SupportProductFirst as Prod};
-use crate::pdps::PDPSConfig;
+use crate::kernels::SupportProductFirst as Prod;
 use crate::types::*;
 use crate::run::{
     RunnableExperiment,
     ExperimentV2,
+    ExperimentBiased,
     Named,
     DefaultAlgorithm,
-    AlgorithmConfig
 };
 //use crate::fb::FBGenericConfig;
 use crate::rand_distr::{SerializableNormal, SaltAndPepper};
 use crate::regularisation::Regularisation;
+use alg_tools::euclidean::Euclidean;
+use alg_tools::instance::Instance;
+use alg_tools::mapping::Mapping;
+use alg_tools::operator_arithmetic::{MappingSum, Weighted};
 
 /// Experiment shorthands, to be used with the command line parser
 
@@ -58,6 +61,12 @@
     /// Two dimensions, “fast” spread, 1-norm data fidelity
     #[clap(name = "2d_l1_fast")]
     Experiment2D_L1_Fast,
+    /// One dimension, “fast” spread, 2-norm-squared data fidelity with extra TV-regularised bias
+    #[clap(name = "1d_tv_fast")]
+    Experiment1D_TV_Fast,
+    /// Two dimensions, “fast” spread, 2-norm-squared data fidelity with extra TV-regularised bias
+    #[clap(name = "2d_tv_fast")]
+    Experiment2D_TV_Fast,
 }
 
 macro_rules! make_float_constant {
@@ -92,6 +101,25 @@
     ([0.30, 0.70], 5.0)
 ];
 
+/// The $\{0,1\}$-valued characteristic function of a ball as a [`Mapping`].
+#[derive(Debug,Copy,Clone,Serialize,PartialEq)]
+struct BallCharacteristic<F : Float, const N : usize> {
+    pub center : Loc<F, N>,
+    pub radius : F,
+}
+
+impl<F : Float, const N : usize> Mapping<Loc<F, N>> for BallCharacteristic<F, N> {
+    type Codomain = F;
+
+    fn apply<I : Instance<Loc<F, N>>>(&self, i : I) -> F {
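+        // dist2 is taken here to be the Euclidean distance, so this is the indicator of the closed ball.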
+        if self.center.dist2(i) <= self.radius {
+            F::ONE
+        } else {
+            F::ZERO
+        }
+    }
+}
+
 //#[replace_float_literals(F::cast_from(literal))]
 impl DefaultExperiment {
     /// Convert the experiment shorthand into a runnable experiment configuration.
@@ -115,23 +143,70 @@
         make_float_constant!(Variance1 = 0.05.powi(2));
         make_float_constant!(CutOff1 = 0.15);
         make_float_constant!(Hat1 = 0.16);
+        make_float_constant!(HatBias = 0.05);
 
         // We use a different step length for PDPS in 2D experiments
-        let pdps_2d = || {
-            let τ0 = 3.0;
-            PDPSConfig {
-                τ0,
-                σ0 : 0.99 / τ0,
+        // let pdps_2d = (DefaultAlgorithm::PDPS,
+        //     AlgorithmOverrides {
+        //         tau0 : Some(3.0),
+        //         sigma0 : Some(0.99 / 3.0),
+        //         .. Default::default()
+        //     }
+        // );
+        // let radon_pdps_2d = (DefaultAlgorithm::RadonPDPS,
+        //     AlgorithmOverrides {
+        //         tau0 : Some(3.0),
+        //         sigma0 : Some(0.99 / 3.0),
+        //         .. Default::default()
+        //     }
+        // );
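+        // Smaller step parameter for SlidingFB on the cut-Gaussian experiments (assumed semantics of `theta0`).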
+        let sliding_fb_cut_gaussian = (DefaultAlgorithm::SlidingFB,
+            AlgorithmOverrides {
+                theta0 : Some(0.3),
                 .. Default::default()
             }
-        };
-
+        );
+        // let higher_cpos = |alg| (alg,
+        //     AlgorithmOverrides {
+        //         transport_tolerance_pos : Some(1000.0),
+        //         .. Default::default()
+        //     }
+        // );
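+        // Raise the positive transport tolerance and enable fitness-based spike merging.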
+        let higher_cpos_merging = |alg| (alg,
+            AlgorithmOverrides {
+                transport_tolerance_pos : Some(1000.0),
+                merge : Some(true),
+                fitness_merging : Some(true),
+                .. Default::default()
+            }
+        );
+        let higher_cpos_merging_steptune = |alg| (alg,
+            AlgorithmOverrides {
+                transport_tolerance_pos : Some(1000.0),
+                theta0 : Some(0.3),
+                merge : Some(true),
+                fitness_merging : Some(true),
+                .. Default::default()
+            }
+        );
+        let much_higher_cpos_merging_steptune = |alg| (alg,
+            AlgorithmOverrides {
+                transport_tolerance_pos : Some(10000.0),
+                sigma0 : Some(0.15),
+                theta0 : Some(0.3),
+                merge : Some(true),
+                fitness_merging : Some(true),
+                .. Default::default()
+            }
+        );
         //  We add a hash of the experiment name to the configured
         // noise seed to not use the same noise for different experiments.
         let mut h = DefaultHasher::new();
         name.hash(&mut h);
         let noise_seed = cli.noise_seed.unwrap_or(BASE_SEED) + h.finish();
 
+        let default_merge_radius = 0.01;
+
         use DefaultExperiment::*;
         Ok(match self {
             Experiment1D => {
@@ -140,7 +215,7 @@
                 Box::new(Named { name, data : ExperimentV2 {
                     domain : [[0.0, 1.0]].into(),
                     sensor_count : [N_SENSORS_1D],
-                    regularisation : Regularisation::NonnegRadon(cli.alpha.unwrap_or(0.09)),
+                    regularisation : Regularisation::NonnegRadon(cli.alpha.unwrap_or(0.08)),
                     noise_distr : SerializableNormal::new(0.0, cli.variance.unwrap_or(0.2))?,
                     dataterm : DataTerm::L2Squared,
                     μ_hat : MU_TRUE_1D_BASIC.into(),
@@ -149,7 +224,12 @@
                     kernel : Prod(AutoConvolution(spread_cutoff), base_spread),
                     kernel_plot_width,
                     noise_seed,
-                    algorithm_defaults: HashMap::new(),
+                    default_merge_radius,
+                    algorithm_overrides: HashMap::from([
+                        sliding_fb_cut_gaussian,
+                        higher_cpos_merging(DefaultAlgorithm::RadonFB),
+                        higher_cpos_merging(DefaultAlgorithm::RadonSlidingFB),
+                    ]),
                 }})
             },
             Experiment1DFast => {
@@ -166,7 +246,11 @@
                     kernel : base_spread,
                     kernel_plot_width,
                     noise_seed,
-                    algorithm_defaults: HashMap::new(),
+                    default_merge_radius,
+                    algorithm_overrides: HashMap::from([
+                        higher_cpos_merging(DefaultAlgorithm::RadonFB),
+                        higher_cpos_merging(DefaultAlgorithm::RadonSlidingFB),
+                    ]),
                 }})
             },
             Experiment2D => {
@@ -184,8 +268,11 @@
                     kernel : Prod(AutoConvolution(spread_cutoff), base_spread),
                     kernel_plot_width,
                     noise_seed,
-                    algorithm_defaults: HashMap::from([
-                        (DefaultAlgorithm::PDPS, AlgorithmConfig::PDPS(pdps_2d()))
+                    default_merge_radius,
+                    algorithm_overrides: HashMap::from([
+                        sliding_fb_cut_gaussian,
+                        higher_cpos_merging(DefaultAlgorithm::RadonFB),
+                        higher_cpos_merging(DefaultAlgorithm::RadonSlidingFB),
                     ]),
                 }})
             },
@@ -203,8 +290,10 @@
                     kernel : base_spread,
                     kernel_plot_width,
                     noise_seed,
-                    algorithm_defaults: HashMap::from([
-                        (DefaultAlgorithm::PDPS, AlgorithmConfig::PDPS(pdps_2d()))
+                    default_merge_radius,
+                    algorithm_overrides: HashMap::from([
+                        higher_cpos_merging(DefaultAlgorithm::RadonFB),
+                        higher_cpos_merging(DefaultAlgorithm::RadonSlidingFB),
                     ]),
                 }})
             },
@@ -226,7 +315,8 @@
                     kernel : Prod(AutoConvolution(spread_cutoff), base_spread),
                     kernel_plot_width,
                     noise_seed,
-                    algorithm_defaults: HashMap::new(),
+                    default_merge_radius,
+                    algorithm_overrides: HashMap::new(),
                 }})
             },
             Experiment1D_L1_Fast => {
@@ -246,7 +336,8 @@
                     kernel : base_spread,
                     kernel_plot_width,
                     noise_seed,
-                    algorithm_defaults: HashMap::new(),
+                    default_merge_radius,
+                    algorithm_overrides: HashMap::new(),
                 }})
             },
             Experiment2D_L1 => {
@@ -267,8 +358,8 @@
                     kernel : Prod(AutoConvolution(spread_cutoff), base_spread),
                     kernel_plot_width,
                     noise_seed,
-                    algorithm_defaults: HashMap::from([
-                        (DefaultAlgorithm::PDPS, AlgorithmConfig::PDPS(pdps_2d()))
+                    default_merge_radius,
+                    algorithm_overrides: HashMap::from([
                     ]),
                 }})
             },
@@ -289,9 +380,65 @@
                     kernel : base_spread,
                     kernel_plot_width,
                     noise_seed,
-                    algorithm_defaults: HashMap::from([
-                        (DefaultAlgorithm::PDPS, AlgorithmConfig::PDPS(pdps_2d()))
+                    default_merge_radius,
+                    algorithm_overrides: HashMap::from([
+                    ]),
+                }})
+            },
+            Experiment1D_TV_Fast => {
+                let base_spread = HatConv { radius : HatBias };
+                Box::new(Named { name, data : ExperimentBiased {
+                    λ : 0.02,
+                    bias : MappingSum::new([
+                        Weighted::new(1.0, BallCharacteristic{ center : 0.3.into(), radius : 0.2 }),
+                        Weighted::new(0.5, BallCharacteristic{ center : 0.6.into(), radius : 0.3 }),
                     ]),
+                    base : ExperimentV2 {
+                        domain : [[0.0, 1.0]].into(),
+                        sensor_count : [N_SENSORS_1D],
+                        regularisation : Regularisation::NonnegRadon(cli.alpha.unwrap_or(0.2)),
+                        noise_distr : SerializableNormal::new(0.0, cli.variance.unwrap_or(0.1))?,
+                        dataterm : DataTerm::L2Squared,
+                        μ_hat : MU_TRUE_1D_BASIC.into(),
+                        sensor : BallIndicator { r : SensorWidth1D, exponent : Linfinity },
+                        spread : base_spread,
+                        kernel : base_spread,
+                        kernel_plot_width,
+                        noise_seed,
+                        default_merge_radius,
+                        algorithm_overrides: HashMap::from([
+                            higher_cpos_merging_steptune(DefaultAlgorithm::RadonForwardPDPS),
+                            higher_cpos_merging_steptune(DefaultAlgorithm::RadonSlidingPDPS),
+                        ]),
+                    },
+                }})
+            },
+            Experiment2D_TV_Fast => {
+                let base_spread = HatConv { radius : Hat1 };
+                Box::new(Named { name, data : ExperimentBiased {
+                    λ : 0.005,
+                    bias : MappingSum::new([
+                        Weighted::new(1.0, BallCharacteristic{ center : [0.3, 0.3].into(), radius : 0.2 }),
+                        Weighted::new(0.5, BallCharacteristic{ center : [0.6, 0.6].into(), radius : 0.3 }),
+                    ]),
+                    base : ExperimentV2 {
+                        domain : [[0.0, 1.0]; 2].into(),
+                        sensor_count : [N_SENSORS_2D; 2],
+                        regularisation : Regularisation::NonnegRadon(cli.alpha.unwrap_or(0.06)),
+                        noise_distr : SerializableNormal::new(0.0, cli.variance.unwrap_or(0.15))?, //0.25
+                        dataterm : DataTerm::L2Squared,
+                        μ_hat : MU_TRUE_2D_BASIC.into(),
+                        sensor : BallIndicator { r : SensorWidth2D, exponent : Linfinity },
+                        spread : base_spread,
+                        kernel : base_spread,
+                        kernel_plot_width,
+                        noise_seed,
+                        default_merge_radius,
+                        algorithm_overrides: HashMap::from([
+                            much_higher_cpos_merging_steptune(DefaultAlgorithm::RadonForwardPDPS),
+                            much_higher_cpos_merging_steptune(DefaultAlgorithm::RadonSlidingPDPS),
+                        ]),
+                    },
                 }})
             },
         })
--- a/src/fb.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/fb.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -6,10 +6,7 @@
  * Valkonen T. - _Proximal methods for point source localisation_,
    [arXiv:2212.02991](https://arxiv.org/abs/2212.02991).
 
-The main routine is [`pointsource_fb_reg`]. It is based on [`generic_pointsource_fb_reg`], which is
-also used by our [primal-dual proximal splitting][crate::pdps] implementation.
-
-FISTA-type inertia can also be enabled through [`FBConfig::meta`].
+The main routine is [`pointsource_fb_reg`].
 
 ## Problem
 
@@ -76,650 +73,94 @@
 $$
 </p>
 
-We solve this with either SSN or FB via [`quadratic_nonneg`] as determined by
-[`InnerSettings`] in [`FBGenericConfig::inner`].
+We solve this with either SSN or FB as determined by
+[`crate::subproblem::InnerSettings`] in [`FBGenericConfig::inner`].
 */
 
+use colored::Colorize;
 use numeric_literals::replace_float_literals;
-use serde::{Serialize, Deserialize};
-use colored::Colorize;
-use nalgebra::{DVector, DMatrix};
+use serde::{Deserialize, Serialize};
 
-use alg_tools::iterate::{
-    AlgIteratorFactory,
-    AlgIteratorState,
-};
 use alg_tools::euclidean::Euclidean;
-use alg_tools::linops::Apply;
-use alg_tools::sets::Cube;
-use alg_tools::loc::Loc;
-use alg_tools::mapping::Mapping;
-use alg_tools::bisection_tree::{
-    BTFN,
-    PreBTFN,
-    Bounds,
-    BTNodeLookup,
-    BTNode,
-    BTSearch,
-    P2Minimise,
-    SupportGenerator,
-    LocalAnalysis,
-    Bounded,
-};
+use alg_tools::instance::Instance;
+use alg_tools::iterate::AlgIteratorFactory;
+use alg_tools::linops::{Mapping, GEMV};
 use alg_tools::mapping::RealMapping;
 use alg_tools::nalgebra_support::ToNalgebraRealField;
 
+use crate::dataterm::{calculate_residual, DataTerm, L2Squared};
+use crate::forward_model::{AdjointProductBoundedBy, ForwardModel};
+use crate::measures::merging::SpikeMerging;
+use crate::measures::{DiscreteMeasure, RNDM};
+use crate::plot::{PlotLookup, Plotting, SeqPlotter};
+pub use crate::prox_penalty::{FBGenericConfig, ProxPenalty};
+use crate::regularisation::RegTerm;
 use crate::types::*;
-use crate::measures::{
-    DiscreteMeasure,
-    DeltaMeasure,
-};
-use crate::measures::merging::{
-    SpikeMergingMethod,
-    SpikeMerging,
-};
-use crate::forward_model::ForwardModel;
-use crate::seminorms::{
-    DiscreteMeasureOp, Lipschitz
-};
-use crate::subproblem::{
-    nonneg::quadratic_nonneg,
-    unconstrained::quadratic_unconstrained,
-    InnerSettings,
-    InnerMethod,
-};
-use crate::tolerance::Tolerance;
-use crate::plot::{
-    SeqPlotter,
-    Plotting,
-    PlotLookup
-};
-use crate::regularisation::{
-    NonnegRadonRegTerm,
-    RadonRegTerm,
-};
-
-/// Method for constructing $μ$ on each iteration
-#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
-#[allow(dead_code)]
-pub enum InsertionStyle {
-    /// Resuse previous $μ$ from previous iteration, optimising weights
-    /// before inserting new spikes.
-    Reuse,
-    /// Start each iteration with $μ=0$.
-    Zero,
-}
-
-/// Meta-algorithm type
-#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
-#[allow(dead_code)]
-pub enum FBMetaAlgorithm {
-    /// No meta-algorithm
-    None,
-    /// FISTA-style inertia
-    InertiaFISTA,
-}
 
 /// Settings for [`pointsource_fb_reg`].
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
 #[serde(default)]
-pub struct FBConfig<F : Float> {
+pub struct FBConfig<F: Float> {
     /// Step length scaling
-    pub τ0 : F,
-    /// Meta-algorithm to apply
-    pub meta : FBMetaAlgorithm,
+    pub τ0: F,
     /// Generic parameters
-    pub insertion : FBGenericConfig<F>,
-}
-
-/// Settings for the solution of the stepwise optimality condition in algorithms based on
-/// [`generic_pointsource_fb_reg`].
-#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
-#[serde(default)]
-pub struct FBGenericConfig<F : Float> {
-    /// Method for constructing $μ$ on each iteration; see [`InsertionStyle`].
-    pub insertion_style : InsertionStyle,
-    /// Tolerance for point insertion.
-    pub tolerance : Tolerance<F>,
-    /// Stop looking for predual maximum (where to isert a new point) below
-    /// `tolerance` multiplied by this factor.
-    pub insertion_cutoff_factor : F,
-    /// Settings for branch and bound refinement when looking for predual maxima
-    pub refinement : RefinementSettings<F>,
-    /// Maximum insertions within each outer iteration
-    pub max_insertions : usize,
-    /// Pair `(n, m)` for maximum insertions `m` on first `n` iterations.
-    pub bootstrap_insertions : Option<(usize, usize)>,
-    /// Inner method settings
-    pub inner : InnerSettings<F>,
-    /// Spike merging method
-    pub merging : SpikeMergingMethod<F>,
-    /// Tolerance multiplier for merges
-    pub merge_tolerance_mult : F,
-    /// Spike merging method after the last step
-    pub final_merging : SpikeMergingMethod<F>,
-    /// Iterations between merging heuristic tries
-    pub merge_every : usize,
-    /// Save $μ$ for postprocessing optimisation
-    pub postprocessing : bool
+    pub generic: FBGenericConfig<F>,
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float> Default for FBConfig<F> {
+impl<F: Float> Default for FBConfig<F> {
     fn default() -> Self {
         FBConfig {
-            τ0 : 0.99,
-            meta : FBMetaAlgorithm::None,
-            insertion : Default::default()
-        }
-    }
-}
-
-#[replace_float_literals(F::cast_from(literal))]
-impl<F : Float> Default for FBGenericConfig<F> {
-    fn default() -> Self {
-        FBGenericConfig {
-            insertion_style : InsertionStyle::Reuse,
-            tolerance : Default::default(),
-            insertion_cutoff_factor : 1.0,
-            refinement : Default::default(),
-            max_insertions : 100,
-            //bootstrap_insertions : None,
-            bootstrap_insertions : Some((10, 1)),
-            inner : InnerSettings {
-                method : InnerMethod::SSN,
-                .. Default::default()
-            },
-            merging : SpikeMergingMethod::None,
-            //merging : Default::default(),
-            final_merging : Default::default(),
-            merge_every : 10,
-            merge_tolerance_mult : 2.0,
-            postprocessing : false,
+            τ0: 0.99,
+            generic: Default::default(),
         }
     }
 }
 
-/// Trait for specialisation of [`generic_pointsource_fb_reg`] to basic FB, FISTA.
-///
-/// The idea is that the residual $Aμ - b$ in the forward step can be replaced by an arbitrary
-/// value. For example, to implement [primal-dual proximal splitting][crate::pdps] we replace it
-/// with the dual variable $y$. We can then also implement alternative data terms, as the
-/// (pre)differential of $F(μ)=F\_0(Aμ-b)$ is $F\'(μ) = A\_*F\_0\'(Aμ-b)$. In the case of the
-/// quadratic fidelity $F_0(y)=\frac{1}{2}\\|y\\|_2^2$ in a Hilbert space, of course,
-/// $F\_0\'(Aμ-b)=Aμ-b$ is the residual.
-pub trait FBSpecialisation<F : Float, Observable : Euclidean<F>, const N : usize> : Sized {
-    /// Updates the residual and does any necessary pruning of `μ`.
-    ///
-    /// Returns the new residual and possibly a new step length.
-    ///
-    /// The measure `μ` may also be modified to apply, e.g., inertia to it.
-    /// The updated residual should correspond to the residual at `μ`.
-    /// See the [trait documentation][FBSpecialisation] for the use and meaning of the residual.
-    ///
-    /// The parameter `μ_base` is the base point of the iteration, typically the previous iterate,
-    /// but for, e.g., FISTA has inertia applied to it.
-    fn update(
-        &mut self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
-        μ_base : &DiscreteMeasure<Loc<F, N>, F>,
-    ) -> (Observable, Option<F>);
-
-    /// Calculates the data term value corresponding to iterate `μ` and available residual.
-    ///
-    /// Inertia and other modifications, as deemed, necessary, should be applied to `μ`.
-    ///
-    /// The blanket implementation correspondsn to the 2-norm-squared data fidelity
-    /// $\\|\text{residual}\\|\_2^2/2$.
-    fn calculate_fit(
-        &self,
-        _μ : &DiscreteMeasure<Loc<F, N>, F>,
-        residual : &Observable
-    ) -> F {
-        residual.norm2_squared_div2()
-    }
-
-    /// Calculates the data term value at $μ$.
-    ///
-    /// Unlike [`Self::calculate_fit`], no inertia, etc., should be applied to `μ`.
-    fn calculate_fit_simple(
-        &self,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
-    ) -> F;
-
-    /// Returns the final iterate after any necessary postprocess pruning, merging, etc.
-    fn postprocess(self, mut μ : DiscreteMeasure<Loc<F, N>, F>, merging : SpikeMergingMethod<F>)
-    -> DiscreteMeasure<Loc<F, N>, F>
-    where  DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {
-        μ.merge_spikes_fitness(merging,
-                               |μ̃| self.calculate_fit_simple(μ̃),
-                               |&v| v);
-        μ.prune();
-        μ
-    }
-
-    /// Returns measure to be used for value calculations, which may differ from μ.
-    fn value_μ<'c, 'b : 'c>(&'b self, μ : &'c DiscreteMeasure<Loc<F, N>, F>)
-    -> &'c DiscreteMeasure<Loc<F, N>, F> {
-        μ
-    }
-}
-
-/// Specialisation of [`generic_pointsource_fb_reg`] to basic μFB.
-struct BasicFB<
-    'a,
-    F : Float + ToNalgebraRealField,
-    A : ForwardModel<Loc<F, N>, F>,
-    const N : usize
-> {
-    /// The data
-    b : &'a A::Observable,
-    /// The forward operator
-    opA : &'a A,
-}
-
-/// Implementation of [`FBSpecialisation`] for basic μFB forward-backward splitting.
-#[replace_float_literals(F::cast_from(literal))]
-impl<'a, F : Float + ToNalgebraRealField , A : ForwardModel<Loc<F, N>, F>, const N : usize>
-FBSpecialisation<F, A::Observable, N> for BasicFB<'a, F, A, N> {
-    fn update(
-        &mut self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
-        _μ_base : &DiscreteMeasure<Loc<F, N>, F>
-    ) -> (A::Observable, Option<F>) {
-        μ.prune();
-        //*residual = self.opA.apply(μ) - self.b;
-        let mut residual = self.b.clone();
-        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
-        (residual, None)
-    }
-
-    fn calculate_fit_simple(
-        &self,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
-    ) -> F {
-        let mut residual = self.b.clone();
-        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
-        residual.norm2_squared_div2()
-    }
-}
-
-/// Specialisation of [`generic_pointsource_fb_reg`] to FISTA.
-struct FISTA<
-    'a,
-    F : Float + ToNalgebraRealField,
-    A : ForwardModel<Loc<F, N>, F>,
-    const N : usize
-> {
-    /// The data
-    b : &'a A::Observable,
-    /// The forward operator
-    opA : &'a A,
-    /// Current inertial parameter
-    λ : F,
-    /// Previous iterate without inertia applied.
-    /// We need to store this here because `μ_base` passed to [`FBSpecialisation::update`] will
-    /// have inertia applied to it, so is not useful to use.
-    μ_prev : DiscreteMeasure<Loc<F, N>, F>,
-}
-
-/// Implementation of [`FBSpecialisation`] for μFISTA inertial forward-backward splitting.
-#[replace_float_literals(F::cast_from(literal))]
-impl<'a, F : Float + ToNalgebraRealField, A : ForwardModel<Loc<F, N>, F>, const N : usize>
-FBSpecialisation<F, A::Observable, N> for FISTA<'a, F, A, N> {
-    fn update(
-        &mut self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
-        _μ_base : &DiscreteMeasure<Loc<F, N>, F>
-    ) -> (A::Observable, Option<F>) {
-        // Update inertial parameters
-        let λ_prev = self.λ;
-        self.λ = 2.0 * λ_prev / ( λ_prev + (4.0 + λ_prev * λ_prev).sqrt() );
-        let θ = self.λ / λ_prev - self.λ;
-        // Perform inertial update on μ.
-        // This computes μ ← (1 + θ) * μ - θ * μ_prev, pruning spikes where both μ
-        // and μ_prev have zero weight. Since both have weights from the finite-dimensional
-        // subproblem with a proximal projection step, this is likely to happen when the
-        // spike is not needed. A copy of the pruned μ without artithmetic performed is
-        // stored in μ_prev.
-        μ.pruning_sub(1.0 + θ, θ, &mut self.μ_prev);
-
-        //*residual = self.opA.apply(μ) - self.b;
-        let mut residual = self.b.clone();
-        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
-        (residual, None)
-    }
-
-    fn calculate_fit_simple(
-        &self,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
-    ) -> F {
-        let mut residual = self.b.clone();
-        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
-        residual.norm2_squared_div2()
-    }
-
-    fn calculate_fit(
-        &self,
-        _μ : &DiscreteMeasure<Loc<F, N>, F>,
-        _residual : &A::Observable
-    ) -> F {
-        self.calculate_fit_simple(&self.μ_prev)
-    }
-
-    // For FISTA we need to do a final pruning as well, due to the limited
-    // pruning that can be done on each step.
-    fn postprocess(mut self, μ_base : DiscreteMeasure<Loc<F, N>, F>, merging : SpikeMergingMethod<F>)
-    -> DiscreteMeasure<Loc<F, N>, F>
-    where  DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {
-        let mut μ = self.μ_prev;
-        self.μ_prev = μ_base;
-        μ.merge_spikes_fitness(merging,
-                               |μ̃| self.calculate_fit_simple(μ̃),
-                               |&v| v);
-        μ.prune();
-        μ
-    }
-
-    fn value_μ<'c, 'b : 'c>(&'c self, _μ : &'c DiscreteMeasure<Loc<F, N>, F>)
-    -> &'c DiscreteMeasure<Loc<F, N>, F> {
-        &self.μ_prev
-    }
-}
-
-
-/// Abstraction of regularisation terms for [`generic_pointsource_fb_reg`].
-pub trait RegTerm<F : Float + ToNalgebraRealField, const N : usize>
-: for<'a> Apply<&'a DiscreteMeasure<Loc<F, N>, F>, Output = F> {
-    /// Approximately solve the problem
-    /// <div>$$
-    ///     \min_{x ∈ ℝ^n} \frac{1}{2} x^⊤Ax - g^⊤ x + τ G(x)
-    /// $$</div>
-    /// for $G$ depending on the trait implementation.
-    ///
-    /// The parameter `mA` is $A$. An estimate for its opeator norm should be provided in
-    /// `mA_normest`. The initial iterate and output is `x`. The current main tolerance is `ε`.
-    ///
-    /// Returns the number of iterations taken.
-    fn solve_findim(
-        &self,
-        mA : &DMatrix<F::MixedType>,
-        g : &DVector<F::MixedType>,
-        τ : F,
-        x : &mut DVector<F::MixedType>,
-        mA_normest : F,
-        ε : F,
-        config : &FBGenericConfig<F>
-    ) -> usize;
-
-    /// Find a point where `d` may violate the tolerance `ε`.
-    ///
-    /// If `skip_by_rough_check` is set, do not find the point if a rough check indicates that we
-    /// are in bounds. `ε` is the current main tolerance and `τ` a scaling factor for the
-    /// regulariser.
-    ///
-    /// Returns `None` if `d` is in bounds either based on the rough check, or a more precise check
-    /// terminating early. Otherwise returns a possibly violating point, the value of `d` there,
-    /// and a boolean indicating whether the found point is in bounds.
-    fn find_tolerance_violation<G, BT>(
-        &self,
-        d : &mut BTFN<F, G, BT, N>,
-        τ : F,
-        ε : F,
-        skip_by_rough_check : bool,
-        config : &FBGenericConfig<F>,
-    ) -> Option<(Loc<F, N>, F, bool)>
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N>;
-
-    /// Verify that `d` is in bounds `ε` for a merge candidate `μ`
-    ///
-    /// `ε` is the current main tolerance and `τ` a scaling factor for the regulariser.
-    fn verify_merge_candidate<G, BT>(
-        &self,
-        d : &mut BTFN<F, G, BT, N>,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
-        τ : F,
-        ε : F,
-        config : &FBGenericConfig<F>,
-    ) -> bool
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N>;
-
-    fn target_bounds(&self, τ : F, ε : F) -> Option<Bounds<F>>;
-
-    /// Returns a scaling factor for the tolerance sequence.
-    ///
-    /// Typically this is the regularisation parameter.
-    fn tolerance_scaling(&self) -> F;
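+/// Prunes `μ` and returns the number of spikes removed.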
+pub(crate) fn prune_with_stats<F: Float, const N: usize>(μ: &mut RNDM<F, N>) -> usize {
+    let n_before_prune = μ.len();
+    μ.prune();
+    debug_assert!(μ.len() <= n_before_prune);
+    n_before_prune - μ.len()
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float + ToNalgebraRealField, const N : usize> RegTerm<F, N> for NonnegRadonRegTerm<F>
-where Cube<F, N> : P2Minimise<Loc<F, N>, F> {
-    fn solve_findim(
-        &self,
-        mA : &DMatrix<F::MixedType>,
-        g : &DVector<F::MixedType>,
-        τ : F,
-        x : &mut DVector<F::MixedType>,
-        mA_normest : F,
-        ε : F,
-        config : &FBGenericConfig<F>
-    ) -> usize {
-        let inner_tolerance = ε * config.inner.tolerance_mult;
-        let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
-        let inner_τ = config.inner.τ0 / mA_normest;
-        quadratic_nonneg(config.inner.method, mA, g, τ * self.α(), x,
-                         inner_τ, inner_it)
-    }
-
-    #[inline]
-    fn find_tolerance_violation<G, BT>(
-        &self,
-        d : &mut BTFN<F, G, BT, N>,
-        τ : F,
-        ε : F,
-        skip_by_rough_check : bool,
-        config : &FBGenericConfig<F>,
-    ) -> Option<(Loc<F, N>, F, bool)>
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N> {
-        let τα = τ * self.α();
-        let keep_below = τα + ε;
-        let maximise_above = τα + ε * config.insertion_cutoff_factor;
-        let refinement_tolerance = ε * config.refinement.tolerance_mult;
-
-        // If preliminary check indicates that we are in bonds, and if it otherwise matches
-        // the insertion strategy, skip insertion.
-        if skip_by_rough_check && d.bounds().upper() <= keep_below {
-            None
-        } else {
-            // If the rough check didn't indicate no insertion needed, find maximising point.
-            d.maximise_above(maximise_above, refinement_tolerance, config.refinement.max_steps)
-             .map(|(ξ, v_ξ)| (ξ, v_ξ, v_ξ <= keep_below))
-        }
-    }
-
-    fn verify_merge_candidate<G, BT>(
-        &self,
-        d : &mut BTFN<F, G, BT, N>,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
-        τ : F,
-        ε : F,
-        config : &FBGenericConfig<F>,
-    ) -> bool
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N> {
-        let τα = τ * self.α();
-        let refinement_tolerance = ε * config.refinement.tolerance_mult;
-        let merge_tolerance = config.merge_tolerance_mult * ε;
-        let keep_below = τα + merge_tolerance;
-        let keep_supp_above = τα - merge_tolerance;
-        let bnd = d.bounds();
-
-        return (
-            bnd.lower() >= keep_supp_above
-            ||
-            μ.iter_spikes().map(|&DeltaMeasure{ α : β, ref x }| {
-                (β == 0.0) || d.apply(x) >= keep_supp_above
-            }).all(std::convert::identity)
-         ) && (
-            bnd.upper() <= keep_below
-            ||
-            d.has_upper_bound(keep_below, refinement_tolerance, config.refinement.max_steps)
-        )
-    }
-
-    fn target_bounds(&self, τ : F, ε : F) -> Option<Bounds<F>> {
-        let τα = τ * self.α();
-        Some(Bounds(τα - ε,  τα + ε))
-    }
-
-    fn tolerance_scaling(&self) -> F {
-        self.α()
-    }
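+/// Postprocesses the final iterate: merges spikes with the configured final merging
+/// method, using the data fit as the merging criterion, and prunes the result.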
+pub(crate) fn postprocess<
+    F: Float,
+    V: Euclidean<F> + Clone,
+    A: GEMV<F, RNDM<F, N>, Codomain = V>,
+    D: DataTerm<F, V, N>,
+    const N: usize,
+>(
+    mut μ: RNDM<F, N>,
+    config: &FBGenericConfig<F>,
+    dataterm: D,
+    opA: &A,
+    b: &V,
+) -> RNDM<F, N>
+where
+    RNDM<F, N>: SpikeMerging<F>,
+    for<'a> &'a RNDM<F, N>: Instance<RNDM<F, N>>,
+{
+    μ.merge_spikes_fitness(
+        config.final_merging_method(),
+        |μ̃| dataterm.calculate_fit_op(μ̃, opA, b),
+        |&v| v,
+    );
+    μ.prune();
+    μ
 }
 
-#[replace_float_literals(F::cast_from(literal))]
-impl<F : Float + ToNalgebraRealField, const N : usize> RegTerm<F, N> for RadonRegTerm<F>
-where Cube<F, N> : P2Minimise<Loc<F, N>, F> {
-    fn solve_findim(
-        &self,
-        mA : &DMatrix<F::MixedType>,
-        g : &DVector<F::MixedType>,
-        τ : F,
-        x : &mut DVector<F::MixedType>,
-        mA_normest: F,
-        ε : F,
-        config : &FBGenericConfig<F>
-    ) -> usize {
-        let inner_tolerance = ε * config.inner.tolerance_mult;
-        let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
-        let inner_τ = config.inner.τ0 / mA_normest;
-        quadratic_unconstrained(config.inner.method, mA, g, τ * self.α(), x,
-                                inner_τ, inner_it)
-    }
-
-   fn find_tolerance_violation<G, BT>(
-        &self,
-        d : &mut BTFN<F, G, BT, N>,
-        τ : F,
-        ε : F,
-        skip_by_rough_check : bool,
-        config : &FBGenericConfig<F>,
-    ) -> Option<(Loc<F, N>, F, bool)>
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N> {
-        let τα = τ * self.α();
-        let keep_below = τα + ε;
-        let keep_above = -τα - ε;
-        let maximise_above = τα + ε * config.insertion_cutoff_factor;
-        let minimise_below = -τα - ε * config.insertion_cutoff_factor;
-        let refinement_tolerance = ε * config.refinement.tolerance_mult;
-
-        // If preliminary check indicates that we are in bonds, and if it otherwise matches
-        // the insertion strategy, skip insertion.
-        if skip_by_rough_check && Bounds(keep_above, keep_below).superset(&d.bounds()) {
-            None
-        } else {
-            // If the rough check didn't indicate no insertion needed, find maximising point.
-            let mx = d.maximise_above(maximise_above, refinement_tolerance,
-                                      config.refinement.max_steps);
-            let mi = d.minimise_below(minimise_below, refinement_tolerance,
-                                      config.refinement.max_steps);
-
-            match (mx, mi) {
-                (None, None) => None,
-                (Some((ξ, v_ξ)), None) => Some((ξ, v_ξ, keep_below >= v_ξ)),
-                (None, Some((ζ, v_ζ))) => Some((ζ, v_ζ, keep_above <= v_ζ)),
-                (Some((ξ, v_ξ)), Some((ζ, v_ζ))) => {
-                    if v_ξ - τα > τα - v_ζ {
-                        Some((ξ, v_ξ, keep_below >= v_ξ))
-                    } else {
-                        Some((ζ, v_ζ, keep_above <= v_ζ))
-                    }
-                }
-            }
-        }
-    }
-
-    fn verify_merge_candidate<G, BT>(
-        &self,
-        d : &mut BTFN<F, G, BT, N>,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
-        τ : F,
-        ε : F,
-        config : &FBGenericConfig<F>,
-    ) -> bool
-    where BT : BTSearch<F, N, Agg=Bounds<F>>,
-          G : SupportGenerator<F, N, Id=BT::Data>,
-          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
-                           + LocalAnalysis<F, Bounds<F>, N> {
-        let τα = τ * self.α();
-        let refinement_tolerance = ε * config.refinement.tolerance_mult;
-        let merge_tolerance = config.merge_tolerance_mult * ε;
-        let keep_below = τα + merge_tolerance;
-        let keep_above = -τα - merge_tolerance;
-        let keep_supp_pos_above = τα - merge_tolerance;
-        let keep_supp_neg_below = -τα + merge_tolerance;
-        let bnd = d.bounds();
-
-        return (
-            (bnd.lower() >= keep_supp_pos_above && bnd.upper() <= keep_supp_neg_below)
-            ||
-            μ.iter_spikes().map(|&DeltaMeasure{ α : β, ref x }| {
-                use std::cmp::Ordering::*;
-                match β.partial_cmp(&0.0) {
-                    Some(Greater) => d.apply(x) >= keep_supp_pos_above,
-                    Some(Less) => d.apply(x) <= keep_supp_neg_below,
-                    _ => true,
-                }
-            }).all(std::convert::identity)
-        ) && (
-            bnd.upper() <= keep_below
-            ||
-            d.has_upper_bound(keep_below, refinement_tolerance,
-                              config.refinement.max_steps)
-        ) && (
-            bnd.lower() >= keep_above
-            ||
-            d.has_lower_bound(keep_above, refinement_tolerance,
-                              config.refinement.max_steps)
-        )
-    }
-
-    fn target_bounds(&self, τ : F, ε : F) -> Option<Bounds<F>> {
-        let τα = τ * self.α();
-        Some(Bounds(-τα - ε,  τα + ε))
-    }
-
-    fn tolerance_scaling(&self) -> F {
-        self.α()
-    }
-}
-
-
-/// Generic implementation of [`pointsource_fb_reg`].
+/// Iteratively solve the pointsource localisation problem using forward-backward splitting.
 ///
-/// The method can be specialised to even primal-dual proximal splitting through the
-/// [`FBSpecialisation`] parameter `specialisation`.
-/// The settings in `config` have their [respective documentation](FBGenericConfig). `opA` is the
+/// The settings in `config` have their [respective documentation](FBConfig). `opA` is the
 /// forward operator $A$, $b$ the observable, and $\lambda$ the regularisation weight.
 /// The penalty `prox_penalty` is used for forming the proximal term; typically it
 /// corresponds to a convolution operator. Finally, the `iterator` is an outer loop
 /// verbosity and iteration count control
 /// as documented in [`alg_tools::iterate`].
 ///
+/// For details on the mathematical formulation, see the [module level](self) documentation.
+///
 /// The implementation relies on [`alg_tools::bisection_tree::BTFN`] presentations of
 /// sums of simple functions using bisection trees, and the related
 /// [`alg_tools::bisection_tree::Aggregator`]s, to efficiently search for component functions
@@ -729,233 +170,103 @@
 ///
 /// Returns the final iterate.
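+///
+/// A hypothetical invocation (construction of the operator, data, regulariser, and
+/// penalty is omitted; the names are placeholders):
+///
+/// ```ignore
+/// let μ = pointsource_fb_reg(&opA, &b, reg, &prox_penalty,
+///                            &FBConfig::default(), iterator, plotter);
+/// ```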
 #[replace_float_literals(F::cast_from(literal))]
-pub fn generic_pointsource_fb_reg<
-    'a, F, I, A, GA, 𝒟, BTA, G𝒟, S, K, Spec, Reg, const N : usize
->(
-    opA : &'a A,
-    reg : Reg,
-    op𝒟 : &'a 𝒟,
-    mut τ : F,
-    config : &FBGenericConfig<F>,
-    iterator : I,
-    mut plotter : SeqPlotter<F, N>,
-    mut residual : A::Observable,
-    mut specialisation : Spec
-) -> DiscreteMeasure<Loc<F, N>, F>
-where F : Float + ToNalgebraRealField,
-      I : AlgIteratorFactory<IterInfo<F, N>>,
-      Spec : FBSpecialisation<F, A::Observable, N>,
-      A::Observable : std::ops::MulAssign<F>,
-      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
-          + Lipschitz<𝒟, FloatType=F>,
-      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
-      G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
-      𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
-      𝒟::Codomain : RealMapping<F, N>,
-      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
-      PlotLookup : Plotting<N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
-      Reg : RegTerm<F, N> {
-
+pub fn pointsource_fb_reg<F, I, A, Reg, P, const N: usize>(
+    opA: &A,
+    b: &A::Observable,
+    reg: Reg,
+    prox_penalty: &P,
+    fbconfig: &FBConfig<F>,
+    iterator: I,
+    mut plotter: SeqPlotter<F, N>,
+) -> RNDM<F, N>
+where
+    F: Float + ToNalgebraRealField,
+    I: AlgIteratorFactory<IterInfo<F, N>>,
+    for<'b> &'b A::Observable: std::ops::Neg<Output = A::Observable>,
+    A: ForwardModel<RNDM<F, N>, F> + AdjointProductBoundedBy<RNDM<F, N>, P, FloatType = F>,
+    A::PreadjointCodomain: RealMapping<F, N>,
+    PlotLookup: Plotting<N>,
+    RNDM<F, N>: SpikeMerging<F>,
+    Reg: RegTerm<F, N>,
+    P: ProxPenalty<F, A::PreadjointCodomain, Reg, N>,
+{
     // Set up parameters
-    let quiet = iterator.is_quiet();
-    let op𝒟norm = op𝒟.opnorm_bound();
+    let config = &fbconfig.generic;
+    let τ = fbconfig.τ0 / opA.adjoint_product_bound(prox_penalty).unwrap();
     // We multiply the tolerance by τ for FB, since our tolerance-dependent subproblems
     // are scaled by τ compared to the conditional gradient approach.
     let tolerance = config.tolerance * τ * reg.tolerance_scaling();
     let mut ε = tolerance.initial();
 
-    // Initialise operators
-    let preadjA = opA.preadjoint();
-
     // Initialise iterates
     let mut μ = DiscreteMeasure::new();
-
-    let mut inner_iters = 0;
-    let mut this_iters = 0;
-    let mut pruned = 0;
-    let mut merged = 0;
+    let mut residual = -b;
 
-    let μ_diff = |μ_new : &DiscreteMeasure<Loc<F, N>, F>,
-                  μ_base : &DiscreteMeasure<Loc<F, N>, F>| {
-        let mut ν : DiscreteMeasure<Loc<F, N>, F> = match config.insertion_style {
-            InsertionStyle::Reuse => {
-                μ_new.iter_spikes()
-                        .zip(μ_base.iter_masses().chain(std::iter::repeat(0.0)))
-                        .map(|(δ, α_base)| (δ.x, α_base - δ.α))
-                        .collect()
-            },
-            InsertionStyle::Zero => {
-                μ_new.iter_spikes()
-                        .map(|δ| -δ)
-                        .chain(μ_base.iter_spikes().copied())
-                        .collect()
-            }
-        };
-        ν.prune(); // Potential small performance improvement
-        ν
+    // Statistics
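+    // full_stats recomputes the merit value ½‖Aμ−b‖₂² + regularisation for verbose reporting.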
+    let full_stats = |residual: &A::Observable, μ: &RNDM<F, N>, ε, stats| IterInfo {
+        value: residual.norm2_squared_div2() + reg.apply(μ),
+        n_spikes: μ.len(),
+        ε,
+        //postprocessing: config.postprocessing.then(|| μ.clone()),
+        ..stats
     };
+    let mut stats = IterInfo::new();
 
     // Run the algorithm
-    iterator.iterate(|state| {
-        // Maximum insertion count and measure difference calculation depend on insertion style.
-        let (m, warn_insertions) = match (state.iteration(), config.bootstrap_insertions) {
-            (i, Some((l, k))) if i <= l => (k, false),
-            _ => (config.max_insertions, !quiet),
-        };
-        let max_insertions = match config.insertion_style {
-            InsertionStyle::Zero => {
-                todo!("InsertionStyle::Zero does not currently work with FISTA, so diabled.");
-                // let n = μ.len();
-                // μ = DiscreteMeasure::new();
-                // n + m
-            },
-            InsertionStyle::Reuse => m,
-        };
-
+    for state in iterator.iter_init(|| full_stats(&residual, &μ, ε, stats.clone())) {
         // Calculate smooth part of surrogate model.
-        // Using `std::mem::replace` here is not ideal, and expects that `empty_observable`
-        // has no significant overhead. For some reosn Rust doesn't allow us simply moving
-        // the residual and replacing it below before the end of this closure.
-        residual *= -τ;
-        let r = std::mem::replace(&mut residual, opA.empty_observable());
-        let minus_τv = preadjA.apply(r);     // minus_τv = -τA^*(Aμ^k-b)
-        // TODO: should avoid a second copy of μ here; μ_base already stores a copy.
-        let ω0 = op𝒟.apply(μ.clone());       // 𝒟μ^k
-        //let g = &minus_τv + ω0;            // Linear term of surrogate model
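+        // τv = τ·A^*(Aμ − b): the scaled gradient of the quadratic data term at the current iterate.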
+        let mut τv = opA.preadjoint().apply(residual * τ);
 
         // Save current base point
         let μ_base = μ.clone();
-            
-        // Add points to support until within error tolerance or maximum insertion count reached.
-        let mut count = 0;
-        let (within_tolerances, d) = 'insertion: loop {
-            if μ.len() > 0 {
-                // Form finite-dimensional subproblem. The subproblem references to the original μ^k
-                // from the beginning of the iteration are all contained in the immutable c and g.
-                let à = op𝒟.findim_matrix(μ.iter_locations());
-                let g̃ = DVector::from_iterator(μ.len(),
-                                               μ.iter_locations()
-                                                .map(|ζ| minus_τv.apply(ζ) + ω0.apply(ζ))
-                                                .map(F::to_nalgebra_mixed));
-                let mut x = μ.masses_dvector();
 
-                // The gradient of the forward component of the inner objective is C^*𝒟Cx - g̃.
-                // We have |C^*𝒟Cx|_2 = sup_{|z|_2 ≤ 1} ⟨z, C^*𝒟Cx⟩ = sup_{|z|_2 ≤ 1} ⟨Cz|𝒟Cx⟩
-                // ≤ sup_{|z|_2 ≤ 1} |Cz|_ℳ |𝒟Cx|_∞ ≤  sup_{|z|_2 ≤ 1} |Cz|_ℳ |𝒟| |Cx|_ℳ
-                // ≤ sup_{|z|_2 ≤ 1} |z|_1 |𝒟| |x|_1 ≤ sup_{|z|_2 ≤ 1} n |z|_2 |𝒟| |x|_2
-                // = n |𝒟| |x|_2, where n is the number of points. Therefore
-                let Ã_normest = op𝒟norm * F::cast_from(μ.len());
-
-                // Solve finite-dimensional subproblem.
-                inner_iters += reg.solve_findim(&Ã, &g̃, τ, &mut x, Ã_normest, ε, config);
-
-                // Update masses of μ based on solution of finite-dimensional subproblem.
-                μ.set_masses_dvector(&x);
-            }
-
-            // Form d = ω0 - τv - 𝒟μ = -𝒟(μ - μ^k) - τv for checking the proximate optimality
-            // conditions in the predual space, and finding new points for insertion, if necessary.
-            let mut d = &minus_τv + op𝒟.preapply(μ_diff(&μ, &μ_base));
+        // Insert and reweigh
+        let (maybe_d, _within_tolerances) = prox_penalty.insert_and_reweigh(
+            &mut μ, &mut τv, &μ_base, None, τ, ε, config, &reg, &state, &mut stats,
+        );
 
-            // If no merging heuristic is used, let's be more conservative about spike insertion,
-            // and skip it after first round. If merging is done, being more greedy about spike
-            // insertion also seems to improve performance.
-            let skip_by_rough_check = if let SpikeMergingMethod::None = config.merging {
-                false
-            } else {
-                count > 0
-            };
-
-            // Find a spike to insert, if needed
-            let (ξ, _v_ξ, in_bounds) =  match reg.find_tolerance_violation(
-                &mut d, τ, ε, skip_by_rough_check, config
-            ) {
-                None => break 'insertion (true, d),
-                Some(res) => res,
-            };
-
-            // Break if maximum insertion count reached
-            if count >= max_insertions {
-                break 'insertion (in_bounds, d)
-            }
-
-            // No point in optimising the weight here; the finite-dimensional algorithm is fast.
-            μ += DeltaMeasure { x : ξ, α : 0.0 };
-            count += 1;
-        };
-
-        if !within_tolerances && warn_insertions {
-            // Complain (but continue) if we failed to get within tolerances
-            // by inserting more points.
-            let err = format!("Maximum insertions reached without achieving \
-                                subproblem solution tolerance");
-            println!("{}", err.red());
+        // Prune and possibly merge spikes
+        if config.merge_now(&state) {
+            stats.merged += prox_penalty.merge_spikes(
+                &mut μ,
+                &mut τv,
+                &μ_base,
+                None,
+                τ,
+                ε,
+                config,
+                &reg,
+                Some(|μ̃: &RNDM<F, N>| L2Squared.calculate_fit_op(μ̃, opA, b)),
+            );
         }
 
-        // Merge spikes
-        if state.iteration() % config.merge_every == 0 {
-            let n_before_merge = μ.len();
-            μ.merge_spikes(config.merging, |μ_candidate| {
-                let mut d = &minus_τv + op𝒟.preapply(μ_diff(&μ_candidate, &μ_base));
+        stats.pruned += prune_with_stats(&mut μ);
+
+        // Update residual
+        residual = calculate_residual(&μ, opA, b);
+
+        let iter = state.iteration();
+        stats.this_iters += 1;
 
-                reg.verify_merge_candidate(&mut d, μ_candidate, τ, ε, &config)
-                   .then_some(())
-            });
-            debug_assert!(μ.len() >= n_before_merge);
-            merged += μ.len() - n_before_merge;
-        }
-
-        let n_before_prune = μ.len();
-        (residual, τ) = match specialisation.update(&mut μ, &μ_base) {
-            (r, None) => (r, τ),
-            (r, Some(new_τ)) => (r, new_τ)
-        };
-        debug_assert!(μ.len() <= n_before_prune);
-        pruned += n_before_prune - μ.len();
-
-        this_iters += 1;
+        // Give statistics if needed
+        state.if_verbose(|| {
+            plotter.plot_spikes(iter, maybe_d.as_ref(), Some(&τv), &μ);
+            full_stats(
+                &residual,
+                &μ,
+                ε,
+                std::mem::replace(&mut stats, IterInfo::new()),
+            )
+        });
 
         // Update main tolerance for next iteration
-        let ε_prev = ε;
-        ε = tolerance.update(ε, state.iteration());
+        ε = tolerance.update(ε, iter);
+    }
 
-        // Give function value if needed
-        state.if_verbose(|| {
-            let value_μ = specialisation.value_μ(&μ);
-            // Plot if so requested
-            plotter.plot_spikes(
-                format!("iter {} end; {}", state.iteration(), within_tolerances), &d,
-                "start".to_string(), Some(&minus_τv),
-                reg.target_bounds(τ, ε_prev), value_μ,
-            );
-            // Calculate mean inner iterations and reset relevant counters.
-            // Return the statistics
-            let res = IterInfo {
-                value : specialisation.calculate_fit(&μ, &residual) + reg.apply(value_μ),
-                n_spikes : value_μ.len(),
-                inner_iters,
-                this_iters,
-                merged,
-                pruned,
-                ε : ε_prev,
-                postprocessing: config.postprocessing.then(|| value_μ.clone()),
-            };
-            inner_iters = 0;
-            this_iters = 0;
-            merged = 0;
-            pruned = 0;
-            res
-        })
-    });
-
-    specialisation.postprocess(μ, config.final_merging)
+    postprocess(μ, config, L2Squared, opA, b)
 }
 
-/// Iteratively solve the pointsource localisation problem using forward-backward splitting
+/// Iteratively solve the pointsource localisation problem using inertial forward-backward splitting.
 ///
 /// The settings in `config` have their [respective documentation](FBConfig). `opA` is the
 /// forward operator $A$, $b$ the observable, and $\lambda$ the regularisation weight.
@@ -965,118 +276,114 @@
 ///
 /// For details on the mathematical formulation, see the [module level](self) documentation.
 ///
+/// The implementation relies on [`alg_tools::bisection_tree::BTFN`] presentations of
+/// sums of simple functions using bisection trees, and the related
+/// [`alg_tools::bisection_tree::Aggregator`]s, to efficiently search for component functions
+/// active at specific points, and to maximise their sums. Through the implementation of the
+/// [`alg_tools::bisection_tree::BT`] bisection trees, it also relies on the copy-on-write features
+/// of [`std::sync::Arc`] to only update relevant parts of the bisection tree when adding functions.
+///
 /// Returns the final iterate.
 #[replace_float_literals(F::cast_from(literal))]
-pub fn pointsource_fb_reg<'a, F, I, A, GA, 𝒟, BTA, G𝒟, S, K, Reg, const N : usize>(
-    opA : &'a A,
-    b : &A::Observable,
-    reg : Reg,
-    op𝒟 : &'a 𝒟,
-    config : &FBConfig<F>,
-    iterator : I,
-    plotter : SeqPlotter<F, N>,
-) -> DiscreteMeasure<Loc<F, N>, F>
-where F : Float + ToNalgebraRealField,
-      I : AlgIteratorFactory<IterInfo<F, N>>,
-      for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
-                                  //+ std::ops::Mul<F, Output=A::Observable>,  <-- FIXME: compiler overflow
-      A::Observable : std::ops::MulAssign<F>,
-      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
-          + Lipschitz<𝒟, FloatType=F>,
-      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
-      G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
-      𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
-      𝒟::Codomain : RealMapping<F, N>,
-      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
-      Cube<F, N>: P2Minimise<Loc<F, N>, F>,
-      PlotLookup : Plotting<N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
-      Reg : RegTerm<F, N> {
+pub fn pointsource_fista_reg<F, I, A, Reg, P, const N: usize>(
+    opA: &A,
+    b: &A::Observable,
+    reg: Reg,
+    prox_penalty: &P,
+    fbconfig: &FBConfig<F>,
+    iterator: I,
+    mut plotter: SeqPlotter<F, N>,
+) -> RNDM<F, N>
+where
+    F: Float + ToNalgebraRealField,
+    I: AlgIteratorFactory<IterInfo<F, N>>,
+    for<'b> &'b A::Observable: std::ops::Neg<Output = A::Observable>,
+    A: ForwardModel<RNDM<F, N>, F> + AdjointProductBoundedBy<RNDM<F, N>, P, FloatType = F>,
+    A::PreadjointCodomain: RealMapping<F, N>,
+    PlotLookup: Plotting<N>,
+    RNDM<F, N>: SpikeMerging<F>,
+    Reg: RegTerm<F, N>,
+    P: ProxPenalty<F, A::PreadjointCodomain, Reg, N>,
+{
+    // Set up parameters
+    let config = &fbconfig.generic;
+    let τ = fbconfig.τ0 / opA.adjoint_product_bound(prox_penalty).unwrap();
+    let mut λ = 1.0;
+    // We multiply the tolerance by τ for FB since, compared to the conditional gradient
+    // approach, our tolerance-dependent subproblems are scaled by τ.
+    let tolerance = config.tolerance * τ * reg.tolerance_scaling();
+    let mut ε = tolerance.initial();
 
-    let initial_residual = -b;
-    let τ = config.τ0/opA.lipschitz_factor(&op𝒟).unwrap();
+    // Initialise iterates
+    let mut μ = DiscreteMeasure::new();
+    let mut μ_prev = DiscreteMeasure::new();
+    let mut residual = -b;
+    let mut warned_merging = false;
 
-    match config.meta {
-        FBMetaAlgorithm::None => generic_pointsource_fb_reg(
-            opA, reg, op𝒟, τ, &config.insertion, iterator, plotter, initial_residual,
-            BasicFB{ b, opA },
-        ),
-        FBMetaAlgorithm::InertiaFISTA => generic_pointsource_fb_reg(
-            opA, reg, op𝒟, τ, &config.insertion, iterator, plotter, initial_residual,
-            FISTA{ b, opA, λ : 1.0, μ_prev : DiscreteMeasure::new() },
-        ),
-    }
-}
+    // Statistics
+    let full_stats = |ν: &RNDM<F, N>, ε, stats| IterInfo {
+        value: L2Squared.calculate_fit_op(ν, opA, b) + reg.apply(ν),
+        n_spikes: ν.len(),
+        ε,
+        // postprocessing: config.postprocessing.then(|| ν.clone()),
+        ..stats
+    };
+    let mut stats = IterInfo::new();
 
-//
-// Deprecated interfaces
-//
+    // Run the algorithm
+    for state in iterator.iter_init(|| full_stats(&μ, ε, stats.clone())) {
+        // Calculate the smooth part of the surrogate model.
+        let mut τv = opA.preadjoint().apply(residual * τ);
 
-#[deprecated(note = "Use `pointsource_fb_reg`")]
-pub fn pointsource_fb<'a, F, I, A, GA, 𝒟, BTA, G𝒟, S, K, const N : usize>(
-    opA : &'a A,
-    b : &A::Observable,
-    α : F,
-    op𝒟 : &'a 𝒟,
-    config : &FBConfig<F>,
-    iterator : I,
-    plotter : SeqPlotter<F, N>
-) -> DiscreteMeasure<Loc<F, N>, F>
-where F : Float + ToNalgebraRealField,
-      I : AlgIteratorFactory<IterInfo<F, N>>,
-      for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
-      A::Observable : std::ops::MulAssign<F>,
-      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
-          + Lipschitz<𝒟, FloatType=F>,
-      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
-      G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
-      𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
-      𝒟::Codomain : RealMapping<F, N>,
-      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
-      Cube<F, N>: P2Minimise<Loc<F, N>, F>,
-      PlotLookup : Plotting<N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {
+        // Save current base point
+        let μ_base = μ.clone();
+
+        // Insert new spikes and reweigh
+        let (maybe_d, _within_tolerances) = prox_penalty.insert_and_reweigh(
+            &mut μ, &mut τv, &μ_base, None, τ, ε, config, &reg, &state, &mut stats,
+        );
 
-    pointsource_fb_reg(opA, b, NonnegRadonRegTerm(α), op𝒟, config, iterator, plotter)
-}
+        // (Do not) merge spikes.
+        if config.merge_now(&state) && !warned_merging {
+            let err = format!("Merging not supported for μFISTA");
+            println!("{}", err.red());
+            warned_merging = true;
+        }
 
+        // Update the inertial parameters
+        let λ_prev = λ;
+        λ = 2.0 * λ_prev / (λ_prev + (4.0 + λ_prev * λ_prev).sqrt());
+        let θ = λ / λ_prev - λ;
 
-#[deprecated(note = "Use `generic_pointsource_fb_reg`")]
-pub fn generic_pointsource_fb<'a, F, I, A, GA, 𝒟, BTA, G𝒟, S, K, Spec, const N : usize>(
-    opA : &'a A,
-    α : F,
-    op𝒟 : &'a 𝒟,
-    τ : F,
-    config : &FBGenericConfig<F>,
-    iterator : I,
-    plotter : SeqPlotter<F, N>,
-    residual : A::Observable,
-    specialisation : Spec,
-) -> DiscreteMeasure<Loc<F, N>, F>
-where F : Float + ToNalgebraRealField,
-      I : AlgIteratorFactory<IterInfo<F, N>>,
-      Spec : FBSpecialisation<F, A::Observable, N>,
-      A::Observable : std::ops::MulAssign<F>,
-      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
-          + Lipschitz<𝒟, FloatType=F>,
-      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
-      G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
-      𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
-      𝒟::Codomain : RealMapping<F, N>,
-      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
-      Cube<F, N>: P2Minimise<Loc<F, N>, F>,
-      PlotLookup : Plotting<N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {
+        // Perform inertial update on μ.
+        // This computes μ ← (1 + θ) * μ - θ * μ_prev, pruning spikes where both μ
+        // and μ_prev have zero weight. Since both have weights from the finite-dimensional
+        // subproblem with a proximal projection step, this is likely to happen when the
+        // spike is not needed. A copy of the pruned μ, without the arithmetic
+        // applied, is stored in μ_prev.
+        let n_before_prune = μ.len();
+        μ.pruning_sub(1.0 + θ, θ, &mut μ_prev);
+        //let μ_new = (&μ * (1.0 + θ)).sub_matching(&(&μ_prev * θ));
+        // μ_prev = μ;
+        // μ = μ_new;
+        debug_assert!(μ.len() <= n_before_prune);
+        stats.pruned += n_before_prune - μ.len();
 
-      generic_pointsource_fb_reg(opA, NonnegRadonRegTerm(α), op𝒟, τ, config, iterator, plotter,
-                                 residual, specialisation)
+        // Update residual
+        residual = calculate_residual(&μ, opA, b);
+
+        let iter = state.iteration();
+        stats.this_iters += 1;
+
+        // Give statistics if needed
+        state.if_verbose(|| {
+            plotter.plot_spikes(iter, maybe_d.as_ref(), Some(&τv), &μ_prev);
+            full_stats(&μ_prev, ε, std::mem::replace(&mut stats, IterInfo::new()))
+        });
+
+        // Update main tolerance for next iteration
+        ε = tolerance.update(ε, iter);
+    }
+
+    postprocess(μ_prev, config, L2Squared, opA, b)
 }
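
To make the inertial stepsize rule above concrete: starting from λ = 1, the recursion λ ← 2λ/(λ + √(4 + λ²)) gives θ = λ⁺/λ − λ⁺ = 0 on the first round, so the first iteration is a plain forward-backward step, with inertia kicking in afterwards. A minimal standalone sketch of the same arithmetic (plain Rust, standard library only; illustrative, not from the crate):

    fn main() {
        let mut λ: f64 = 1.0;
        for k in 0..5 {
            let λ_prev = λ;
            λ = 2.0 * λ_prev / (λ_prev + (4.0 + λ_prev * λ_prev).sqrt());
            let θ = λ / λ_prev - λ;
            // Used above as μ ← (1 + θ)·μ − θ·μ_prev.
            println!("k = {}: λ = {:.4}, θ = {:.4}", k, λ, θ);
        }
    }
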
--- a/src/forward_model.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/forward_model.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -2,677 +2,82 @@
 Forward models from discrete measures to observations.
 */
 
-use numeric_literals::replace_float_literals;
-use nalgebra::base::{
-    DMatrix,
-    DVector
-};
-use std::iter::Zip;
-use std::ops::RangeFrom;
-use std::marker::PhantomData;
-
-pub use alg_tools::linops::*;
+use alg_tools::error::DynError;
 use alg_tools::euclidean::Euclidean;
-use alg_tools::norms::{
-    L1, Linfinity, Norm
-};
-use alg_tools::bisection_tree::*;
-use alg_tools::mapping::RealMapping;
-use alg_tools::lingrid::*;
-use alg_tools::iter::{MapX, Mappable};
-use alg_tools::nalgebra_support::ToNalgebraRealField;
-use alg_tools::tabledump::write_csv;
-use alg_tools::error::DynError;
+use alg_tools::instance::Instance;
+pub use alg_tools::linops::*;
+use alg_tools::norms::{Norm, NormExponent, L2};
 
+use crate::measures::Radon;
 use crate::types::*;
-use crate::measures::*;
-use crate::seminorms::{
-    Lipschitz,
-    ConvolutionOp,
-    SimpleConvolutionKernel,
-};
-use crate::kernels::{
-    Convolution,
-    AutoConvolution,
-    BoundedBy,
-};
-
-pub type RNDM<F, const N : usize> = DiscreteMeasure<Loc<F,N>, F>;
+pub mod bias;
+pub mod sensor_grid;
 
 /// `ForwardModel`s are bounded preadjointable linear operators $A ∈ 𝕃(𝒵(Ω); E)$
 /// where $𝒵(Ω) ⊂ ℳ(Ω)$ is the space of sums of delta measures, presented by
-/// [`DiscreteMeasure`], and $E$ is a [`Euclidean`] space.
-pub trait ForwardModel<Domain, F : Float + ToNalgebraRealField>
-: BoundedLinear<DiscreteMeasure<Domain, F>, Codomain=Self::Observable, FloatType=F>
-+ GEMV<F, DiscreteMeasure<Domain, F>, Self::Observable>
-+ Linear<DeltaMeasure<Domain, F>, Codomain=Self::Observable>
-+ Preadjointable<DiscreteMeasure<Domain, F>, Self::Observable> {
+/// [`crate::measures::DiscreteMeasure`], and $E$ is a [`Euclidean`] space.
+pub trait ForwardModel<Domain: Space, F: Float = f64, E: NormExponent = Radon>:
+    BoundedLinear<Domain, E, L2, F, Codomain = Self::Observable>
+    + GEMV<F, Domain, Self::Observable>
+    + Preadjointable<Domain, Self::Observable>
+where
+    for<'a> Self::Observable: Instance<Self::Observable>,
+    Domain: Norm<F, E>,
+{
     /// The codomain or value space (of “observables”) for this operator.
     /// It is assumed to be a [`Euclidean`] space, and therefore also (identified with)
     /// the domain of the preadjoint.
-    type Observable : Euclidean<F, Output=Self::Observable>
-                      + AXPY<F>
-                      + Clone;
-
-    /// Return A_*A and A_* b
-    fn findim_quadratic_model(
-        &self,
-        μ : &DiscreteMeasure<Domain, F>,
-        b : &Self::Observable
-    ) -> (DMatrix<F::MixedType>, DVector<F::MixedType>);
+    type Observable: Euclidean<F, Output = Self::Observable> + AXPY<F> + Space + Clone;
 
     /// Write an observable into a file.
-    fn write_observable(&self, b : &Self::Observable, prefix : String) -> DynError;
+    fn write_observable(&self, b: &Self::Observable, prefix: String) -> DynError;
 
     /// Returns a zero observable
     fn zero_observable(&self) -> Self::Observable;
-
-    /// Returns an empty (uninitialised) observable.
-    ///
-    /// This is used as a placeholder for temporary [`std::mem::replace`] move operations.
-    fn empty_observable(&self) -> Self::Observable;
-}
-
-pub type ShiftedSensor<F, S, P, const N : usize> = Shift<Convolution<S, P>, F, N>;
-
-/// Trait for physical convolution models. Has blanket implementation for all cases.
-pub trait Spread<F : Float, const N : usize>
-: 'static + Clone + Support<F, N> + RealMapping<F, N> + Bounded<F> {}
-
-impl<F, T, const N : usize> Spread<F, N> for T
-where F : Float,
-      T : 'static + Clone + Support<F, N> + Bounded<F> + RealMapping<F, N> {}
-
-/// Trait for compactly supported sensors. Has blanket implementation for all cases.
-pub trait Sensor<F : Float, const N : usize> : Spread<F, N> + Norm<F, L1> + Norm<F, Linfinity> {}
-
-impl<F, T, const N : usize> Sensor<F, N> for T
-where F : Float,
-      T : Spread<F, N> + Norm<F, L1> + Norm<F, Linfinity> {}
-
-
-pub trait SensorGridBT<F, S, P, const N : usize> :
-Clone + BTImpl<F, N, Data=usize, Agg=Bounds<F>>
-where F : Float,
-      S : Sensor<F, N>,
-      P : Spread<F, N> {}
-
-impl<F, S, P, T, const N : usize>
-SensorGridBT<F, S, P, N>
-for T
-where T : Clone + BTImpl<F, N, Data=usize, Agg=Bounds<F>>,
-      F : Float,
-      S : Sensor<F, N>,
-      P : Spread<F, N> {}
-
-// We need type alias bounds to access associated types
-#[allow(type_alias_bounds)]
-type SensorGridBTFN<F, S, P, BT : SensorGridBT<F, S, P, N>, const N : usize>
-= BTFN<F, SensorGridSupportGenerator<F, S, P, N>, BT, N>;
-
-/// Sensor grid forward model
-#[derive(Clone)]
-pub struct SensorGrid<F, S, P, BT, const N : usize>
-where F : Float,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      BT : SensorGridBT<F, S, P, N>, {
-    domain : Cube<F, N>,
-    sensor_count : [usize; N],
-    sensor : S,
-    spread : P,
-    base_sensor : Convolution<S, P>,
-    bt : BT,
 }
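
The solvers above use this interface mainly through `gemv`/`apply_add` and `zero_observable`. A hedged sketch of the typical residual computation (comment pseudocode only, since the exact `AXPY` method signature lives in `alg_tools` and is assumed here):

    // Assumed alg_tools semantics, not verbatim API:
    // let mut r = op_a.zero_observable(); // r = 0
    // op_a.gemv(&mut r, 1.0, &μ, 0.0);    // r = Aμ
    // r.axpy(-1.0, b, 1.0);               // r = Aμ − b  (AXPY: r ← αx + βr)
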
 
-impl<F, S, P, BT, const N : usize> SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-
-    pub fn new(
-        domain : Cube<F, N>,
-        sensor_count : [usize; N],
-        sensor : S,
-        spread : P,
-        depth : BT::Depth
-    ) -> Self {
-        let base_sensor = Convolution(sensor.clone(), spread.clone());
-        let bt = BT::new(domain, depth);
-        let mut sensorgrid = SensorGrid {
-            domain,
-            sensor_count,
-            sensor,
-            spread,
-            base_sensor,
-            bt,
-        };
-
-        for (x, id) in sensorgrid.grid().into_iter().zip(0usize..) {
-            let s = sensorgrid.shifted_sensor(x);
-            sensorgrid.bt.insert(id, &s);
-        }
-
-        sensorgrid
-    }
-
-    pub fn grid(&self) -> LinGrid<F, N> {
-        lingrid_centered(&self.domain, &self.sensor_count)
-    }
-
-    pub fn n_sensors(&self) -> usize {
-        self.sensor_count.iter().product()
-    }
-
-    #[inline]
-    fn shifted_sensor(&self, x : Loc<F, N>) -> ShiftedSensor<F, S, P, N> {
-        self.base_sensor.clone().shift(x)
-    }
-
-    #[inline]
-    fn _zero_observable(&self) -> DVector<F> {
-        DVector::zeros(self.n_sensors())
-    }
-}
-
-impl<F, S, P, BT, const N : usize> Apply<RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-
-    type Output =  DVector<F>;
-
-    #[inline]
-    fn apply(&self, μ : RNDM<F, N>) -> DVector<F> {
-        self.apply(&μ)
-    }
-}
-
-impl<'a, F, S, P, BT, const N : usize> Apply<&'a RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-
-    type Output =  DVector<F>;
-
-    fn apply(&self, μ : &'a RNDM<F, N>) ->  DVector<F> {
-        let mut res = self._zero_observable();
-        self.apply_add(&mut res, μ);
-        res
-    }
+/// Trait for operators $A$ for which $A_*A$ is bounded by some other operator.
+pub trait AdjointProductBoundedBy<Domain: Space, D>: Linear<Domain> {
+    type FloatType: Float;
+    /// Return $L$ such that $A_*A ≤ LD$.
+    fn adjoint_product_bound(&self, other: &D) -> Option<Self::FloatType>;
 }
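
This is the bound consumed at the top of the solvers above to pick the step length: if A_*A ≤ L𝒟, then τ = τ₀/L makes the quadratic surrogate with penalty τ₀𝒟 valid. A fragment mirroring `pointsource_fista_reg` (same names as there):

    // let l = opA.adjoint_product_bound(prox_penalty).unwrap();
    // let τ = fbconfig.τ0 / l; // then τ·A_*A ≤ τ0·𝒟
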
 
-impl<F, S, P, BT, const N : usize> Linear<RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-    type Codomain = DVector<F>;
-}
-
-
-#[replace_float_literals(F::cast_from(literal))]
-impl<F, S, P, BT, const N : usize> GEMV<F, RNDM<F, N>, DVector<F>> for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-
-    fn gemv(&self, y : &mut DVector<F>, α : F, μ : &RNDM<F, N>, β : F) {
-        let grid = self.grid();
-        if β == 0.0 {
-            y.fill(0.0)
-        } else if β != 1.0 {
-            *y *= β; // Need to multiply first, as we have to be able to add to y.
-        }
-        if α == 1.0 {
-            self.apply_add(y, μ)
-        } else {
-            for δ in μ.iter_spikes() {
-                for &d in self.bt.iter_at(&δ.x) {
-                    let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
-                    y[d] += sensor.apply(&δ.x) * (α * δ.α);
-                }
-            }
-        }
-    }
-
-    fn apply_add(&self, y : &mut DVector<F>, μ : &RNDM<F, N>) {
-        let grid = self.grid();
-        for δ in μ.iter_spikes() {
-            for &d in self.bt.iter_at(&δ.x) {
-                let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
-                y[d] += sensor.apply(&δ.x) * δ.α;
-            }
-        }
-    }
-
-}
-
-impl<F, S, P, BT, const N : usize> Apply<DeltaMeasure<Loc<F, N>, F>>
-for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-
-    type Output =  DVector<F>;
-
-    #[inline]
-    fn apply(&self, δ : DeltaMeasure<Loc<F, N>, F>) -> DVector<F> {
-        self.apply(&δ)
-    }
-}
-
-impl<'a, F, S, P, BT, const N : usize> Apply<&'a DeltaMeasure<Loc<F, N>, F>>
-for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-
-    type Output =  DVector<F>;
-
-    fn apply(&self, δ : &DeltaMeasure<Loc<F, N>, F>) -> DVector<F> {
-        let mut res = DVector::zeros(self.n_sensors());
-        let grid = self.grid();
-        for &d in self.bt.iter_at(&δ.x) {
-            let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
-            res[d] += sensor.apply(&δ.x) * δ.α;
-        }
-        res
-    }
-}
-
-impl<F, S, P, BT, const N : usize> Linear<DeltaMeasure<Loc<F, N>, F>> for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-    type Codomain = DVector<F>;
-}
-
-impl<F, S, P, BT, const N : usize> BoundedLinear<RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N, Agg=Bounds<F>>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
-    type FloatType = F;
-
-    /// An estimate on the operator norm in $𝕃(ℳ(Ω); ℝ^n)$ with $ℳ(Ω)$ equipped
-    /// with the Radon norm, and $ℝ^n$ with the Euclidean norm.
-    fn opnorm_bound(&self) -> F {
-        // With {x_i}_{i=1}^n the grid centres and φ the kernel, we have
-        // |Aμ|_2 = sup_{|z|_2 ≤ 1} ⟨z,Αμ⟩ = sup_{|z|_2 ≤ 1} ⟨A^*z|μ⟩
-        // ≤ sup_{|z|_2 ≤ 1} |A^*z|_∞ |μ|_ℳ
-        // = sup_{|z|_2 ≤ 1} |∑ φ(· - x_i)z_i|_∞ |μ|_ℳ
-        // ≤ sup_{|z|_2 ≤ 1} |φ|_∞ ∑ |z_i| |μ|_ℳ
-        // ≤ sup_{|z|_2 ≤ 1} |φ|_∞ √n |z|_2 |μ|_ℳ
-        // = |φ|_∞ √n |μ|_ℳ.
-        // Hence
-        let n = F::cast_from(self.n_sensors());
-        self.base_sensor.bounds().uniform() * n.sqrt()
-    }
-}
-
-type SensorGridPreadjoint<'a, A, F, const N : usize> = PreadjointHelper<'a, A, RNDM<F,N>>;
-
-
-impl<F, S, P, BT, const N : usize>
-Preadjointable<RNDM<F, N>, DVector<F>>
-for SensorGrid<F, S, P, BT, N>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
-      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {
-    type PreadjointCodomain = BTFN<F, SensorGridSupportGenerator<F, S, P, N>, BT, N>;
-    type Preadjoint<'a> = SensorGridPreadjoint<'a, Self, F, N> where Self : 'a;
-
-    fn preadjoint(&self) -> Self::Preadjoint<'_> {
-        PreadjointHelper::new(self)
-    }
-}
-
-#[derive(Clone,Debug)]
-pub struct SensorGridSupportGenerator<F, S, P, const N : usize>
-where F : Float,
-      S : Sensor<F, N>,
-      P : Spread<F, N> {
-    base_sensor : Convolution<S, P>,
-    grid : LinGrid<F, N>,
-    weights : DVector<F>
+/// Trait for operators $A$ for which $A_*A$ is bounded by a diagonal operator.
+pub trait AdjointProductPairBoundedBy<Domain: Space, D1, D2>: Linear<Domain> {
+    type FloatType: Float;
+    /// Return $(L_1, L_2)$ such that $A_*A ≤ diag(L_1 D_1, L_2 D_2)$.
+    fn adjoint_product_pair_bound(
+        &self,
+        other1: &D1,
+        other2: &D2,
+    ) -> Option<(Self::FloatType, Self::FloatType)>;
 }
 
-impl<F, S, P, const N : usize> SensorGridSupportGenerator<F, S, P, N>
-where F : Float,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N> {
-
-    #[inline]
-    fn construct_sensor(&self, id : usize, w : F) -> Weighted<ShiftedSensor<F, S, P, N>, F> {
-        let x = self.grid.entry_linear_unchecked(id);
-        self.base_sensor.clone().shift(x).weigh(w)
-    }
-
-    #[inline]
-    fn construct_sensor_and_id<'a>(&'a self, (id, w) : (usize, &'a F))
-    -> (usize, Weighted<ShiftedSensor<F, S, P, N>, F>) {
-        (id.into(), self.construct_sensor(id, *w))
-    }
-}
-
-impl<F, S, P, const N : usize> SupportGenerator<F, N>
-for SensorGridSupportGenerator<F, S, P, N>
-where F : Float,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N> {
-    type Id = usize;
-    type SupportType = Weighted<ShiftedSensor<F, S, P, N>, F>;
-    type AllDataIter<'a> = MapX<'a, Zip<RangeFrom<usize>,
-                                        std::slice::Iter<'a, F>>,
-                                Self,
-                                (Self::Id, Self::SupportType)>
-                           where Self : 'a;
-
-    #[inline]
-    fn support_for(&self, d : Self::Id) -> Self::SupportType {
-        self.construct_sensor(d, self.weights[d])
-    }
-
-    #[inline]
-    fn support_count(&self) -> usize {
-        self.weights.len()
+/*
+/// Trait for [`ForwardModel`]s whose preadjoint has Lipschitz values.
+pub trait LipschitzValues {
+    type FloatType : Float;
+    /// Return (if one exists) a factor $L$ such that $A_*z$ is $L$-Lipschitz for all
+    /// $z$ in the unit ball.
+    fn value_unit_lipschitz_factor(&self) -> Option<Self::FloatType> {
+        None
     }
 
-    #[inline]
-    fn all_data(&self) -> Self::AllDataIter<'_> {
-        (0..).zip(self.weights.as_slice().iter()).mapX(self, Self::construct_sensor_and_id)
-    }
-}
-
-/// Helper structure for constructing preadjoints of `S` where `S : Linear<X>`.
-/// [`Linear`] needs to be implemented for each instance, but [`Adjointable`]
-/// and [`BoundedLinear`] have blanket implementations.
-#[derive(Clone,Debug)]
-pub struct PreadjointHelper<'a, S : 'a, X> {
-    forward_op : &'a S,
-    _domain : PhantomData<X>
-}
-
-impl<'a, S : 'a, X> PreadjointHelper<'a, S, X> {
-    pub fn new(forward_op : &'a S) -> Self {
-        PreadjointHelper { forward_op, _domain: PhantomData }
-    }
-}
-
-impl<'a, X, Ypre, S> Adjointable<Ypre, X>
-for PreadjointHelper<'a, S, X>
-where Self : Linear<Ypre>,
-      S : Clone + Linear<X> {
-    type AdjointCodomain = S::Codomain;
-    type Adjoint<'b> = S where Self : 'b;
-    fn adjoint(&self) -> Self::Adjoint<'_> {
-        self.forward_op.clone()
-    }
-}
-
-impl<'a, X, Ypre, S> BoundedLinear<Ypre>
-for PreadjointHelper<'a, S, X>
-where Self : Linear<Ypre>,
-      S : 'a + Clone + BoundedLinear<X> {
-    type FloatType = S::FloatType;
-    fn opnorm_bound(&self) -> Self::FloatType {
-        self.forward_op.opnorm_bound()
-    }
-}
-
-
-impl<'a, 'b, F, S, P, BT, const N : usize> Apply<&'b DVector<F>>
-for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F,N>>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
-      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {
-
-    type Output = SensorGridBTFN<F, S, P, BT, N>;
-
-    fn apply(&self, x : &'b DVector<F>) -> Self::Output {
-        self.apply(x.clone())
-    }
-}
-
-impl<'a, F, S, P, BT, const N : usize> Apply<DVector<F>>
-for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F,N>>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
-      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {
-
-    type Output = SensorGridBTFN<F, S, P, BT, N>;
-
-    fn apply(&self, x : DVector<F>) -> Self::Output {
-        let fwd = &self.forward_op;
-        let generator = SensorGridSupportGenerator{
-            base_sensor : fwd.base_sensor.clone(),
-            grid : fwd.grid(),
-            weights : x
-        };
-        BTFN::new_refresh(&fwd.bt, generator)
+    /// Return (if one exists) a factor $L$ such that $∇A_*z$ is $L$-Lipschitz for all
+    /// $z$ in the unit ball.
+    fn value_diff_unit_lipschitz_factor(&self) -> Option<Self::FloatType> {
+        None
     }
 }
-
-impl<'a, F, S, P, BT, const N : usize> Linear<DVector<F>>
-for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F,N>>
-where F : Float,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
-      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {
-
-    type Codomain = SensorGridBTFN<F, S, P, BT, N>;
-}
-
-impl<F, S, P, BT, const N : usize> ForwardModel<Loc<F, N>, F>
-for SensorGrid<F, S, P, BT, N>
-where F : Float + ToNalgebraRealField<MixedType=F> + nalgebra::RealField,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
-      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {
-    type Observable = DVector<F>;
-
-    fn findim_quadratic_model(
-        &self,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
-        b : &Self::Observable
-    ) -> (DMatrix<F::MixedType>, DVector<F::MixedType>) {
-        assert_eq!(b.len(), self.n_sensors());
-        let mut mA = DMatrix::zeros(self.n_sensors(), μ.len());
-        let grid = self.grid();
-        for (mut mAcol, δ) in mA.column_iter_mut().zip(μ.iter_spikes()) {
-            for &d in self.bt.iter_at(&δ.x) {
-                let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
-                mAcol[d] += sensor.apply(&δ.x);
-            }
-        }
-        let mAt = mA.transpose();
-        (&mAt * mA, &mAt * b)
-    }
+*/
 
-    fn write_observable(&self, b : &Self::Observable, prefix : String) -> DynError {
-        let it = self.grid().into_iter().zip(b.iter()).map(|(x, &v)| (x, v));
-        write_csv(it, prefix + ".txt")
-    }
-
-    #[inline]
-    fn zero_observable(&self) -> Self::Observable {
-        self._zero_observable()
-    }
-
-    #[inline]
-    fn empty_observable(&self) -> Self::Observable {
-        DVector::zeros(0)
-    }
-
-}
-
-/// Implements the calculation a factor $L$ such that $A_*A ≤ L 𝒟$ for $A$ the forward model
-/// and $𝒟$ a seminorm of suitable form.
-///
-/// **This assumes (but does not check) that the sensors are not overlapping.**
-#[replace_float_literals(F::cast_from(literal))]
-impl<F, BT, S, P, K, const N : usize> Lipschitz<ConvolutionOp<F, K, BT, N>>
-for SensorGrid<F, S, P, BT, N>
-where F : Float + nalgebra::RealField + ToNalgebraRealField,
-      BT : SensorGridBT<F, S, P, N>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N>,
-      K : SimpleConvolutionKernel<F, N>,
-      AutoConvolution<P> : BoundedBy<F, K> {
-
-    type FloatType = F;
-
-    fn lipschitz_factor(&self, seminorm : &ConvolutionOp<F, K, BT, N>) -> Option<F> {
-        // Sensors should not take on negative values to allow
-        // A_*A to be upper bounded by a simple convolution of `spread`.
-        if self.sensor.bounds().lower() < 0.0 {
-            return None
-        }
-
-        // Calculate the factor $L_1$ for betwee $ℱ[ψ * ψ] ≤ L_1 ℱ[ρ]$ for $ψ$ the base spread
-        // and $ρ$ the kernel of the seminorm.
-        let l1 = AutoConvolution(self.spread.clone()).bounding_factor(seminorm.kernel())?;
-
-        // Calculate the factor for transitioning from $A_*A$ to `AutoConvolution<P>`, where A
-        // consists of several `Convolution<S, P>` for the physical model `P` and the sensor `S`.
-        let l0 = self.sensor.norm(Linfinity) * self.sensor.norm(L1);
+/// Trait for [`ForwardModel`]s that satisfy bounds on curvature.
+pub trait BoundedCurvature {
+    type FloatType: Float;
 
-        // The final transition factor is:
-        Some(l0 * l1)
-    }
+    /// Returns factors $ℓ_F$ and $ℓ_r$ such that
+    /// $B_{F'(μ)} dγ ≤ ℓ_F c_2$ and $⟨F'(μ)+F'(μ+Δ)|Δ⟩ ≤ ℓ_r|γ|(c_2)$,
+    /// where $Δ=(π_♯^1-π_♯^0)γ$.
+    fn curvature_bound_components(&self) -> (Option<Self::FloatType>, Option<Self::FloatType>);
 }
-
-macro_rules! make_sensorgridsupportgenerator_scalarop_rhs {
-    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
-        impl<F, S, P, const N : usize>
-        std::ops::$trait_assign<F>
-        for SensorGridSupportGenerator<F, S, P, N>
-        where F : Float,
-              S : Sensor<F, N>,
-              P : Spread<F, N>,
-              Convolution<S, P> : Spread<F, N> {
-            fn $fn_assign(&mut self, t : F) {
-                self.weights.$fn_assign(t);
-            }
-        }
-
-        impl<F, S, P, const N : usize>
-        std::ops::$trait<F>
-        for SensorGridSupportGenerator<F, S, P, N>
-        where F : Float,
-              S : Sensor<F, N>,
-              P : Spread<F, N>,
-              Convolution<S, P> : Spread<F, N> {
-            type Output = SensorGridSupportGenerator<F, S, P, N>;
-            fn $fn(mut self, t : F) -> Self::Output {
-                std::ops::$trait_assign::$fn_assign(&mut self.weights, t);
-                self
-            }
-        }
-
-        impl<'a, F, S, P, const N : usize>
-        std::ops::$trait<F>
-        for &'a SensorGridSupportGenerator<F, S, P, N>
-        where F : Float,
-              S : Sensor<F, N>,
-              P : Spread<F, N>,
-              Convolution<S, P> : Spread<F, N> {
-            type Output = SensorGridSupportGenerator<F, S, P, N>;
-            fn $fn(self, t : F) -> Self::Output {
-                SensorGridSupportGenerator{
-                    base_sensor : self.base_sensor.clone(),
-                    grid : self.grid,
-                    weights : (&self.weights).$fn(t)
-                }
-            }
-        }
-    }
-}
-
-make_sensorgridsupportgenerator_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
-make_sensorgridsupportgenerator_scalarop_rhs!(Div, div, DivAssign, div_assign);
-
-macro_rules! make_sensorgridsupportgenerator_unaryop {
-    ($trait:ident, $fn:ident) => {
-        impl<F, S, P, const N : usize>
-        std::ops::$trait
-        for SensorGridSupportGenerator<F, S, P, N>
-        where F : Float,
-              S : Sensor<F, N>,
-              P : Spread<F, N>,
-              Convolution<S, P> : Spread<F, N> {
-            type Output = SensorGridSupportGenerator<F, S, P, N>;
-            fn $fn(mut self) -> Self::Output {
-                self.weights = self.weights.$fn();
-                self
-            }
-        }
-
-        impl<'a, F, S, P, const N : usize>
-        std::ops::$trait
-        for &'a SensorGridSupportGenerator<F, S, P, N>
-        where F : Float,
-              S : Sensor<F, N>,
-              P : Spread<F, N>,
-              Convolution<S, P> : Spread<F, N> {
-            type Output = SensorGridSupportGenerator<F, S, P, N>;
-            fn $fn(self) -> Self::Output {
-                SensorGridSupportGenerator{
-                    base_sensor : self.base_sensor.clone(),
-                    grid : self.grid,
-                    weights : (&self.weights).$fn()
-                }
-            }
-        }
-    }
-}
-
-make_sensorgridsupportgenerator_unaryop!(Neg, neg);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/forward_model/bias.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -0,0 +1,108 @@
+/*!
+Simple parametric forward model.
+ */
+
+use super::{AdjointProductBoundedBy, AdjointProductPairBoundedBy, BoundedCurvature, ForwardModel};
+use crate::measures::RNDM;
+use alg_tools::direct_product::Pair;
+use alg_tools::error::DynError;
+use alg_tools::linops::{IdOp, Linear, RowOp, ZeroOp, AXPY};
+use alg_tools::mapping::Space;
+use alg_tools::norms::{Norm, NormExponent, PairNorm, L2};
+use alg_tools::types::{ClosedAdd, Float};
+use numeric_literals::replace_float_literals;
+
+impl<Domain, F, A, E> ForwardModel<Pair<Domain, A::Observable>, F, PairNorm<E, L2, L2>>
+    for RowOp<A, IdOp<A::Observable>>
+where
+    E: NormExponent,
+    Domain: Space + Norm<F, E>,
+    F: Float,
+    A::Observable: ClosedAdd + Norm<F, L2> + 'static,
+    A: ForwardModel<Domain, F, E> + 'static,
+{
+    type Observable = A::Observable;
+
+    fn write_observable(&self, b: &Self::Observable, prefix: String) -> DynError {
+        self.0.write_observable(b, prefix)
+    }
+
+    /// Returns a zero observable
+    fn zero_observable(&self) -> Self::Observable {
+        self.0.zero_observable()
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<Domain, F, A, D, Z> AdjointProductPairBoundedBy<Pair<Domain, Z>, D, IdOp<Z>>
+    for RowOp<A, IdOp<Z>>
+where
+    Domain: Space,
+    F: Float,
+    Z: Clone + Space + ClosedAdd,
+    A: AdjointProductBoundedBy<Domain, D, FloatType = F, Codomain = Z>,
+    A::Codomain: ClosedAdd,
+{
+    type FloatType = F;
+
+    fn adjoint_product_pair_bound(&self, d: &D, _: &IdOp<Z>) -> Option<(F, F)> {
+        self.0.adjoint_product_bound(d).map(|l_0| {
+            // [A_*; B_*][A, B] = [A_*A, A_* B; B_* A, B_* B] ≤ diag(2A_*A, 2B_*B)
+            // ≤ diag(2l_A𝒟_A, 2l_B𝒟_B), where now 𝒟_B=Id and l_B=1.
+            (2.0 * l_0, 2.0)
+        })
+    }
+}
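
The factors (2l_0, 2) follow from Young's inequality: for B = [A Id] and any (μ, z),

    ‖Aμ + z‖² ≤ 2‖Aμ‖² + 2‖z‖² = ⟨2A_*Aμ, μ⟩ + 2‖z‖²,

so A_*A ≤ l_0𝒟 gives the diagonal bound diag(2l_0𝒟, 2·Id) stated in the comment above.
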
+
+/*
+/// This `impl` is a bit of an abuse, as the codomain of `Apre` is a [`Pair`] of a measure predual,
+/// to which this `impl` applies, and another space.
+impl<F, Apre, Z> LipschitzValues
+for ColOp<Apre, IdOp<Z>>
+where
+    F : Float,
+    Z : Clone + Space + ClosedAdd,
+    Apre : LipschitzValues<FloatType = F>,
+{
+    type FloatType = F;
+    /// Return (if one exists) a factor $L$ such that $A_*z$ is $L$-Lipschitz for all
+    /// $z$ in the unit ball.
+    fn value_unit_lipschitz_factor(&self) -> Option<Self::FloatType> {
+        self.0.value_unit_lipschitz_factor()
+    }
+
+    /// Return (if one exists) a factor $L$ such that $∇A_*z$ is $L$-Lipschitz for all
+    /// $z$ in the unit ball.
+    fn value_diff_unit_lipschitz_factor(&self) -> Option<Self::FloatType> {
+        self.0.value_diff_unit_lipschitz_factor()
+    }
+}
+*/
+
+impl<F, A, Z> BoundedCurvature for RowOp<A, IdOp<Z>>
+where
+    F: Float,
+    Z: Clone + Space + ClosedAdd,
+    A: BoundedCurvature<FloatType = F>,
+{
+    type FloatType = F;
+
+    fn curvature_bound_components(&self) -> (Option<Self::FloatType>, Option<Self::FloatType>) {
+        self.0.curvature_bound_components()
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F, D, XD, Y, const N: usize> AdjointProductBoundedBy<RNDM<F, N>, D>
+    for ZeroOp<'a, RNDM<F, N>, XD, Y, F>
+where
+    F: Float,
+    Y: AXPY<F> + Clone,
+    D: Linear<RNDM<F, N>>,
+{
+    type FloatType = F;
+    /// Return $L$ such that $A_*A ≤ L𝒟$ for the given operator $𝒟$; the zero operator admits $L = 0$.
+    fn adjoint_product_bound(&self, _: &D) -> Option<F> {
+        Some(0.0)
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/forward_model/sensor_grid.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -0,0 +1,645 @@
+/*!
+Sensor grid forward model
+*/
+
+use nalgebra::base::{DMatrix, DVector};
+use numeric_literals::replace_float_literals;
+use std::iter::Zip;
+use std::ops::RangeFrom;
+
+use alg_tools::bisection_tree::*;
+use alg_tools::error::DynError;
+use alg_tools::instance::Instance;
+use alg_tools::iter::{MapX, Mappable};
+use alg_tools::lingrid::*;
+pub use alg_tools::linops::*;
+use alg_tools::mapping::{DifferentiableMapping, RealMapping};
+use alg_tools::maputil::map2;
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::norms::{Linfinity, Norm, L1, L2};
+use alg_tools::tabledump::write_csv;
+
+use super::{AdjointProductBoundedBy, BoundedCurvature, ForwardModel};
+use crate::frank_wolfe::FindimQuadraticModel;
+use crate::kernels::{AutoConvolution, BoundedBy, Convolution};
+use crate::measures::{DiscreteMeasure, Radon};
+use crate::preadjoint_helper::PreadjointHelper;
+use crate::seminorms::{ConvolutionOp, SimpleConvolutionKernel};
+use crate::types::*;
+
+type RNDM<F, const N: usize> = DiscreteMeasure<Loc<F, N>, F>;
+
+pub type ShiftedSensor<F, S, P, const N: usize> = Shift<Convolution<S, P>, F, N>;
+
+/// Trait for physical convolution models. Has blanket implementation for all cases.
+pub trait Spread<F: Float, const N: usize>:
+    'static + Clone + Support<F, N> + RealMapping<F, N> + Bounded<F>
+{
+}
+
+impl<F, T, const N: usize> Spread<F, N> for T
+where
+    F: Float,
+    T: 'static + Clone + Support<F, N> + Bounded<F> + RealMapping<F, N>,
+{
+}
+
+/// Trait for compactly supported sensors. Has blanket implementation for all cases.
+pub trait Sensor<F: Float, const N: usize>:
+    Spread<F, N> + Norm<F, L1> + Norm<F, Linfinity>
+{
+}
+
+impl<F, T, const N: usize> Sensor<F, N> for T
+where
+    F: Float,
+    T: Spread<F, N> + Norm<F, L1> + Norm<F, Linfinity>,
+{
+}
+
+pub trait SensorGridBT<F, S, P, const N: usize>:
+    Clone + BTImpl<F, N, Data = usize, Agg = Bounds<F>>
+where
+    F: Float,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+{
+}
+
+impl<F, S, P, T, const N: usize> SensorGridBT<F, S, P, N> for T
+where
+    T: Clone + BTImpl<F, N, Data = usize, Agg = Bounds<F>>,
+    F: Float,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+{
+}
+
+// We need type alias bounds to access associated types
+#[allow(type_alias_bounds)]
+pub type SensorGridBTFN<F, S, P, BT: SensorGridBT<F, S, P, N>, const N: usize> =
+    BTFN<F, SensorGridSupportGenerator<F, S, P, N>, BT, N>;
+
+/// Sensor grid forward model
+#[derive(Clone)]
+pub struct SensorGrid<F, S, P, BT, const N: usize>
+where
+    F: Float,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+    Convolution<S, P>: Spread<F, N>,
+    BT: SensorGridBT<F, S, P, N>,
+{
+    domain: Cube<F, N>,
+    sensor_count: [usize; N],
+    sensor: S,
+    spread: P,
+    base_sensor: Convolution<S, P>,
+    bt: BT,
+}
+
+impl<F, S, P, BT, const N: usize> SensorGrid<F, S, P, BT, N>
+where
+    F: Float,
+    BT: SensorGridBT<F, S, P, N>,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+    Convolution<S, P>: Spread<F, N> + LocalAnalysis<F, BT::Agg, N>,
+{
+    /// Create a new sensor grid.
+    ///
+    /// The parameter `depth` indicates the search depth of the created [`BT`]s
+    /// for the adjoint values.
+    pub fn new(
+        domain: Cube<F, N>,
+        sensor_count: [usize; N],
+        sensor: S,
+        spread: P,
+        depth: BT::Depth,
+    ) -> Self {
+        let base_sensor = Convolution(sensor.clone(), spread.clone());
+        let bt = BT::new(domain, depth);
+        let mut sensorgrid = SensorGrid {
+            domain,
+            sensor_count,
+            sensor,
+            spread,
+            base_sensor,
+            bt,
+        };
+
+        for (x, id) in sensorgrid.grid().into_iter().zip(0usize..) {
+            let s = sensorgrid.shifted_sensor(x);
+            sensorgrid.bt.insert(id, &s);
+        }
+
+        sensorgrid
+    }
+}
+
+impl<F, S, P, BT, const N: usize> SensorGrid<F, S, P, BT, N>
+where
+    F: Float,
+    BT: SensorGridBT<F, S, P, N>,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+    Convolution<S, P>: Spread<F, N>,
+{
+    /// Return the grid of sensor locations.
+    pub fn grid(&self) -> LinGrid<F, N> {
+        lingrid_centered(&self.domain, &self.sensor_count)
+    }
+
+    /// Returns the number of sensors (the number of grid points).
+    pub fn n_sensors(&self) -> usize {
+        self.sensor_count.iter().product()
+    }
+
+    /// Constructs a sensor shifted by `x`.
+    #[inline]
+    fn shifted_sensor(&self, x: Loc<F, N>) -> ShiftedSensor<F, S, P, N> {
+        self.base_sensor.clone().shift(x)
+    }
+
+    #[inline]
+    fn _zero_observable(&self) -> DVector<F> {
+        DVector::zeros(self.n_sensors())
+    }
+
+    /// Returns the maximum number of overlapping sensors $N_\psi$.
+    pub fn max_overlapping(&self) -> F {
+        let w = self.base_sensor.support_hint().width();
+        let d = map2(self.domain.width(), &self.sensor_count, |wi, &i| {
+            wi / F::cast_from(i)
+        });
+        w.iter()
+            .zip(d.iter())
+            .map(|(&wi, &di)| (wi / di).ceil())
+            .reduce(F::mul)
+            .unwrap()
+    }
+}
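
As a concrete check of `max_overlapping`, with made-up numbers: on the unit square with a 10×10 sensor grid, each grid cell has width 0.1 per axis; a base sensor of support width 0.25 per axis then overlaps ⌈0.25/0.1⌉ = 3 cells per axis, so N_ψ = 9. A standalone sketch of the same arithmetic:

    fn main() {
        let domain_width = [1.0_f64, 1.0];
        let sensor_count = [10usize, 10];
        let support_width = [0.25_f64, 0.25];
        let n_ψ: f64 = support_width
            .iter()
            .zip(domain_width.iter().zip(sensor_count.iter()))
            .map(|(&w, (&dw, &c))| (w / (dw / c as f64)).ceil())
            .product();
        assert_eq!(n_ψ, 9.0); // ⌈0.25/0.1⌉² = 3² = 9
    }
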
+
+impl<F, S, P, BT, const N: usize> Mapping<RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
+where
+    F: Float,
+    BT: SensorGridBT<F, S, P, N>,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+    Convolution<S, P>: Spread<F, N>,
+{
+    type Codomain = DVector<F>;
+
+    #[inline]
+    fn apply<I: Instance<RNDM<F, N>>>(&self, μ: I) -> DVector<F> {
+        let mut y = self._zero_observable();
+        self.apply_add(&mut y, μ);
+        y
+    }
+}
+
+impl<F, S, P, BT, const N: usize> Linear<RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
+where
+    F: Float,
+    BT: SensorGridBT<F, S, P, N>,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+    Convolution<S, P>: Spread<F, N>,
+{
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, S, P, BT, const N: usize> GEMV<F, RNDM<F, N>, DVector<F>> for SensorGrid<F, S, P, BT, N>
+where
+    F: Float,
+    BT: SensorGridBT<F, S, P, N>,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+    Convolution<S, P>: Spread<F, N>,
+{
+    fn gemv<I: Instance<RNDM<F, N>>>(&self, y: &mut DVector<F>, α: F, μ: I, β: F) {
+        let grid = self.grid();
+        if β == 0.0 {
+            y.fill(0.0)
+        } else if β != 1.0 {
+            *y *= β; // Need to multiply first, as we have to be able to add to y.
+        }
+        if α == 1.0 {
+            self.apply_add(y, μ)
+        } else {
+            for δ in μ.ref_instance() {
+                for &d in self.bt.iter_at(&δ.x) {
+                    let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
+                    y[d] += sensor.apply(&δ.x) * (α * δ.α);
+                }
+            }
+        }
+    }
+
+    fn apply_add<I: Instance<RNDM<F, N>>>(&self, y: &mut DVector<F>, μ: I) {
+        let grid = self.grid();
+        for δ in μ.ref_instance() {
+            for &d in self.bt.iter_at(&δ.x) {
+                let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
+                y[d] += sensor.apply(&δ.x) * δ.α;
+            }
+        }
+    }
+}
+
+impl<F, S, P, BT, const N: usize> BoundedLinear<RNDM<F, N>, Radon, L2, F>
+    for SensorGrid<F, S, P, BT, N>
+where
+    F: Float,
+    BT: SensorGridBT<F, S, P, N, Agg = Bounds<F>>,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+    Convolution<S, P>: Spread<F, N> + LocalAnalysis<F, BT::Agg, N>,
+{
+    /// An estimate on the operator norm in $𝕃(ℳ(Ω); ℝ^n)$ with $ℳ(Ω)$ equipped
+    /// with the Radon norm, and $ℝ^n$ with the Euclidean norm.
+    fn opnorm_bound(&self, _: Radon, _: L2) -> F {
+        // With {x_i}_{i=1}^n the grid centres and φ the kernel, we have
+        // |Aμ|_2 = sup_{|z|_2 ≤ 1} ⟨z,Αμ⟩ = sup_{|z|_2 ≤ 1} ⟨A^*z|μ⟩
+        // ≤ sup_{|z|_2 ≤ 1} |A^*z|_∞ |μ|_ℳ
+        // = sup_{|z|_2 ≤ 1} |∑ φ(· - x_i)z_i|_∞ |μ|_ℳ
+        // ≤ sup_{|z|_2 ≤ 1} |φ(y)| ∑_{i : sensor i active at y} |z_i| |μ|_ℳ
+        //      where the supremum of |∑ φ(· - x_i)z_i|_∞ is reached at y
+        // ≤ sup_{|z|_2 ≤ 1} |φ|_∞ √N_ψ |z|_2 |μ|_ℳ
+        //      where N_ψ is the maximum number of sensors that overlap, and
+        //      |z|_2 is restricted to the active sensors.
+        // = |φ|_∞ √N_ψ |μ|_ℳ.
+        // Hence
+        let n = self.max_overlapping();
+        self.base_sensor.bounds().uniform() * n.sqrt()
+    }
+}
+
+type SensorGridPreadjoint<'a, A, F, const N: usize> = PreadjointHelper<'a, A, RNDM<F, N>>;
+
+impl<F, S, P, BT, const N: usize> Preadjointable<RNDM<F, N>, DVector<F>>
+    for SensorGrid<F, S, P, BT, N>
+where
+    F: Float,
+    BT: SensorGridBT<F, S, P, N>,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+    Convolution<S, P>: Spread<F, N> + LocalAnalysis<F, BT::Agg, N>,
+{
+    type PreadjointCodomain = BTFN<F, SensorGridSupportGenerator<F, S, P, N>, BT, N>;
+    type Preadjoint<'a>
+        = SensorGridPreadjoint<'a, Self, F, N>
+    where
+        Self: 'a;
+
+    fn preadjoint(&self) -> Self::Preadjoint<'_> {
+        PreadjointHelper::new(self)
+    }
+}
+
+/*
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F, S, P, BT, const N : usize> LipschitzValues
+for SensorGridPreadjoint<'a, SensorGrid<F, S, P, BT, N>, F, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N> + Lipschitz<L2, FloatType=F> + DifferentiableMapping<Loc<F,N>> + LocalAnalysis<F, BT::Agg, N>,
+      for<'b> <Convolution<S, P> as DifferentiableMapping<Loc<F,N>>>::Differential<'b> : Lipschitz<L2, FloatType=F>,
+{
+
+    type FloatType = F;
+
+    fn value_unit_lipschitz_factor(&self) -> Option<F> {
+        // The Lipschitz factor of the sensors has to be scaled by the square root of twice
+        // the number of overlapping sensors at a single point, as Lipschitz estimates involve
+        // two points.
+        let fw = self.forward_op;
+        let n = fw.max_overlapping();
+        fw.base_sensor.lipschitz_factor(L2).map(|l| (2.0 * n).sqrt() * l)
+    }
+
+    fn value_diff_unit_lipschitz_factor(&self) -> Option<F> {
+        // The Lipschitz factor of the sensors has to be scaled by the square root of twice
+        // the number of overlapping sensors at a single point, as Lipschitz estimates involve
+        // two points.
+        let fw = self.forward_op;
+        let n = fw.max_overlapping();
+        fw.base_sensor.diff_ref().lipschitz_factor(L2).map(|l| (2.0 * n).sqrt() * l)
+    }
+}
+*/
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F, S, P, BT, const N: usize> BoundedCurvature for SensorGrid<F, S, P, BT, N>
+where
+    F: Float,
+    BT: SensorGridBT<F, S, P, N>,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+    Convolution<S, P>: Spread<F, N>
+        + Lipschitz<L2, FloatType = F>
+        + DifferentiableMapping<Loc<F, N>>
+        + LocalAnalysis<F, BT::Agg, N>,
+    for<'b> <Convolution<S, P> as DifferentiableMapping<Loc<F, N>>>::Differential<'b>:
+        Lipschitz<L2, FloatType = F>,
+{
+    type FloatType = F;
+
+    /// Returns factors $ℓ_F$ and $Θ²$ such that
+    /// $B_{F'(μ)} dγ ≤ ℓ_F c_2$ and $⟨F'(μ)+F'(μ+Δ)|Δ⟩ ≤ Θ²|γ|(c_2)‖γ‖$,
+    /// where $Δ=(π_♯^1-π_♯^0)γ$.
+    ///
+    /// See Lemma 3.8, Lemma 5.10, Remark 5.14, and Example 5.15.
+    fn curvature_bound_components(&self) -> (Option<Self::FloatType>, Option<Self::FloatType>) {
+        let n_ψ = self.max_overlapping();
+        let ψ_diff_lip = self.base_sensor.diff_ref().lipschitz_factor(L2);
+        let ψ_lip = self.base_sensor.lipschitz_factor(L2);
+        let ℓ_F = ψ_diff_lip.map(|l| (2.0 * n_ψ).sqrt() * l);
+        let θ2 = ψ_lip.map(|l| 4.0 * n_ψ * l.powi(2));
+
+        (ℓ_F, θ2)
+    }
+}
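
Plugging made-up factors into the arithmetic above (N_ψ = 9, Lip(ψ) = 1.5, Lip(∇ψ) = 2.0; assumed values for illustration only):

    fn main() {
        let n_ψ = 9.0_f64;
        let ψ_lip = 1.5_f64;      // Lip(ψ), assumed
        let ψ_diff_lip = 2.0_f64; // Lip(∇ψ), assumed
        let ℓ_f = (2.0 * n_ψ).sqrt() * ψ_diff_lip; // ≈ 8.49
        let θ2 = 4.0 * n_ψ * ψ_lip.powi(2); // = 81.0
        println!("ℓ_F = {:.2}, Θ² = {}", ℓ_f, θ2);
    }
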
+
+#[derive(Clone, Debug)]
+pub struct SensorGridSupportGenerator<F, S, P, const N: usize>
+where
+    F: Float,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+{
+    base_sensor: Convolution<S, P>,
+    grid: LinGrid<F, N>,
+    weights: DVector<F>,
+}
+
+impl<F, S, P, const N: usize> SensorGridSupportGenerator<F, S, P, N>
+where
+    F: Float,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+    Convolution<S, P>: Spread<F, N>,
+{
+    #[inline]
+    fn construct_sensor(&self, id: usize, w: F) -> Weighted<ShiftedSensor<F, S, P, N>, F> {
+        let x = self.grid.entry_linear_unchecked(id);
+        self.base_sensor.clone().shift(x).weigh(w)
+    }
+
+    #[inline]
+    fn construct_sensor_and_id<'a>(
+        &'a self,
+        (id, w): (usize, &'a F),
+    ) -> (usize, Weighted<ShiftedSensor<F, S, P, N>, F>) {
+        (id.into(), self.construct_sensor(id, *w))
+    }
+}
+
+impl<F, S, P, const N: usize> SupportGenerator<F, N> for SensorGridSupportGenerator<F, S, P, N>
+where
+    F: Float,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+    Convolution<S, P>: Spread<F, N>,
+{
+    type Id = usize;
+    type SupportType = Weighted<ShiftedSensor<F, S, P, N>, F>;
+    type AllDataIter<'a>
+        = MapX<
+        'a,
+        Zip<RangeFrom<usize>, std::slice::Iter<'a, F>>,
+        Self,
+        (Self::Id, Self::SupportType),
+    >
+    where
+        Self: 'a;
+
+    #[inline]
+    fn support_for(&self, d: Self::Id) -> Self::SupportType {
+        self.construct_sensor(d, self.weights[d])
+    }
+
+    #[inline]
+    fn support_count(&self) -> usize {
+        self.weights.len()
+    }
+
+    #[inline]
+    fn all_data(&self) -> Self::AllDataIter<'_> {
+        (0..)
+            .zip(self.weights.as_slice().iter())
+            .mapX(self, Self::construct_sensor_and_id)
+    }
+}
+
+impl<F, S, P, BT, const N: usize> ForwardModel<DiscreteMeasure<Loc<F, N>, F>, F>
+    for SensorGrid<F, S, P, BT, N>
+where
+    F: Float + ToNalgebraRealField<MixedType = F> + nalgebra::RealField,
+    BT: SensorGridBT<F, S, P, N>,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+    Convolution<S, P>: Spread<F, N> + LocalAnalysis<F, BT::Agg, N>,
+{
+    type Observable = DVector<F>;
+
+    fn write_observable(&self, b: &Self::Observable, prefix: String) -> DynError {
+        let it = self.grid().into_iter().zip(b.iter()).map(|(x, &v)| (x, v));
+        write_csv(it, prefix + ".txt")
+    }
+
+    #[inline]
+    fn zero_observable(&self) -> Self::Observable {
+        self._zero_observable()
+    }
+}
+
+impl<F, S, P, BT, const N: usize> FindimQuadraticModel<Loc<F, N>, F> for SensorGrid<F, S, P, BT, N>
+where
+    F: Float + ToNalgebraRealField<MixedType = F> + nalgebra::RealField,
+    BT: SensorGridBT<F, S, P, N>,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+    Convolution<S, P>: Spread<F, N> + LocalAnalysis<F, BT::Agg, N>,
+{
+    fn findim_quadratic_model(
+        &self,
+        μ: &DiscreteMeasure<Loc<F, N>, F>,
+        b: &Self::Observable,
+    ) -> (DMatrix<F::MixedType>, DVector<F::MixedType>) {
+        assert_eq!(b.len(), self.n_sensors());
+        let mut mA = DMatrix::zeros(self.n_sensors(), μ.len());
+        let grid = self.grid();
+        for (mut mAcol, δ) in mA.column_iter_mut().zip(μ.iter_spikes()) {
+            for &d in self.bt.iter_at(&δ.x) {
+                let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
+                mAcol[d] += sensor.apply(&δ.x);
+            }
+        }
+        let mAt = mA.transpose();
+        (&mAt * mA, &mAt * b)
+    }
+}
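
For reference, the returned pair is the data (A_*A, A_*b) of the finite-dimensional quadratic model in the spike weights α,

    min_α ½⟨A_*A α, α⟩ − ⟨A_*b, α⟩   ( = ½‖Aα − b‖² up to the constant ½‖b‖² ),

with regularisation terms added by the caller; the trait is consumed from `crate::frank_wolfe`.
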
+
+/// Implements the calculation of a factor $L$ such that $A_*A ≤ L 𝒟$ for $A$ the forward model
+/// and $𝒟$ a seminorm of suitable form.
+///
+/// **This assumes (but does not check) that the sensors are not overlapping.**
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, BT, S, P, K, const N: usize> AdjointProductBoundedBy<RNDM<F, N>, ConvolutionOp<F, K, BT, N>>
+    for SensorGrid<F, S, P, BT, N>
+where
+    F: Float + nalgebra::RealField + ToNalgebraRealField,
+    BT: SensorGridBT<F, S, P, N>,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+    Convolution<S, P>: Spread<F, N>,
+    K: SimpleConvolutionKernel<F, N>,
+    AutoConvolution<P>: BoundedBy<F, K>,
+{
+    type FloatType = F;
+
+    fn adjoint_product_bound(&self, seminorm: &ConvolutionOp<F, K, BT, N>) -> Option<F> {
+        // Sensors should not take on negative values to allow
+        // A_*A to be upper bounded by a simple convolution of `spread`.
+        if self.sensor.bounds().lower() < 0.0 {
+            return None;
+        }
+
+        // Calculate the factor $L_1$ such that $ℱ[ψ * ψ] ≤ L_1 ℱ[ρ]$, for $ψ$ the base spread
+        // and $ρ$ the kernel of the seminorm.
+        let l1 = AutoConvolution(self.spread.clone()).bounding_factor(seminorm.kernel())?;
+
+        // Calculate the factor for transitioning from $A_*A$ to `AutoConvolution<P>`, where A
+        // consists of several `Convolution<S, P>` for the physical model `P` and the sensor `S`.
+        let l0 = self.sensor.norm(Linfinity) * self.sensor.norm(L1);
+
+        // The final transition factor is:
+        Some(l0 * l1)
+    }
+}
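+
+// Illustrative sanity check with hypothetical numbers (not derived from the code
+// above): if the sensor satisfies ‖S‖_∞ = 1 and ‖S‖_1 = 0.01, then l0 = 0.01, and
+// if the kernel comparison yields l1 = 2.5, the returned bound is
+//     L = l0·l1 = 0.01 × 2.5 = 0.025,
+// i.e., A_*A ≤ 0.025 𝒟 under the stated non-overlap assumption.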
+
+macro_rules! make_sensorgridsupportgenerator_scalarop_rhs {
+    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
+        impl<F, S, P, const N: usize> std::ops::$trait_assign<F>
+            for SensorGridSupportGenerator<F, S, P, N>
+        where
+            F: Float,
+            S: Sensor<F, N>,
+            P: Spread<F, N>,
+            Convolution<S, P>: Spread<F, N>,
+        {
+            fn $fn_assign(&mut self, t: F) {
+                self.weights.$fn_assign(t);
+            }
+        }
+
+        impl<F, S, P, const N: usize> std::ops::$trait<F> for SensorGridSupportGenerator<F, S, P, N>
+        where
+            F: Float,
+            S: Sensor<F, N>,
+            P: Spread<F, N>,
+            Convolution<S, P>: Spread<F, N>,
+        {
+            type Output = SensorGridSupportGenerator<F, S, P, N>;
+            fn $fn(mut self, t: F) -> Self::Output {
+                std::ops::$trait_assign::$fn_assign(&mut self.weights, t);
+                self
+            }
+        }
+
+        impl<'a, F, S, P, const N: usize> std::ops::$trait<F>
+            for &'a SensorGridSupportGenerator<F, S, P, N>
+        where
+            F: Float,
+            S: Sensor<F, N>,
+            P: Spread<F, N>,
+            Convolution<S, P>: Spread<F, N>,
+        {
+            type Output = SensorGridSupportGenerator<F, S, P, N>;
+            fn $fn(self, t: F) -> Self::Output {
+                SensorGridSupportGenerator {
+                    base_sensor: self.base_sensor.clone(),
+                    grid: self.grid,
+                    weights: (&self.weights).$fn(t),
+                }
+            }
+        }
+    };
+}
+
+make_sensorgridsupportgenerator_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
+make_sensorgridsupportgenerator_scalarop_rhs!(Div, div, DivAssign, div_assign);
+
+macro_rules! make_sensorgridsupportgenerator_unaryop {
+    ($trait:ident, $fn:ident) => {
+        impl<F, S, P, const N: usize> std::ops::$trait for SensorGridSupportGenerator<F, S, P, N>
+        where
+            F: Float,
+            S: Sensor<F, N>,
+            P: Spread<F, N>,
+            Convolution<S, P>: Spread<F, N>,
+        {
+            type Output = SensorGridSupportGenerator<F, S, P, N>;
+            fn $fn(mut self) -> Self::Output {
+                self.weights = self.weights.$fn();
+                self
+            }
+        }
+
+        impl<'a, F, S, P, const N: usize> std::ops::$trait
+            for &'a SensorGridSupportGenerator<F, S, P, N>
+        where
+            F: Float,
+            S: Sensor<F, N>,
+            P: Spread<F, N>,
+            Convolution<S, P>: Spread<F, N>,
+        {
+            type Output = SensorGridSupportGenerator<F, S, P, N>;
+            fn $fn(self) -> Self::Output {
+                SensorGridSupportGenerator {
+                    base_sensor: self.base_sensor.clone(),
+                    grid: self.grid,
+                    weights: (&self.weights).$fn(),
+                }
+            }
+        }
+    };
+}
+
+make_sensorgridsupportgenerator_unaryop!(Neg, neg);
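+
+// With the implementations generated above, the weights of a generator can be
+// scaled and negated directly. For a mutable generator g (illustrative):
+//     g *= 2.0;          // MulAssign from the scalar-op macro
+//     let h = &g / 4.0;  // Div on a reference clones the base sensor
+//     let n = -&g;       // Neg from the unary-op macro
+// Only `weights` changes; the sensor and grid structure are untouched.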
+
+impl<'a, F, S, P, BT, const N: usize> Mapping<DVector<F>>
+    for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F, N>>
+where
+    F: Float,
+    BT: SensorGridBT<F, S, P, N>,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+    Convolution<S, P>: Spread<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+{
+    type Codomain = SensorGridBTFN<F, S, P, BT, N>;
+
+    fn apply<I: Instance<DVector<F>>>(&self, x: I) -> Self::Codomain {
+        let fwd = &self.forward_op;
+        let generator = SensorGridSupportGenerator {
+            base_sensor: fwd.base_sensor.clone(),
+            grid: fwd.grid(),
+            weights: x.own(),
+        };
+        BTFN::new_refresh(&fwd.bt, generator)
+    }
+}
+
+impl<'a, F, S, P, BT, const N: usize> Linear<DVector<F>>
+    for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F, N>>
+where
+    F: Float,
+    BT: SensorGridBT<F, S, P, N>,
+    S: Sensor<F, N>,
+    P: Spread<F, N>,
+    Convolution<S, P>: Spread<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+{
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/forward_pdps.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -0,0 +1,240 @@
+/*!
+Solver for the point source localisation problem using a
+primal-dual proximal splitting with a forward step.
+*/
+
+use numeric_literals::replace_float_literals;
+use serde::{Serialize, Deserialize};
+
+use alg_tools::iterate::AlgIteratorFactory;
+use alg_tools::euclidean::Euclidean;
+use alg_tools::mapping::{Mapping, DifferentiableRealMapping, Instance};
+use alg_tools::norms::Norm;
+use alg_tools::direct_product::Pair;
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::linops::{
+    BoundedLinear, AXPY, GEMV, Adjointable, IdOp,
+};
+use alg_tools::convex::{Conjugable, Prox};
+use alg_tools::norms::{L2, PairNorm};
+
+use crate::types::*;
+use crate::measures::{DiscreteMeasure, Radon, RNDM};
+use crate::measures::merging::SpikeMerging;
+use crate::forward_model::{
+    ForwardModel,
+    AdjointProductPairBoundedBy,
+};
+use crate::plot::{
+    SeqPlotter,
+    Plotting,
+    PlotLookup
+};
+use crate::fb::*;
+use crate::regularisation::RegTerm;
+use crate::dataterm::calculate_residual;
+
+/// Settings for [`pointsource_forward_pdps_pair`].
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[serde(default)]
+pub struct ForwardPDPSConfig<F : Float> {
+    /// Primal step length scaling.
+    pub τ0 : F,
+    /// Primal step length scaling.
+    pub σp0 : F,
+    /// Dual step length scaling.
+    pub σd0 : F,
+    /// Generic parameters
+    pub insertion : FBGenericConfig<F>,
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> Default for ForwardPDPSConfig<F> {
+    fn default() -> Self {
+        ForwardPDPSConfig {
+            τ0 : 0.99,
+            σd0 : 0.05,
+            σp0 : 0.99,
+            insertion : Default::default()
+        }
+    }
+}
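+
+// Example construction (illustrative): the fields are public, so individual
+// scalings can be overridden while keeping the remaining defaults:
+//     let config : ForwardPDPSConfig<f64> =
+//         ForwardPDPSConfig { σd0 : 0.1, ..Default::default() };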
+
+type MeasureZ<F, Z, const N : usize> = Pair<RNDM<F, N>, Z>;
+
+/// Iteratively solve the point source localisation problem with an additional variable
+/// using primal-dual proximal splitting with a forward step.
+#[replace_float_literals(F::cast_from(literal))]
+pub fn pointsource_forward_pdps_pair<
+    F, I, A, S, Reg, P, Z, R, Y, /*KOpM, */ KOpZ, H, const N : usize
+>(
+    opA : &A,
+    b : &A::Observable,
+    reg : Reg,
+    prox_penalty : &P,
+    config : &ForwardPDPSConfig<F>,
+    iterator : I,
+    mut plotter : SeqPlotter<F, N>,
+    //opKμ : KOpM,
+    opKz : &KOpZ,
+    fnR : &R,
+    fnH : &H,
+    mut z : Z,
+    mut y : Y,
+) -> MeasureZ<F, Z, N>
+where
+    F : Float + ToNalgebraRealField,
+    I : AlgIteratorFactory<IterInfo<F, N>>,
+    A : ForwardModel<
+            MeasureZ<F, Z, N>,
+            F,
+            PairNorm<Radon, L2, L2>,
+            PreadjointCodomain = Pair<S, Z>,
+        >
+        + AdjointProductPairBoundedBy<MeasureZ<F, Z, N>, P, IdOp<Z>, FloatType=F>,
+    S: DifferentiableRealMapping<F, N>,
+    for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable> + Instance<A::Observable>,
+    PlotLookup : Plotting<N>,
+    RNDM<F, N> : SpikeMerging<F>,
+    Reg : RegTerm<F, N>,
+    P : ProxPenalty<F, S, Reg, N>,
+    KOpZ : BoundedLinear<Z, L2, L2, F, Codomain=Y>
+        + GEMV<F, Z>
+        + Adjointable<Z, Y, AdjointCodomain = Z>,
+    for<'b> KOpZ::Adjoint<'b> : GEMV<F, Y>,
+    Y : AXPY<F> + Euclidean<F, Output=Y> + Clone + ClosedAdd,
+    for<'b> &'b Y : Instance<Y>,
+    Z : AXPY<F, Owned=Z> + Euclidean<F, Output=Z> + Clone + Norm<F, L2>,
+    for<'b> &'b Z : Instance<Z>,
+    R : Prox<Z, Codomain=F>,
+    H : Conjugable<Y, F, Codomain=F>,
+    for<'b> H::Conjugate<'b> : Prox<Y>,
+{
+
+    // Check parameters
+    assert!(config.τ0 > 0.0 &&
+            config.τ0 < 1.0 &&
+            config.σp0 > 0.0 &&
+            config.σp0 < 1.0 &&
+            config.σd0 > 0.0 &&
+            config.σp0 * config.σd0 <= 1.0,
+            "Invalid step length parameters");
+
+    // Initialise iterates
+    let mut μ = DiscreteMeasure::new();
+    let mut residual = calculate_residual(Pair(&μ, &z), opA, b);
+
+    // Set up parameters
+    let bigM = 0.0; //opKμ.adjoint_product_bound(prox_penalty).unwrap().sqrt();
+    let nKz = opKz.opnorm_bound(L2, L2);
+    let opIdZ = IdOp::new();
+    let (l, l_z) = opA.adjoint_product_pair_bound(prox_penalty, &opIdZ).unwrap();
+    // We need to satisfy
+    //
+    //     τσ_dM(1-σ_p L_z)/(1 - τ L) + [σ_p L_z + σ_pσ_d‖K_z‖^2] < 1
+    //                                  ^^^^^^^^^^^^^^^^^^^^^^^^^
+    // with 1 > σ_p L_z and 1 > τ L.
+    //
+    // To do so, we first solve σ_p and σ_d from the standard PDPS step length condition
+    // ^^^^^ < 1. Then we solve τ from the rest.
+    let σ_d = config.σd0 / nKz;
+    let σ_p = config.σp0 / (l_z + config.σd0 * nKz);
+    // Observe that 1 - [σ_p L_z + σ_pσ_d‖K_z‖^2] = 1 - σ_{p,0}.
+    // We get the condition τσ_d M (1-σ_p L_z) < (1-σ_{p,0})*(1-τ L)
+    // ⟺ τ [ σ_d M (1-σ_p L_z) + (1-σ_{p,0}) L ] < (1-σ_{p,0})
+    let φ = 1.0 - config.σp0;
+    let a = 1.0 - σ_p * l_z;
+    let τ = config.τ0 * φ / ( σ_d * bigM * a + φ * l );
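+    // Worked instance with the default parameters (τ0 = 0.99, σp0 = 0.99, σd0 = 0.05)
+    // and bigM = 0 as set above: the first term of the denominator vanishes, so
+    // τ = τ0 φ/(φ l) = 0.99/l, while σ_d = 0.05/‖K_z‖ and σ_p = 0.99/(l_z + 0.05‖K_z‖).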
+    // Acceleration is not currently supported
+    // let γ = dataterm.factor_of_strong_convexity();
+    let ω = 1.0;
+
+    // We multiply the tolerance by τ for FB, since the subproblems that depend on the
+    // tolerance are scaled by τ compared to the conditional gradient approach.
+    let tolerance = config.insertion.tolerance * τ * reg.tolerance_scaling();
+    let mut ε = tolerance.initial();
+
+    let starH = fnH.conjugate();
+
+    // Statistics
+    let full_stats = |residual : &A::Observable, μ : &RNDM<F, N>, z : &Z, ε, stats| IterInfo {
+        value : residual.norm2_squared_div2() + fnR.apply(z)
+                + reg.apply(μ) + fnH.apply(/* opKμ.apply(μ) + */ opKz.apply(z)),
+        n_spikes : μ.len(),
+        ε,
+        // postprocessing: config.insertion.postprocessing.then(|| μ.clone()),
+        .. stats
+    };
+    let mut stats = IterInfo::new();
+
+    // Run the algorithm
+    for state in iterator.iter_init(|| full_stats(&residual, &μ, &z, ε, stats.clone())) {
+        // Calculate the preadjoint of the scaled residual (smooth part of the surrogate model)
+        let Pair(mut τv, τz) = opA.preadjoint().apply(residual * τ);
+        let μ_base = μ.clone();
+
+        // Construct μ^{k+1} by solving finite-dimensional subproblems and insert new spikes.
+        let (maybe_d, _within_tolerances) = prox_penalty.insert_and_reweigh(
+            &mut μ, &mut τv, &μ_base, None,
+            τ, ε, &config.insertion,
+            &reg, &state, &mut stats,
+        );
+
+        // Merge spikes.
+        // This crucially expects the merge routine to be stable with respect to spike locations,
+        // and not to perform any pruning. That is to be done below simultaneously for γ.
+        let ins = &config.insertion;
+        if ins.merge_now(&state) {
+            stats.merged += prox_penalty.merge_spikes_no_fitness(
+                &mut μ, &mut τv, &μ_base, None, τ, ε, ins, &reg,
+                //Some(|μ̃ : &RNDM<F, N>| calculate_residual(Pair(μ̃, &z), opA, b).norm2_squared_div2()),
+            );
+        }
+
+        // Prune spikes with zero weight.
+        stats.pruned += prune_with_stats(&mut μ);
+
+        // Do z variable primal update
+        let mut z_new = τz;
+        opKz.adjoint().gemv(&mut z_new, -σ_p, &y, -σ_p/τ);
+        z_new = fnR.prox(σ_p, z_new + &z);
+        // Do dual update
+        // opKμ.gemv(&mut y, σ_d*(1.0 + ω), &μ, 1.0);    // y = y + σ_d K[(1+ω)(μ,z)^{k+1}]
+        opKz.gemv(&mut y, σ_d*(1.0 + ω), &z_new, 1.0);
+        // opKμ.gemv(&mut y, -σ_d*ω, μ_base, 1.0);// y = y + σ_d K[(1+ω)(μ,z)^{k+1} - ω (μ,z)^k]-b
+        opKz.gemv(&mut y, -σ_d*ω, z, 1.0);// y = y + σ_d K[(1+ω)(μ,z)^{k+1} - ω (μ,z)^k]-b
+        y = starH.prox(σ_d, y);
+        z = z_new;
+
+        // Update residual
+        residual = calculate_residual(Pair(&μ, &z), opA, b);
+
+        // Update step length parameters
+        // let ω = pdpsconfig.acceleration.accelerate(&mut τ, &mut σ, γ);
+
+        // Give statistics if requested
+        let iter = state.iteration();
+        stats.this_iters += 1;
+
+        state.if_verbose(|| {
+            plotter.plot_spikes(iter, maybe_d.as_ref(), Some(&τv), &μ);
+            full_stats(&residual, &μ, &z, ε, std::mem::replace(&mut stats, IterInfo::new()))
+        });
+
+        // Update main tolerance for next iteration
+        ε = tolerance.update(ε, iter);
+    }
+
+    let fit = |μ̃ : &RNDM<F, N>| {
+        (opA.apply(Pair(μ̃, &z))-b).norm2_squared_div2()
+        //+ fnR.apply(z) + reg.apply(μ)
+        + fnH.apply(/* opKμ.apply(&μ̃) + */ opKz.apply(&z))
+    };
+
+    μ.merge_spikes_fitness(config.insertion.final_merging_method(), fit, |&v| v);
+    μ.prune();
+    Pair(μ, z)
+}
--- a/src/fourier.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/fourier.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -3,14 +3,14 @@
 */
 
 use alg_tools::types::{Num, Float};
-use alg_tools::mapping::{RealMapping, Mapping};
+use alg_tools::mapping::{RealMapping, Mapping, Space};
 use alg_tools::bisection_tree::Weighted;
 use alg_tools::loc::Loc;
 
 /// Trait for Fourier transforms. When F is a non-complex number, the transform
 /// also has to be non-complex, i.e., the function itself symmetric.
 pub trait Fourier<F : Num> : Mapping<Self::Domain, Codomain=F> {
-    type Domain;
+    type Domain : Space;
     type Transformed : Mapping<Self::Domain, Codomain=F>;
 
     fn fourier(&self) -> Self::Transformed;
--- a/src/frank_wolfe.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/frank_wolfe.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -14,18 +14,18 @@
 */
 
 use numeric_literals::replace_float_literals;
+use nalgebra::{DMatrix, DVector};
 use serde::{Serialize, Deserialize};
 //use colored::Colorize;
 
 use alg_tools::iterate::{
     AlgIteratorFactory,
-    AlgIteratorState,
     AlgIteratorOptions,
     ValueIteratorFactory,
 };
 use alg_tools::euclidean::Euclidean;
 use alg_tools::norms::Norm;
-use alg_tools::linops::Apply;
+use alg_tools::linops::Mapping;
 use alg_tools::sets::Cube;
 use alg_tools::loc::Loc;
 use alg_tools::bisection_tree::{
@@ -40,9 +40,11 @@
 };
 use alg_tools::mapping::RealMapping;
 use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::norms::L2;
 
 use crate::types::*;
 use crate::measures::{
+    RNDM,
     DiscreteMeasure,
     DeltaMeasure,
     Radon,
@@ -68,10 +70,10 @@
 use crate::regularisation::{
     NonnegRadonRegTerm,
     RadonRegTerm,
+    RegTerm
 };
-use crate::fb::RegTerm;
 
-/// Settings for [`pointsource_fw`].
+/// Settings for [`pointsource_fw_reg`].
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
 #[serde(default)]
 pub struct FWConfig<F : Float> {
@@ -106,15 +108,25 @@
             refinement : Default::default(),
             inner : Default::default(),
             variant : FWVariant::FullyCorrective,
-            merging : Default::default(),
+            merging : SpikeMergingMethod { enabled : true, ..Default::default() },
         }
     }
 }
 
-/// Helper struct for pre-initialising the finite-dimensional subproblems solver
-/// [`prepare_optimise_weights`].
-///
-/// The pre-initialisation is done by [`prepare_optimise_weights`].
+pub trait FindimQuadraticModel<Domain, F> : ForwardModel<DiscreteMeasure<Domain, F>, F>
+where
+    F : Float + ToNalgebraRealField,
+    Domain : Clone + PartialEq,
+{
+    /// Return A_*A and A_* b
+    fn findim_quadratic_model(
+        &self,
+        μ : &DiscreteMeasure<Domain, F>,
+        b : &Self::Observable
+    ) -> (DMatrix<F::MixedType>, DVector<F::MixedType>);
+}
+
+/// Helper struct for pre-initialising the finite-dimensional subproblem solver.
 pub struct FindimData<F : Float> {
     /// ‖A‖^2
     opAnorm_squared : F,
@@ -125,7 +137,7 @@
 /// Trait for finite dimensional weight optimisation.
 pub trait WeightOptim<
     F : Float + ToNalgebraRealField,
-    A : ForwardModel<Loc<F, N>, F>,
+    A : ForwardModel<RNDM<F, N>, F>,
     I : AlgIteratorFactory<F>,
     const N : usize
 > {
@@ -154,7 +166,7 @@
     /// Returns the number of iterations taken by the method configured in `inner`.
     fn optimise_weights<'a>(
         &self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        μ : &mut RNDM<F, N>,
         opA : &'a A,
         b : &A::Observable,
         findim_data : &FindimData<F>,
@@ -166,12 +178,12 @@
 /// Trait for regularisation terms supported by [`pointsource_fw_reg`].
 pub trait RegTermFW<
     F : Float + ToNalgebraRealField,
-    A : ForwardModel<Loc<F, N>, F>,
+    A : ForwardModel<RNDM<F, N>, F>,
     I : AlgIteratorFactory<F>,
     const N : usize
 > : RegTerm<F, N>
     + WeightOptim<F, A, I, N>
-    + for<'a> Apply<&'a DiscreteMeasure<Loc<F, N>, F>, Output = F> {
+    + Mapping<RNDM<F, N>, Codomain = F> {
 
     /// With $g = A\_\*(Aμ-b)$, returns $(x, g(x))$ for $x$ a new point to be inserted
     /// into $μ$, as determined by the regulariser.
@@ -188,7 +200,7 @@
     /// Insert point `ξ` into `μ` for the relaxed algorithm from Bredies–Pikkarainen.
     fn relaxed_insert<'a>(
         &self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        μ : &mut RNDM<F, N>,
         g : &A::PreadjointCodomain,
         opA : &'a A,
         ξ : Loc<F, N>,
@@ -201,18 +213,18 @@
 impl<F : Float + ToNalgebraRealField, A, I, const N : usize> WeightOptim<F, A, I, N>
 for RadonRegTerm<F>
 where I : AlgIteratorFactory<F>,
-      A : ForwardModel<Loc<F, N>, F> {
+      A : FindimQuadraticModel<Loc<F, N>, F>  {
 
     fn prepare_optimise_weights(&self, opA : &A, b : &A::Observable) -> FindimData<F> {
         FindimData{
-            opAnorm_squared : opA.opnorm_bound().powi(2),
+            opAnorm_squared : opA.opnorm_bound(Radon, L2).powi(2),
             m0 : b.norm2_squared() / (2.0 * self.α()),
         }
     }
 
     fn optimise_weights<'a>(
         &self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        μ : &mut RNDM<F, N>,
         opA : &'a A,
         b : &A::Observable,
         findim_data : &FindimData<F>,
@@ -232,9 +244,9 @@
         //                 = C sup_{‖x‖_1 ≤ 1} ‖Ax‖_2 = C ‖A‖_{1,2},
         // where C = √m satisfies ‖x‖_1 ≤ C ‖x‖_2. Since we are interested in ‖A_*A‖, no
         // square root is needed when we scale:
-        let inner_τ = inner.τ0 / (findim_data.opAnorm_squared * F::cast_from(μ.len()));
-        let iters = quadratic_unconstrained(inner.method, &Ã, &g̃, self.α(),
-                                            &mut x, inner_τ, iterator);
+        let normest = findim_data.opAnorm_squared * F::cast_from(μ.len());
+        let iters = quadratic_unconstrained(&Ã, &g̃, self.α(), &mut x,
+                                            normest, inner, iterator);
         // Update masses of μ based on solution of finite-dimensional subproblem.
         μ.set_masses_dvector(&x);
 
@@ -245,12 +257,19 @@
 #[replace_float_literals(F::cast_from(literal))]
 impl<F : Float + ToNalgebraRealField, A, I, S, GA, BTA, const N : usize> RegTermFW<F, A, I, N>
 for RadonRegTerm<F>
-where Cube<F, N> : P2Minimise<Loc<F, N>, F>,
-      I : AlgIteratorFactory<F>,
-      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
-      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>> {
+where
+    Cube<F, N> : P2Minimise<Loc<F, N>, F>,
+    I : AlgIteratorFactory<F>,
+    S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+    GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
+    A : FindimQuadraticModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
+    BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+    // FIXME: the following *should not* be needed, they are already implied
+    RNDM<F, N> : Mapping<A::PreadjointCodomain, Codomain = F>,
+    DeltaMeasure<Loc<F, N>, F> : Mapping<A::PreadjointCodomain, Codomain = F>,
+    //A : Mapping<RNDM<F, N>, Codomain = A::Observable>,
+    //A : Mapping<DeltaMeasure<Loc<F, N>, F>, Codomain = A::Observable>,
+{
 
     fn find_insertion(
         &self,
@@ -269,7 +288,7 @@
 
     fn relaxed_insert<'a>(
         &self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        μ : &mut RNDM<F, N>,
         g : &A::PreadjointCodomain,
         opA : &'a A,
         ξ : Loc<F, N>,
@@ -282,7 +301,7 @@
         let v = if v_ξ.abs() <= α { 0.0 } else { m0 / α * v_ξ };
         let δ = DeltaMeasure { x : ξ, α : v };
         let dp = μ.apply(g) - δ.apply(g);
-        let d = opA.apply(&*μ) - opA.apply(&δ);
+        let d = opA.apply(&*μ) - opA.apply(δ);
         let r = d.norm2_squared();
         let s = if r == 0.0 {
             1.0
@@ -298,18 +317,18 @@
 impl<F : Float + ToNalgebraRealField, A, I, const N : usize> WeightOptim<F, A, I, N>
 for NonnegRadonRegTerm<F>
 where I : AlgIteratorFactory<F>,
-      A : ForwardModel<Loc<F, N>, F> {
+      A : FindimQuadraticModel<Loc<F, N>, F> {
 
     fn prepare_optimise_weights(&self, opA : &A, b : &A::Observable) -> FindimData<F> {
         FindimData{
-            opAnorm_squared : opA.opnorm_bound().powi(2),
+            opAnorm_squared : opA.opnorm_bound(Radon, L2).powi(2),
             m0 : b.norm2_squared() / (2.0 * self.α()),
         }
     }
 
     fn optimise_weights<'a>(
         &self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        μ : &mut RNDM<F, N>,
         opA : &'a A,
         b : &A::Observable,
         findim_data : &FindimData<F>,
@@ -329,9 +348,9 @@
         //                 = C sup_{‖x‖_1 ≤ 1} ‖Ax‖_2 = C ‖A‖_{1,2},
         // where C = √m satisfies ‖x‖_1 ≤ C ‖x‖_2. Since we are interested in ‖A_*A‖, no
         // square root is needed when we scale:
-        let inner_τ = inner.τ0 / (findim_data.opAnorm_squared * F::cast_from(μ.len()));
-        let iters = quadratic_nonneg(inner.method, &Ã, &g̃, self.α(),
-                                     &mut x, inner_τ, iterator);
+        let normest = findim_data.opAnorm_squared * F::cast_from(μ.len());
+        let iters = quadratic_nonneg(&Ã, &g̃, self.α(), &mut x,
+                                     normest, inner, iterator);
         // Update masses of μ based on solution of finite-dimensional subproblem.
         μ.set_masses_dvector(&x);
 
@@ -342,12 +361,17 @@
 #[replace_float_literals(F::cast_from(literal))]
 impl<F : Float + ToNalgebraRealField, A, I, S, GA, BTA, const N : usize> RegTermFW<F, A, I, N>
 for NonnegRadonRegTerm<F>
-where Cube<F, N> : P2Minimise<Loc<F, N>, F>,
-      I : AlgIteratorFactory<F>,
-      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
-      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>> {
+where
+    Cube<F, N> : P2Minimise<Loc<F, N>, F>,
+    I : AlgIteratorFactory<F>,
+    S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+    GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
+    A : FindimQuadraticModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
+    BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+    // FIXME: the following *should not* be needed, they are already implied
+    RNDM<F, N> : Mapping<A::PreadjointCodomain, Codomain = F>,
+    DeltaMeasure<Loc<F, N>, F> : Mapping<A::PreadjointCodomain, Codomain = F>,
+{
 
     fn find_insertion(
         &self,
@@ -361,7 +385,7 @@
 
     fn relaxed_insert<'a>(
         &self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        μ : &mut RNDM<F, N>,
         g : &A::PreadjointCodomain,
         opA : &'a A,
         ξ : Loc<F, N>,
@@ -401,28 +425,26 @@
 /// `iterator` is used to iterate the steps of the method, and `plotter` may be used to
 /// save intermediate iteration states as images.
 #[replace_float_literals(F::cast_from(literal))]
-pub fn pointsource_fw_reg<'a, F, I, A, GA, BTA, S, Reg, const N : usize>(
-    opA : &'a A,
+pub fn pointsource_fw_reg<F, I, A, GA, BTA, S, Reg, const N : usize>(
+    opA : &A,
     b : &A::Observable,
     reg : Reg,
     //domain : Cube<F, N>,
     config : &FWConfig<F>,
     iterator : I,
     mut plotter : SeqPlotter<F, N>,
-) -> DiscreteMeasure<Loc<F, N>, F>
+) -> RNDM<F, N>
 where F : Float + ToNalgebraRealField,
       I : AlgIteratorFactory<IterInfo<F, N>>,
       for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
-                                  //+ std::ops::Mul<F, Output=A::Observable>,  <-- FIXME: compiler overflow
-      A::Observable : std::ops::MulAssign<F>,
       GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
+      A : ForwardModel<RNDM<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
       BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
       S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
       BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
       Cube<F, N>: P2Minimise<Loc<F, N>, F>,
       PlotLookup : Plotting<N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
+      RNDM<F, N> : SpikeMerging<F>,
       Reg : RegTermFW<F, A, ValueIteratorFactory<F, AlgIteratorOptions>, N> {
 
     // Set up parameters
@@ -438,26 +460,24 @@
     let mut μ = DiscreteMeasure::new();
     let mut residual = -b;
 
-    let mut inner_iters = 0;
-    let mut this_iters = 0;
-    let mut pruned = 0;
-    let mut merged = 0;
+    // Statistics
+    let full_stats = |residual : &A::Observable,
+                      ν : &RNDM<F, N>,
+                      ε, stats| IterInfo {
+        value : residual.norm2_squared_div2() + reg.apply(ν),
+        n_spikes : ν.len(),
+        ε,
+        .. stats
+    };
+    let mut stats = IterInfo::new();
 
     // Run the algorithm
-    iterator.iterate(|state| {
-        // Update tolerance
+    for state in iterator.iter_init(|| full_stats(&residual, &μ, ε, stats.clone())) {
         let inner_tolerance = ε * config.inner.tolerance_mult;
         let refinement_tolerance = ε * config.refinement.tolerance_mult;
-        let ε_prev = ε;
-        ε = tolerance.update(ε, state.iteration());
 
         // Calculate smooth part of surrogate model.
-        //
-        // Using `std::mem::replace` here is not ideal, and expects that `empty_observable`
-        // has no significant overhead. For some reosn Rust doesn't allow us simply moving
-        // the residual and replacing it below before the end of this closure.
-        let r = std::mem::replace(&mut residual, opA.empty_observable());
-        let mut g = -preadjA.apply(r);
+        let mut g = preadjA.apply(residual * (-1.0));
 
         // Find absolute value maximising point
         let (ξ, v_ξ) = reg.find_insertion(&mut g, refinement_tolerance,
@@ -467,108 +487,47 @@
             FWVariant::FullyCorrective => {
                 // No point in optimising the weight here: the finite-dimensional algorithm is fast.
                 μ += DeltaMeasure { x : ξ, α : 0.0 };
+                stats.inserted += 1;
                 config.inner.iterator_options.stop_target(inner_tolerance)
             },
             FWVariant::Relaxed => {
                 // Perform a relaxed initialisation of μ
                 reg.relaxed_insert(&mut μ, &g, opA, ξ, v_ξ, &findim_data);
+                stats.inserted += 1;
                 // The stop_target is only needed for the type system.
                 AlgIteratorOptions{ max_iter : 1, .. config.inner.iterator_options}.stop_target(0.0)
             }
         };
 
-        inner_iters += reg.optimise_weights(&mut μ, opA, b, &findim_data, &config.inner, inner_it);
+        stats.inner_iters += reg.optimise_weights(&mut μ, opA, b, &findim_data,
+                                                  &config.inner, inner_it);
    
         // Merge spikes and update residual for next step and `if_verbose` below.
-        let n_before_merge = μ.len();
-        residual = μ.merge_spikes_fitness(config.merging,
-                                         |μ̃| opA.apply(μ̃) - b,
-                                          A::Observable::norm2_squared);
-        assert!(μ.len() >= n_before_merge);
-        merged += μ.len() - n_before_merge;
-
+        let (r, count) = μ.merge_spikes_fitness(config.merging,
+                                                |μ̃| opA.apply(μ̃) - b,
+                                                A::Observable::norm2_squared);
+        residual = r;
+        stats.merged += count;
 
         // Prune points with zero mass
         let n_before_prune = μ.len();
         μ.prune();
         debug_assert!(μ.len() <= n_before_prune);
-        pruned += n_before_prune - μ.len();
+        stats.pruned += n_before_prune - μ.len();
 
-        this_iters +=1;
+        stats.this_iters += 1;
+        let iter = state.iteration();
 
-        // Give function value if needed
+        // Give statistics if needed
         state.if_verbose(|| {
-            plotter.plot_spikes(
-                format!("iter {} start", state.iteration()), &g,
-                "".to_string(), None::<&A::PreadjointCodomain>,
-                None, &μ
-            );
-            let res = IterInfo {
-                value : residual.norm2_squared_div2() + reg.apply(&μ),
-                n_spikes : μ.len(),
-                inner_iters,
-                this_iters,
-                merged,
-                pruned,
-                ε : ε_prev,
-                postprocessing : None,
-            };
-            inner_iters = 0;
-            this_iters = 0;
-            merged = 0;
-            pruned = 0;
-            res
-        })
-    });
+            plotter.plot_spikes(iter, Some(&g), Option::<&S>::None, &μ);
+            full_stats(&residual, &μ, ε, std::mem::replace(&mut stats, IterInfo::new()))
+        });
+
+        // Update tolerance
+        ε = tolerance.update(ε, iter);
+    }
 
     // Return final iterate
     μ
 }
-
-//
-// Deprecated interface
-//
-
-#[deprecated(note = "Use `pointsource_fw_reg`")]
-pub fn pointsource_fw<'a, F, I, A, GA, BTA, S, const N : usize>(
-    opA : &'a A,
-    b : &A::Observable,
-    α : F,
-    //domain : Cube<F, N>,
-    config : &FWConfig<F>,
-    iterator : I,
-    plotter : SeqPlotter<F, N>,
-) -> DiscreteMeasure<Loc<F, N>, F>
-where F : Float + ToNalgebraRealField,
-      I : AlgIteratorFactory<IterInfo<F, N>>,
-      for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
-                                  //+ std::ops::Mul<F, Output=A::Observable>,  <-- FIXME: compiler overflow
-      A::Observable : std::ops::MulAssign<F>,
-      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
-      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
-      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
-      Cube<F, N>: P2Minimise<Loc<F, N>, F>,
-      PlotLookup : Plotting<N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {
-
-      pointsource_fw_reg(opA, b, NonnegRadonRegTerm(α), config, iterator, plotter)
-}
-
-#[deprecated(note = "Use `WeightOptim::optimise_weights`")]
-pub fn optimise_weights<'a, F, A, I, const N : usize>(
-    μ : &mut DiscreteMeasure<Loc<F, N>, F>,
-    opA : &'a A,
-    b : &A::Observable,
-    α : F,
-    findim_data : &FindimData<F>,
-    inner : &InnerSettings<F>,
-    iterator : I
-) -> usize
-where F : Float + ToNalgebraRealField,
-      I : AlgIteratorFactory<F>,
-      A : ForwardModel<Loc<F, N>, F>
-{
-     NonnegRadonRegTerm(α).optimise_weights(μ, opA, b, findim_data, inner, iterator)
-}
--- a/src/kernels.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/kernels.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -24,4 +24,7 @@
 pub use ball_indicator::*;
 mod hat_convolution;
 pub use hat_convolution::*;
+mod linear;
+pub use linear::*;
 
+
--- a/src/kernels/ball_indicator.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/kernels/ball_indicator.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -1,6 +1,6 @@
 
 //! Implementation of the indicator function of a ball with respect to various norms.
-use float_extras::f64::{tgamma as gamma};
+use float_extras::f64::tgamma as gamma;
 use numeric_literals::replace_float_literals;
 use serde::Serialize;
 use alg_tools::types::*;
@@ -14,10 +14,16 @@
     LocalAnalysis,
     GlobalAnalysis,
 };
-use alg_tools::mapping::Apply;
+use alg_tools::mapping::{
+    Mapping,
+    Differential,
+    DifferentiableImpl,
+};
+use alg_tools::instance::Instance;
+use alg_tools::euclidean::StaticEuclidean;
 use alg_tools::maputil::array_init;
 use alg_tools::coefficients::factorial;
-
+use crate::types::*;
 use super::base::*;
 
 /// Representation of the indicator of the ball $𝔹_q = \\{ x ∈ ℝ^N \mid \\|x\\|\_q ≤ r \\}$,
@@ -36,14 +42,17 @@
 
 #[replace_float_literals(C::Type::cast_from(literal))]
 impl<'a, F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
-Apply<&'a Loc<C::Type, N>>
+Mapping<Loc<C::Type, N>>
 for BallIndicator<C, Exponent, N>
-where Loc<F, N> : Norm<F, Exponent> {
-    type Output = C::Type;
+where
+    Loc<F, N> : Norm<F, Exponent>
+{
+    type Codomain = C::Type;
+
     #[inline]
-    fn apply(&self, x : &'a Loc<C::Type, N>) -> Self::Output {
+    fn apply<I : Instance<Loc<C::Type, N>>>(&self, x : I) -> Self::Codomain {
         let r = self.r.value();
-        let n = x.norm(self.exponent);
+        let n = x.eval(|x| x.norm(self.exponent));
         if n <= r {
             1.0
         } else {
@@ -52,14 +61,79 @@
     }
 }
 
+impl<'a, F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
+DifferentiableImpl<Loc<C::Type, N>>
+for BallIndicator<C, Exponent, N>
+where
+    C : Constant,
+     Loc<F, N> : Norm<F, Exponent>
+{
+    type Derivative = Loc<C::Type, N>;
+
+    #[inline]
+    fn differential_impl<I : Instance<Loc<C::Type, N>>>(&self, _x : I) -> Self::Derivative {
+        Self::Derivative::origin()
+    }
+}
+
 impl<F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
-Apply<Loc<C::Type, N>>
+Lipschitz<L2>
 for BallIndicator<C, Exponent, N>
-where Loc<F, N> : Norm<F, Exponent> {
-    type Output = C::Type;
-    #[inline]
-    fn apply(&self, x : Loc<C::Type, N>) -> Self::Output {
-        self.apply(&x)
+where C : Constant,
+      Loc<F, N> : Norm<F, Exponent> {
+    type FloatType = C::Type;
+
+    fn lipschitz_factor(&self, _l2 : L2) -> Option<C::Type> {
+        None
+    }
+}
+
+impl<'b, F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
+Lipschitz<L2>
+for Differential<'b, Loc<F, N>, BallIndicator<C, Exponent, N>>
+where C : Constant,
+      Loc<F, N> : Norm<F, Exponent> {
+    type FloatType = C::Type;
+
+    fn lipschitz_factor(&self, _l2 : L2) -> Option<C::Type> {
+        None
+    }
+}
+
+impl<'a, 'b, F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
+Lipschitz<L2>
+for Differential<'b, Loc<F, N>, &'a BallIndicator<C, Exponent, N>>
+where C : Constant,
+      Loc<F, N> : Norm<F, Exponent> {
+    type FloatType = C::Type;
+
+    fn lipschitz_factor(&self, _l2 : L2) -> Option<C::Type> {
+        None
+    }
+}
+
+
+impl<'b, F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
+NormBounded<L2>
+for Differential<'b, Loc<F, N>, BallIndicator<C, Exponent, N>>
+where C : Constant,
+      Loc<F, N> : Norm<F, Exponent> {
+    type FloatType = C::Type;
+
+    fn norm_bound(&self, _l2 : L2) -> C::Type {
+        F::INFINITY
+    }
+}
+
+impl<'a, 'b, F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
+NormBounded<L2>
+for Differential<'b, Loc<F, N>, &'a BallIndicator<C, Exponent, N>>
+where C : Constant,
+      Loc<F, N> : Norm<F, Exponent> {
+    type FloatType = C::Type;
+
+    fn norm_bound(&self, _l2 : L2) -> C::Type {
+        F::INFINITY
     }
 }
 
@@ -188,32 +262,21 @@
 
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<'a, F : Float, R, const N : usize> Apply<&'a Loc<F, N>>
+impl<'a, F : Float, R, const N : usize> Mapping<Loc<F, N>>
 for AutoConvolution<CubeIndicator<R, N>>
 where R : Constant<Type=F> {
-    type Output = F;
+    type Codomain = F;
 
     #[inline]
-    fn apply(&self, y : &'a Loc<F, N>) -> F {
+    fn apply<I : Instance<Loc<F, N>>>(&self, y : I) -> F {
         let two_r = 2.0 * self.0.r.value();
         // This is just a product of one-dimensional versions
-        y.iter().map(|&x| {
+        y.cow().iter().map(|&x| {
             0.0.max(two_r - x.abs())
         }).product()
     }
 }
 
-impl<F : Float, R, const N : usize> Apply<Loc<F, N>>
-for AutoConvolution<CubeIndicator<R, N>>
-where R : Constant<Type=F> {
-    type Output = F;
-
-    #[inline]
-    fn apply(&self, y : Loc<F, N>) -> F {
-        self.apply(&y)
-    }
-}
-
 #[replace_float_literals(F::cast_from(literal))]
 impl<F : Float, R, const N : usize> Support<F, N>
 for AutoConvolution<CubeIndicator<R, N>>
--- a/src/kernels/base.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/kernels/base.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -14,15 +14,22 @@
     GlobalAnalysis,
     Bounded,
 };
-use alg_tools::mapping::Apply;
-use alg_tools::maputil::{array_init, map2};
+use alg_tools::mapping::{
+    Mapping,
+    DifferentiableImpl,
+    DifferentiableMapping,
+    Differential,
+};
+use alg_tools::instance::{Instance, Space};
+use alg_tools::maputil::{array_init, map2, map1_indexed};
 use alg_tools::sets::SetOrd;
 
 use crate::fourier::Fourier;
+use crate::types::*;
 
 /// Representation of the product of two kernels.
 ///
-/// The kernels typically implement [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
+/// The kernels typically implement [`Support`] and [`Mapping`].
 ///
 /// The implementation [`Support`] only uses the [`Support::support_hint`] of the first parameter!
 #[derive(Copy,Clone,Serialize,Debug)]
@@ -33,32 +40,94 @@
     pub B
 );
 
-impl<A, B, F : Float, const N : usize> Apply<Loc<F, N>>
+impl<A, B, F : Float, const N : usize> Mapping<Loc<F, N>>
+for SupportProductFirst<A, B>
+where
+    A : Mapping<Loc<F, N>, Codomain = F>,
+    B : Mapping<Loc<F, N>, Codomain = F>,
+{
+    type Codomain = F;
+
+    #[inline]
+    fn apply<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Codomain {
+        self.0.apply(x.ref_instance()) * self.1.apply(x)
+    }
+}
+
+impl<A, B, F : Float, const N : usize> DifferentiableImpl<Loc<F, N>>
 for SupportProductFirst<A, B>
-where A : for<'a> Apply<&'a Loc<F, N>, Output=F>,
-      B : for<'a> Apply<&'a Loc<F, N>, Output=F> {
-    type Output = F;
+where
+    A : DifferentiableMapping<
+        Loc<F, N>,
+        DerivativeDomain=Loc<F, N>,
+        Codomain = F
+    >,
+    B : DifferentiableMapping<
+        Loc<F, N>,
+        DerivativeDomain=Loc<F, N>,
+        Codomain = F,
+    >
+{
+    type Derivative = Loc<F, N>;
+
     #[inline]
-    fn apply(&self, x : Loc<F, N>) -> Self::Output {
-        self.0.apply(&x) * self.1.apply(&x)
+    fn differential_impl<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Derivative {
+        let xr = x.ref_instance();
+        self.0.differential(xr) * self.1.apply(xr) + self.1.differential(xr) * self.0.apply(x)
     }
 }
 
-impl<'a, A, B, F : Float, const N : usize> Apply<&'a Loc<F, N>>
+impl<A, B, M : Copy, F : Float> Lipschitz<M>
 for SupportProductFirst<A, B>
-where A : Apply<&'a Loc<F, N>, Output=F>,
-      B : Apply<&'a Loc<F, N>, Output=F> {
-    type Output = F;
+where A : Lipschitz<M, FloatType = F> + Bounded<F>,
+      B : Lipschitz<M, FloatType = F> + Bounded<F> {
+    type FloatType = F;
     #[inline]
-    fn apply(&self, x : &'a Loc<F, N>) -> Self::Output {
-        self.0.apply(x) * self.1.apply(x)
+    fn lipschitz_factor(&self, m : M) -> Option<F> {
+        // f(x)g(x) - f(y)g(y) = f(x)[g(x)-g(y)] - [f(y)-f(x)]g(y)
+        let &SupportProductFirst(ref f, ref g) = self;
+        f.lipschitz_factor(m).map(|l| l * g.bounds().uniform())
+         .zip(g.lipschitz_factor(m).map(|l| l * f.bounds().uniform()))
+         .map(|(a, b)| a + b)
     }
 }
 
+impl<'a, A, B, M : Copy, Domain, F : Float> Lipschitz<M>
+for Differential<'a, Domain, SupportProductFirst<A, B>>
+where
+    Domain : Space,
+    A : Clone + DifferentiableMapping<Domain> + Lipschitz<M, FloatType = F> + Bounded<F>,
+    B : Clone + DifferentiableMapping<Domain> + Lipschitz<M, FloatType = F> + Bounded<F>,
+    SupportProductFirst<A, B> :  DifferentiableMapping<Domain>,
+    for<'b> A::Differential<'b> : Lipschitz<M, FloatType = F> + NormBounded<L2, FloatType=F>,
+    for<'b> B::Differential<'b> : Lipschitz<M, FloatType = F> + NormBounded<L2, FloatType=F>
+{
+    type FloatType = F;
+    #[inline]
+    fn lipschitz_factor(&self, m : M) -> Option<F> {
+        // ∇[gf] = f∇g + g∇f
+        // ⟹ ∇[gf](x) - ∇[gf](y) = f(x)∇g(x) + g(x)∇f(x) - f(y)∇g(y) - g(y)∇f(y)
+        //                        = f(x)[∇g(x)-∇g(y)] + g(x)[∇f(x)-∇f(y)]
+        //                          - [f(y)-f(x)]∇g(y) - [g(y)-g(x)]∇f(y)
+        let &SupportProductFirst(ref f, ref g) = self.base_fn();
+        let (df, dg) = (f.diff_ref(), g.diff_ref());
+        [
+            df.lipschitz_factor(m).map(|l| l * g.bounds().uniform()),
+            dg.lipschitz_factor(m).map(|l| l * f.bounds().uniform()),
+            f.lipschitz_factor(m).map(|l| l * dg.norm_bound(L2)),
+            g.lipschitz_factor(m).map(|l| l * df.norm_bound(L2))
+        ].into_iter().sum()
+    }
+}
+
+
 impl<'a, A, B, F : Float, const N : usize> Support<F, N>
 for SupportProductFirst<A, B>
-where A : Support<F, N>,
-      B : Support<F, N> {
+where
+    A : Support<F, N>,
+    B : Support<F, N>
+{
     #[inline]
     fn support_hint(&self) -> Cube<F, N> {
         self.0.support_hint()
@@ -97,7 +166,7 @@
 
 /// Representation of the sum of two kernels
 ///
-/// The kernels typically implement [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
+/// The kernels typically implement [`Support`] and [`Mapping`].
 ///
 /// The implementation [`Support`] only uses the [`Support::support_hint`] of the first parameter!
 #[derive(Copy,Clone,Serialize,Debug)]
@@ -108,33 +177,48 @@
     pub B
 );
 
-impl<'a, A, B, F : Float, const N : usize> Apply<&'a Loc<F, N>>
+impl<'a, A, B, F : Float, const N : usize> Mapping<Loc<F, N>>
 for SupportSum<A, B>
-where A : Apply<&'a Loc<F, N>, Output=F>,
-      B : Apply<&'a Loc<F, N>, Output=F> {
-    type Output = F;
+where
+    A : Mapping<Loc<F, N>, Codomain = F>,
+    B : Mapping<Loc<F, N>, Codomain = F>,
+{
+    type Codomain = F;
+
     #[inline]
-    fn apply(&self, x : &'a Loc<F, N>) -> Self::Output {
-        self.0.apply(x) + self.1.apply(x)
+    fn apply<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Codomain {
+        self.0.apply(x.ref_instance()) + self.1.apply(x)
     }
 }
 
-impl<A, B, F : Float, const N : usize> Apply<Loc<F, N>>
+impl<'a, A, B, F : Float, const N : usize> DifferentiableImpl<Loc<F, N>>
 for SupportSum<A, B>
-where A : for<'a> Apply<&'a Loc<F, N>, Output=F>,
-      B : for<'a> Apply<&'a Loc<F, N>, Output=F> {
-    type Output = F;
+where
+    A : DifferentiableMapping<
+        Loc<F, N>,
+        DerivativeDomain = Loc<F, N>
+    >,
+    B : DifferentiableMapping<
+        Loc<F, N>,
+        DerivativeDomain = Loc<F, N>,
+    >
+{
+
+    type Derivative = Loc<F, N>;
+
     #[inline]
-    fn apply(&self, x : Loc<F, N>) -> Self::Output {
-        self.0.apply(&x) + self.1.apply(&x)
+    fn differential_impl<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Derivative {
+        self.0.differential(x.ref_instance()) + self.1.differential(x)
     }
 }
 
+
 impl<'a, A, B, F : Float, const N : usize> Support<F, N>
 for SupportSum<A, B>
 where A : Support<F, N>,
       B : Support<F, N>,
       Cube<F, N> : SetOrd {
+
     #[inline]
     fn support_hint(&self) -> Cube<F, N> {
         self.0.support_hint().common(&self.1.support_hint())
@@ -174,9 +258,42 @@
     }
 }
 
+impl<F : Float, M : Copy, A, B> Lipschitz<M> for SupportSum<A, B>
+where A : Lipschitz<M, FloatType = F>,
+      B : Lipschitz<M, FloatType = F> {
+    type FloatType = F;
+
+    fn lipschitz_factor(&self, m : M) -> Option<F> {
+        match (self.0.lipschitz_factor(m), self.1.lipschitz_factor(m)) {
+            (Some(l0), Some(l1)) => Some(l0 + l1),
+            _ => None
+        }
+    }
+}
+
+impl<'b, F : Float, M : Copy, A, B, Domain> Lipschitz<M>
+for Differential<'b, Domain, SupportSum<A, B>>
+where
+    Domain : Space,
+    A : Clone + DifferentiableMapping<Domain, Codomain=F>,
+    B : Clone + DifferentiableMapping<Domain, Codomain=F>,
+    SupportSum<A, B> : DifferentiableMapping<Domain, Codomain=F>,
+    for<'a> A :: Differential<'a> : Lipschitz<M, FloatType = F>,
+    for<'a> B :: Differential<'a> : Lipschitz<M, FloatType = F>
+{
+    type FloatType = F;
+
+    fn lipschitz_factor(&self, m : M) -> Option<F> {
+        let base = self.base_fn();
+        base.0.diff_ref().lipschitz_factor(m)
+            .zip(base.1.diff_ref().lipschitz_factor(m))
+            .map(|(a, b)| a + b)
+    }
+}
+
 /// Representation of the convolution of two kernels.
 ///
-/// The kernels typically implement [`Support`]s and [`Mapping`][alg_tools::mapping::Mapping].
+/// The kernels typically implement [`Support`]s and [`Mapping`].
 //
 /// Trait implementations have to be on a case-by-case basis.
 #[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
@@ -187,9 +304,46 @@
     pub B
 );
 
+impl<F : Float, M, A, B> Lipschitz<M> for Convolution<A, B>
+where A : Norm<F, L1> ,
+      B : Lipschitz<M, FloatType = F> {
+    type FloatType = F;
+
+    fn lipschitz_factor(&self, m : M) -> Option<F> {
+        // For [f * g](x) = ∫ f(x-y)g(y) dy we have
+        // [f * g](x) - [f * g](z) = ∫ [f(x-y)-f(z-y)]g(y) dy.
+        // Hence |[f * g](x) - [f * g](z)| ≤ ∫ |f(x-y)-f(z-y)| |g(y)| dy
+        //                                 ≤ L|x-z| ∫ |g(y)| dy,
+        // where L is the Lipschitz factor of f.
+        self.1.lipschitz_factor(m).map(|l| l * self.0.norm(L1))
+    }
+}
+
+impl<'b, F : Float, M, A, B, Domain> Lipschitz<M>
+for Differential<'b, Domain, Convolution<A, B>>
+where
+    Domain : Space,
+    A : Clone + Norm<F, L1> ,
+    Convolution<A, B> : DifferentiableMapping<Domain, Codomain=F>,
+    B : Clone + DifferentiableMapping<Domain, Codomain=F>,
+    for<'a> B :: Differential<'a> : Lipschitz<M, FloatType = F>
+{
+    type FloatType = F;
+
+    fn lipschitz_factor(&self, m : M) -> Option<F> {
+        // For [f * g](x) = ∫ f(x-y)g(y) dy we have
+        // ∇[f * g](x) - ∇[f * g](z) = ∫ [∇f(x-y)-∇f(z-y)]g(y) dy.
+        // Hence |∇[f * g](x) - ∇[f * g](z)| ≤ ∫ |∇f(x-y)-∇f(z-y)| |g(y)| dy
+        //                                   ≤ L|x-z| ∫ |g(y)| dy,
+        // where L is the Lipschitz factor of ∇f.
+        let base = self.base_fn();
+        base.1.diff_ref().lipschitz_factor(m).map(|l| l * base.0.norm(L1))
+    }
+}
+
 /// Representation of the autoconvolution of a kernel.
 ///
-/// The kernel typically implements [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
+/// The kernel typically implements [`Support`] and [`Mapping`].
 ///
 /// Trait implementations have to be on a case-by-case basis.
 #[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
@@ -198,34 +352,130 @@
     pub A
 );
 
+impl<F : Float, M, C> Lipschitz<M> for AutoConvolution<C>
+where C : Lipschitz<M, FloatType = F> + Norm<F, L1> {
+    type FloatType = F;
+
+    fn lipschitz_factor(&self, m : M) -> Option<F> {
+        self.0.lipschitz_factor(m).map(|l| l * self.0.norm(L1))
+    }
+}
+
+impl<'b, F : Float, M, C, Domain> Lipschitz<M>
+for Differential<'b, Domain, AutoConvolution<C>>
+where
+    Domain : Space,
+    C : Clone + Norm<F, L1> + DifferentiableMapping<Domain, Codomain=F>,
+    AutoConvolution<C> : DifferentiableMapping<Domain, Codomain=F>,
+    for<'a> C :: Differential<'a> : Lipschitz<M, FloatType = F>
+{
+    type FloatType = F;
+
+    fn lipschitz_factor(&self, m : M) -> Option<F> {
+        let base = self.base_fn();
+        base.0.diff_ref().lipschitz_factor(m).map(|l| l * base.0.norm(L1))
+    }
+}
+
+
 /// Representation of a multi-dimensional product of a one-dimensional kernel.
 ///
 /// For $G: ℝ → ℝ$, this is the function $F(x\_1, …, x\_n) := \prod_{i=1}^n G(x\_i)$.
-/// The kernel $G$ typically implements [`Support`] and [`Mapping`][alg_tools::mapping::Mapping]
+/// The kernel $G$ typically implements [`Support`] and [`Mapping`]
 /// on [`Loc<F, 1>`]. Then the product implements them on [`Loc<F, N>`].
 #[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
+#[allow(dead_code)]
 struct UniformProduct<G, const N : usize>(
     /// The one-dimensional kernel
     G
 );
 
-impl<'a, G, F : Float, const N : usize> Apply<&'a Loc<F, N>>
+impl<'a, G, F : Float, const N : usize> Mapping<Loc<F, N>>
 for UniformProduct<G, N>
-where G : Apply<Loc<F, 1>, Output=F> {
-    type Output = F;
+where
+    G : Mapping<Loc<F, 1>, Codomain = F>
+{
+    type Codomain = F;
+
     #[inline]
-    fn apply(&self, x : &'a Loc<F, N>) -> F {
-        x.iter().map(|&y| self.0.apply(Loc([y]))).product()
+    fn apply<I : Instance<Loc<F, N>>>(&self, x : I) -> F {
+        x.cow().iter().map(|&y| self.0.apply(Loc([y]))).product()
+    }
+}
+
+
+
+impl<'a, G, F : Float, const N : usize> DifferentiableImpl<Loc<F, N>>
+for UniformProduct<G, N>
+where
+    G : DifferentiableMapping<
+        Loc<F, 1>,
+        DerivativeDomain = F,
+        Codomain = F,
+    >
+{
+    type Derivative = Loc<F, N>;
+
+    #[inline]
+    fn differential_impl<I : Instance<Loc<F, N>>>(&self, x0 : I) -> Loc<F, N> {
+        x0.eval(|x| {
+            let vs = x.map(|y| self.0.apply(Loc([y])));
+            product_differential(x, &vs, |y| self.0.differential(Loc([y])))
+        })
     }
 }
 
-impl<G, F : Float, const N : usize> Apply<Loc<F, N>>
-for UniformProduct<G, N>
-where G : Apply<Loc<F, 1>, Output=F> {
-    type Output = F;
-    #[inline]
-    fn apply(&self, x : Loc<F, N>) -> F {
-        x.into_iter().map(|y| self.0.apply(Loc([y]))).product()
+/// Helper function to calculate the differential of $f(x)=∏_{i=1}^N g(x_i)$.
+///
+/// The vector `x` is the location, `vs` consists of the values `g(x_i)`, and
+/// `gd` calculates the derivative `g'`.
+#[inline]
+pub(crate) fn product_differential<F : Float, G : Fn(F) -> F, const N : usize>(
+    x : &Loc<F, N>,
+    vs : &Loc<F, N>,
+    gd : G
+) -> Loc<F, N> {
+    map1_indexed(x, |i, &y| {
+        gd(y) * vs.iter()
+                  .zip(0..)
+                  .filter_map(|(v, j)| (j != i).then_some(*v))
+                  .product()
+    }).into()
+}
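+
+// Example (illustrative): for g(y) = y² at x = (1, 2) ∈ ℝ², we have
+// vs = (g(1), g(2)) = (1, 4) and gd(y) = 2y, so product_differential(&x, &vs, gd)
+// returns (2·1·4, 2·2·1) = (8, 4), matching ∇(x₁²x₂²) = (2x₁x₂², 2x₁²x₂).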
+
+/// Helper function to calculate the Lipschitz factor of $∇f$ for $f(x)=∏_{i=1}^N g(x_i)$.
+///
+/// The parameter `bound` is a bound on $|g|_∞$, `lip` is a Lipschitz factor for $g$,
+/// `dbound` is a bound on $|∇g|_∞$, and `dlip` a Lipschitz factor for $∇g$.
+#[inline]
+pub(crate) fn product_differential_lipschitz_factor<F : Float, const N : usize>(
+    bound : F,
+    lip : F,
+    dbound : F,
+    dlip : F
+) -> F {
+    // For arbitrary ψ(x) = ∏_{i=1}^n ψ_i(x_i), we have
+    // ψ(x) - ψ(y) = ∑_i [ψ_i(x_i)-ψ_i(y_i)] ∏_{j ≠ i} ψ_j(x_j)
+    // by a simple recursive argument. In particular, if ψ_i=g for all i, j, we have
+    // |ψ(x) - ψ(y)| ≤ ∑_i L_g M_g^{n-1}|x-y|, where L_g is the Lipschitz factor of g, and
+    // M_g a bound on it.
+    //
+    // We also have in the general case ∇ψ(x) = ∑_i ∇ψ_i(x_i) ∏_{j ≠ i} ψ_j(x_j), whence
+    // using the previous formula for each i with f_i=∇ψ_i and f_j=ψ_j for j ≠ i, we get
+    //  ∇ψ(x) - ∇ψ(y) = ∑_i[ ∇ψ_i(x_i)∏_{j ≠ i} ψ_j(x_j) - ∇ψ_i(y_i)∏_{j ≠ i} ψ_j(y_j)]
+    //                = ∑_i[ [∇ψ_i(x_i) - ∇ψ_i(y_i)] ∏_{j ≠ i}ψ_j(x_j)
+    //                       + [∑_{k ≠ i} [ψ_k(x_k) - ψ_k(y_k)] ∏_{j ≠ i, k}ψ_j(x_j)]∇ψ_i(y_i)].
+    // With ψ_i = g for all i, it follows that
+    // |∇ψ(x) - ∇ψ(y)| ≤ {∑_i [L_{∇g} M_g^{n-1} + ∑_{k ≠ i} L_g M_g^{n-2} M_{∇g}]} |x-y|,
+    // where the factor in braces equals
+    //     n [L_{∇g} M_g^{n-1} + (n-1) L_g M_g^{n-2} M_{∇g}] = n M_g^{n-2}[L_{∇g} M_g + (n-1) L_g M_{∇g}].
+    if N >= 2 {
+        F::cast_from(N) * bound.powi((N-2) as i32)
+                        * (dlip * bound  + F::cast_from(N-1) * lip * dbound)
+    } else if N==1 {
+        dlip
+    } else {
+        panic!("Invalid dimension")
     }
 }
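+
+// Quick check of the formula above: for N = 2 it reduces to 2(L_{∇g} M_g + L_g M_{∇g}),
+// while for N = 1 only the Lipschitz factor L_{∇g} of ∇g itself remains.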
 
--- a/src/kernels/gaussian.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/kernels/gaussian.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -17,9 +17,15 @@
     Weighted,
     Bounded,
 };
-use alg_tools::mapping::Apply;
+use alg_tools::mapping::{
+    Mapping,
+    Instance,
+    Differential,
+    DifferentiableImpl,
+};
 use alg_tools::maputil::array_init;
 
+use crate::types::*;
 use crate::fourier::Fourier;
 use super::base::*;
 use super::ball_indicator::CubeIndicator;
@@ -58,28 +64,104 @@
 
 
 #[replace_float_literals(S::Type::cast_from(literal))]
-impl<'a, S, const N : usize> Apply<&'a Loc<S::Type, N>> for Gaussian<S, N>
-where S : Constant {
-    type Output = S::Type;
+impl<'a, S, const N : usize> Mapping<Loc<S::Type, N>> for Gaussian<S, N>
+where
+    S : Constant
+{
+    type Codomain = S::Type;
+
     // This is normalised neither to have value 1 at zero nor to have integral 1
     // (unless the cut-off ε=0).
     #[inline]
-    fn apply(&self, x : &'a Loc<S::Type, N>) -> Self::Output {
-        let d_squared = x.norm2_squared();
+    fn apply<I : Instance<Loc<S::Type, N>>>(&self, x : I) -> Self::Codomain {
+        let d_squared = x.eval(|x| x.norm2_squared());
         let σ2 = self.variance.value();
         let scale = self.scale();
         (-d_squared / (2.0 * σ2)).exp() / scale
     }
 }
 
-impl<S, const N : usize> Apply<Loc<S::Type, N>> for Gaussian<S, N>
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<'a, S, const N : usize> DifferentiableImpl<Loc<S::Type, N>> for Gaussian<S, N>
+where S : Constant {
+    type Derivative = Loc<S::Type, N>;
+
+    #[inline]
+    fn differential_impl<I : Instance<Loc<S::Type, N>>>(&self, x0 : I) -> Self::Derivative {
+        let x = x0.cow();
+        let f = -self.apply(&*x) / self.variance.value();
+        *x * f
+    }
+}
+
+
+// To calculate the the Lipschitz factors, we consider
+// f(t)    = e^{-t²/2}
+// f'(t)   = -t f(t)       which has max at t=1 by f''(t)=0
+// f''(t)  = (t²-1)f(t)    which has max at t=√3 by f'''(t)=0
+// f'''(t) = -(t³-3t)
+// So f has the Lipschitz factor L=|f'(1)|, and f' has the Lipschitz factor L'=|f''(√3)|.
+//
+// Now g(x) = Cf(‖x‖/σ) for a scaling factor C is the Gaussian.
+// Thus ‖g(x)-g(y)‖ = C‖f(‖x‖/σ)-f(‖y‖/σ)‖ ≤ (C/σ)L‖x-y‖,
+// so g has the Lipschitz factor (C/σ)|f'(1)| = (C/σ)exp(-0.5).
+//
+// Also ∇g(x)= Cx/(σ‖x‖)f'(‖x‖/σ)       (*)
+//            = -(C/σ²)xf(‖x‖/σ)
+//            = -C/σ (x/σ) f(‖x/σ‖)
+// ∇²g(x) = -(C/σ)[Id/σ f(‖x‖/σ) + x ⊗ x/(σ²‖x‖) f'(‖x‖/σ)]
+//        = (C/σ²)[-Id + x ⊗ x/σ²]f(‖x‖/σ).
+// Thus ‖∇²g(x)‖ = (C/σ²)‖-Id + x ⊗ x/σ²‖f(‖x‖/σ), where
+// ‖-Id + x ⊗ x/σ²‖ = ‖[-Id + x ⊗ x/σ²](x/‖x‖)‖ = |-1 + ‖x‖²/σ²|.
+// This means that ‖∇²g(x)‖ = (C/σ²)|f''(‖x‖/σ)|, which is maximised with ‖x‖/σ=√3.
+// Hence the Lipschitz factor of ∇g is (C/σ²)f''(√3) = (C/σ²)2e^{-3/2}.
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<S, const N : usize> Lipschitz<L2> for Gaussian<S, N>
 where S : Constant {
-    type Output = S::Type;
-    // This is not normalised to neither to have value 1 at zero or integral 1
-    // (unless the cut-off ε=0).
-    #[inline]
-    fn apply(&self, x : Loc<S::Type, N>) -> Self::Output {
-        self.apply(&x)
+    type FloatType = S::Type;
+    fn lipschitz_factor(&self, L2 : L2) -> Option<Self::FloatType> {
+        Some((-0.5).exp() / (self.scale() * self.variance.value().sqrt()))
+    }
+}
+
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<'a, S : Constant, const N : usize> Lipschitz<L2>
+for Differential<'a, Loc<S::Type, N>, Gaussian<S, N>> {
+    type FloatType = S::Type;
+    
+    fn lipschitz_factor(&self, _l2 : L2) -> Option<S::Type> {
+        let g = self.base_fn();
+        let σ2 = g.variance.value();
+        let scale = g.scale();
+        Some(2.0*(-3.0/2.0).exp()/(σ2*scale))
+    }
+}
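+
+// Numeric instance (illustrative): with unit variance (σ² = 1), the Lipschitz
+// factor of g computed above is e^{-1/2}/scale ≈ 0.607/scale, and that of ∇g is
+// 2e^{-3/2}/scale ≈ 0.446/scale.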
+
+// From above, norm bounds on the differential can be calculated as achieved
+// for f' at t=1, i.e., the bound is |f'(1)|; for g this gives |(C/σ)f'(1)|.
+// It follows that the norm bounds on the differential are just the Lipschitz
+// factors of the undifferentiated function, given how the latter is calculated above.
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<'b, S : Constant, const N : usize> NormBounded<L2>
+for Differential<'b, Loc<S::Type, N>, Gaussian<S, N>> {
+    type FloatType = S::Type;
+    
+    fn norm_bound(&self, _l2 : L2) -> S::Type {
+        self.base_fn().lipschitz_factor(L2).unwrap()
+    }
+}
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<'b, 'a, S : Constant, const N : usize> NormBounded<L2>
+for Differential<'b, Loc<S::Type, N>, &'a Gaussian<S, N>> {
+    type FloatType = S::Type;
+    
+    fn norm_bound(&self, _l2 : L2) -> S::Type {
+        self.base_fn().lipschitz_factor(L2).unwrap()
     }
 }
 
@@ -169,19 +251,19 @@
                                                                        Gaussian<S, N>>;
 
 
-/// This implements $χ\_{[-b, b]^n} \* (f χ\_{[-a, a]^n})$
-/// where $a,b>0$ and $f$ is a gaussian kernel on $ℝ^n$.
+/// This implements $g := χ\_{[-b, b]^n} \* (f χ\_{[-a, a]^n})$ where $a,b>0$ and $f$ is
+/// a gaussian kernel on $ℝ^n$. For an expression for $g$, see Lemma 3.9 in the manuscript.
 #[replace_float_literals(F::cast_from(literal))]
-impl<'a, F : Float, R, C, S, const N : usize> Apply<&'a Loc<F, N>>
+impl<'a, F : Float, R, C, S, const N : usize> Mapping<Loc<F, N>>
 for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
 where R : Constant<Type=F>,
       C : Constant<Type=F>,
       S : Constant<Type=F> {
 
-    type Output = F;
+    type Codomain = F;
 
     #[inline]
-    fn apply(&self, y : &'a Loc<F, N>) -> F {
+    fn apply<I : Instance<Loc<F, N>>>(&self, y : I) -> F {
         let Convolution(ref ind,
                         SupportProductFirst(ref cut,
                                             ref gaussian)) = self;
@@ -192,7 +274,7 @@
         let c = 0.5; // 1/(σ√(2π)) * σ√(π/2) = 1/2
         
         // This is just a product of one-dimensional versions
-        y.product_map(|x| {
+        y.cow().product_map(|x| {
             let c1 = -(a.min(b + x)); //(-a).max(-x-b);
             let c2 = a.min(b - x);
             if c1 >= c2 {
@@ -207,20 +289,143 @@
     }
 }
 
-impl<F : Float, R, C, S, const N : usize> Apply<Loc<F, N>>
+/// This implements the differential of $g := χ\_{[-b, b]^n} \* (f χ\_{[-a, a]^n})$ where $a,b>0$
+/// and $f$ is a gaussian kernel on $ℝ^n$. For an expression for the value of $g$, from which the
+/// derivative readily arises (at points of differentiability), see Lemma 3.9 in the manuscript.
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, R, C, S, const N : usize> DifferentiableImpl<Loc<F, N>>
 for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
 where R : Constant<Type=F>,
       C : Constant<Type=F>,
       S : Constant<Type=F> {
 
-    type Output = F;
+    type Derivative = Loc<F, N>;
 
+    /// Although a differential is implemented here, the function is not everywhere differentiable.
     #[inline]
-    fn apply(&self, y : Loc<F, N>) -> F {
-        self.apply(&y)
+    fn differential_impl<I : Instance<Loc<F, N>>>(&self, y0 : I) -> Loc<F, N> {
+        let Convolution(ref ind,
+                        SupportProductFirst(ref cut,
+                                            ref gaussian)) = self;
+        let y = y0.cow();
+        let a = cut.r.value();
+        let b = ind.r.value();
+        let σ = gaussian.variance.value().sqrt();
+        let t = F::SQRT_2 * σ;
+        let c = 0.5; // 1/(σ√(2π)) * σ√(π/2) = 1/2
+        let c_mul_erf_scale_div_t = c * F::FRAC_2_SQRT_PI / t;
+        
+        // Calculate the values for all component functions of the
+        // product. This is just the loop from apply above.
+        let unscaled_vs = y.map(|x| {
+            let c1 = -(a.min(b + x)); //(-a).max(-x-b);
+            let c2 = a.min(b - x);
+            if c1 >= c2 {
+                0.0
+            } else {
+                let e1 = F::cast_from(erf((c1 / t).as_()));
+                let e2 = F::cast_from(erf((c2 / t).as_()));
+                debug_assert!(e2 >= e1);
+                c * (e2 - e1)
+            }
+        });
+        // This computes the gradient for each coordinate
+        product_differential(&*y, &unscaled_vs, |x| {
+            let c1 = -(a.min(b + x)); //(-a).max(-x-b);
+            let c2 = a.min(b - x);
+            if c1 >= c2 {
+                0.0
+            } else {
+                // erf'(z) = (2/√π)*exp(-z^2), and we get an extra factor ±1/(√2σ) = ±1/t
+                // from the chain rule (the sign comes from inside c_1 or c_2, and swaps the
+                // order of de2 and de1 in the final calculation).
+                let de1 = if b + x < a {
+                    (-((b+x)/t).powi(2)).exp()
+                } else {
+                    0.0
+                };
+                let de2 = if b - x < a {
+                    (-((b-x)/t).powi(2)).exp()
+                } else {
+                    0.0
+                };
+                c_mul_erf_scale_div_t * (de1 - de2)
+            }
+        })
     }
 }
 
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, R, C, S, const N : usize> Lipschitz<L1>
+for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F>,
+      S : Constant<Type=F> {
+    type FloatType = F;
+
+    fn lipschitz_factor(&self, L1 : L1) -> Option<F> {
+        // To get the product Lipschitz factor, we note that for any ψ_i, we have
+        // ∏_{i=1}^N φ_i(x_i) - ∏_{i=1}^N φ_i(y_i)
+        // = [φ_1(x_1)-φ_1(y_1)] ∏_{i=2}^N φ_i(x_i)
+        //   + φ_1(y_1)[ ∏_{i=2}^N φ_i(x_i) - ∏_{i=2}^N φ_i(y_i)]
+        // = ∑_{j=1}^N [φ_j(x_j)-φ_j(y_j)]∏_{i > j} φ_i(x_i) ∏_{i < j} φ_i(y_i)
+        // Thus
+        // |∏_{i=1}^N φ_i(x_i) - ∏_{i=1}^N φ_i(y_i)|
+        // ≤ ∑_{j=1}^N |φ_j(x_j)-φ_j(y_j)| ∏_{i ≠ j} \max_i |φ_i|
+        //
+        // Thus we need the 1D Lipschitz factors, and the maximum of φ = θ * ψ.
+        //
+        // We have
+        // θ * ψ(x) = 0 if c_1(x) ≥ c_2(x)
+        //          = (1/2)[erf(c_2(x)/(√2σ)) - erf(c_1(x)/(√2σ))] if c_1(x) < c_2(x),
+        // where c_1(x) = max{-x-b,-a} = -min{b+x,a}, c_2(x) = min{b-x,a},
+        // and erf(s) = (2/√π) ∫_0^s e^{-t^2} dt.
+        // Thus, if c_1(x) < c_2(x) and c_1(y) < c_2(y), we have
+        // θ * ψ(x) - θ * ψ(y) = (1/√π)[∫_{c_1(x)/(√2σ)}^{c_1(y)/(√2σ)} e^{-t^2} dt
+        //                       - ∫_{c_2(x)/(√2σ)}^{c_2(y)/(√2σ)} e^{-t^2} dt]
+        // Thus
+        // |θ * ψ(x) - θ * ψ(y)| ≤ (1/√π)/(√2σ)(|c_1(x)-c_1(y)|+|c_2(x)-c_2(y)|)
+        //                       ≤ 2(1/√π)/(√2σ)|x-y|
+        //                       ≤ √2/(√πσ)|x-y|.
+        //
+        // For the product we also need the value θ * ψ(0), which is
+        // (1/2)[erf(min{a,b}/(√2σ))-erf(max{-b,-a}/(√2σ))]
+        //  = (1/2)[erf(min{a,b}/(√2σ))-erf(-min{a,b}/(√2σ))]
+        //  = erf(min{a,b}/(√2σ))
+        //
+        // If c_1(x) ≥ c_2(x), then x ∉ [-(a+b), a+b]. If also y is outside that range,
+        // θ * ψ(x) = θ * ψ(y). If only y is in the range [-(a+b), a+b], we can replace
+        // x by whichever of -(a+b) and (a+b) is closer to y; this keeps θ * ψ(x) = 0.
+        // Thus the same calculations as above also give the Lipschitz factor here.
+        let Convolution(ref ind,
+                        SupportProductFirst(ref cut,
+                                            ref gaussian)) = self;
+        let a = cut.r.value();
+        let b = ind.r.value();
+        let σ = gaussian.variance.value().sqrt();
+        let π = F::PI;
+        let t = F::SQRT_2 * σ;
+        let l1d = F::SQRT_2 / (π.sqrt() * σ);
+        let e0 = F::cast_from(erf((a.min(b) / t).as_()));
+        Some(l1d * e0.powi(N as i32-1))
+    }
+}
+
+/*
+impl<'a, F : Float, R, C, S, const N : usize> Lipschitz<L2>
+for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F>,
+      S : Constant<Type=F> {
+    type FloatType = F;
+    #[inline]
+    fn lipschitz_factor(&self, L2 : L2) -> Option<Self::FloatType> {
+        self.lipschitz_factor(L1).map(|l1| l1 * <S::Type>::cast_from(N).sqrt())
+    }
+}
+*/
+
 impl<F : Float, R, C, S, const N : usize>
 Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
 where R : Constant<Type=F>,
--- a/src/kernels/hat.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/kernels/hat.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -14,8 +14,9 @@
     GlobalAnalysis,
     Bounded,
 };
-use alg_tools::mapping::Apply;
-use alg_tools::maputil::{array_init};
+use alg_tools::mapping::{Mapping, Instance};
+use alg_tools::maputil::array_init;
+use crate::types::Lipschitz;
 
 /// Representation of the hat function $f(x)=1-\\|x\\|\_1/ε$ of `width` $ε$ on $ℝ^N$.
 #[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
@@ -25,26 +26,17 @@
 }
 
 #[replace_float_literals(C::Type::cast_from(literal))]
-impl<'a, C : Constant, const N : usize> Apply<&'a Loc<C::Type, N>> for Hat<C, N> {
-    type Output = C::Type;
+impl<'a, C : Constant, const N : usize> Mapping<Loc<C::Type, N>> for Hat<C, N> {
+    type Codomain = C::Type;
+
     #[inline]
-    fn apply(&self, x : &'a Loc<C::Type, N>) -> Self::Output {
+    fn apply<I : Instance<Loc<C::Type, N>>>(&self, x : I) -> Self::Codomain {
         let ε = self.width.value();
-        0.0.max(1.0-x.norm(L1)/ε)
+        0.0.max(1.0-x.cow().norm(L1)/ε)
     }
 }
 
 #[replace_float_literals(C::Type::cast_from(literal))]
-impl<C : Constant, const N : usize> Apply<Loc<C::Type, N>> for Hat<C, N> {
-    type Output = C::Type;
-    #[inline]
-    fn apply(&self, x : Loc<C::Type, N>) -> Self::Output {
-        self.apply(&x)
-    }
-}
-
-
-#[replace_float_literals(C::Type::cast_from(literal))]
 impl<'a, C : Constant, const N : usize> Support<C::Type, N> for Hat<C, N> {
     #[inline]
     fn support_hint(&self) -> Cube<C::Type,N> {
@@ -94,6 +86,26 @@
     }
 }
 
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize> Lipschitz<L1> for Hat<C, N> {
+    type FloatType = C::Type;
+
+    fn lipschitz_factor(&self, _l1 : L1) -> Option<C::Type> {
+        Some(1.0/self.width.value())
+    }
+}
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize> Lipschitz<L2> for Hat<C, N> {
+    type FloatType = C::Type;
+
+    fn lipschitz_factor(&self, _l2 : L2) -> Option<C::Type> {
+        self.lipschitz_factor(L1).map(|l1|
+            <L2 as Dominated<C::Type, L1, Loc<C::Type,N>>>::from_norm(&L2, l1, L1)
+        )
+    }
+}
+
 impl<'a, C : Constant, const N : usize>
 LocalAnalysis<C::Type, Bounds<C::Type>, N>
 for Hat<C, N> {
--- a/src/kernels/hat_convolution.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/kernels/hat_convolution.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -14,9 +14,15 @@
     GlobalAnalysis,
     Bounded,
 };
-use alg_tools::mapping::Apply;
+use alg_tools::mapping::{
+    Mapping,
+    Instance,
+    DifferentiableImpl,
+    Differential,
+};
 use alg_tools::maputil::array_init;
 
+use crate::types::Lipschitz;
 use super::base::*;
 use super::ball_indicator::CubeIndicator;
 
@@ -38,6 +44,31 @@
 ///         -\frac{2}{3} (y-1)^3 & \frac{1}{2}\leq y<1. \\\\
 ///     \end{cases}
 /// $$
+// Hence
+// $$
+//     (h\*h)'(y) =
+//     \begin{cases}
+//         2 (y+1)^2 & -1<y\leq -\frac{1}{2}, \\\\
+//         -6 y^2-4 y & -\frac{1}{2}<y\leq 0, \\\\
+//         6 y^2-4 y & 0<y<\frac{1}{2}, \\\\
+//         -2 (y-1)^2 & \frac{1}{2}\leq y<1. \\\\
+//     \end{cases}
+// $$
+// as well as
+// $$
+//     (h\*h)''(y) =
+//     \begin{cases}
+//         4 (y+1) & -1<y\leq -\frac{1}{2}, \\\\
+//         -12 y-4 & -\frac{1}{2}<y\leq 0, \\\\
+//         12 y-4 & 0<y<\frac{1}{2}, \\\\
+//         -4 (y-1) & \frac{1}{2}\leq y<1. \\\\
+//     \end{cases}
+// $$
+// This is maximised at y=±1/2 with value 2, and minimised at y=0 with value -4.
+// Now observe that
+// $$
+//     [∇f(x\_1, …, x\_n)]\_j = \frac{4}{σ} (h\*h)'(x\_j/σ) \prod\_{i ≠ j} \frac{4}{σ} (h\*h)(x\_i/σ)
+// $$
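+// For a continuity spot-check of the derivative: at y=-1/2 the first two branches
+// both give 1/2, and at y=1/2 the last two branches both give -1/2.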
 #[derive(Copy,Clone,Debug,Serialize,Eq)]
 pub struct HatConv<S : Constant, const N : usize> {
     /// The parameter $σ$ of the kernel.
@@ -60,24 +91,85 @@
     }
 }
 
-impl<'a, S, const N : usize> Apply<&'a Loc<S::Type, N>> for HatConv<S, N>
+impl<'a, S, const N : usize> Mapping<Loc<S::Type, N>> for HatConv<S, N>
 where S : Constant {
-    type Output = S::Type;
+    type Codomain = S::Type;
+
     #[inline]
-    fn apply(&self, y : &'a Loc<S::Type, N>) -> Self::Output {
+    fn apply<I : Instance<Loc<S::Type, N>>>(&self, y : I) -> Self::Codomain {
         let σ = self.radius();
-        y.product_map(|x| {
+        y.cow().product_map(|x| {
             self.value_1d_σ1(x  / σ) / σ
         })
     }
 }
 
-impl<'a, S, const N : usize> Apply<Loc<S::Type, N>> for HatConv<S, N>
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<S, const N : usize> Lipschitz<L1> for HatConv<S, N>
+where S : Constant {
+    type FloatType = S::Type;
+    #[inline]
+    fn lipschitz_factor(&self, L1 : L1) -> Option<Self::FloatType> {
+        // For any ψ_i, we have
+        // ∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i)
+        // = [ψ_1(x_1)-ψ_1(y_1)] ∏_{i=2}^N ψ_i(x_i)
+        //   + ψ_1(y_1)[ ∏_{i=2}^N ψ_i(x_i) - ∏_{i=2}^N ψ_i(y_i)]
+        // = ∑_{j=1}^N [ψ_j(x_j)-ψ_j(y_j)]∏_{i > j} ψ_i(x_i) ∏_{i < j} ψ_i(y_i)
+        // Thus
+        // |∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i)|
+        // ≤ ∑_{j=1}^N |ψ_j(x_j)-ψ_j(y_j)| ∏_{i ≠ j} \max_i |ψ_i|
+        let σ = self.radius();
+        let l1d = self.lipschitz_1d_σ1() / (σ*σ);
+        let m1d = self.value_1d_σ1(0.0) / σ;
+        Some(l1d * m1d.powi(N as i32 - 1))
+    }
+}
+
+impl<S, const N : usize> Lipschitz<L2> for HatConv<S, N>
+where S : Constant {
+    type FloatType = S::Type;
+    #[inline]
+    fn lipschitz_factor(&self, L2 : L2) -> Option<Self::FloatType> {
+        self.lipschitz_factor(L1).map(|l1| l1 * <S::Type>::cast_from(N).sqrt())
+    }
+}
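+
+// Worked example (sketch): for σ = 1 and N = 2 the above gives
+// l1d = lipschitz_1d_σ1()/σ² = 2 and m1d = value_1d_σ1(0)/σ = 4/3, so the L¹
+// factor is 2·(4/3) = 8/3, and the L² factor is then (8/3)√2.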
+
+
+impl<'a, S, const N : usize> DifferentiableImpl<Loc<S::Type, N>> for HatConv<S, N>
 where S : Constant {
-    type Output = S::Type;
+    type Derivative = Loc<S::Type, N>;
+
     #[inline]
-    fn apply(&self, y : Loc<S::Type, N>) -> Self::Output {
-        self.apply(&y)
+    fn differential_impl<I : Instance<Loc<S::Type, N>>>(&self, y0 : I) -> Self::Derivative {
+        let y = y0.cow();
+        let σ = self.radius();
+        let σ2 = σ * σ;
+        let vs = y.map(|x| {
+            self.value_1d_σ1(x  / σ) / σ
+        });
+        product_differential(&*y, &vs, |x| {
+            self.diff_1d_σ1(x  / σ) / σ2
+        })
+    }
+}
+
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<'a, F : Float, S, const N : usize> Lipschitz<L2>
+for Differential<'a, Loc<F, N>, HatConv<S, N>>
+where S : Constant<Type=F> {
+    type FloatType = F;
+
+    #[inline]
+    fn lipschitz_factor(&self, _l2 : L2) -> Option<F> {
+        let h = self.base_fn();
+        let σ = h.radius();
+        Some(product_differential_lipschitz_factor::<F, N>(
+            h.value_1d_σ1(0.0) / σ,
+            h.lipschitz_1d_σ1() / (σ*σ),
+            h.maxabsdiff_1d_σ1() / (σ*σ),
+            h.lipschitz_diff_1d_σ1() / (σ*σ),
+        ))
     }
 }
 
@@ -97,6 +189,54 @@
             (4.0/3.0) + 8.0 * y * y * (y - 1.0)
         }
     }
+
+    /// Computes the differential of the kernel for $n=1$ with $σ=1$.
+    #[inline]
+    fn diff_1d_σ1(&self, x : F) -> F {
+        let y = x.abs();
+        if y >= 1.0 {
+            0.0
+        } else if y > 0.5 {
+            - 8.0 * (y - 1.0).powi(2)
+        } else /* 0 ≤ y ≤ 0.5 */ {
+            (24.0 * y - 16.0) * y
+        }
+    }
+
+    /// Computes the Lipschitz factor of the kernel for $n=1$ with $σ=1$.
+    #[inline]
+    fn lipschitz_1d_σ1(&self) -> F {
+        // Maximal absolute differential achieved at ±0.5 by diff_1d_σ1 analysis
+        2.0
+    }
+
+    /// Computes the maximum absolute differential of the kernel for $n=1$ with $σ=1$.
+    #[inline]
+    fn maxabsdiff_1d_σ1(&self) -> F {
+        // Maximal absolute differential achieved at ±0.5 by diff_1d_σ1 analysis
+        2.0
+    }
+
+    /// Computes the second differential of the kernel for $n=1$ with $σ=1$.
+    #[inline]
+    #[allow(dead_code)]
+    fn diff2_1d_σ1(&self, x : F) -> F {
+        let y = x.abs();
+        if y >= 1.0 {
+            0.0
+        } else if y > 0.5 {
+            - 16.0 * (y - 1.0)
+        } else /* 0 ≤ y ≤ 0.5 */ {
+            48.0 * y - 16.0
+        }
+    }
+
+    /// Computes the Lipschitz factor of the differential of the kernel for $n=1$ with $σ=1$.
+    #[inline]
+    fn lipschitz_diff_1d_σ1(&self) -> F {
+        // Maximal absolute second differential achieved at 0 by diff2_1d_σ1 analysis
+        16.0
+    }
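+
+    // Spot-checks for the factors above: |diff_1d_σ1(0.5)| = |(24·0.5 - 16)·0.5| = 2
+    // matches lipschitz_1d_σ1() and maxabsdiff_1d_σ1(), while |diff2_1d_σ1(0)| = 16
+    // matches lipschitz_diff_1d_σ1().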
 }
 
 impl<'a, S, const N : usize> Support<S::Type, N> for HatConv<S, N>
@@ -159,21 +299,21 @@
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<'a, F : Float, R, C, const N : usize> Apply<&'a Loc<F, N>>
+impl<'a, F : Float, R, C, const N : usize> Mapping<Loc<F, N>>
 for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
 where R : Constant<Type=F>,
       C : Constant<Type=F> {
 
-    type Output = F;
+    type Codomain = F;
 
     #[inline]
-    fn apply(&self, y : &'a Loc<F, N>) -> F {
+    fn apply<I : Instance<Loc<F, N>>>(&self, y : I) -> F {
         let Convolution(ref ind, ref hatconv) = self;
         let β = ind.r.value();
         let σ = hatconv.radius();
 
         // This is just a product of one-dimensional versions
-        y.product_map(|x| {
+        y.cow().product_map(|x| {
             // With $u_σ(x) = u_1(x/σ)/σ$ the normalised hat convolution
             // we have
             // $$
@@ -188,24 +328,66 @@
     }
 }
 
-impl<'a, F : Float, R, C, const N : usize> Apply<Loc<F, N>>
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, R, C, const N : usize> DifferentiableImpl<Loc<F, N>>
 for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
 where R : Constant<Type=F>,
       C : Constant<Type=F> {
 
-    type Output = F;
+    type Derivative = Loc<F, N>;
 
     #[inline]
-    fn apply(&self, y : Loc<F, N>) -> F {
-        self.apply(&y)
+    fn differential_impl<I : Instance<Loc<F, N>>>(&self, y0 : I) -> Loc<F, N> {
+        let y = y0.cow();
+        let Convolution(ref ind, ref hatconv) = self;
+        let β = ind.r.value();
+        let σ = hatconv.radius();
+        let σ2 = σ * σ;
+
+        let vs = y.map(|x| {
+            self.value_1d_σ1(x / σ, β / σ)
+        });
+        product_differential(&*y, &vs, |x| {
+            self.diff_1d_σ1(x  / σ, β / σ) / σ2
+        })
     }
 }
 
 
+/// Integrate over $[a, b]$ a function supported on $[c, d]$, given its antiderivative $f$.
+/// If $b > d$, add $g()$ to the result.
+#[inline]
+#[replace_float_literals(F::cast_from(literal))]
+fn i<F: Float>(a : F, b : F, c : F, d : F, f : impl Fn(F) -> F,
+                g : impl Fn() -> F) -> F {
+    if b < c {
+        0.0
+    } else if b <= d {
+        if a <= c {
+            f(b) - f(c)
+        } else {
+            f(b) - f(a)
+        }
+    } else /* b > d */ {
+        g() + if a <= c {
+            f(d) - f(c)
+        } else if a < d {
+            f(d) - f(a)
+        } else {
+            0.0
+        }
+    }
+}
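+
+// Usage sketch (hypothetical pieces, F = f64): to integrate over [a, b] the function
+// that is 1 on [0, 1] (antiderivative y ↦ y) and 2 on [1, 3] (antiderivative y ↦ 2y),
+// nest the second piece in the `g` continuation of the first:
+//
+// fn integrate_example(a : f64, b : f64) -> f64 {
+//     i(a, b, 0.0, 1.0, |y| y,
+//       || i(a, b, 1.0, 3.0, |y| 2.0 * y, || 0.0))
+// }
+//
+// For a = 0.5, b = 2.0 this returns (1.0 - 0.5) + (2·2.0 - 2·1.0) = 2.5.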
+
 #[replace_float_literals(F::cast_from(literal))]
 impl<F : Float, C, R, const N : usize> Convolution<CubeIndicator<R, N>, HatConv<C, N>>
 where R : Constant<Type=F>,
       C : Constant<Type=F> {
+
+    /// Calculates the value of the 1D hat convolution further convolved by an interval indicator.
+    /// As both functions are piecewise polynomials, this is implemented by explicit integration
+    /// over all subintervals of polynomiality, using easily formed antiderivatives.
     #[inline]
     pub fn value_1d_σ1(&self, x : F, β : F) -> F {
         // The integration interval
@@ -218,34 +400,10 @@
             y * y
         }
         
-        /// Integrate $f$, whose support is $[c, d]$, on $[a, b]$.
-        /// If $b > d$, add $g()$ to the result.
-        #[inline]
-        fn i<F: Float>(a : F, b : F, c : F, d : F, f : impl Fn(F) -> F,
-                       g : impl Fn() -> F) -> F {
-            if b < c {
-                0.0
-            } else if b <= d {
-                if a <= c {
-                    f(b) - f(c)
-                } else {
-                    f(b) - f(a)
-                }
-            } else /* b > d */ {
-                g() + if a <= c {
-                    f(d) - f(c)
-                } else if a < d {
-                    f(d) - f(a)
-                } else {
-                    0.0
-                }
-            }
-        }
-
         // Observe the factor 1/6 at the front from the antiderivatives below.
         // The factor 4 is from normalisation of the original function.
         (4.0/6.0) * i(a, b, -1.0, -0.5,
-                // (2/3) (y+1)^3  on  -1 < y ≤ - 1/2
+                // (2/3) (y+1)^3  on  -1 < y ≤ -1/2
                 // The antiderivative is  (2/12)(y+1)^4 = (1/6)(y+1)^4
                 |y| pow4(y+1.0),
                 || i(a, b, -0.5, 0.0,
@@ -266,8 +424,53 @@
                 )
         )
     }
+
+    /// Calculates the derivative of the 1D hat convolution further convolved by an interval
+    /// indicator. The implementation is similar to [`Self::value_1d_σ1`], using the fact that
+    /// $(θ * ψ)' = θ * ψ'$.
+    #[inline]
+    pub fn diff_1d_σ1(&self, x : F, β : F) -> F {
+        // The integration interval
+        let a = x - β;
+        let b = x + β;
+
+        // The factor 4 is from normalisation of the original function.
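+        // Since we integrate the derivative (h*h)', the pieces of (h*h) itself, from
+        // the expansion at the top of this file, serve as the antiderivatives below.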
+        4.0 * i(a, b, -1.0, -0.5,
+                // (2/3) (y+1)^3  on  -1 < y ≤ -1/2
+                |y| (2.0/3.0) * (y + 1.0).powi(3),
+                || i(a, b, -0.5, 0.0,
+                    // -2 y^3 - 2 y^2 + 1/3  on  -1/2 < y ≤ 0
+                    |y| -2.0*(y + 1.0) * y * y + (1.0/3.0),
+                    || i(a, b, 0.0, 0.5,
+                            // 2 y^3 - 2 y^2 + 1/3 on 0 < y < 1/2
+                            |y| 2.0*(y - 1.0) * y * y + (1.0/3.0),
+                            || i(a, b, 0.5, 1.0,
+                                // -(2/3) (y-1)^3  on  1/2 < y ≤ 1
+                                |y| -(2.0/3.0) * (y - 1.0).powi(3),
+                                || 0.0
+                            )
+                    )
+                )
+        )
+    }
 }
 
+/*
+impl<'a, F : Float, R, C, const N : usize> Lipschitz<L2>
+for Differential<Loc<F, N>, Convolution<CubeIndicator<R, N>, HatConv<C, N>>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+
+    type FloatType = F;
+
+    #[inline]
+    fn lipschitz_factor(&self, _l2 : L2) -> Option<F> {
+        dbg!("unimplemented");
+        None
+    }
+}
+*/
+
 impl<F : Float, R, C, const N : usize>
 Convolution<CubeIndicator<R, N>, HatConv<C, N>>
 where R : Constant<Type=F>,
@@ -409,7 +612,7 @@
 #[cfg(test)]
 mod tests {
     use alg_tools::lingrid::linspace;
-    use alg_tools::mapping::Apply;
+    use alg_tools::mapping::Mapping;
     use alg_tools::norms::Linfinity;
     use alg_tools::loc::Loc;
     use crate::kernels::{BallIndicator, CubeIndicator, Convolution};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/kernels/linear.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -0,0 +1,94 @@
+//! Implementation of the linear function
+
+use numeric_literals::replace_float_literals;
+use serde::Serialize;
+use alg_tools::types::*;
+use alg_tools::norms::*;
+use alg_tools::loc::Loc;
+use alg_tools::sets::Cube;
+use alg_tools::bisection_tree::{
+    Support,
+    Bounds,
+    LocalAnalysis,
+    GlobalAnalysis,
+    Bounded,
+};
+use alg_tools::mapping::{Mapping, Instance};
+use alg_tools::maputil::array_init;
+use alg_tools::euclidean::Euclidean;
+
+/// Representation of the linear function $f(x) = ⟨v, x⟩$ on $ℝ^N$.
+#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
+pub struct Linear<F : Float, const N : usize> {
+    /// The vector $v$ defining the linear function.
+    pub v : Loc<F, N>,
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float, const N : usize> Mapping<Loc<F, N>> for Linear<F, N> {
+    type Codomain = F;
+
+    #[inline]
+    fn apply<I : Instance<Loc<F, N>>>(&self, x : I) -> Self::Codomain {
+        x.eval(|x| self.v.dot(x))
+    }
+}
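+
+// Usage sketch (hypothetical values):
+//
+// let f = Linear { v : Loc([1.0, -2.0]) };
+// assert_eq!(f.apply(Loc([3.0, 1.0])), 1.0);    // ⟨v, x⟩ = 3·1 + 1·(-2) = 1
+// assert_eq!(f.apply(&Loc([0.0, 0.5])), -1.0);  // references also work via `Instance`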
+
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, const N : usize> Support<F, N> for Linear<F, N> {
+    #[inline]
+    fn support_hint(&self) -> Cube<F,N> {
+        array_init(|| [F::NEG_INFINITY, F::INFINITY]).into()
+    }
+
+    #[inline]
+    fn in_support(&self, _x : &Loc<F,N>) -> bool {
+        true
+    }
+    
+    /*fn fully_in_support(&self, _cube : &Cube<F,N>) -> bool {
+        todo!("Not implemented, but not used at the moment")
+    }*/
+
+    #[inline]
+    fn bisection_hint(&self, _cube : &Cube<F,N>) -> [Option<F>; N] {
+        [None; N]
+    }
+}
+
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, const N : usize>
+GlobalAnalysis<F, Bounds<F>>
+for Linear<F, N> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<F> {
+        Bounds(F::NEG_INFINITY, F::INFINITY)
+    }
+}
+
+impl<'a, F : Float, const N : usize>
+LocalAnalysis<F, Bounds<F>, N>
+for Linear<F, N> {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+        let (lower, upper) = cube.iter_corners()
+                                 .map(|x| self.apply(x))
+                                 .fold((F::INFINITY, F::NEG_INFINITY), |(lower, upper), v| {
+                                      (lower.min(v), upper.max(v))
+                                 });
+        Bounds(lower, upper)
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, const N : usize>
+Norm<F, Linfinity>
+for Linear<F, N> {
+    #[inline]
+    fn norm(&self, _ : Linfinity) -> F {
+        self.bounds().upper()
+    }
+}
+
--- a/src/kernels/mollifier.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/kernels/mollifier.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -2,7 +2,7 @@
 //! Implementation of the standard mollifier
 
 use rgsl::hypergeometric::hyperg_U;
-use float_extras::f64::{tgamma as gamma};
+use float_extras::f64::tgamma as gamma;
 use numeric_literals::replace_float_literals;
 use serde::Serialize;
 use alg_tools::types::*;
@@ -17,7 +17,7 @@
     LocalAnalysis,
     GlobalAnalysis
 };
-use alg_tools::mapping::Apply;
+use alg_tools::mapping::{Mapping, Instance};
 use alg_tools::maputil::array_init;
 
 /// Representation of the (unnormalised) standard mollifier.
@@ -36,13 +36,14 @@
 }
 
 #[replace_float_literals(C::Type::cast_from(literal))]
-impl<'a, C : Constant, const N : usize> Apply<&'a Loc<C::Type, N>> for Mollifier<C, N> {
-    type Output = C::Type;
+impl<C : Constant, const N : usize> Mapping<Loc<C::Type, N>> for Mollifier<C, N> {
+    type Codomain = C::Type;
+
     #[inline]
-    fn apply(&self, x : &'a Loc<C::Type, N>) -> Self::Output {
+    fn apply<I : Instance<Loc<C::Type, N>>>(&self, x : I) -> Self::Codomain {
         let ε = self.width.value();
         let ε2 = ε*ε;
-        let n2 = x.norm2_squared();
+        let n2 = x.eval(|x| x.norm2_squared());
         if n2 < ε2 {
             (n2 / (n2 - ε2)).exp()
         } else {
@@ -51,13 +52,6 @@
     }
 }
 
-impl<C : Constant, const N : usize> Apply<Loc<C::Type, N>> for Mollifier<C, N> {
-    type Output = C::Type;
-    #[inline]
-    fn apply(&self, x : Loc<C::Type, N>) -> Self::Output {
-        self.apply(&x)
-    }
-}
 
 impl<'a, C : Constant, const N : usize> Support<C::Type, N> for Mollifier<C, N> {
     #[inline]
--- a/src/main.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/main.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -10,12 +10,13 @@
 // Linear operators may be written e.g. as `opA`, to keep the capital letters of mathematical
 // convention while referring to the type (trait) of the operator as `A`.
 #![allow(non_snake_case)]
-// We need the drain filter for inertial prune.
-#![feature(drain_filter)]
+// Needed to be able to create parse errors.
+#![feature(dec2flt)]
 
 use clap::Parser;
 use serde::{Serialize, Deserialize};
 use serde_json;
+use serde_with::skip_serializing_none;
 use itertools::Itertools;
 use std::num::NonZeroUsize;
 
@@ -30,11 +31,17 @@
 pub mod kernels;
 pub mod seminorms;
 pub mod forward_model;
+pub mod preadjoint_helper;
 pub mod plot;
 pub mod subproblem;
 pub mod tolerance;
 pub mod regularisation;
+pub mod dataterm;
+pub mod prox_penalty;
 pub mod fb;
+pub mod sliding_fb;
+pub mod sliding_pdps;
+pub mod forward_pdps;
 pub mod frank_wolfe;
 pub mod pdps;
 pub mod run;
@@ -49,12 +56,12 @@
     AlgorithmConfig,
 };
 use experiments::DefaultExperiment;
-use measures::merging::SpikeMergingMethod;
 use DefaultExperiment::*;
 use DefaultAlgorithm::*;
 
 /// Command line parameters
-#[derive(Parser, Debug, Serialize)]
+#[skip_serializing_none]
+#[derive(Parser, Debug, Serialize, Default, Clone)]
 #[clap(
     about = env!("CARGO_PKG_DESCRIPTION"),
     author = env!("CARGO_PKG_AUTHORS"),
@@ -89,7 +96,7 @@
     /// Not all algorithms are available for all the experiments.
     /// In particular, only PDPS is available for the experiments with L¹ data term.
     #[arg(value_enum, value_name = "ALGORITHM", long, short = 'a',
-           default_values_t = [FB, FISTA, PDPS, FW, FWRelax])]
+           default_values_t = [FB, PDPS, SlidingFB, FW, RadonFB])]
     algorithm : Vec<DefaultAlgorithm>,
 
     /// Saved algorithm configration(s) to use on the experiments
@@ -112,6 +119,10 @@
     /// Number of threads. Overrides the maximum number.
     num_threads : Option<usize>,
 
+    #[arg(long, default_value_t = false)]
+    /// Load saved value ranges (if they exist) to do a partial update.
+    load_valuerange : bool,
+
     #[clap(flatten, next_help_heading = "Experiment overrides")]
     /// Experiment setup overrides
     experiment_overrides : ExperimentOverrides<float>,
@@ -122,7 +133,8 @@
 }
 
 /// Command line experiment setup overrides
-#[derive(Parser, Debug, Serialize, Deserialize)]
+#[skip_serializing_none]
+#[derive(Parser, Debug, Serialize, Deserialize, Default, Clone)]
 pub struct ExperimentOverrides<F : ClapFloat> {
     #[arg(long)]
     /// Regularisation parameter override.
@@ -145,7 +157,8 @@
 }
 
 /// Command line algorithm parametrisation overrides
-#[derive(Parser, Debug, Serialize, Deserialize)]
+#[skip_serializing_none]
+#[derive(Parser, Debug, Serialize, Deserialize, Default, Clone)]
 pub struct AlgorithmOverrides<F : ClapFloat> {
     #[arg(long, value_names = &["COUNT", "EACH"])]
     /// Override bootstrap insertion iterations for --algorithm.
@@ -162,21 +175,44 @@
     tau0 : Option<F>,
 
     #[arg(long, requires = "algorithm")]
+    /// Second primal step length parameter override for SlidingPDPS.
+    ///
+    /// Only use if running just a single algorithm, as different algorithms have different
+    /// step length parameters.
+    sigmap0 : Option<F>,
+
+    #[arg(long, requires = "algorithm")]
     /// Dual step length parameter override for --algorithm.
     ///
     /// Only use if running just a single algorithm, as different algorithms have different
     /// regularisation parameters. Only affects PDPS.
     sigma0 : Option<F>,
 
+    #[arg(long)]
+    /// Normalised transport step length for sliding methods.
+    theta0 : Option<F>,
+
+    #[arg(long)]
+    /// A posteriori transport tolerance multiplier (C_pos).
+    transport_tolerance_pos : Option<F>,
+
+    #[arg(long)]
+    /// Transport adaptation factor. Must be in (0, 1).
+    transport_adaptation : Option<F>,
+
+    #[arg(long)]
+    /// Minimal step length parameter for sliding methods.
+    tau0_min : Option<F>,
+
     #[arg(value_enum, long)]
     /// PDPS acceleration, when available.
     acceleration : Option<pdps::Acceleration>,
 
-    #[arg(long)]
-    /// Perform postprocess weight optimisation for saved iterations
-    ///
-    /// Only affects FB, FISTA, and PDPS.
-    postprocessing : Option<bool>,
+    // #[arg(long)]
+    // /// Perform postprocess weight optimisation for saved iterations
+    // ///
+    // /// Only affects FB, FISTA, and PDPS.
+    // postprocessing : Option<bool>,
 
     #[arg(value_name = "n", long)]
     /// Merging frequency, if merging enabled (every n iterations)
@@ -184,18 +220,26 @@
     /// Only affects FB, FISTA, and PDPS.
     merge_every : Option<usize>,
 
-    #[arg(value_enum, long)]//, value_parser = SpikeMergingMethod::<float>::value_parser())]
-    /// Merging strategy
-    ///
-    /// Either the string "none", or a radius value for heuristic merging.
-    merging : Option<SpikeMergingMethod<F>>,
+    #[arg(long)]
+    /// Enable merging (default: determined by algorithm)
+    merge : Option<bool>,
+
+    #[arg(long)]
+    /// Merging radius (default: determined by experiment)
+    merge_radius : Option<F>,
 
-    #[arg(value_enum, long)]//, value_parser = SpikeMergingMethod::<float>::value_parser())]
-    /// Final merging strategy
-    ///
-    /// Either the string "none", or a radius value for heuristic merging.
-    /// Only affects FB, FISTA, and PDPS.
-    final_merging : Option<SpikeMergingMethod<F>>,
+    #[arg(long)]
+    /// Interpolate when merging (default: determined by algorithm)
+    merge_interp : Option<bool>,
+
+    #[arg(long)]
+    /// Enable final merging (default: determined by algorithm)
+    final_merging : Option<bool>,
+
+    #[arg(long)]
+    /// Enable fitness-based merging for relevant FB-type methods.
+    /// This has worse convergence guarantees than merging based on optimality conditions.
+    fitness_merging : Option<bool>,
 
     #[arg(long, value_names = &["ε", "θ", "p"])]
     /// Set the tolerance to ε_k = ε/(1+θk)^p
@@ -230,9 +274,15 @@
     for experiment_shorthand in cli.experiments.iter().unique() {
         let experiment = experiment_shorthand.get_experiment(&cli.experiment_overrides).unwrap();
         let mut algs : Vec<Named<AlgorithmConfig<float>>>
-            = cli.algorithm.iter()
-                            .map(|alg| experiment.algorithm_defaults(*alg, &cli.algoritm_overrides))
-                            .collect();
+            = cli.algorithm
+                 .iter()
+                 .map(|alg| {
+                    let cfg = alg.default_config()
+                                 .cli_override(&experiment.algorithm_overrides(*alg))
+                                 .cli_override(&cli.algoritm_overrides);
+                    alg.to_named(cfg)
+                 })
+                 .collect();
         for filename in cli.saved_algorithm.iter() {
             let f = std::fs::File::open(filename).unwrap();
             let alg = serde_json::from_reader(f).unwrap();
--- a/src/measures.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/measures.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -7,3 +7,4 @@
 mod discrete;
 pub use discrete::*;
 pub mod merging;
+
--- a/src/measures/base.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/measures/base.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -16,3 +16,6 @@
     type Domain;
 }
 
+/// Decomposition of measures
+pub struct MeasureDecomp;
+
--- a/src/measures/delta.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/measures/delta.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -7,8 +7,9 @@
 use crate::types::*;
 use std::ops::{Div, Mul, DivAssign, MulAssign, Neg};
 use serde::ser::{Serialize, Serializer, SerializeStruct};
-use alg_tools::norms::{Norm, Dist};
-use alg_tools::linops::{Apply, Linear};
+use alg_tools::norms::Norm;
+use alg_tools::linops::{Mapping, Linear};
+use alg_tools::instance::{Instance, Space};
 
 /// Representation of a delta measure.
 ///
@@ -50,43 +51,50 @@
 }
 
 
-impl<Domain : PartialEq, F : Float> Measure<F> for DeltaMeasure<Domain, F> {
+impl<Domain, F : Float> Measure<F> for DeltaMeasure<Domain, F> {
     type Domain = Domain;
 }
 
-impl<Domain : PartialEq, F : Float> Norm<F, Radon> for DeltaMeasure<Domain, F> {
+impl<Domain, F : Float> Norm<F, Radon> for DeltaMeasure<Domain, F> {
     #[inline]
     fn norm(&self, _ : Radon) -> F {
         self.α.abs()
     }
 }
 
-impl<Domain : PartialEq, F : Float> Dist<F, Radon> for DeltaMeasure<Domain, F> {
+// impl<Domain : PartialEq, F : Float> Dist<F, Radon> for DeltaMeasure<Domain, F> {
+//     #[inline]
+//     fn dist(&self, other : &Self, _ : Radon) -> F {
+//         if self.x == other. x {
+//             (self.α - other.α).abs()
+//         } else {
+//             self.α.abs() + other.α.abs()
+//         }
+//     }
+// }
+
+impl<Domain, G, F : Num> Mapping<G> for DeltaMeasure<Domain, F>
+where
+    Domain : Space,
+    G::Codomain : Mul<F, Output=G::Codomain>,
+    G : Mapping<Domain> + Clone + Space,
+    for<'b> &'b Domain : Instance<Domain>,
+{
+    type Codomain = G::Codomain;
+
     #[inline]
-    fn dist(&self, other : &Self, _ : Radon) -> F {
-        if self.x == other. x {
-            (self.α - other.α).abs()
-        } else {
-            self.α.abs() + other.α.abs()
-        }
+    fn apply<I : Instance<G>>(&self, g : I) -> Self::Codomain {
+        g.eval(|g̃| g̃.apply(&self.x) * self.α)
     }
 }
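+
+// Hence a delta measure acts on a mapping g as the point-evaluation functional
+// ⟨δ_{x,α}, g⟩ = α g(x). Usage sketch (hypothetical kernel g and construction):
+//
+// let δ = DeltaMeasure { x : Loc([0.0]), α : 2.0 };
+// let v = δ.apply(&g);   // = 2.0 * g.apply(&Loc([0.0]))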
 
-impl<'b, Domain, G, F : Num, V : Mul<F, Output=V>> Apply<G> for DeltaMeasure<Domain, F>
-where G: for<'a> Apply<&'a Domain, Output = V>,
-      V : Mul<F> {
-    type Output = V;
-
-    #[inline]
-    fn apply(&self, g : G) -> Self::Output {
-        g.apply(&self.x) * self.α
-    }
-}
-
-impl<Domain, G, F : Num, V : Mul<F, Output=V>> Linear<G> for DeltaMeasure<Domain, F>
-where G: for<'a> Apply<&'a Domain, Output = V> {
-    type Codomain = V;
-}
+impl<Domain, G, F : Num> Linear<G> for DeltaMeasure<Domain, F>
+where
+    Domain : Space,
+    G::Codomain : Mul<F, Output=G::Codomain>,
+    G : Mapping<Domain> + Clone + Space,
+    for<'b> &'b Domain : Instance<Domain>,
+{ }
 
 // /// Partial blanket implementation of [`DeltaMeasure`] as a linear functional of [`Mapping`]s.
 // /// A full blanket implementation is not possible due to annoying Rust limitations: only [`Apply`]
@@ -141,12 +149,13 @@
     }
 }
 
-/*impl<F : Num> From<(F, F)> for DeltaMeasure<Loc<F, 1>, F> {
+impl<'a, Domain : Clone, F : Num> From<&'a DeltaMeasure<Domain, F>> for DeltaMeasure<Domain, F> {
     #[inline]
-    fn from((x, α) : (F, F)) -> Self {
-        DeltaMeasure{x: Loc([x]), α: α}
+    fn from(d : &'a DeltaMeasure<Domain, F>) -> Self {
+        d.clone()
     }
-}*/
+}
+
 
 impl<Domain, F : Num> DeltaMeasure<Domain, F> {
     /// Set the mass of the spike.
@@ -186,6 +195,26 @@
     }
 }
 
+impl<Domain, F : Num> IntoIterator for DeltaMeasure<Domain, F> {
+    type Item =  Self;
+    type IntoIter =  std::iter::Once<Self>;
+
+    #[inline]
+    fn into_iter(self) -> Self::IntoIter {
+        std::iter::once(self)
+    }
+}
+
+impl<'a, Domain, F : Num> IntoIterator for &'a DeltaMeasure<Domain, F> {
+    type Item =  Self;
+    type IntoIter =  std::iter::Once<Self>;
+
+    #[inline]
+    fn into_iter(self) -> Self::IntoIter {
+        std::iter::once(self)
+    }
+}
+
 
 macro_rules! make_delta_scalarop_rhs {
     ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
--- a/src/measures/discrete.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/measures/discrete.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -11,9 +11,11 @@
 
 use alg_tools::norms::Norm;
 use alg_tools::tabledump::TableDump;
-use alg_tools::linops::{Apply, Linear};
+use alg_tools::linops::{Mapping, Linear};
 use alg_tools::iter::{MapF,Mappable};
 use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::collection::Collection;
+use alg_tools::instance::{Instance, Decomposition, MyCow, EitherDecomp, Space};
 
 use crate::types::*;
 use super::base::*;
@@ -29,6 +31,8 @@
     pub(super) spikes : Vec<DeltaMeasure<Domain, F>>,
 }
 
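+/// Shorthand for a [`DiscreteMeasure`] on $ℝ^N$.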
+pub type RNDM<F, const N : usize> = DiscreteMeasure<Loc<F, N>, F>;
+
 /// Iterator over the [`DeltaMeasure`] spikes of a [`DiscreteMeasure`].
 pub type SpikeIter<'a, Domain, F> = std::slice::Iter<'a, DeltaMeasure<Domain, F>>;
 
@@ -59,6 +63,20 @@
         self.spikes.len()
     }
 
+    /// Replace with the zero measure.
+    #[inline]
+    pub fn clear(&mut self) {
+        self.spikes.clear()
+    }
+
+    /// Removes the `i`:th spike, not maintaining order.
+    ///
+    /// Panics if the index is out of bounds.
+    #[inline]
+    pub fn swap_remove(&mut self, i : usize) -> DeltaMeasure<Domain, F> {
+        self.spikes.swap_remove(i)
+    }
+
     /// Iterate over (references to) the [`DeltaMeasure`] spikes in this measure
     #[inline]
     pub fn iter_spikes(&self) -> SpikeIter<'_, Domain, F> {
@@ -95,6 +113,13 @@
         self.spikes.iter_mut().zip(iter).for_each(|(δ, α)| δ.set_mass(α));
     }
 
+    /// Update the locations of all the spikes to those produced by an iterator.
+    #[inline]
+    pub fn set_locations<'a, I : Iterator<Item=&'a Domain>>(&mut self, iter : I) 
+    where Domain : 'static + Clone {
+        self.spikes.iter_mut().zip(iter.cloned()).for_each(|(δ, x)| δ.set_location(x));
+    }
+
     // /// Map the masses of all the spikes using a function and an iterator
     // #[inline]
     // pub fn zipmap_masses<
@@ -107,50 +132,162 @@
     /// Prune all spikes with zero mass.
     #[inline]
     pub fn prune(&mut self) {
-        self.spikes.retain(|δ| δ.α != F::ZERO);
+        self.prune_by(|δ| δ.α != F::ZERO);
+    }
+
+    /// Prune spikes by the predicate `g`.
+    #[inline]
+    pub fn prune_by<G : FnMut(&DeltaMeasure<Domain, F>) -> bool>(&mut self, g : G) {
+        self.spikes.retain(g);
+    }
+
+    /// Add the spikes produced by `iter` to this measure.
+    #[inline]
+    pub fn extend<I : Iterator<Item=DeltaMeasure<Domain, F>>>(
+        &mut self,
+        iter : I
+    ) {
+        self.spikes.extend(iter);
+    }
+
+    /// Add a spike to the measure
+    #[inline]
+    pub fn push(&mut self, δ : DeltaMeasure<Domain, F>) {
+        self.spikes.push(δ);
+    }
+
+    /// Iterates over triples of the masses and locations of two discrete measures, which are
+    /// assumed to have equal locations at matching spike indices.
+    pub fn both_matching<'a>(&'a self, other : &'a DiscreteMeasure<Domain, F>) ->
+      impl Iterator<Item=(F, F, &'a Domain)> {
+        let m = self.len().max(other.len());
+        self.iter_spikes().map(Some).chain(std::iter::repeat(None))
+            .zip(other.iter_spikes().map(Some).chain(std::iter::repeat(None)))
+            .take(m)
+            .map(|(oδ, orδ)| {
+                match (oδ, orδ) {
+                    (Some(δ), Some(rδ)) => (δ.α, rδ.α, &δ.x), // Assumed δ.x=rδ.x
+                    (Some(δ), None)     => (δ.α, F::ZERO,  &δ.x),
+                    (None, Some(rδ))    => (F::ZERO, rδ.α, &rδ.x),
+                    (None, None)        => panic!("This cannot happen!"),
+                }
+            })
+    }
+
+    /// Subtracts `other` from `self`, assuming equal locations at matching spike indices.
+    pub fn sub_matching(&self, other : &DiscreteMeasure<Domain, F>) -> DiscreteMeasure<Domain, F>
+    where Domain : Clone {
+        self.both_matching(other)
+            .map(|(α, β, x)| (x.clone(), α - β))
+            .collect()
+    }
+
+    /// Adds `other` to `self`, assuming equal locations at matching spike indices.
+    pub fn add_matching(&self, other : &DiscreteMeasure<Domain, F>) -> DiscreteMeasure<Domain, F>
+    where Domain : Clone {
+        self.both_matching(other)
+            .map(|(α, β, x)| (x.clone(), α + β))
+            .collect()
+    }
+
+    /// Calculates the Radon-norm distance of `self` to `other`,
+    /// assuming equal locations at matching spike indices.
+    pub fn dist_matching(&self, other : &DiscreteMeasure<Domain, F>) -> F where F : Float {
+        self.both_matching(other)
+            .map(|(α, β, _)| (α-β).abs())
+            .sum()
+    }
+}
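+
+// Usage sketch of the matching helpers (hypothetical values; spikes at matching
+// indices must share locations, as required above, and we assume the
+// `(location, mass)` tuple conversion used elsewhere in this file):
+//
+// let μ = DiscreteMeasure::from([(Loc([0.0]), 1.0), (Loc([1.0]), 2.0)]);
+// let ν = DiscreteMeasure::from([(Loc([0.0]), 0.5)]);
+// assert_eq!(μ.dist_matching(&ν), 2.5);   // |1.0 - 0.5| + |2.0 - 0.0|
+// let d = μ.sub_matching(&ν);             // masses 0.5 at 0.0 and 2.0 at 1.0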
+
+impl<Domain, F : Num> IntoIterator for DiscreteMeasure<Domain, F> {
+    type Item =  DeltaMeasure<Domain, F>;
+    type IntoIter = std::vec::IntoIter<DeltaMeasure<Domain, F>>;
+
+    #[inline]
+    fn into_iter(self) -> Self::IntoIter {
+        self.spikes.into_iter()
+    }
+}
+
+impl<'a, Domain, F : Num> IntoIterator for &'a DiscreteMeasure<Domain, F> {
+    type Item =  &'a DeltaMeasure<Domain, F>;
+    type IntoIter =  SpikeIter<'a, Domain, F>;
+
+    #[inline]
+    fn into_iter(self) -> Self::IntoIter {
+        self.spikes.iter()
+    }
+}
+
+impl<Domain, F : Num> Sum<DeltaMeasure<Domain, F>> for DiscreteMeasure<Domain, F>  {
+    // Required method
+    fn sum<I>(iter: I) -> Self
+    where
+        I : Iterator<Item = DeltaMeasure<Domain, F>>
+    {
+        Self::from_iter(iter)
+    }
+}
+
+impl<'a, Domain : Clone, F : Num> Sum<&'a DeltaMeasure<Domain, F>>
+    for DiscreteMeasure<Domain, F>
+{
+    // Required method
+    fn sum<I>(iter: I) -> Self
+    where
+        I : Iterator<Item = &'a DeltaMeasure<Domain, F>>
+    {
+        Self::from_iter(iter.cloned())
+    }
+}
+
+impl<Domain, F : Num> Sum<DiscreteMeasure<Domain, F>> for DiscreteMeasure<Domain, F>  {
+    // Required method
+    fn sum<I>(iter: I) -> Self
+    where
+        I : Iterator<Item = DiscreteMeasure<Domain, F>>
+    {
+        Self::from_iter(iter.map(|μ| μ.into_iter()).flatten())
+    }
+}
+
+impl<'a, Domain : Clone, F : Num> Sum<&'a DiscreteMeasure<Domain, F>>
+    for DiscreteMeasure<Domain, F>
+{
+    // Required method
+    fn sum<I>(iter: I) -> Self
+    where
+        I : Iterator<Item = &'a DiscreteMeasure<Domain, F>>
+    {
+        Self::from_iter(iter.map(|μ| μ.iter_spikes()).flatten().cloned())
     }
 }
 
 impl<Domain : Clone, F : Float> DiscreteMeasure<Domain, F> {
     /// Computes `μ1 ← θ * μ1 - ζ * μ2`, pruning entries where both `μ1` (`self`) and `μ2` have
-    // zero weight. `μ2` will contain copy of pruned original `μ1` without arithmetic performed.
-    /// **This expects `self` and `μ2` to have matching coordinates in each index**.
+    /// zero weight. `μ2` will contain a pruned copy of the original `μ1`, without arithmetic
+    /// performed. **This expects `self` and `μ2` to have matching coordinates in each index**.
     // `μ2` can be shorter than `self`, but not longer.
     pub fn pruning_sub(&mut self, θ : F, ζ : F, μ2 : &mut Self) {
-        let mut μ2_get = 0;
-        let mut μ2_insert = 0;
-        self.spikes.drain_filter(|&mut DeltaMeasure{ α : ref mut α_ref, ref x }| {
-            // Get weight of spike in μ2, zero if out of bounds.
-            let β = μ2.spikes.get(μ2_get).map_or(F::ZERO, DeltaMeasure::get_mass);
-            μ2_get += 1;
-
-            if *α_ref == F::ZERO && β == F::ZERO {
-                // Prune
-                true
+        for δ in &self[μ2.len()..] {
+            μ2.push(DeltaMeasure{ x : δ.x.clone(), α : F::ZERO});
+        }
+        debug_assert_eq!(self.len(), μ2.len());
+        let mut dest = 0;
+        for i in 0..self.len() {
+            let α = self[i].α;
+            let α_new = θ * α - ζ * μ2[i].α;
+            if dest < i {
+                μ2[dest] = DeltaMeasure{ x : self[i].x.clone(), α };
+                self[dest] = DeltaMeasure{ x : self[i].x.clone(), α : α_new };
             } else {
-                // Save self weight
-                let α = *α_ref;
-                // Modify self
-                *α_ref = θ * α - ζ * β;
-                // Make copy of old self weight in μ2
-                let δ = DeltaMeasure{ α, x : x.clone() };
-                match μ2.spikes.get_mut(μ2_insert) {
-                    Some(replace) => {
-                        *replace = δ;
-                    },
-                    None => {
-                        debug_assert_eq!(μ2.len(), μ2_insert);
-                        μ2.spikes.push(δ);
-                    },
-                }
-                μ2_insert += 1;
-                // Keep
-                false
+                μ2[i].α = α;
+                self[i].α = α_new;
             }
-        });
-        // Truncate μ2 to same length as self.
-        μ2.spikes.truncate(μ2_insert);
-        debug_assert_eq!(μ2.len(), self.len());
+            dest += 1;
+        }
+        self.spikes.truncate(dest);
+        μ2.spikes.truncate(dest);
     }
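+
+    // Worked example (sketch): with θ = 2, ζ = 1, self = [(x₁, 3), (x₂, 1)], and
+    // μ2 = [(x₁, 1)], the first loop pads μ2 to [(x₁, 1), (x₂, 0)]; the arithmetic
+    // loop then leaves self = [(x₁, 2·3 - 1·1), (x₂, 2·1 - 1·0)] = [(x₁, 5), (x₂, 2)]
+    // and μ2 = [(x₁, 3), (x₂, 1)], i.e., the original masses of self.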
 }
 
@@ -174,23 +311,61 @@
     pub fn set_masses_dvector(&mut self, x : &DVector<F::MixedType>) {
         self.set_masses(x.iter().map(|&α| F::from_nalgebra_mixed(α)));
     }
+
+    // /// Extracts the masses of the spikes as a [`Vec`].
+    // pub fn masses_vec(&self) -> Vec<F::MixedType> {
+    //     self.iter_masses()
+    //         .map(|α| α.to_nalgebra_mixed())
+    //         .collect()
+    // }
+
+    // /// Sets the masses of the spikes from the values of a [`Vec`].
+    // pub fn set_masses_vec(&mut self, x : &Vec<F::MixedType>) {
+    //     self.set_masses(x.iter().map(|&α| F::from_nalgebra_mixed(α)));
+    // }
 }
 
-impl<Domain, F :Num> Index<usize> for DiscreteMeasure<Domain, F> {
-    type Output = DeltaMeasure<Domain, F>;
+// impl<Domain, F :Num> Index<usize> for DiscreteMeasure<Domain, F> {
+//     type Output = DeltaMeasure<Domain, F>;
+//     #[inline]
+//     fn index(&self, i : usize) -> &Self::Output {
+//         self.spikes.index(i)
+//     }
+// }
+
+// impl<Domain, F :Num> IndexMut<usize> for DiscreteMeasure<Domain, F> {
+//     #[inline]
+//     fn index_mut(&mut self, i : usize) -> &mut Self::Output {
+//         self.spikes.index_mut(i)
+//     }
+// }
+
+impl<
+    Domain,
+    F : Num,
+    I : std::slice::SliceIndex<[DeltaMeasure<Domain, F>]>
+> Index<I>
+for DiscreteMeasure<Domain, F> {
+    type Output = <I as std::slice::SliceIndex<[DeltaMeasure<Domain, F>]>>::Output;
     #[inline]
-    fn index(&self, i : usize) -> &Self::Output {
+    fn index(&self, i : I) -> &Self::Output {
         self.spikes.index(i)
     }
 }
 
-impl<Domain, F :Num> IndexMut<usize> for DiscreteMeasure<Domain, F> {
+impl<
+    Domain,
+    F : Num,
+    I : std::slice::SliceIndex<[DeltaMeasure<Domain, F>]>
+> IndexMut<I>
+for DiscreteMeasure<Domain, F> {
     #[inline]
-    fn index_mut(&mut self, i : usize) -> &mut Self::Output {
+    fn index_mut(&mut self, i : I) -> &mut Self::Output {
         self.spikes.index_mut(i)
     }
 }
 
+
 impl<Domain, F : Num, D : Into<DeltaMeasure<Domain, F>>, const K : usize> From<[D; K]>
 for DiscreteMeasure<Domain, F> {
     #[inline]
@@ -199,6 +374,45 @@
     }
 }
 
+impl<Domain, F : Num> From<Vec<DeltaMeasure<Domain, F>>>
+for DiscreteMeasure<Domain, F> {
+    #[inline]
+    fn from(spikes : Vec<DeltaMeasure<Domain, F>>) -> Self {
+        DiscreteMeasure{ spikes }
+    }
+}
+
+impl<'a, Domain, F : Num, D> From<&'a [D]>
+for DiscreteMeasure<Domain, F>
+where &'a D : Into<DeltaMeasure<Domain, F>> {
+    #[inline]
+    fn from(list : &'a [D]) -> Self {
+        list.into_iter().map(|d| d.into()).collect()
+    }
+}
+
+
+impl<Domain, F : Num> From<DeltaMeasure<Domain, F>>
+for DiscreteMeasure<Domain, F> {
+    #[inline]
+    fn from(δ : DeltaMeasure<Domain, F>) -> Self {
+        DiscreteMeasure{
+            spikes : vec!(δ)
+        }
+    }
+}
+
+impl<'a, Domain : Clone, F : Num> From<&'a DeltaMeasure<Domain, F>>
+for DiscreteMeasure<Domain, F> {
+    #[inline]
+    fn from(δ : &'a DeltaMeasure<Domain, F>) -> Self {
+        DiscreteMeasure{
+            spikes : vec!(δ.clone())
+        }
+    }
+}
+
+
 impl<Domain, F : Num, D : Into<DeltaMeasure<Domain, F>>> FromIterator<D>
 for DiscreteMeasure<Domain, F> {
     #[inline]
@@ -258,19 +472,28 @@
     }
 }
 
-impl<Domain, G, F : Num, Y : Sum + Mul<F, Output=Y>> Apply<G> for DiscreteMeasure<Domain, F>
-where G: for<'a> Apply<&'a Domain, Output = Y> {
-    type Output = Y;
+impl<Domain, G, F : Num> Mapping<G> for DiscreteMeasure<Domain, F>
+where
+    Domain : Space,
+    G::Codomain : Sum + Mul<F, Output=G::Codomain>,
+    G : Mapping<Domain, Codomain=F> + Clone + Space,
+    for<'b> &'b Domain : Instance<Domain>,
+{
+    type Codomain = G::Codomain;
+
     #[inline]
-    fn apply(&self, g : G) -> Y {
-        self.spikes.iter().map(|m| g.apply(&m.x) * m.α).sum()
+    fn apply<I : Instance<G>>(&self, g : I) -> Self::Codomain {
+        g.eval(|g| self.spikes.iter().map(|m| g.apply(&m.x) * m.α).sum())
     }
 }
 
-impl<Domain, G, F : Num, Y : Sum + Mul<F, Output=Y>> Linear<G> for DiscreteMeasure<Domain, F>
-where G : for<'a> Apply<&'a Domain, Output = Y> {
-    type Codomain = Y;
-}
+impl<Domain, G, F : Num> Linear<G> for DiscreteMeasure<Domain, F>
+where
+    Domain : Space,
+    G::Codomain : Sum + Mul<F, Output=G::Codomain>,
+    G : Mapping<Domain, Codomain=F> + Clone + Space,
+    for<'b> &'b Domain : Instance<Domain>,
+{ }
 
 
 /// Helper trait for constructing arithmetic operations for combinations
@@ -278,6 +501,7 @@
 trait Lift<F : Num, Domain> {
     type Producer : Iterator<Item=DeltaMeasure<Domain, F>>;
 
+    #[allow(dead_code)]
     /// Lifts `self` into a [`DiscreteMeasure`].
     fn lift(self) -> DiscreteMeasure<Domain, F>;
 
@@ -574,3 +798,217 @@
 
 make_discrete_scalarop_lhs!(Mul, mul; f32 f64 i8 i16 i32 i64 isize u8 u16 u32 u64 usize);
 make_discrete_scalarop_lhs!(Div, div; f32 f64 i8 i16 i32 i64 isize u8 u16 u32 u64 usize);
+
+impl<F : Num, Domain> Collection for DiscreteMeasure<Domain, F> {
+    type Element = DeltaMeasure<Domain, F>;
+    type RefsIter<'a> = std::slice::Iter<'a, Self::Element> where Self : 'a;
+
+    #[inline]
+    fn iter_refs(&self) -> Self::RefsIter<'_> {
+        self.iter_spikes()
+    }
+}
+
+impl<Domain : Clone, F : Num> Space for DiscreteMeasure<Domain, F> {
+    type Decomp = MeasureDecomp;
+}
+
+pub type SpikeSlice<'b, Domain, F> = &'b [DeltaMeasure<Domain, F>];
+
+pub type EitherSlice<'b, Domain, F> = EitherDecomp<
+    Vec<DeltaMeasure<Domain, F>>,
+    SpikeSlice<'b, Domain, F>
+>;
+
+impl<F : Num, Domain : Clone> Decomposition<DiscreteMeasure<Domain, F>> for MeasureDecomp {
+    type Decomposition<'b> = EitherSlice<'b, Domain, F> where DiscreteMeasure<Domain, F> : 'b;
+    type Reference<'b> = SpikeSlice<'b, Domain, F> where DiscreteMeasure<Domain, F> : 'b;
+
+    /// Lifts the lightweight reference type into a full decomposition type.
+    fn lift<'b>(r : Self::Reference<'b>) -> Self::Decomposition<'b> {
+        EitherDecomp::Borrowed(r)
+    }
+}
+
+impl<F : Num, Domain : Clone> Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
+for DiscreteMeasure<Domain, F>
+{
+    fn decompose<'b>(self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Decomposition<'b>
+    where Self : 'b, DiscreteMeasure<Domain, F> : 'b {
+        EitherDecomp::Owned(self.spikes)
+    }
+  
+    fn ref_instance(&self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Reference<'_>
+    {
+        self.spikes.as_slice()
+    }
+
+    fn cow<'b>(self) -> MyCow<'b, DiscreteMeasure<Domain, F>> where Self : 'b {
+        MyCow::Owned(self)
+    }
+
+    fn own(self) -> DiscreteMeasure<Domain, F> {
+        self
+    }
+}
+
+impl<'a, F : Num, Domain : Clone> Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
+for &'a DiscreteMeasure<Domain, F>
+{
+    fn decompose<'b>(self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Decomposition<'b>
+    where Self : 'b, DiscreteMeasure<Domain, F> : 'b {
+        EitherDecomp::Borrowed(self.spikes.as_slice())
+    }
+  
+    fn ref_instance(&self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Reference<'_>
+    {
+        self.spikes.as_slice()
+    }
+
+    fn cow<'b>(self) -> MyCow<'b, DiscreteMeasure<Domain, F>> where Self : 'b {
+        MyCow::Borrowed(self)
+    }
+
+    fn own(self) -> DiscreteMeasure<Domain, F> {
+        self.clone()
+    }
+}
+
+impl<'a, F : Num, Domain : Clone> Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
+for EitherSlice<'a, Domain, F>
+{
+    fn decompose<'b>(self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Decomposition<'b>
+    where Self : 'b, DiscreteMeasure<Domain, F> : 'b {
+        self
+    }
+  
+    fn ref_instance(&self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Reference<'_>
+    {
+        match self {
+            EitherDecomp::Owned(v) => v.as_slice(),
+            EitherDecomp::Borrowed(s) => s,
+        }
+    }
+
+    fn own(self) -> DiscreteMeasure<Domain, F> {
+        match self {
+            EitherDecomp::Owned(v) => v.into(),
+            EitherDecomp::Borrowed(s) => s.into(),
+        }
+    }
+}
+
+impl<'a, F : Num, Domain : Clone> Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
+for &'a EitherSlice<'a, Domain, F>
+{
+    fn decompose<'b>(self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Decomposition<'b>
+    where Self : 'b, DiscreteMeasure<Domain, F> : 'b {
+        match self {
+            EitherDecomp::Owned(v) => EitherDecomp::Borrowed(v.as_slice()),
+            EitherDecomp::Borrowed(s) => EitherDecomp::Borrowed(s),
+        }
+    }
+  
+    fn ref_instance(&self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Reference<'_>
+    {
+        match self {
+            EitherDecomp::Owned(v) => v.as_slice(),
+            EitherDecomp::Borrowed(s) => s,
+        }
+    }
+
+    fn own(self) -> DiscreteMeasure<Domain, F> {
+        match self {
+            EitherDecomp::Owned(v) => v.as_slice(),
+            EitherDecomp::Borrowed(s) => s
+        }.into()
+    }
+}
+
+impl<'a, F : Num, Domain : Clone> Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
+for SpikeSlice<'a, Domain, F>
+{
+    fn decompose<'b>(self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Decomposition<'b>
+    where Self : 'b, DiscreteMeasure<Domain, F> : 'b {
+        EitherDecomp::Borrowed(self)
+    }
+  
+    fn ref_instance(&self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Reference<'_>
+    {
+        self
+    }
+
+    fn own(self) -> DiscreteMeasure<Domain, F> {
+        self.into()
+    }
+}
+
+impl<'a, F : Num, Domain : Clone> Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
+for &'a SpikeSlice<'a, Domain, F>
+{
+    fn decompose<'b>(self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Decomposition<'b>
+    where Self : 'b, DiscreteMeasure<Domain, F> : 'b {
+        EitherDecomp::Borrowed(*self)
+    }
+  
+    fn ref_instance(&self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Reference<'_>
+    {
+        *self
+    }
+
+    fn own(self) -> DiscreteMeasure<Domain, F> {
+        (*self).into()
+    }
+}
+
+impl<F : Num, Domain : Clone > Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
+for DeltaMeasure<Domain, F>
+{
+    fn decompose<'b>(self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Decomposition<'b>
+    where Self : 'b, DiscreteMeasure<Domain, F> : 'b {
+        EitherDecomp::Owned(vec![self])
+    }
+  
+    fn ref_instance(&self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Reference<'_>
+    {
+        std::slice::from_ref(self)
+    }
+
+    fn own(self) -> DiscreteMeasure<Domain, F> {
+        self.into()
+    }
+}
+
+impl<'a, F : Num, Domain : Clone> Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
+for &'a DeltaMeasure<Domain, F>
+{
+    fn decompose<'b>(self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Decomposition<'b>
+    where Self : 'b, DiscreteMeasure<Domain, F> : 'b {
+        EitherDecomp::Borrowed(std::slice::from_ref(self))
+    }
+  
+    fn ref_instance(&self)
+        -> <MeasureDecomp as Decomposition<DiscreteMeasure<Domain, F>>>::Reference<'_>
+    {
+        std::slice::from_ref(*self)
+    }
+
+    fn own(self) -> DiscreteMeasure<Domain, F> {
+        self.into()
+    }
+}
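
Editor's note: taken together, the `Instance` implementations above let algorithm
code accept a measure in several ownership forms through a single generic bound.
A minimal sketch of such a consumer (hypothetical helper, not part of this
changeset; assumes alg_tools' `Num` exposes a `ZERO` constant):

    // Accepts an owned DiscreteMeasure, a borrowed one, a spike slice, an
    // EitherSlice, or a single DeltaMeasure, without building a new vector.
    fn total_mass_of<F : Num, Domain : Clone>(
        μ : impl Instance<DiscreteMeasure<Domain, F>, MeasureDecomp>
    ) -> F {
        μ.ref_instance().iter().map(|δ| δ.α).fold(F::ZERO, |a, b| a + b)
    }

Calling `total_mass_of(&μ)` borrows, while `total_mass_of(μ)` consumes; both go
through the same `ref_instance` code path.
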
--- a/src/measures/merging.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/measures/merging.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -7,60 +7,35 @@
 */
 
 use numeric_literals::replace_float_literals;
+use serde::{Deserialize, Serialize};
 use std::cmp::Ordering;
-use serde::{Serialize, Deserialize};
 //use clap::builder::{PossibleValuesParser, PossibleValue};
 use alg_tools::nanleast::NaNLeast;
 
-use crate::types::*;
 use super::delta::*;
 use super::discrete::*;
+use crate::types::*;
 
 /// Spike merging heuristic selection
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
 #[allow(dead_code)]
-pub enum SpikeMergingMethod<F> {
-    /// Try to merge spikes within a given radius of eachother
-    HeuristicRadius(F),
-    /// No merging
-    None,
-}
-
-// impl<F : Float> SpikeMergingMethod<F> {
-//     /// This is for [`clap`] to display command line help.
-//     pub fn value_parser() -> PossibleValuesParser {
-//         PossibleValuesParser::new([
-//             PossibleValue::new("none").help("No merging"),
-//             PossibleValue::new("<radius>").help("Heuristic merging within indicated radius")
-//         ])
-//     }
-// }
-
-impl<F : ClapFloat> std::fmt::Display for SpikeMergingMethod<F> {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
-        match self {
-            Self::None => write!(f, "none"),
-            Self::HeuristicRadius(r) => std::fmt::Display::fmt(r, f),
-        }
-    }
-}
-
-impl<F : ClapFloat> std::str::FromStr for SpikeMergingMethod<F> {
-    type Err = F::Err;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        if s == "none" {
-            Ok(Self::None)
-        } else {
-            Ok(Self::HeuristicRadius(F::from_str(s)?))
-        }
-    }
+pub struct SpikeMergingMethod<F> {
+    // Merging radius
+    pub(crate) radius: F,
+    // Enabled
+    pub(crate) enabled: bool,
+    // Interpolate merged points
+    pub(crate) interp: bool,
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float> Default for SpikeMergingMethod<F> {
+impl<F: Float> Default for SpikeMergingMethod<F> {
     fn default() -> Self {
-        SpikeMergingMethod::HeuristicRadius(0.02)
+        SpikeMergingMethod {
+            radius: 0.01,
+            enabled: false,
+            interp: true,
+        }
     }
 }
 
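
Editor's note: with the enum replaced by this struct, a custom merging
configuration is now written with struct update syntax (values arbitrary):

    let merging = SpikeMergingMethod {
        radius: 0.05,
        enabled: true,
        ..Default::default()
    };
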
@@ -75,13 +50,18 @@
-    /// an arbitrary value. This method will return that value for the *last* accepted merge, or
-    /// [`None`] if no merge was accepted.
+    /// an acceptance decision. This method returns the number of accepted merges.
     ///
-    /// This method is stable with respect to spike locations:  on merge, the weight of existing
-    /// spikes is set to zero, and a new one inserted at the end of the spike vector.
-    fn merge_spikes<G, V>(&mut self, method : SpikeMergingMethod<F>, accept : G) -> Option<V>
-    where G : Fn(&'_ Self) -> Option<V> {
-        match method {
-            SpikeMergingMethod::HeuristicRadius(ρ) => self.do_merge_spikes_radius(ρ, accept),
-            SpikeMergingMethod::None => None,
+    /// This method is stable with respect to spike locations: on merge, the weights of
+    /// removed spikes are set to zero, and new spikes are inserted at the end of the spike
+    /// vector. The merge may also be performed by increasing the weights of existing
+    /// spikes, without inserting new ones.
+    fn merge_spikes<G>(&mut self, method: SpikeMergingMethod<F>, accept: G) -> usize
+    where
+        G: FnMut(&'_ Self) -> bool,
+    {
+        if method.enabled {
+            self.do_merge_spikes_radius(method.radius, method.interp, accept)
+        } else {
+            0
         }
     }
 
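
Editor's note: a sketch of the new `merge_spikes` interface, with a hypothetical
`fitness` function; the closure may now mutate captured state, since the bound
was relaxed from `Fn` to `FnMut`:

    let mut best = f64::INFINITY;
    let n_merged = μ.merge_spikes(method, |candidate| {
        let fit = fitness(candidate);
        if fit <= best { best = fit; true } else { false }
    });
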
@@ -90,35 +70,37 @@
     /// Calls [`SpikeMerging::merge_spikes`] with `accept` constructed from the composition of
     /// `value` and `fitness`, compared to initial fitness. Returns the last return value of `value`
     /// for a merge accepted by `fitness`. If no merge was accepted, `value` applied to the initial
-    /// `self` is returned.
+    /// `self` is returned, along with the number of merges performed.
     fn merge_spikes_fitness<G, H, V, O>(
         &mut self,
-        method : SpikeMergingMethod<F>,
-        value : G,
-        fitness : H
-    ) -> V
-    where G : Fn(&'_ Self) -> V,
-          H : Fn(&'_ V) -> O,
-          O : PartialOrd {
-        let initial_res = value(self);
-        let initial_fitness = fitness(&initial_res);
-        self.merge_spikes(method, |μ| {
-            let res = value(μ);
-            (fitness(&res) <= initial_fitness).then_some(res)
-        }).unwrap_or(initial_res)
+        method: SpikeMergingMethod<F>,
+        value: G,
+        fitness: H,
+    ) -> (V, usize)
+    where
+        G: Fn(&'_ Self) -> V,
+        H: Fn(&'_ V) -> O,
+        O: PartialOrd,
+    {
+        let mut res = value(self);
+        let initial_fitness = fitness(&res);
+        let count = self.merge_spikes(method, |μ| {
+            res = value(μ);
+            fitness(&res) <= initial_fitness
+        });
+        (res, count)
     }
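
Editor's note: for example, tracking both the final value and the merge count,
with a hypothetical `residual_of` as the value function:

    let (final_residual, n_merged) =
        μ.merge_spikes_fitness(method, |μ| residual_of(μ), |&r| r);
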
 
     /// Attempt to merge spikes that are within radius $ρ$ of each other (unspecified norm).
     ///
-    /// This method implements [`SpikeMerging::merge_spikes`] for
-    /// [`SpikeMergingMethod::HeuristicRadius`]. The closure `accept` and the return value are
-    /// as for that method.
-    fn do_merge_spikes_radius<G, V>(&mut self, ρ : F, accept : G) -> Option<V>
-    where G : Fn(&'_ Self) -> Option<V>;
+    /// This method implements [`SpikeMerging::merge_spikes`].
+    fn do_merge_spikes_radius<G>(&mut self, ρ: F, interp: bool, accept: G) -> usize
+    where
+        G: FnMut(&'_ Self) -> bool;
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float, const N : usize>  DiscreteMeasure<Loc<F, N>, F> {
+impl<F: Float, const N: usize> DiscreteMeasure<Loc<F, N>, F> {
     /// Attempts to merge spikes with indices `i` and `j`.
     ///
     /// This assumes that the weights of the two spikes have already been checked not to be zero.
@@ -126,78 +108,72 @@
-    /// The parameter `res` points to the current “result” for [`SpikeMerging::merge_spikes`].
-    /// If the merge is accepted by `accept` returning a [`Some`], `res` will be replaced by its
-    /// return value.
+    /// The closure `accept` is the acceptance test of [`SpikeMerging::merge_spikes`]: if it
+    /// returns `true`, the modification is kept; otherwise the original weights are restored.
-    fn attempt_merge<G, V>(
+    ///
+    /// Returns the index in `self.spikes` where the merged spike is stored.
+    fn attempt_merge<G>(
         &mut self,
-        res : &mut Option<V>,
-        i : usize,
-        j : usize,
-        accept : &G
-    ) -> bool
-    where G : Fn(&'_ Self) -> Option<V> {
-        let &DeltaMeasure{ x : xi, α : αi } = &self.spikes[i];
-        let &DeltaMeasure{ x : xj, α : αj } = &self.spikes[j];
+        i: usize,
+        j: usize,
+        interp: bool,
+        accept: &mut G,
+    ) -> Option<usize>
+    where
+        G: FnMut(&'_ Self) -> bool,
+    {
+        let &DeltaMeasure { x: xi, α: αi } = &self.spikes[i];
+        let &DeltaMeasure { x: xj, α: αj } = &self.spikes[j];
 
-        // Merge inplace
-        self.spikes[i].α = 0.0;
-        self.spikes[j].α = 0.0;
-        //self.spikes.push(DeltaMeasure{ α : αi + αj, x : (xi + xj)/2.0 });
-        self.spikes.push(DeltaMeasure{ α : αi + αj, x : (xi * αi + xj * αj) / (αi + αj) });
-        match accept(self) {
-            some@Some(..) => {
-                // Merge accepted, update our return value
-                *res = some;
-                // On next iteration process the newly merged spike.
-                //indices[k+1] = self.spikes.len() - 1;
-                true
-            },
-            None => {
+        if interp {
+            // Merge inplace
+            self.spikes[i].α = 0.0;
+            self.spikes[j].α = 0.0;
+            let αia = αi.abs();
+            let αja = αj.abs();
+            self.spikes.push(DeltaMeasure {
+                α: αi + αj,
+                x: (xi * αia + xj * αja) / (αia + αja),
+            });
+            if accept(self) {
+                Some(self.spikes.len() - 1)
+            } else {
                 // Merge not accepted, restore modification
                 self.spikes[i].α = αi;
                 self.spikes[j].α = αj;
                 self.spikes.pop();
-                false
+                None
+            }
+        } else {
+            // Attempt merge inplace, first combination
+            self.spikes[i].α = αi + αj;
+            self.spikes[j].α = 0.0;
+            if accept(self) {
+                // Merge accepted
+                Some(i)
+            } else {
+                // Attempt merge inplace, second combination
+                self.spikes[i].α = 0.0;
+                self.spikes[j].α = αi + αj;
+                if accept(self) {
+                    // Merge accepted
+                    Some(j)
+                } else {
+                    // Merge not accepted, restore modification
+                    self.spikes[i].α = αi;
+                    self.spikes[j].α = αj;
+                    None
+                }
             }
         }
     }
-
-    /*
-    /// Attempts to merge spikes with indices i and j, acceptance through a delta.
-    fn attempt_merge_change<G, V>(
-        &mut self,
-        res : &mut Option<V>,
-        i : usize,
-        j : usize,
-        accept_change : &G
-    ) -> bool
-    where G : Fn(&'_ Self) -> Option<V> {
-        let &DeltaMeasure{ x : xi, α : αi } = &self.spikes[i];
-        let &DeltaMeasure{ x : xj, α : αj } = &self.spikes[j];
-        let δ = DeltaMeasure{ α : αi + αj, x : (xi + xj)/2.0 };
-        let λ = [-self.spikes[i], -self.spikes[j], δ.clone()].into();
-
-        match accept_change(&λ) {
-            some@Some(..) => {
-                // Merge accepted, update our return value
-                *res = some;
-                self.spikes[i].α = 0.0;
-                self.spikes[j].α = 0.0;
-                self.spikes.push(δ);
-                true
-            },
-            None => {
-                false
-            }
-        }
-    }*/
-
 }
 
 /// Sorts a vector of indices into `slice` by `compare`.
 ///
 /// The closure `compare` operators on references to elements of `slice`.
 /// Returns the sorted vector of indices into `slice`.
-pub fn sort_indices_by<V, F>(slice : &[V], mut compare : F) -> Vec<usize>
-where F : FnMut(&V, &V) -> Ordering
+pub fn sort_indices_by<V, F>(slice: &[V], mut compare: F) -> Vec<usize>
+where
+    F: FnMut(&V, &V) -> Ordering,
 {
     let mut indices = Vec::from_iter(0..slice.len());
     indices.sort_by(|&i, &j| compare(&slice[i], &slice[j]));
@@ -205,14 +181,11 @@
 }
 
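
Editor's note: a small usage example of `sort_indices_by`:

    let data = [3.0, 1.0, 2.0];
    let idx = sort_indices_by(&data, |a, b| a.partial_cmp(b).unwrap());
    assert_eq!(idx, vec![1, 2, 0]); // data[1] ≤ data[2] ≤ data[0]
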
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float> SpikeMerging<F> for DiscreteMeasure<Loc<F, 1>, F> {
-
-    fn do_merge_spikes_radius<G, V>(
-        &mut self,
-        ρ : F,
-        accept : G
-    ) -> Option<V>
-    where G : Fn(&'_ Self) -> Option<V> {
+impl<F: Float> SpikeMerging<F> for DiscreteMeasure<Loc<F, 1>, F> {
+    fn do_merge_spikes_radius<G>(&mut self, ρ: F, interp: bool, mut accept: G) -> usize
+    where
+        G: FnMut(&'_ Self) -> bool,
+    {
         // Sort by coordinate into an indexing array.
         let mut indices = sort_indices_by(&self.spikes, |&δ1, &δ2| {
             let &Loc([x1]) = &δ1.x;
@@ -222,34 +195,43 @@
         });
 
         // Initialise result
-        let mut res = None;
+        let mut count = 0;
 
         // Scan consecutive pairs and merge if close enough and accepted by `accept`.
         if indices.len() == 0 {
-            return res
+            return count;
         }
-        for k in 0..(indices.len()-1) {
+        for k in 0..(indices.len() - 1) {
             let i = indices[k];
-            let j = indices[k+1];
-            let &DeltaMeasure{ x : Loc([xi]), α : αi } = &self.spikes[i];
-            let &DeltaMeasure{ x : Loc([xj]), α : αj } = &self.spikes[j];
+            let j = indices[k + 1];
+            let &DeltaMeasure {
+                x: Loc([xi]),
+                α: αi,
+            } = &self.spikes[i];
+            let &DeltaMeasure {
+                x: Loc([xj]),
+                α: αj,
+            } = &self.spikes[j];
             debug_assert!(xi <= xj);
             // If close enough, attempt merging
             if αi != 0.0 && αj != 0.0 && xj <= xi + ρ {
-                if self.attempt_merge(&mut res, i, j, &accept) {
-                    indices[k+1] = self.spikes.len() - 1;
+                if let Some(l) = self.attempt_merge(i, j, interp, &mut accept) {
+                    // For the debug_assert! above not to trigger, the new coordinate
+                    // produced by attempt_merge has to be at most xj.
+                    indices[k + 1] = l;
+                    count += 1
                 }
             }
         }
 
-        res
+        count
     }
 }
 
 /// Orders `δ1` and `δ2` according to the first coordinate.
-fn compare_first_coordinate<F : Float>(
-    δ1 : &DeltaMeasure<Loc<F, 2>, F>,
-    δ2 : &DeltaMeasure<Loc<F, 2>, F>
+fn compare_first_coordinate<F: Float>(
+    δ1: &DeltaMeasure<Loc<F, 2>, F>,
+    δ2: &DeltaMeasure<Loc<F, 2>, F>,
 ) -> Ordering {
     let &Loc([x11, ..]) = &δ1.x;
     let &Loc([x21, ..]) = &δ2.x;
@@ -258,28 +240,32 @@
 }
 
 #[replace_float_literals(F::cast_from(literal))]
-impl<F : Float> SpikeMerging<F> for DiscreteMeasure<Loc<F, 2>, F> {
-
-    fn do_merge_spikes_radius<G, V>(&mut self, ρ : F, accept : G) -> Option<V>
-    where G : Fn(&'_ Self) -> Option<V> {
+impl<F: Float> SpikeMerging<F> for DiscreteMeasure<Loc<F, 2>, F> {
+    fn do_merge_spikes_radius<G>(&mut self, ρ: F, interp: bool, mut accept: G) -> usize
+    where
+        G: FnMut(&'_ Self) -> bool,
+    {
         // Sort by first coordinate into an indexing array.
         let mut indices = sort_indices_by(&self.spikes, compare_first_coordinate);
 
         // Initialise result
-        let mut res = None;
+        let mut count = 0;
         let mut start_scan_2nd = 0;
 
         // Scan in order
         if indices.len() == 0 {
-            return res
+            return count;
         }
-        for k in 0..indices.len()-1 {
+        for k in 0..indices.len() - 1 {
             let i = indices[k];
-            let &DeltaMeasure{ x : Loc([xi1, xi2]), α : αi } = &self[i];
+            let &DeltaMeasure {
+                x: Loc([xi1, xi2]),
+                α: αi,
+            } = &self[i];
 
             if αi == 0.0 {
                 // Nothing to be done if the weight is already zero
-                continue
+                continue;
             }
 
             let mut closest = None;
@@ -289,57 +275,59 @@
             // the _closest_ mergeable spike might have index less than `k` in `indices`, and a
             // merge with it might not have been attempted with this spike if a different closer
             // spike was discovered based on the second coordinate.
-            'scan_2nd: for l in (start_scan_2nd+1)..indices.len() {
+            'scan_2nd: for l in (start_scan_2nd + 1)..indices.len() {
                 if l == k {
                     // Do not attempt to merge a spike with itself
-                    continue
+                    continue;
                 }
                 let j = indices[l];
-                let &DeltaMeasure{ x : Loc([xj1, xj2]), α : αj } = &self[j];
+                let &DeltaMeasure {
+                    x: Loc([xj1, xj2]),
+                    α: αj,
+                } = &self[j];
 
                 if xj1 < xi1 - ρ {
                     // Spike `j = indices[l]` has too low first coordinate. Update starting index
                     // for next iteration, and continue scanning.
                     start_scan_2nd = l;
-                    continue 'scan_2nd
+                    continue 'scan_2nd;
                 } else if xj1 > xi1 + ρ {
                     // Break out: spike `j = indices[l]` has already too high first coordinate, no
                     // more close enough spikes can be found due to the sorting of `indices`.
-                    break 'scan_2nd
+                    break 'scan_2nd;
                 }
 
                 // If also second coordinate is close enough, attempt merging if closer than
                 // previously discovered mergeable spikes.
-                let d2 = (xi2-xj2).abs();
+                let d2 = (xi2 - xj2).abs();
                 if αj != 0.0 && d2 <= ρ {
-                    let r1 = xi1-xj1;
-                    let d = (d2*d2 + r1*r1).sqrt();
+                    let r1 = xi1 - xj1;
+                    let d = (d2 * d2 + r1 * r1).sqrt();
                     match closest {
                         None => closest = Some((l, j, d)),
                         Some((_, _, r)) if r > d => closest = Some((l, j, d)),
-                        _ => {},
+                        _ => {}
                     }
                 }
             }
 
             // Attempt merging closest close-enough spike
             if let Some((l, j, _)) = closest {
-                if self.attempt_merge(&mut res, i, j, &accept) {
+                if let Some(n) = self.attempt_merge(i, j, interp, &mut accept) {
                     // If merge was successful, make a new spike candidate for merging.
-                    indices[l] = self.spikes.len() - 1;
-                    let compare = |i, j| compare_first_coordinate(&self.spikes[i],
-                                                                  &self.spikes[j]);
+                    indices[l] = n;
+                    count += 1;
+                    let compare = |i, j| compare_first_coordinate(&self.spikes[i], &self.spikes[j]);
                     // Re-sort relevant range of indices
                     if l < k {
                         indices[l..k].sort_by(|&i, &j| compare(i, j));
                     } else {
-                        indices[k+1..=l].sort_by(|&i, &j| compare(i, j));
+                        indices[k + 1..=l].sort_by(|&i, &j| compare(i, j));
                     }
                 }
             }
         }
 
-        res
+        count
     }
 }
-
--- a/src/pdps.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/pdps.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -6,8 +6,7 @@
  * Valkonen T. - _Proximal methods for point source localisation_,
    [arXiv:2212.02991](https://arxiv.org/abs/2212.02991).
 
-The main routine is [`pointsource_pdps`]. It is based on specilisatinn of
-[`generic_pointsource_fb_reg`] through relevant [`FBSpecialisation`] implementations.
+The main routine is [`pointsource_pdps_reg`].
 Both norm-2-squared and norm-1 data terms are supported. That is, implemented are solvers for
 <div>
 $$
@@ -37,10 +36,6 @@
 For $F_0(y)=\frac{1}{2}\|y\|_2^2$ the second part reads $y = Aμ -b$.
 For $F_0(y)=\|y\|_1$ the second part reads $y ∈ ∂\|·\|_1(Aμ - b)$.
 </p>
-
-Based on zero initialisation for $μ$, we use the [`Subdifferentiable`] trait to make an
-initialisation corresponding to the second part of the optimality conditions.
-In the algorithm itself, standard proximal steps are taking with respect to $F\_0^* + ⟨b, ·⟩$.
 */
 
 use numeric_literals::replace_float_literals;
@@ -48,37 +43,23 @@
 use nalgebra::DVector;
 use clap::ValueEnum;
 
-use alg_tools::iterate:: AlgIteratorFactory;
-use alg_tools::sets::Cube;
-use alg_tools::loc::Loc;
+use alg_tools::iterate::AlgIteratorFactory;
 use alg_tools::euclidean::Euclidean;
+use alg_tools::linops::Mapping;
 use alg_tools::norms::{
-    L1, Linfinity,
-    Projection, Norm,
+    Linfinity,
+    Projection,
 };
-use alg_tools::bisection_tree::{
-    BTFN,
-    PreBTFN,
-    Bounds,
-    BTNodeLookup,
-    BTNode,
-    BTSearch,
-    P2Minimise,
-    SupportGenerator,
-    LocalAnalysis,
-};
-use alg_tools::mapping::RealMapping;
+use alg_tools::mapping::{RealMapping, Instance};
 use alg_tools::nalgebra_support::ToNalgebraRealField;
 use alg_tools::linops::AXPY;
 
 use crate::types::*;
-use crate::measures::DiscreteMeasure;
-use crate::measures::merging::{
-    SpikeMerging,
-};
-use crate::forward_model::ForwardModel;
-use crate::seminorms::{
-    DiscreteMeasureOp, Lipschitz
+use crate::measures::{DiscreteMeasure, RNDM};
+use crate::measures::merging::SpikeMerging;
+use crate::forward_model::{
+    ForwardModel,
+    AdjointProductBoundedBy,
 };
 use crate::plot::{
     SeqPlotter,
@@ -86,12 +67,21 @@
     PlotLookup
 };
 use crate::fb::{
+    postprocess,
+    prune_with_stats
+};
+pub use crate::prox_penalty::{
     FBGenericConfig,
-    FBSpecialisation,
-    generic_pointsource_fb_reg,
-    RegTerm,
+    ProxPenalty
 };
-use crate::regularisation::NonnegRadonRegTerm;
+use crate::regularisation::RegTerm;
+use crate::dataterm::{
+    DataTerm,
+    L2Squared,
+    L1
+};
+use crate::measures::merging::SpikeMergingMethod;
+
 
 /// Acceleration
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, ValueEnum, Debug)]
@@ -107,7 +97,30 @@
     Full
 }
 
-/// Settings for [`pointsource_pdps`].
+#[replace_float_literals(F::cast_from(literal))]
+impl Acceleration {
+    /// PDPS parameter acceleration. Updates τ and σ and returns ω.
+    /// This uses dual strong convexity, not primal.
+    fn accelerate<F : Float>(self, τ : &mut F, σ : &mut F, γ : F) -> F {
+        match self {
+            Acceleration::None => 1.0,
+            Acceleration::Partial => {
+                let ω = 1.0 / (1.0 + γ * (*σ)).sqrt();
+                *σ *= ω;
+                *τ /= ω;
+                ω
+            },
+            Acceleration::Full => {
+                let ω = 1.0 / (1.0 + 2.0 * γ * (*σ)).sqrt();
+                *σ *= ω;
+                *τ /= ω;
+                ω
+            },
+        }
+    }
+}
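
Editor's note: one `Partial` step with γ = 1 for illustration (values
arbitrary). Starting from τ = 0.1 and σ = 0.99, we get ω = 1/√(1 + γσ) ≈ 0.709,
after which σ ≈ 0.702 and τ ≈ 0.141. The product τσ is invariant, since
(τ/ω)·(σω) = τσ: acceleration trades dual step length for primal step length.

    let (mut τ, mut σ) = (0.1_f64, 0.99_f64);
    let ω = Acceleration::Partial.accelerate(&mut τ, &mut σ, 1.0);
    assert!((τ * σ - 0.099).abs() < 1e-9);
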
+
+/// Settings for [`pointsource_pdps_reg`].
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
 #[serde(default)]
 pub struct PDPSConfig<F : Float> {
@@ -118,176 +131,77 @@
     /// Accelerate if available
     pub acceleration : Acceleration,
     /// Generic parameters
-    pub insertion : FBGenericConfig<F>,
+    pub generic : FBGenericConfig<F>,
 }
 
 #[replace_float_literals(F::cast_from(literal))]
 impl<F : Float> Default for PDPSConfig<F> {
     fn default() -> Self {
-        let τ0 = 0.5;
+        let τ0 = 5.0;
         PDPSConfig {
             τ0,
             σ0 : 0.99/τ0,
             acceleration : Acceleration::Partial,
-            insertion : Default::default()
+            generic : FBGenericConfig {
+                merging : SpikeMergingMethod { enabled : true, ..Default::default() },
+                .. Default::default()
+            },
         }
     }
 }
 
-/// Trait for subdifferentiable objects
-pub trait Subdifferentiable<F : Float, V, U=V> {
-    /// Calculate some subdifferential at `x`
-    fn some_subdifferential(&self, x : V) -> U;
+/// Trait for data terms for the PDPS
+#[replace_float_literals(F::cast_from(literal))]
+pub trait PDPSDataTerm<F : Float, V, const N : usize> : DataTerm<F, V, N> {
+    /// Calculate some subdifferential at `x` for the conjugate
+    fn some_subdifferential(&self, x : V) -> V;
+
+    /// Factor of strong convexity of the conjugate
+    #[inline]
+    fn factor_of_strong_convexity(&self) -> F {
+        0.0
+    }
+
+    /// Perform dual update
+    fn dual_update(&self, _y : &mut V, _y_prev : &V, _σ : F);
 }
 
-/// Type for indicating norm-2-squared data fidelity.
-pub struct L2Squared;
 
-impl<F : Float, V : Euclidean<F>> Subdifferentiable<F, V> for L2Squared {
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, V, const N : usize> PDPSDataTerm<F, V, N>
+for L2Squared
+where
+    F : Float,
+    V :  Euclidean<F> + AXPY<F>,
+    for<'b> &'b V : Instance<V>,
+{
     fn some_subdifferential(&self, x : V) -> V { x }
+
+    fn factor_of_strong_convexity(&self) -> F {
+        1.0
+    }
+
+    #[inline]
+    fn dual_update(&self, y : &mut V, y_prev : &V, σ : F) {
+        y.axpy(1.0 / (1.0 + σ), y_prev, σ / (1.0 + σ));
+    }
 }
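
Editor's note: on entry to `dual_update`, `y` holds $A[(1+ω)μ^{k+1} − ωμ^k] − b$
(see the main loop of `pointsource_pdps_reg` below). The update thus computes
$y^{k+1} = (y^k + σ(A\bar μ − b))/(1+σ)$, the proximal step with respect to
$F_0^* + ⟨b, ·⟩$ for $F_0^*(y) = \frac{1}{2}\|y\|_2^2$. This function is
strongly convex with factor 1, which is what `factor_of_strong_convexity`
reports, and what enables the dual acceleration above.
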
 
-impl<F : Float + nalgebra::RealField> Subdifferentiable<F, DVector<F>> for L1 {
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float + nalgebra::RealField, const N : usize>
+PDPSDataTerm<F, DVector<F>, N>
+for L1 {
     fn some_subdifferential(&self, mut x : DVector<F>) -> DVector<F> {
         // nalgebra sucks for providing second copies of the same stuff that's elsewhere as well.
         x.iter_mut()
          .for_each(|v| if *v != F::ZERO { *v = *v/<F as NumTraitsFloat>::abs(*v) });
         x
     }
-}
 
-/// Specialisation of [`generic_pointsource_fb_reg`] to PDPS.
-pub struct PDPS<
-    'a,
-    F : Float + ToNalgebraRealField,
-    A : ForwardModel<Loc<F, N>, F>,
-    D,
-    const N : usize
-> {
-    /// The data
-    b : &'a A::Observable,
-    /// The forward operator
-    opA : &'a A,
-    /// Primal step length
-    τ : F,
-    // Dual step length
-    σ : F,
-    /// Whether acceleration should be applied (if data term supports)
-    acceleration : Acceleration,
-    /// The dataterm. Only used by the type system.
-    _dataterm : D,
-    /// Previous dual iterate.
-    y_prev : A::Observable,
-}
-
-/// Implementation of [`FBSpecialisation`] for μPDPS with norm-2-squared data fidelity.
-#[replace_float_literals(F::cast_from(literal))]
-impl<
-    'a,
-    F : Float + ToNalgebraRealField,
-    A : ForwardModel<Loc<F, N>, F>,
-    const N : usize
-> FBSpecialisation<F, A::Observable, N> for PDPS<'a, F, A, L2Squared, N>
-where for<'b> &'b A::Observable : std::ops::Add<A::Observable, Output=A::Observable> {
-
-    fn update(
-        &mut self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
-        μ_base : &DiscreteMeasure<Loc<F, N>, F>
-    ) -> (A::Observable, Option<F>) {
-        let σ = self.σ;
-        let τ = self.τ;
-        let ω = match self.acceleration {
-            Acceleration::None => 1.0,
-            Acceleration::Partial => {
-                let ω = 1.0 / (1.0 + σ).sqrt();
-                self.σ = σ * ω;
-                self.τ = τ / ω;
-                ω
-            },
-            Acceleration::Full => {
-                let ω = 1.0 / (1.0 + 2.0 * σ).sqrt();
-                self.σ = σ * ω;
-                self.τ = τ / ω;
-                ω
-            },
-        };
-
-        μ.prune();
-
-        let mut y = self.b.clone();
-        self.opA.gemv(&mut y, 1.0 + ω, μ, -1.0);
-        self.opA.gemv(&mut y, -ω, μ_base, 1.0);
-        y.axpy(1.0 / (1.0 + σ), &self.y_prev,  σ / (1.0 + σ));
-        self.y_prev.copy_from(&y);
-
-        (y, Some(self.τ))
-    }
-
-    fn calculate_fit(
-        &self,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
-        _y : &A::Observable
-    ) -> F {
-        self.calculate_fit_simple(μ)
-    }
-
-    fn calculate_fit_simple(
-        &self,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
-    ) -> F {
-        let mut residual = self.b.clone();
-        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
-        residual.norm2_squared_div2()
-    }
-}
-
-/// Implementation of [`FBSpecialisation`] for μPDPS with norm-1 data fidelity.
-#[replace_float_literals(F::cast_from(literal))]
-impl<
-    'a,
-    F : Float + ToNalgebraRealField,
-    A : ForwardModel<Loc<F, N>, F>,
-    const N : usize
-> FBSpecialisation<F, A::Observable, N> for PDPS<'a, F, A, L1, N>
-where A::Observable : Projection<F, Linfinity> + Norm<F, L1>,
-      for<'b> &'b A::Observable : std::ops::Add<A::Observable, Output=A::Observable> {
-    fn update(
-        &mut self,
-        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
-        μ_base : &DiscreteMeasure<Loc<F, N>, F>
-    ) -> (A::Observable, Option<F>) {
-        let σ = self.σ;
-
-        μ.prune();
-
-        //let ȳ = self.opA.apply(μ) * 2.0 - self.opA.apply(μ_base);
-        //*y = proj_{[-1,1]}(&self.y_prev + (ȳ - self.b) * σ)
-        let mut y = self.y_prev.clone();
-        self.opA.gemv(&mut y, 2.0 * σ, μ, 1.0);
-        self.opA.gemv(&mut y, -σ, μ_base, 1.0);
-        y.axpy(-σ, self.b, 1.0);
+    #[inline]
+    fn dual_update(&self, y : &mut DVector<F>, y_prev : &DVector<F>, σ : F) {
+        y.axpy(1.0, y_prev, σ);
         y.proj_ball_mut(1.0, Linfinity);
-        self.y_prev.copy_from(&y);
-
-        (y, None)
-    }
-
-    fn calculate_fit(
-        &self,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
-        _y : &A::Observable
-    ) -> F {
-        self.calculate_fit_simple(μ)
-    }
-
-    fn calculate_fit_simple(
-        &self,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
-    ) -> F {
-        let mut residual = self.b.clone();
-        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
-        residual.norm(L1)
     }
 }
 
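
Editor's note: for the norm-1 data term the proximal map of $F_0^*$, the
indicator of the $\|·\|_∞$ unit ball, is exactly the projection performed by
`proj_ball_mut`, so the dual step is
$y^{k+1} = \mathop{proj}_{\|·\|_∞ ≤ 1}(y^k + σ(A\bar μ − b))$. No strong
convexity is available here, so the default `factor_of_strong_convexity` of
zero applies, and the acceleration step degenerates to ω = 1.
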
@@ -304,93 +218,106 @@
 ///
 /// Returns the final iterate.
 #[replace_float_literals(F::cast_from(literal))]
-pub fn pointsource_pdps_reg<'a, F, I, A, GA, 𝒟, BTA, G𝒟, S, K, D, Reg, const N : usize>(
-    opA : &'a A,
-    b : &'a A::Observable,
+pub fn pointsource_pdps_reg<F, I, A, D, Reg, P, const N : usize>(
+    opA : &A,
+    b : &A::Observable,
     reg : Reg,
-    op𝒟 : &'a 𝒟,
-    config : &PDPSConfig<F>,
+    prox_penalty : &P,
+    pdpsconfig : &PDPSConfig<F>,
     iterator : I,
-    plotter : SeqPlotter<F, N>,
+    mut plotter : SeqPlotter<F, N>,
     dataterm : D,
-) -> DiscreteMeasure<Loc<F, N>, F>
-where F : Float + ToNalgebraRealField,
-      I : AlgIteratorFactory<IterInfo<F, N>>,
-      for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>
-                                  + std::ops::Add<A::Observable, Output=A::Observable>,
-                                  //+ std::ops::Mul<F, Output=A::Observable>, // <-- FIXME: compiler overflow
-      A::Observable : std::ops::MulAssign<F>,
-      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
-          + Lipschitz<𝒟, FloatType=F>,
-      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
-      G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
-      𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
-      𝒟::Codomain : RealMapping<F, N>,
-      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
-      PlotLookup : Plotting<N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
-      PDPS<'a, F, A, D, N> : FBSpecialisation<F, A::Observable, N>,
-      D : Subdifferentiable<F, A::Observable>,
-      Reg : RegTerm<F, N> {
+) -> RNDM<F, N>
+where
+    F : Float + ToNalgebraRealField,
+    I : AlgIteratorFactory<IterInfo<F, N>>,
+    A : ForwardModel<RNDM<F, N>, F>
+        + AdjointProductBoundedBy<RNDM<F, N>, P, FloatType=F>,
+    A::PreadjointCodomain : RealMapping<F, N>,
+    for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable> + Instance<A::Observable>,
+    PlotLookup : Plotting<N>,
+    RNDM<F, N> : SpikeMerging<F>,
+    D : PDPSDataTerm<F, A::Observable, N>,
+    Reg : RegTerm<F, N>,
+    P : ProxPenalty<F, A::PreadjointCodomain, Reg, N>,
+{
+
+    // Check parameters
+    assert!(pdpsconfig.τ0 > 0.0 &&
+            pdpsconfig.σ0 > 0.0 &&
+            pdpsconfig.τ0 * pdpsconfig.σ0 <= 1.0,
+            "Invalid step length parameters");
+
+    // Set up parameters
+    let config = &pdpsconfig.generic;
+    let l = opA.adjoint_product_bound(prox_penalty).unwrap().sqrt();
+    let mut τ = pdpsconfig.τ0 / l;
+    let mut σ = pdpsconfig.σ0 / l;
+    let γ = dataterm.factor_of_strong_convexity();
+
+    // We multiply the tolerance by τ since, compared to the conditional gradient approach,
+    // our tolerance-dependent subproblems are scaled by τ.
+    let tolerance = config.tolerance * τ * reg.tolerance_scaling();
+    let mut ε = tolerance.initial();
+
+    // Initialise iterates
+    let mut μ = DiscreteMeasure::new();
+    let mut y = dataterm.some_subdifferential(-b);
+    let mut y_prev = y.clone();
+    let full_stats = |μ : &RNDM<F, N>, ε, stats| IterInfo {
+        value : dataterm.calculate_fit_op(μ, opA, b) + reg.apply(μ),
+        n_spikes : μ.len(),
+        ε,
+        // postprocessing: config.postprocessing.then(|| μ.clone()),
+        .. stats
+    };
+    let mut stats = IterInfo::new();
 
-    let y = dataterm.some_subdifferential(-b);
-    let l = opA.lipschitz_factor(&op𝒟).unwrap().sqrt();
-    let τ = config.τ0 / l;
-    let σ = config.σ0 / l;
+    // Run the algorithm
+    for state in iterator.iter_init(|| full_stats(&μ, ε, stats.clone())) {
+        // Calculate smooth part of surrogate model.
+        let mut τv = opA.preadjoint().apply(y * τ);
+
+        // Save current base point
+        let μ_base = μ.clone();
+        
+        // Insert and reweigh
+        let (maybe_d, _within_tolerances) = prox_penalty.insert_and_reweigh(
+            &mut μ, &mut τv, &μ_base, None,
+            τ, ε,
+            config, &reg, &state, &mut stats
+        );
+
+        // Prune and possibly merge spikes
+        if config.merge_now(&state) {
+            stats.merged += prox_penalty.merge_spikes_no_fitness(
+                &mut μ, &mut τv, &μ_base, None, τ, ε, config, &reg,
+            );
+        }
+        stats.pruned += prune_with_stats(&mut μ);
 
-    let pdps = PDPS {
-        b,
-        opA,
-        τ,
-        σ,
-        acceleration : config.acceleration,
-        _dataterm : dataterm,
-        y_prev : y.clone(),
-    };
+        // Update step length parameters
+        let ω = pdpsconfig.acceleration.accelerate(&mut τ, &mut σ, γ);
+
+        // Do dual update
+        y = b.clone();                          // y = b
+        opA.gemv(&mut y, 1.0 + ω, &μ, -1.0);    // y = A[(1+ω)μ^{k+1}]-b
+        opA.gemv(&mut y, -ω, &μ_base, 1.0);     // y = A[(1+ω)μ^{k+1} - ω μ^k]-b
+        dataterm.dual_update(&mut y, &y_prev, σ);
+        y_prev.copy_from(&y);
 
-    generic_pointsource_fb_reg(
-        opA, reg, op𝒟, τ, &config.insertion, iterator, plotter, y, pdps
-    )
+        // Give statistics if requested
+        let iter = state.iteration();
+        stats.this_iters += 1;
+
+        state.if_verbose(|| {
+            plotter.plot_spikes(iter, maybe_d.as_ref(), Some(&τv), &μ);
+            full_stats(&μ, ε, std::mem::replace(&mut stats, IterInfo::new()))
+        });
+
+        ε = tolerance.update(ε, iter);
+    }
+
+    postprocess(μ, config, dataterm, opA, b)
 }
 
-//
-// Deprecated interfaces
-//
-
-#[deprecated(note = "Use `pointsource_pdps_reg`")]
-pub fn pointsource_pdps<'a, F, I, A, GA, 𝒟, BTA, G𝒟, S, K, D, const N : usize>(
-    opA : &'a A,
-    b : &'a A::Observable,
-    α : F,
-    op𝒟 : &'a 𝒟,
-    config : &PDPSConfig<F>,
-    iterator : I,
-    plotter : SeqPlotter<F, N>,
-    dataterm : D,
-) -> DiscreteMeasure<Loc<F, N>, F>
-where F : Float + ToNalgebraRealField,
-      I : AlgIteratorFactory<IterInfo<F, N>>,
-      for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>
-                                  + std::ops::Add<A::Observable, Output=A::Observable>,
-      A::Observable : std::ops::MulAssign<F>,
-      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
-      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
-          + Lipschitz<𝒟, FloatType=F>,
-      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
-      G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
-      𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
-      𝒟::Codomain : RealMapping<F, N>,
-      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
-      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
-      Cube<F, N>: P2Minimise<Loc<F, N>, F>,
-      PlotLookup : Plotting<N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
-      PDPS<'a, F, A, D, N> : FBSpecialisation<F, A::Observable, N>,
-      D : Subdifferentiable<F, A::Observable> {
-
-    pointsource_pdps_reg(opA, b, NonnegRadonRegTerm(α), op𝒟, config, iterator, plotter, dataterm)
-}
--- a/src/plot.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/plot.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -1,29 +1,14 @@
 //! Plotting helper utilities
 
 use numeric_literals::replace_float_literals;
-use std::io::Write;
-use image::{
-    ImageFormat,
-    ImageBuffer,
-    Rgb
-};
-use itertools::izip;
-use colorbrewer::Palette as CbPalette;
-
+use serde::Serialize;
 use alg_tools::types::*;
 use alg_tools::lingrid::LinGrid;
 use alg_tools::mapping::RealMapping;
 use alg_tools::loc::Loc;
-use alg_tools::bisection_tree::Bounds;
-use alg_tools::maputil::map4;
 use alg_tools::tabledump::write_csv;
 use crate::measures::*;
 
-/// Default RGB ramp from [`colorbrewer`].
-///
-/// This is a tuple of parameters to [`colorbrewer::get_color_ramp`].
-const RAMP : (CbPalette, u32) = (CbPalette::RdBu, 11);
-
 /// Helper trait for implementing dimension-dependent plotting routines.
 pub trait Plotting<const N : usize> {
     /// Plot several mappings and a discrete measure into a file.
@@ -32,13 +17,10 @@
         T1 : RealMapping<F, N>,
         T2 : RealMapping<F, N>
     > (
-        g_explanation : String,
-        g : &T1,
-        ω_explanation : String,
+        g : Option<&T1>,
         ω : Option<&T2>,
         grid : LinGrid<F, N>,
-        bnd : Option<Bounds<F>>,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        μ : &RNDM<F, N>,
         filename : String,
     );
 
@@ -50,78 +32,57 @@
         g : &T1,
         grid : LinGrid<F, N>,
         filename : String,
-        explanation : String
     );
 }
 
 /// Helper type for looking up a [`Plotting`] based on dimension.
 pub struct PlotLookup;
 
+#[derive(Serialize)]
+struct CSVHelper1<F : Float> {
+    x : F,
+    f : F,
+}
+
+#[derive(Serialize)]
+struct CSVHelper1_2<F : Float>{
+    x : F,
+    g : Option<F>,
+    omega : Option<F>
+}
+
+#[derive(Serialize)]
+struct CSVSpike1<F : Float> {
+    x : F,
+    alpha : F,
+}
+
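
Editor's note: assuming `write_csv` emits the serde field names as a header row
and serialises `None` as an empty field, a `…_functions.csv` written from
`CSVHelper1_2` records would look roughly like (values invented):

    x,g,omega
    0.0,0.25,
    0.1,0.31,

The plotting itself is thus left to external tools, replacing the removed
SVG/PNG rendering.
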
 impl Plotting<1> for PlotLookup {
     fn plot_into_file_spikes<
         F : Float,
         T1 : RealMapping<F, 1>,
         T2 : RealMapping<F, 1>
     > (
-        g_explanation : String,
-        g : &T1,
-        ω_explanation : String,
+        g0 : Option<&T1>,
         ω0 : Option<&T2>,
         grid : LinGrid<F, 1>,
-        bnd0 : Option<Bounds<F>>,
         μ : &DiscreteMeasure<Loc<F, 1>, F>,
         filename : String,
     ) {
-        let start = grid.start[0].as_();
-        let end = grid.end[0].as_();
-        let m = μ.iter_masses().fold(F::ZERO, |m, α| m.max(α));
-        let s = μ.iter_masses().fold(F::ZERO, |m, α| m.add(α));
-        let mut spike_scale = F::ONE;
-
-        let mut plotter = poloto::plot(
-            "f", "x",
-            format!("f(x); spike max={:.4}, n={}, ∑={:.4}", m, μ.len(), s)
-        ).move_into();
-
-        if let Some(ω) = ω0 {
-            let graph_ω = grid.into_iter().map(|x@Loc([x0]) : Loc<F, 1>| {
-                [x0.as_(), ω.apply(&x).as_()]
-            });
-            plotter.line(ω_explanation.as_str(), graph_ω.clone());
-            // let csv_f = format!("{}.txt", filename);
-            // write_csv(graph_ω, csv_f).expect("CSV save error");
-        }
-
-        let graph_g = grid.into_iter().map(|x@Loc([x0]) : Loc<F, 1>| {
-            [x0.as_(), g.apply(&x).as_()]
+        let data = grid.into_iter().map(|p@Loc([x]) : Loc<F, 1>| CSVHelper1_2 {
+            x,
+            g : g0.map(|g| g.apply(&p)),
+            omega : ω0.map(|ω| ω.apply(&p))
         });
-        plotter.line(g_explanation.as_str(), graph_g.clone());
-        // let csv_f = format!("{}.txt", filename);
-        // write_csv(graph_g, csv_f).expect("CSV save error");
-
-        bnd0.map(|bnd| {
-            let upperb = bnd.upper().as_();
-            let lowerb =  bnd.lower().as_();
-            let upper : [[f64; 2]; 2] = [[start, upperb], [end, upperb]];
-            let lower = [[start, lowerb], [end, lowerb]];
-            spike_scale *= bnd.upper();
+        let csv_f = format!("{}_functions.csv", filename);
+        write_csv(data, csv_f).expect("CSV save error");
 
-            plotter.line("upper bound", upper)
-                   .line("lower bound", lower)
-                   .ymarker(lowerb)
-                   .ymarker(upperb);
+        let spikes = μ.iter_spikes().map(|δ| {
+            let Loc([x]) = δ.x;
+            CSVSpike1 { x, alpha : δ.α }
         });
-
-        for &DeltaMeasure{ α, x : Loc([x]) } in μ.iter_spikes() {
-            let spike = [[x.as_(), 0.0], [x.as_(), (α/m * spike_scale).as_()]];
-            plotter.line("", spike);
-        }
-
-        let svg = format!("{}", poloto::disp(|a| poloto::simple_theme(a, plotter)));
-
-        std::fs::File::create(filename + ".svg").and_then(|mut file|
-            file.write_all(svg.as_bytes())
-        ).expect("SVG save error");
+        let csv_f = format!("{}_spikes.csv", filename);
+        write_csv(spikes, csv_f).expect("CSV save error");
     }
 
     fn plot_into_file<
@@ -131,150 +92,37 @@
         g : &T1,
         grid : LinGrid<F, 1>,
         filename : String,
-        explanation : String
     ) {
-        let graph_g = grid.into_iter().map(|x@Loc([x0]) : Loc<F, 1>| {
-            [x0.as_(), g.apply(&x).as_()]
+        let data = grid.into_iter().map(|p@Loc([x]) : Loc<F, 1>| CSVHelper1 {
+            x,
+            f : g.apply(&p),
         });
-
-        let plotter: poloto::Plotter<'_, float, float> = poloto::plot("f", "x", "f(x)")
-            .line(explanation.as_str(), graph_g.clone())
-            .move_into();
-
-        let svg = format!("{}", poloto::disp(|a| poloto::simple_theme(a, plotter)));
-
-        let svg_f = format!("{}.svg", filename);
-        std::fs::File::create(svg_f).and_then(|mut file|
-            file.write_all(svg.as_bytes())
-        ).expect("SVG save error");
-
         let csv_f = format!("{}.txt", filename);
-        write_csv(graph_g, csv_f).expect("CSV save error");
+        write_csv(data, csv_f).expect("CSV save error");
     }
 
 }
 
-/// Convert $[0, 1] ∈ F$ to $\\\{0, …, M\\\} ∈ F$ where $M=$`F::RANGE_MAX`.
-#[inline]
-fn scale_uint<F, U>(v : F) -> U
-where F : Float + CastFrom<U> + num_traits::cast::AsPrimitive<U>,
-      U : Unsigned {
-    (v*F::cast_from(U::RANGE_MAX)).as_()
-}
-
-/// Convert $[a, b] ∈ F$ to $\\\{0, …, M\\\} ∈ F$ where $M=$`F::RANGE_MAX`.
-#[replace_float_literals(F::cast_from(literal))]
-#[inline]
-fn scale_range_uint<F, U>(v : F, &Bounds(a, b) : &Bounds<F>) -> U
-where F : Float + CastFrom<U> + num_traits::cast::AsPrimitive<U>,
-      U : Unsigned {
-    debug_assert!(a < b);
-    scale_uint(((v - a)/(b - a)).max(0.0).min(1.0))
-}
-
-
-/// Sample a mapping on a grid.
-///
-/// Returns a vector of values as well as upper and lower bounds of the values.
-fn rawdata_and_range<F, T>(grid : &LinGrid<F, 2>, g :&T) -> (Vec<F>, Bounds<F>)
-where F : Float,
-      T : RealMapping<F, 2> {
-    let rawdata : Vec<F> = grid.into_iter().map(|x| g.apply(&x)).collect();
-    let range = rawdata.iter()
-                        .map(|&v| Bounds(v, v))
-                        .reduce(|b1, b2| b1.common(&b2))
-                        .unwrap();
-    (rawdata, range)
+#[derive(Serialize)]
+struct CSVHelper2<F : Float> {
+    x : F,
+    y : F,
+    f : F,
 }
 
-/*fn to_range<'a, F, U>(rawdata : &'a Vec<F>,  range : &'a Bounds<F>)
--> std::iter::Map<std::slice::Iter<'a, F>, impl FnMut(&'a F) -> U>
-where F : Float + CastFrom<U> + num_traits::cast::AsPrimitive<U>,
-      U : Unsigned {
-    rawdata.iter().map(move |&v| scale_range_uint(v, range))
-}*/
-
-/// Convert a scalar value to an RGB triplet.
-///
-/// Converts the value `v` supposed to be within the range `[a, b]` to an rgb value according
-/// to the given `ramp` of equally-spaced rgb interpolation points.
-#[replace_float_literals(F::cast_from(literal))]
-fn one_to_ramp<F, U>(
-    &Bounds(a, b) : &Bounds<F>,
-    ramp : &Vec<Loc<F, 3>>,
-    v : F,
-) -> Rgb<U>
-where F : Float + CastFrom<U> + num_traits::cast::AsPrimitive<U>,
-      U : Unsigned {
-
-    let n = ramp.len() - 1;
-    let m = F::cast_from(U::RANGE_MAX);
-    let ramprange = move |v : F| {let m : usize = v.as_(); m.min(n).max(0) };
-
-    let w = F::cast_from(n) * (v - a) / (b - a);  // convert [0, 1] to [0, n]
-    let (l, u) = (w.floor(), w.ceil());           // Find closest integers
-    let (rl, ru) = (ramprange(l), ramprange(u));
-    let (cl, cu) = (ramp[rl], ramp[ru]);          // Get corresponding colours
-    let λ = match rl==ru {                        // Interpolation factor
-        true => 0.0,
-        false => (u - w) / (u - l),
-    };
-    let Loc(rgb) = cl * λ + cu * (1.0 - λ);       // Interpolate
-
-    Rgb(rgb.map(|v| (v * m).round().min(m).max(0.0).as_()))
+#[derive(Serialize)]
+struct CSVHelper2_2<F : Float>{
+    x : F,
+    y : F,
+    g : Option<F>,
+    omega : Option<F>
 }
 
-/// Convert a an iterator over scalar values to an iterator over RGB triplets.
-///
-/// The conversion is that performed by [`one_to_ramp`].
-#[replace_float_literals(F::cast_from(literal))]
-fn to_ramp<'a, F, U, I>(
-    bounds : &'a Bounds<F>,
-    ramp : &'a Vec<Loc<F, 3>>,
-    iter : I,
-) -> std::iter::Map<I, impl FnMut(F) -> Rgb<U> + 'a>
-where F : Float + CastFrom<U> + num_traits::cast::AsPrimitive<U>,
-      U : Unsigned,
-      I : Iterator<Item = F> + 'a {
-    iter.map(move |v| one_to_ramp(bounds, ramp, v))
-}
-
-/// Convert a [`colorbrewer`] sepcification to a ramp of rgb triplets.
-fn get_ramp<F : Float>((palette, nb) : (CbPalette, u32)) -> Vec<Loc<F, 3>> {
-    let m = F::cast_from(u8::MAX);
-    colorbrewer::get_color_ramp(palette, nb)
-                 .expect("Invalid colorbrewer ramp")
-                 .into_iter()
-                 .map(|rgb::RGB{r, g, b}| {
-                    [r, g, b].map(|c| F::cast_from(c) / m).into()
-                 }).collect()
-}
-
-/// Perform hue shifting of an RGB value.
-///
-// The hue `ω` is in radians.
-#[replace_float_literals(F::cast_from(literal))]
-fn hueshift<F, U>(ω : F, Rgb([r_in, g_in, b_in]) : Rgb<U>) -> Rgb<U>
-where F : Float + CastFrom<U>,
-      U : Unsigned {
-    let m = F::cast_from(U::RANGE_MAX);
-    let r = F::cast_from(r_in) / m;
-    let g = F::cast_from(g_in) / m;
-    let b = F::cast_from(b_in) / m;
-    let u = ω.cos();
-    let w = ω.sin();
-
-    let nr = (0.299 + 0.701*u + 0.168*w) * r
-              + (0.587 - 0.587*u + 0.330*w) * g
-              + (0.114 - 0.114*u - 0.497*w) * b;
-    let ng = (0.299 - 0.299*u - 0.328*w) * r
-              + (0.587 + 0.413*u + 0.035*w) * g
-              + (0.114 - 0.114*u + 0.292*w) *b;
-    let nb = (0.299 - 0.3*u + 1.25*w) * r
-              + (0.587 - 0.588*u - 1.05*w) * g
-              + (0.114 + 0.886*u - 0.203*w) * b;
-
-    Rgb([nr, ng, nb].map(scale_uint))
+#[derive(Serialize)]
+struct CSVSpike2<F : Float> {
+    x : F,
+    y : F,
+    alpha : F,
 }
 
 
@@ -285,55 +133,27 @@
         T1 : RealMapping<F, 2>,
         T2 : RealMapping<F, 2>
     > (
-        _g_explanation : String,
-        g : &T1,
-        _ω_explanation : String,
+        g0 : Option<&T1>,
         ω0 : Option<&T2>,
         grid : LinGrid<F, 2>,
-        _bnd0 : Option<Bounds<F>>,
         μ : &DiscreteMeasure<Loc<F, 2>, F>,
         filename : String,
     ) {
-        let [w, h] = grid.count;
-        let (rawdata_g, range_g) = rawdata_and_range(&grid, g);
-        let (rawdata_ω, range) = match ω0 {
-            Some(ω) => {
-                let (rawdata_ω, range_ω) = rawdata_and_range(&grid, ω);
-                (rawdata_ω, range_g.common(&range_ω))
-            },
-            None => {
-                let mut zeros = Vec::new();
-                zeros.resize(rawdata_g.len(), 0.0);
-                (zeros, range_g)
-            }
-        };
-        let ramp = get_ramp(RAMP);
-        let base_im_iter = to_ramp::<F, u16, _>(&range_g, &ramp, rawdata_g.iter().cloned());
-        let im_iter = izip!(base_im_iter, rawdata_g.iter(), rawdata_ω.iter())
-            .map(|(rgb, &v, &w)| {
-                hueshift(2.0 * F::PI * (v - w).abs() / range.upper(), rgb)
-            });
-        let mut img = ImageBuffer::new(w as u32, h as u32);
-        img.pixels_mut()
-           .zip(im_iter)
-           .for_each(|(p, v)| *p = v);
+        let data = grid.into_iter().map(|p@Loc([x, y]) : Loc<F, 2>| CSVHelper2_2 {
+            x,
+            y,
+            g : g0.map(|g| g.apply(&p)),
+            omega : ω0.map(|ω| ω.apply(&p))
+        });
+        let csv_f = format!("{}_functions.csv", filename);
+        write_csv(data, csv_f).expect("CSV save error");
 
-        // Add spikes
-        let m = μ.iter_masses().fold(F::ZERO, |m, α| m.max(α));
-        let μ_range = Bounds(F::ZERO, m);
-        for &DeltaMeasure{ ref x, α } in μ.iter_spikes() {
-            let [a, b] = map4(x, &grid.start, &grid.end, &grid.count, |&ξ, &a, &b, &n| {
-                ((ξ-a)/(b-a)*F::cast_from(n)).as_()
-            });
-            if a < w.as_() && b < h.as_() {
-                let sc : u16 = scale_range_uint(α, &μ_range);
-                // TODO: use max of points that map to this pixel.
-                img[(a, b)] = Rgb([u16::MAX, u16::MAX, sc/2]);
-            }
-        }
-
-        img.save_with_format(filename + ".png", ImageFormat::Png)
-           .expect("Image save error");
+        let spikes = μ.iter_spikes().map(|δ| {
+            let Loc([x, y]) = δ.x;
+            CSVSpike2 { x, y, alpha : δ.α }
+        });
+        let csv_f = format!("{}_spikes.csv", filename);
+        write_csv(spikes, csv_f).expect("CSV save error");
     }
 
     fn plot_into_file<
@@ -343,22 +163,14 @@
         g : &T1,
         grid : LinGrid<F, 2>,
         filename : String,
-        _explanation : String
     ) {
-        let [w, h] = grid.count;
-        let (rawdata, range) = rawdata_and_range(&grid, g);
-        let ramp = get_ramp(RAMP);
-        let im_iter = to_ramp::<F, u16, _>(&range, &ramp, rawdata.iter().cloned());
-        let mut img = ImageBuffer::new(w as u32, h as u32);
-        img.pixels_mut()
-           .zip(im_iter)
-           .for_each(|(p, v)| *p = v);
-        img.save_with_format(filename.clone() + ".png", ImageFormat::Png)
-           .expect("Image save error");
-        
-        let csv_iter = grid.into_iter().zip(rawdata.iter()).map(|(Loc(x), &v)| (x, v));
-        let csv_f = filename + ".txt";
-        write_csv(csv_iter, csv_f).expect("CSV save error");
+        let data = grid.into_iter().map(|p@Loc([x, y]) : Loc<F, 2>| CSVHelper2 {
+            x,
+            y,
+            f : g.apply(&p),
+        });
+        let csv_f = format!("{}.txt", filename);
+        write_csv(data, csv_f).expect("CSV save error");
     }
 
 }
@@ -386,12 +198,10 @@
     /// This calls [`PlotLookup::plot_into_file_spikes`] with a sequentially numbered file name.
     pub fn plot_spikes<T1, T2>(
         &mut self,
-        g_explanation : String,
-        g : &T1,
-        ω_explanation : String,
+        iter : usize,
+        g : Option<&T1>,
         ω : Option<&T2>,
-        tol : Option<Bounds<F>>,
-        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        μ : &RNDM<F, N>,
     ) where T1 : RealMapping<F, N>,
             T2 : RealMapping<F, N>
     {
@@ -400,12 +210,11 @@
         }
         if self.plot_count < self.max_plots {
             PlotLookup::plot_into_file_spikes(
-                g_explanation, g,
-                ω_explanation, ω,
+                g,
+                ω,
                 self.grid,
-                tol,
                 μ,
-                format!("{}out{:03}", self.prefix, self.plot_count)
+                format!("{}out{:03}", self.prefix, iter)
             );
             self.plot_count += 1;
         }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/preadjoint_helper.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -0,0 +1,55 @@
+/*!
+Preadjoint construction helper
+*/
+
+use std::marker::PhantomData;
+use alg_tools::types::*;
+pub use alg_tools::linops::*;
+use alg_tools::norms::{Norm, HasDualExponent};
+
+/// Helper structure for constructing preadjoints of `S` where `S : Linear<X>`.
+/// [`Linear`] needs to be implemented for each instance, but [`Adjointable`]
+/// and [`BoundedLinear`] have blanket implementations.
+#[derive(Clone,Debug)]
+pub struct PreadjointHelper<'a, S : 'a, X> {
+    pub forward_op : &'a S,
+    _domain : PhantomData<X>
+}
+
+impl<'a, S : 'a, X> PreadjointHelper<'a, S, X> {
+    pub fn new(forward_op : &'a S) -> Self {
+        PreadjointHelper { forward_op, _domain: PhantomData }
+    }
+}
+
+impl<'a, X, Ypre, S> Adjointable<Ypre, X>
+for PreadjointHelper<'a, S, X>
+where
+    X : Space,
+    Ypre : Space,
+    Self : Linear<Ypre>,
+    S : Clone + Linear<X>
+{
+    type AdjointCodomain = S::Codomain;
+    type Adjoint<'b> = S where Self : 'b;
+
+    fn adjoint(&self) -> Self::Adjoint<'_> {
+        self.forward_op.clone()
+    }
+}
+
+impl<'a, F, X, Ypre, ExpXpre, ExpYpre, S> BoundedLinear<Ypre, ExpYpre, ExpXpre, F>
+for PreadjointHelper<'a, S, X>
+where
+    ExpXpre : HasDualExponent,
+    ExpYpre : HasDualExponent,
+    F : Float,
+    X : Space + Norm<F, ExpXpre::DualExp>,
+    Ypre : Space + Norm<F, ExpYpre>,
+    Self : Linear<Ypre>,
+    S : 'a + Clone + BoundedLinear<X, ExpXpre::DualExp, ExpYpre::DualExp, F>
+{
+    fn opnorm_bound(&self, expy : ExpYpre, expx : ExpXpre) -> F {
+        self.forward_op.opnorm_bound(expx.dual_exponent(), expy.dual_exponent())
+    }
+}
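
Editor's note: a minimal usage sketch with a hypothetical operator `op`. As the
comment above notes, `Linear<Ypre>` must still be implemented for each concrete
instance; once it is, the blanket impls apply:

    // Given `op : S` with `S : Clone + Linear<X>`, wrap it as its own
    // preadjoint; the blanket Adjointable impl recovers `op` as the adjoint.
    let pre = PreadjointHelper::new(&op);
    let forward_again = pre.adjoint(); // clones `op`
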
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/prox_penalty.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -0,0 +1,192 @@
+/*!
+Proximal penalty abstraction
+*/
+
+use alg_tools::types::*;
+use numeric_literals::replace_float_literals;
+use serde::{Deserialize, Serialize};
+
+use crate::measures::merging::SpikeMergingMethod;
+use crate::measures::RNDM;
+use crate::regularisation::RegTerm;
+use crate::subproblem::InnerSettings;
+use crate::tolerance::Tolerance;
+use crate::types::{IterInfo, RefinementSettings};
+use alg_tools::iterate::{AlgIterator, AlgIteratorIteration};
+use alg_tools::mapping::RealMapping;
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+
+pub mod radon_squared;
+pub mod wave;
+pub use radon_squared::RadonSquared;
+
+/// Settings for the solution of the stepwise optimality condition.
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[serde(default)]
+pub struct FBGenericConfig<F: Float> {
+    /// Tolerance for point insertion.
+    pub tolerance: Tolerance<F>,
+
+    /// Stop looking for a predual maximum (where to insert a new point) below
+    /// `tolerance` multiplied by this factor.
+    ///
+    /// Not used by [`crate::prox_penalty::radon_squared`].
+    pub insertion_cutoff_factor: F,
+
+    /// Settings for branch and bound refinement when looking for predual maxima
+    pub refinement: RefinementSettings<F>,
+
+    /// Maximum insertions within each outer iteration
+    ///
+    /// Not used by [`crate::prox_penalty::radon_squared`].
+    pub max_insertions: usize,
+
+    /// Pair `(n, m)` for maximum insertions `m` on the first `n` iterations.
+    ///
+    /// Not used by [`crate::prox_penalty::radon_squared`].
+    pub bootstrap_insertions: Option<(usize, usize)>,
+
+    /// Inner method settings
+    pub inner: InnerSettings<F>,
+
+    /// Spike merging method
+    pub merging: SpikeMergingMethod<F>,
+
+    /// Tolerance multiplier for merges
+    pub merge_tolerance_mult: F,
+
+    /// Merge spikes after the last step (even if merging is not generally enabled)
+    pub final_merging: bool,
+
+    /// Use fitness as the merging criterion. This implies worse convergence guarantees.
+    pub fitness_merging: bool,
+
+    /// Number of iterations between merging heuristic attempts
+    pub merge_every: usize,
+    // /// Save $μ$ for postprocessing optimisation
+    // pub postprocessing : bool
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F: Float> Default for FBGenericConfig<F> {
+    fn default() -> Self {
+        FBGenericConfig {
+            tolerance: Default::default(),
+            insertion_cutoff_factor: 1.0,
+            refinement: Default::default(),
+            max_insertions: 100,
+            //bootstrap_insertions : None,
+            bootstrap_insertions: Some((10, 1)),
+            inner: Default::default(),
+            merging: Default::default(),
+            final_merging: true,
+            fitness_merging: false,
+            merge_every: 10,
+            merge_tolerance_mult: 2.0,
+            // postprocessing : false,
+        }
+    }
+}
+
+impl<F: Float> FBGenericConfig<F> {
+    /// Check if merging should be attempted this iteration
+    pub fn merge_now<I: AlgIterator>(&self, state: &AlgIteratorIteration<I>) -> bool {
+        self.merging.enabled && state.iteration() % self.merge_every == 0
+    }
+
+    /// Returns the final merging method
+    pub fn final_merging_method(&self) -> SpikeMergingMethod<F> {
+        SpikeMergingMethod {
+            enabled: self.final_merging,
+            ..self.merging
+        }
+    }
+}
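+
+// A configuration sketch (illustrative, not part of the original sources):
+// thanks to `#[serde(default)]` above, partial configurations deserialise
+// with the remaining fields defaulted; the same effect is obtained in code
+// with struct update syntax.
+//
+//     let config = FBGenericConfig::<f64> {
+//         max_insertions: 50,
+//         merge_every: 5,
+//         ..Default::default()
+//     };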
+
+/// Trait for proximal penalties
+pub trait ProxPenalty<F, PreadjointCodomain, Reg, const N: usize>
+where
+    F: Float + ToNalgebraRealField,
+    Reg: RegTerm<F, N>,
+{
+    type ReturnMapping: RealMapping<F, N>;
+
+    /// Insert new spikes into `μ` to approximately satisfy optimality conditions
+    /// with the forward step term fixed to `τv`.
+    ///
+    /// May return `τv + w`, where `w` is an element of the subdifferential of the regularisation
+    /// term `reg`, as well as an indication of whether the tolerance bounds `ε` are satisfied.
+    ///
+    /// `μ_base + ν_delta` is the base point, where `μ` and `μ_base` are assumed to have the same
+    /// spike locations, while `ν_delta` may have different locations.
+    ///
+    /// `τv` is mutable to allow [`alg_tools::bisection_tree::BTFN`] refinement.
+    /// Actual values of `τv` are not supposed to be mutated.
+    fn insert_and_reweigh<I>(
+        &self,
+        μ: &mut RNDM<F, N>,
+        τv: &mut PreadjointCodomain,
+        μ_base: &RNDM<F, N>,
+        ν_delta: Option<&RNDM<F, N>>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+        reg: &Reg,
+        state: &AlgIteratorIteration<I>,
+        stats: &mut IterInfo<F, N>,
+    ) -> (Option<Self::ReturnMapping>, bool)
+    where
+        I: AlgIterator;
+
+    /// Merge spikes, if possible.
+    ///
+    /// Either optimality condition merging or objective value (fitness) merging
+    /// may be used, the latter only if `fitness` is provided and `config.fitness_merging`
+    /// is set.
+    fn merge_spikes(
+        &self,
+        μ: &mut RNDM<F, N>,
+        τv: &mut PreadjointCodomain,
+        μ_base: &RNDM<F, N>,
+        ν_delta: Option<&RNDM<F, N>>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+        reg: &Reg,
+        fitness: Option<impl Fn(&RNDM<F, N>) -> F>,
+    ) -> usize;
+
+    /// Merge spikes, if possible.
+    ///
+    /// Unlike [`Self::merge_spikes`], this variant only supports optimality condition based merging
+    #[inline]
+    fn merge_spikes_no_fitness(
+        &self,
+        μ: &mut RNDM<F, N>,
+        τv: &mut PreadjointCodomain,
+        μ_base: &RNDM<F, N>,
+        ν_delta: Option<&RNDM<F, N>>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+        reg: &Reg,
+    ) -> usize {
+        // This is a hack to create a `None` of the same type as a `Some`
+        // for the `impl Fn` parameter of `merge_spikes`.
+        #[inline]
+        fn into_none<T>(_: Option<T>) -> Option<T> {
+            None
+        }
+        self.merge_spikes(
+            μ,
+            τv,
+            μ_base,
+            ν_delta,
+            τ,
+            ε,
+            config,
+            reg,
+            into_none(Some(|_: &RNDM<F, N>| F::ZERO)),
+        )
+    }
+}
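+
+// A sketch of how a solver is expected to drive this trait (the names
+// `penalty`, `τv`, `state`, and `stats` are assumptions for illustration;
+// the actual solver loops live elsewhere in the crate):
+//
+//     let (d, within_tolerances) = penalty.insert_and_reweigh(
+//         &mut μ, &mut τv, &μ_base, None, τ, ε, &config, &reg, &state, &mut stats,
+//     );
+//     if config.merge_now(&state) {
+//         let merged = penalty.merge_spikes_no_fitness(
+//             &mut μ, &mut τv, &μ_base, None, τ, ε, &config, &reg,
+//         );
+//     }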
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/prox_penalty/radon_squared.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -0,0 +1,182 @@
+/*!
+Radon-norm squared proximal penalty for the point source localisation problem.
+
+Instead of the $𝒟$-norm of `fb.rs`, this uses a standard Radon norm for the proximal map.
+*/
+
+use numeric_literals::replace_float_literals;
+use serde::{Serialize, Deserialize};
+use nalgebra::DVector;
+
+use alg_tools::iterate::{
+    AlgIteratorIteration,
+    AlgIterator
+};
+use alg_tools::norms::{L2, Norm};
+use alg_tools::linops::Mapping;
+use alg_tools::bisection_tree::{
+    BTFN,
+    Bounds,
+    BTSearch,
+    SupportGenerator,
+    LocalAnalysis,
+};
+use alg_tools::mapping::RealMapping;
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+
+use crate::types::*;
+use crate::measures::{
+    RNDM,
+    DeltaMeasure,
+    Radon,
+};
+use crate::measures::merging::SpikeMerging;
+use crate::regularisation::RegTerm;
+use crate::forward_model::{
+    ForwardModel,
+    AdjointProductBoundedBy
+};
+use super::{
+    FBGenericConfig,
+    ProxPenalty,
+};
+
+/// Radon-norm squared proximal penalty
+#[derive(Copy,Clone,Serialize,Deserialize)]
+pub struct RadonSquared;
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, GA, BTA, S, Reg, const N : usize>
+ProxPenalty<F, BTFN<F, GA, BTA, N>, Reg, N> for RadonSquared
+where
+    F : Float + ToNalgebraRealField,
+    GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
+    BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+    S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+    Reg : RegTerm<F, N>,
+    RNDM<F, N> : SpikeMerging<F>,
+{
+    type ReturnMapping = BTFN<F, GA, BTA, N>;
+
+    fn insert_and_reweigh<I>(
+        &self,
+        μ : &mut RNDM<F, N>,
+        τv : &mut BTFN<F, GA, BTA, N>,
+        μ_base : &RNDM<F, N>,
+        ν_delta: Option<&RNDM<F, N>>,
+        τ : F,
+        ε : F,
+        config : &FBGenericConfig<F>,
+        reg : &Reg,
+        _state : &AlgIteratorIteration<I>,
+        stats : &mut IterInfo<F, N>,
+    ) -> (Option<Self::ReturnMapping>, bool)
+    where
+        I : AlgIterator
+    {
+        let mut y = μ_base.masses_dvector();
+
+        assert!(μ_base.len() <= μ.len());
+        
+        'i_and_w: for i in 0..=1 {
+            // Optimise weights
+            if μ.len() > 0 {
+                // Form the finite-dimensional subproblem. All references to the original μ^k
+                // from the beginning of the iteration are contained in the immutable y and g̃.
+                // TODO: observe negation of -τv after the switch from minus_τv: the
+                // finite-dimensional problems have not yet been updated to the sign change.
+                let g̃ = DVector::from_iterator(μ.len(),
+                                               μ.iter_locations()
+                                                .map(|ζ| - F::to_nalgebra_mixed(τv.apply(ζ))));
+                let mut x = μ.masses_dvector();
+                y.extend(std::iter::repeat(0.0.to_nalgebra_mixed()).take(x.len().saturating_sub(y.len())));
+                assert_eq!(y.len(), x.len());
+                // Solve finite-dimensional subproblem.
+                // TODO: This assumes that ν_delta has no common locations with μ-μ_base, to
+                // ignore it.
+                stats.inner_iters += reg.solve_findim_l1squared(&y, &g̃, τ, &mut x, ε, config);
+
+                // Update masses of μ based on solution of finite-dimensional subproblem.
+                μ.set_masses_dvector(&x);
+            }
+
+            if i>0 {
+                // Simple debugging test to see if more inserts would be needed. Doesn't seem so.
+                //let n = μ.dist_matching(μ_base);
+                //println!("{:?}", reg.find_tolerance_violation_slack(τv, τ, ε, false, config, n));
+                break 'i_and_w
+            }
+            
+            // Calculate ‖μ - μ_base‖_ℳ
+            // TODO: This assumes that ν_delta has no common locations with μ-μ_base.
+            let n = μ.dist_matching(μ_base) + ν_delta.map_or(0.0, |ν| ν.norm(Radon));
+        
+            // Find a spike to insert, if needed.
+            // This only checks the overall tolerances, not the tolerances on the support of μ-μ_base or μ,
+            // which are supposed to have been guaranteed by the finite-dimensional weight optimisation.
+            match reg.find_tolerance_violation_slack(τv, τ, ε, false, config, n) {
+                None => { break 'i_and_w },
+                Some((ξ, _v_ξ, _in_bounds)) => {
+                    // Weight is found out by running the finite-dimensional optimisation algorithm
+                    // above
+                    *μ += DeltaMeasure { x : ξ, α : 0.0 };
+                    stats.inserted += 1;
+                }
+            };
+        }
+
+        (None, true)
+    }
+
+    fn merge_spikes(
+        &self,
+        μ : &mut RNDM<F, N>,
+        τv : &mut BTFN<F, GA, BTA, N>,
+        μ_base : &RNDM<F, N>,
+        ν_delta: Option<&RNDM<F, N>>,
+        τ : F,
+        ε : F,
+        config : &FBGenericConfig<F>,
+        reg : &Reg,
+        fitness : Option<impl Fn(&RNDM<F, N>) -> F>,
+    ) -> usize
+    {
+        if config.fitness_merging {
+            if let Some(f) = fitness {
+                return μ.merge_spikes_fitness(config.merging, f, |&v| v)
+                        .1
+            }
+        }
+        μ.merge_spikes(config.merging, |μ_candidate| {
+            // Important: μ_candidate's new points come after the existing ones,
+            // and do not conflict with μ_base.
+            // TODO: could simplify to requiring μ_base instead of μ_radon,
+            // but that may complicate things with the sliding base's extra points, which need to
+            // come after μ_candidate's extra points.
+            // TODO: doesn't seem to work, maybe need to merge μ_base as well?
+            // Although that doesn't seem to make sense.
+            let μ_radon = match ν_delta {
+                None => μ_candidate.sub_matching(μ_base),
+                Some(ν) => μ_candidate.sub_matching(μ_base) - ν,
+            };
+            reg.verify_merge_candidate_radonsq(τv, μ_candidate, τ, ε, &config, &μ_radon)
+            //let n = μ_candidate.dist_matching(μ_base);
+            //reg.find_tolerance_violation_slack(τv, τ, ε, false, config, n).is_none()
+        })
+    }
+}
+
+
+impl<F, A, const N : usize> AdjointProductBoundedBy<RNDM<F, N>, RadonSquared>
+for A
+where
+    F : Float,
+    A : ForwardModel<RNDM<F, N>, F>
+{
+    type FloatType = F;
+
+    fn adjoint_product_bound(&self, _ : &RadonSquared) -> Option<Self::FloatType> {
+        self.opnorm_bound(Radon, L2).powi(2).into()
+    }
+}
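+
+// A sketch of the estimate encoded by `adjoint_product_bound` above: for a
+// forward operator A with ‖Aμ‖₂ ≤ ‖A‖_{ℳ→L²}‖μ‖_ℳ, one gets
+// ⟨A^*Aμ, μ⟩ = ‖Aμ‖₂² ≤ ‖A‖²_{ℳ→L²}‖μ‖²_ℳ, so the squared operator norm
+// bounds A^*A relative to the Radon-norm squared penalty.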
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/prox_penalty/wave.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -0,0 +1,191 @@
+/*!
+Basic proximal penalty based on convolution operators $𝒟$.
+ */
+
+use numeric_literals::replace_float_literals;
+use nalgebra::DVector;
+use colored::Colorize;
+
+use alg_tools::types::*;
+use alg_tools::loc::Loc;
+use alg_tools::mapping::{Mapping, RealMapping};
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::norms::Linfinity;
+use alg_tools::iterate::{
+    AlgIteratorIteration,
+    AlgIterator,
+};
+use alg_tools::bisection_tree::{
+    BTFN,
+    PreBTFN,
+    Bounds,
+    BTSearch,
+    SupportGenerator,
+    LocalAnalysis,
+    BothGenerators,
+};
+use crate::measures::{
+    RNDM,
+    DeltaMeasure,
+    Radon,
+};
+use crate::measures::merging::{
+    SpikeMerging,
+};
+use crate::seminorms::DiscreteMeasureOp;
+use crate::types::{
+    IterInfo,
+};
+use crate::regularisation::RegTerm;
+use super::{ProxPenalty, FBGenericConfig};
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, GA, BTA, S, Reg, 𝒟, G𝒟, K, const N : usize>
+ProxPenalty<F, BTFN<F, GA, BTA, N>, Reg, N> for 𝒟
+where
+    F : Float + ToNalgebraRealField,
+    GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
+    BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+    S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+    G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
+    𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
+    𝒟::Codomain : RealMapping<F, N>,
+    K : RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+    Reg : RegTerm<F, N>,
+    RNDM<F, N> : SpikeMerging<F>,
+{
+    type ReturnMapping = BTFN<F, BothGenerators<GA, G𝒟>, BTA, N>;
+
+    fn insert_and_reweigh<I>(
+        &self,
+        μ : &mut RNDM<F, N>,
+        τv : &mut BTFN<F, GA, BTA, N>,
+        μ_base : &RNDM<F, N>,
+        ν_delta: Option<&RNDM<F, N>>,
+        τ : F,
+        ε : F,
+        config : &FBGenericConfig<F>,
+        reg : &Reg,
+        state : &AlgIteratorIteration<I>,
+        stats : &mut IterInfo<F, N>,
+    ) -> (Option<BTFN<F, BothGenerators<GA, G𝒟>, BTA, N>>, bool)
+    where
+        I : AlgIterator
+    {
+
+        let op𝒟norm = self.opnorm_bound(Radon, Linfinity);
+
+        // Maximum insertion count and measure difference calculation depend on insertion style.
+        let (max_insertions, warn_insertions) = match (state.iteration(), config.bootstrap_insertions) {
+            (i, Some((l, k))) if i <= l => (k, false),
+            _ => (config.max_insertions, !state.is_quiet()),
+        };
+
+        let ω0 = match ν_delta {
+            None => self.apply(μ_base),
+            Some(ν) => self.apply(μ_base + ν),
+        };
+
+        // Add points to support until within error tolerance or maximum insertion count reached.
+        let mut count = 0;
+        let (within_tolerances, d) = 'insertion: loop {
+            if μ.len() > 0 {
+                // Form the finite-dimensional subproblem. All references to the original μ^k
+                // from the beginning of the iteration are contained in the immutable Ã and g̃.
+                // TODO: observe negation of -τv after the switch from minus_τv: the
+                // finite-dimensional problems have not yet been updated to the sign change.
+                let Ã = self.findim_matrix(μ.iter_locations());
+                let g̃ = DVector::from_iterator(μ.len(),
+                                               μ.iter_locations()
+                                                .map(|ζ| ω0.apply(ζ) - τv.apply(ζ))
+                                                .map(F::to_nalgebra_mixed));
+                let mut x = μ.masses_dvector();
+
+                // The gradient of the forward component of the inner objective is C^*𝒟Cx - g̃.
+                // We have |C^*𝒟Cx|_2 = sup_{|z|_2 ≤ 1} ⟨z, C^*𝒟Cx⟩ = sup_{|z|_2 ≤ 1} ⟨Cz|𝒟Cx⟩
+                // ≤ sup_{|z|_2 ≤ 1} |Cz|_ℳ |𝒟Cx|_∞ ≤  sup_{|z|_2 ≤ 1} |Cz|_ℳ |𝒟| |Cx|_ℳ
+                // ≤ sup_{|z|_2 ≤ 1} |z|_1 |𝒟| |x|_1 ≤ sup_{|z|_2 ≤ 1} n |z|_2 |𝒟| |x|_2
+                // = n |𝒟| |x|_2, where n is the number of points. Therefore
+                let Ã_normest = op𝒟norm * F::cast_from(μ.len());
+
+                // Solve finite-dimensional subproblem.
+                stats.inner_iters += reg.solve_findim(&Ã, &g̃, τ, &mut x, Ã_normest, ε, config);
+
+                // Update masses of μ based on solution of finite-dimensional subproblem.
+                μ.set_masses_dvector(&x);
+            }
+
+            // Form d = τv + 𝒟μ - ω0 = τv + 𝒟(μ - μ^k) for checking the proximate optimality
+            // conditions in the predual space, and finding new points for insertion, if necessary.
+            let mut d = &*τv + match ν_delta {
+                None => self.preapply(μ.sub_matching(μ_base)),
+                Some(ν) => self.preapply(μ.sub_matching(μ_base) - ν)
+            };
+
+            // If no merging heuristic is used, be more conservative about spike insertion,
+            // and skip it after the first round. If merging is done, being greedier about spike
+            // insertion also seems to improve performance.
+            let skip_by_rough_check = if config.merging.enabled {
+                false
+            } else {
+                count > 0
+            };
+
+            // Find a spike to insert, if needed
+            let (ξ, _v_ξ, in_bounds) =  match reg.find_tolerance_violation(
+                &mut d, τ, ε, skip_by_rough_check, config
+            ) {
+                None => break 'insertion (true, d),
+                Some(res) => res,
+            };
+
+            // Break if maximum insertion count reached
+            if count >= max_insertions {
+                break 'insertion (in_bounds, d)
+            }
+
+            // No point in optimising the weight here; the finite-dimensional algorithm is fast.
+            *μ += DeltaMeasure { x : ξ, α : 0.0 };
+            count += 1;
+            stats.inserted += 1;
+        };
+
+        if !within_tolerances && warn_insertions {
+            // Complain (but continue) if we failed to get within tolerances
+            // by inserting more points.
+            let err = "Maximum insertions reached without achieving \
+                       subproblem solution tolerance";
+            println!("{}", err.red());
+        }
+
+        (Some(d), within_tolerances)
+    }
+
+    fn merge_spikes(
+        &self,
+        μ : &mut RNDM<F, N>,
+        τv : &mut BTFN<F, GA, BTA, N>,
+        μ_base : &RNDM<F, N>,
+        ν_delta: Option<&RNDM<F, N>>,
+        τ : F,
+        ε : F,
+        config : &FBGenericConfig<F>,
+        reg : &Reg,
+        fitness : Option<impl Fn(&RNDM<F, N>) -> F>,
+    ) -> usize
+    {
+        if config.fitness_merging {
+            if let Some(f) = fitness {
+                return μ.merge_spikes_fitness(config.merging, f, |&v| v)
+                        .1
+            }
+        }
+        μ.merge_spikes(config.merging, |μ_candidate| {
+            let mut d = &*τv + self.preapply(match ν_delta {
+                None => μ_candidate.sub_matching(μ_base),
+                Some(ν) => μ_candidate.sub_matching(μ_base) - ν,
+            });
+            reg.verify_merge_candidate(&mut d, μ_candidate, τ, ε, config)
+        })
+    }
+}
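+
+// A summary sketch of the loop structure above (in the notation of the
+// inline comments): each call alternates between (i) solving the
+// finite-dimensional weight subproblem min_x ½x^⊤Ãx − g̃^⊤x + τG(x) on the
+// current support, and (ii) inserting a zero-weight spike where
+// d = τv + 𝒟(μ − μ^k) violates the ε-tolerances, stopping when the
+// optimality conditions hold or `max_insertions` is reached.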
--- a/src/regularisation.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/regularisation.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -2,25 +2,41 @@
 Regularisation terms
 */
 
-use serde::{Serialize, Deserialize};
-use alg_tools::norms::Norm;
-use alg_tools::linops::Apply;
-use alg_tools::loc::Loc;
+#[allow(unused_imports)] // Used by documentation.
+use crate::fb::pointsource_fb_reg;
+use crate::fb::FBGenericConfig;
+use crate::measures::{DeltaMeasure, Radon, RNDM};
+#[allow(unused_imports)] // Used by documentation.
+use crate::sliding_fb::pointsource_sliding_fb_reg;
 use crate::types::*;
-use crate::measures::{
-    DiscreteMeasure,
-    Radon
+use alg_tools::instance::Instance;
+use alg_tools::linops::Mapping;
+use alg_tools::loc::Loc;
+use alg_tools::norms::Norm;
+use numeric_literals::replace_float_literals;
+use serde::{Deserialize, Serialize};
+
+use crate::subproblem::{
+    l1squared_nonneg::l1squared_nonneg, l1squared_unconstrained::l1squared_unconstrained,
+    nonneg::quadratic_nonneg, unconstrained::quadratic_unconstrained,
 };
-#[allow(unused_imports)] // Used by documentation.
-use crate::fb::generic_pointsource_fb_reg;
+use alg_tools::bisection_tree::{
+    BTSearch, Bounded, Bounds, LocalAnalysis, P2Minimise, SupportGenerator, BTFN,
+};
+use alg_tools::iterate::AlgIteratorFactory;
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use nalgebra::{DMatrix, DVector};
 
-/// The regularisation term $α\\|μ\\|\_{ℳ(Ω)} + δ_{≥ 0}(μ)$ for [`generic_pointsource_fb_reg`].
+use std::cmp::Ordering::{Equal, Greater, Less};
+
+/// The regularisation term $α\\|μ\\|\_{ℳ(Ω)} + δ_{≥ 0}(μ)$ for [`pointsource_fb_reg`] and other
+/// algorithms.
 ///
 /// The only member of the struct is the regularisation parameter α.
 #[derive(Copy, Clone, Debug, Serialize, Deserialize)]
-pub struct NonnegRadonRegTerm<F : Float>(pub F /* α */);
+pub struct NonnegRadonRegTerm<F: Float>(pub F /* α */);
 
-impl<'a, F : Float> NonnegRadonRegTerm<F> {
+impl<'a, F: Float> NonnegRadonRegTerm<F> {
     /// Returns the regularisation parameter
     pub fn α(&self) -> F {
         let &NonnegRadonRegTerm(α) = self;
@@ -28,24 +44,24 @@
     }
 }
 
-impl<'a, F : Float, const N : usize> Apply<&'a DiscreteMeasure<Loc<F, N>, F>>
-for NonnegRadonRegTerm<F> {
-    type Output = F;
-    
-    fn apply(&self, μ : &'a DiscreteMeasure<Loc<F, N>, F>) -> F {
-        self.α() * μ.norm(Radon)
+impl<'a, F: Float, const N: usize> Mapping<RNDM<F, N>> for NonnegRadonRegTerm<F> {
+    type Codomain = F;
+
+    fn apply<I>(&self, μ: I) -> F
+    where
+        I: Instance<RNDM<F, N>>,
+    {
+        self.α() * μ.eval(|x| x.norm(Radon))
     }
 }
 
-
-/// The regularisation term $α\|μ\|_{ℳ(Ω)}$ for [`generic_pointsource_fb_reg`].
+/// The regularisation term $α\|μ\|_{ℳ(Ω)}$ for [`pointsource_fb_reg`].
 ///
 /// The only member of the struct is the regularisation parameter α.
 #[derive(Copy, Clone, Debug, Serialize, Deserialize)]
-pub struct RadonRegTerm<F : Float>(pub F /* α */);
+pub struct RadonRegTerm<F: Float>(pub F /* α */);
 
-
-impl<'a, F : Float> RadonRegTerm<F> {
+impl<'a, F: Float> RadonRegTerm<F> {
     /// Returns the regularisation parameter
     pub fn α(&self) -> F {
         let &RadonRegTerm(α) = self;
@@ -53,32 +69,615 @@
     }
 }
 
-impl<'a, F : Float, const N : usize> Apply<&'a DiscreteMeasure<Loc<F, N>, F>>
-for RadonRegTerm<F> {
-    type Output = F;
-    
-    fn apply(&self, μ : &'a DiscreteMeasure<Loc<F, N>, F>) -> F {
-        self.α() * μ.norm(Radon)
+impl<'a, F: Float, const N: usize> Mapping<RNDM<F, N>> for RadonRegTerm<F> {
+    type Codomain = F;
+
+    fn apply<I>(&self, μ: I) -> F
+    where
+        I: Instance<RNDM<F, N>>,
+    {
+        self.α() * μ.eval(|x| x.norm(Radon))
     }
 }
 
 /// Regularisation term configuration
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
-pub enum Regularisation<F : Float> {
+pub enum Regularisation<F: Float> {
     /// $α \\|μ\\|\_{ℳ(Ω)}$
     Radon(F),
     /// $α\\|μ\\|\_{ℳ(Ω)} + δ_{≥ 0}(μ)$
     NonnegRadon(F),
 }
 
-impl<'a, F : Float, const N : usize> Apply<&'a DiscreteMeasure<Loc<F, N>, F>>
-for Regularisation<F> {
-    type Output = F;
-    
-    fn apply(&self, μ : &'a DiscreteMeasure<Loc<F, N>, F>) -> F {
+impl<'a, F: Float, const N: usize> Mapping<RNDM<F, N>> for Regularisation<F> {
+    type Codomain = F;
+
+    fn apply<I>(&self, μ: I) -> F
+    where
+        I: Instance<RNDM<F, N>>,
+    {
         match *self {
             Self::Radon(α) => RadonRegTerm(α).apply(μ),
             Self::NonnegRadon(α) => NonnegRadonRegTerm(α).apply(μ),
         }
     }
 }
+
+/// Abstraction of regularisation terms.
+pub trait RegTerm<F: Float + ToNalgebraRealField, const N: usize>:
+    Mapping<RNDM<F, N>, Codomain = F>
+{
+    /// Approximately solve the problem
+    /// <div>$$
+    ///     \min_{x ∈ ℝ^n} \frac{1}{2} x^⊤Ax - g^⊤ x + τ G(x)
+    /// $$</div>
+    /// for $G$ depending on the trait implementation.
+    ///
+    /// The parameter `mA` is $A$. An estimate for its operator norm should be provided in
+    /// `mA_normest`. The initial iterate and output is `x`. The current main tolerance is `ε`.
+    ///
+    /// Returns the number of iterations taken.
+    fn solve_findim(
+        &self,
+        mA: &DMatrix<F::MixedType>,
+        g: &DVector<F::MixedType>,
+        τ: F,
+        x: &mut DVector<F::MixedType>,
+        mA_normest: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+    ) -> usize;
+
+    /// Approximately solve the problem
+    /// <div>$$
+    ///     \min_{x ∈ ℝ^n} \frac{1}{2} |x-y|_1^2 - g^⊤ x + τ G(x)
+    /// $$</div>
+    /// for $G$ depending on the trait implementation.
+    ///
+    /// Returns the number of iterations taken.
+    fn solve_findim_l1squared(
+        &self,
+        y: &DVector<F::MixedType>,
+        g: &DVector<F::MixedType>,
+        τ: F,
+        x: &mut DVector<F::MixedType>,
+        ε: F,
+        config: &FBGenericConfig<F>,
+    ) -> usize;
+
+    /// Find a point where `d` may violate the tolerance `ε`.
+    ///
+    /// If `skip_by_rough_check` is set, do not find the point if a rough check indicates that we
+    /// are in bounds. `ε` is the current main tolerance and `τ` a scaling factor for the
+    /// regulariser.
+    ///
+    /// Returns `None` if `d` is in bounds either based on the rough check, or a more precise check
+    /// terminating early. Otherwise returns a possibly violating point, the value of `d` there,
+    /// and a boolean indicating whether the found point is in bounds.
+    fn find_tolerance_violation<G, BT>(
+        &self,
+        d: &mut BTFN<F, G, BT, N>,
+        τ: F,
+        ε: F,
+        skip_by_rough_check: bool,
+        config: &FBGenericConfig<F>,
+    ) -> Option<(Loc<F, N>, F, bool)>
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
+        self.find_tolerance_violation_slack(d, τ, ε, skip_by_rough_check, config, F::ZERO)
+    }
+
+    /// Find a point where `d` may violate the tolerance `ε`.
+    ///
+    /// This version includes a `slack` parameter to expand the tolerances.
+    /// It is used for the Radon-norm squared proximal term in [`crate::prox_penalty::radon_squared`].
+    ///
+    /// If `skip_by_rough_check` is set, do not find the point if a rough check indicates that we
+    /// are in bounds. `ε` is the current main tolerance and `τ` a scaling factor for the
+    /// regulariser.
+    ///
+    /// Returns `None` if `d` is in bounds either based on the rough check, or a more precise check
+    /// terminating early. Otherwise returns a possibly violating point, the value of `d` there,
+    /// and a boolean indicating whether the found point is in bounds.
+    fn find_tolerance_violation_slack<G, BT>(
+        &self,
+        d: &mut BTFN<F, G, BT, N>,
+        τ: F,
+        ε: F,
+        skip_by_rough_check: bool,
+        config: &FBGenericConfig<F>,
+        slack: F,
+    ) -> Option<(Loc<F, N>, F, bool)>
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>;
+
+    /// Verify that `d` is in bounds `ε` for a merge candidate `μ`
+    ///
+    /// `ε` is the current main tolerance and `τ` a scaling factor for the regulariser.
+    fn verify_merge_candidate<G, BT>(
+        &self,
+        d: &mut BTFN<F, G, BT, N>,
+        μ: &RNDM<F, N>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+    ) -> bool
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>;
+
+    /// Verify that `d` is in bounds `ε` for a merge candidate `μ`
+    ///
+    /// This version is used for the Radon-norm squared proximal term in
+    /// [`crate::prox_penalty::radon_squared`].
+    /// The [measures][crate::measures::DiscreteMeasure] `μ` and `radon_μ` are supposed to have
+    /// the same coordinates at agreeing indices.
+    ///
+    /// `ε` is the current main tolerance and `τ` a scaling factor for the regulariser.
+    fn verify_merge_candidate_radonsq<G, BT>(
+        &self,
+        d: &mut BTFN<F, G, BT, N>,
+        μ: &RNDM<F, N>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+        radon_μ: &RNDM<F, N>,
+    ) -> bool
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>;
+
+    /// TODO: document this
+    fn target_bounds(&self, τ: F, ε: F) -> Option<Bounds<F>>;
+
+    /// Returns a scaling factor for the tolerance sequence.
+    ///
+    /// Typically this is the regularisation parameter.
+    fn tolerance_scaling(&self) -> F;
+}
+
+/// Abstraction of regularisation terms for [`pointsource_sliding_fb_reg`].
+pub trait SlidingRegTerm<F: Float + ToNalgebraRealField, const N: usize>: RegTerm<F, N> {
+    /// Calculate $τ[w(z) - w(y)]$ for some w in the subdifferential of the regularisation
+    /// term, such that $-ε ≤ τw - d ≤ ε$.
+    fn goodness<G, BT>(
+        &self,
+        d: &mut BTFN<F, G, BT, N>,
+        μ: &RNDM<F, N>,
+        y: &Loc<F, N>,
+        z: &Loc<F, N>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+    ) -> F
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>;
+
+    /// Convert a bound on the regulariser to a bound on the Radon norm
+    fn radon_norm_bound(&self, b: F) -> F;
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F: Float + ToNalgebraRealField, const N: usize> RegTerm<F, N> for NonnegRadonRegTerm<F>
+where
+    Cube<F, N>: P2Minimise<Loc<F, N>, F>,
+{
+    fn solve_findim(
+        &self,
+        mA: &DMatrix<F::MixedType>,
+        g: &DVector<F::MixedType>,
+        τ: F,
+        x: &mut DVector<F::MixedType>,
+        mA_normest: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+    ) -> usize {
+        let inner_tolerance = ε * config.inner.tolerance_mult;
+        let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
+        quadratic_nonneg(mA, g, τ * self.α(), x, mA_normest, &config.inner, inner_it)
+    }
+
+    fn solve_findim_l1squared(
+        &self,
+        y: &DVector<F::MixedType>,
+        g: &DVector<F::MixedType>,
+        τ: F,
+        x: &mut DVector<F::MixedType>,
+        ε: F,
+        config: &FBGenericConfig<F>,
+    ) -> usize {
+        let inner_tolerance = ε * config.inner.tolerance_mult;
+        let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
+        l1squared_nonneg(y, g, τ * self.α(), 1.0, x, &config.inner, inner_it)
+    }
+
+    #[inline]
+    fn find_tolerance_violation_slack<G, BT>(
+        &self,
+        d: &mut BTFN<F, G, BT, N>,
+        τ: F,
+        ε: F,
+        skip_by_rough_check: bool,
+        config: &FBGenericConfig<F>,
+        slack: F,
+    ) -> Option<(Loc<F, N>, F, bool)>
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
+        let τα = τ * self.α();
+        let keep_above = -τα - slack - ε;
+        let minimise_below = -τα - slack - ε * config.insertion_cutoff_factor;
+        let refinement_tolerance = ε * config.refinement.tolerance_mult;
+
+        // If the preliminary check indicates that we are in bounds, and if it otherwise matches
+        // the insertion strategy, skip insertion.
+        if skip_by_rough_check && d.bounds().lower() >= keep_above {
+            None
+        } else {
+            // If the rough check didn't rule out insertion, find the minimising point.
+            d.minimise_below(
+                minimise_below,
+                refinement_tolerance,
+                config.refinement.max_steps,
+            )
+            .map(|(ξ, v_ξ)| (ξ, v_ξ, v_ξ >= keep_above))
+        }
+    }
+
+    fn verify_merge_candidate<G, BT>(
+        &self,
+        d: &mut BTFN<F, G, BT, N>,
+        μ: &RNDM<F, N>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+    ) -> bool
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
+        let τα = τ * self.α();
+        let refinement_tolerance = ε * config.refinement.tolerance_mult;
+        let merge_tolerance = config.merge_tolerance_mult * ε;
+        let keep_above = -τα - merge_tolerance;
+        let keep_supp_below = -τα + merge_tolerance;
+        let bnd = d.bounds();
+
+        return (bnd.upper() <= keep_supp_below
+            || μ
+                .iter_spikes()
+                .all(|&DeltaMeasure { α, ref x }| (α == 0.0) || d.apply(x) <= keep_supp_below))
+            && (bnd.lower() >= keep_above
+                || d.has_lower_bound(
+                    keep_above,
+                    refinement_tolerance,
+                    config.refinement.max_steps,
+                ));
+    }
+
+    fn verify_merge_candidate_radonsq<G, BT>(
+        &self,
+        d: &mut BTFN<F, G, BT, N>,
+        μ: &RNDM<F, N>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+        radon_μ: &RNDM<F, N>,
+    ) -> bool
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
+        let τα = τ * self.α();
+        let refinement_tolerance = ε * config.refinement.tolerance_mult;
+        let merge_tolerance = config.merge_tolerance_mult * ε;
+        let slack = radon_μ.norm(Radon);
+        let bnd = d.bounds();
+
+        return {
+            μ.both_matching(radon_μ).all(|(α, rα, x)| {
+                let v = -d.apply(x); // TODO: observe ad hoc negation here, after minus_τv
+                                     // switch to τv.
+                let (l1, u1) = match α.partial_cmp(&0.0).unwrap_or(Equal) {
+                    Greater => (τα, τα),
+                    _ => (F::NEG_INFINITY, τα),
+                    // Less should not happen; treated as Equal
+                };
+                let (l2, u2) = match rα.partial_cmp(&0.0).unwrap_or(Equal) {
+                    Greater => (slack, slack),
+                    Equal => (-slack, slack),
+                    Less => (-slack, -slack),
+                };
+                // TODO: both fail.
+                (l1 + l2 - merge_tolerance <= v) && (v <= u1 + u2 + merge_tolerance)
+            })
+        } && {
+            let keep_above = -τα - slack - merge_tolerance;
+            bnd.lower() >= keep_above
+                || d.has_lower_bound(
+                    keep_above,
+                    refinement_tolerance,
+                    config.refinement.max_steps,
+                )
+        };
+    }
+
+    fn target_bounds(&self, τ: F, ε: F) -> Option<Bounds<F>> {
+        let τα = τ * self.α();
+        Some(Bounds(τα - ε, τα + ε))
+    }
+
+    fn tolerance_scaling(&self) -> F {
+        self.α()
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F: Float + ToNalgebraRealField, const N: usize> SlidingRegTerm<F, N> for NonnegRadonRegTerm<F>
+where
+    Cube<F, N>: P2Minimise<Loc<F, N>, F>,
+{
+    fn goodness<G, BT>(
+        &self,
+        d: &mut BTFN<F, G, BT, N>,
+        _μ: &RNDM<F, N>,
+        y: &Loc<F, N>,
+        z: &Loc<F, N>,
+        τ: F,
+        ε: F,
+        _config: &FBGenericConfig<F>,
+    ) -> F
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
+        let w = |x| 1.0.min((ε + d.apply(x)) / (τ * self.α()));
+        w(z) - w(y)
+    }
+
+    fn radon_norm_bound(&self, b: F) -> F {
+        b / self.α()
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F: Float + ToNalgebraRealField, const N: usize> RegTerm<F, N> for RadonRegTerm<F>
+where
+    Cube<F, N>: P2Minimise<Loc<F, N>, F>,
+{
+    fn solve_findim(
+        &self,
+        mA: &DMatrix<F::MixedType>,
+        g: &DVector<F::MixedType>,
+        τ: F,
+        x: &mut DVector<F::MixedType>,
+        mA_normest: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+    ) -> usize {
+        let inner_tolerance = ε * config.inner.tolerance_mult;
+        let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
+        quadratic_unconstrained(mA, g, τ * self.α(), x, mA_normest, &config.inner, inner_it)
+    }
+
+    fn solve_findim_l1squared(
+        &self,
+        y: &DVector<F::MixedType>,
+        g: &DVector<F::MixedType>,
+        τ: F,
+        x: &mut DVector<F::MixedType>,
+        ε: F,
+        config: &FBGenericConfig<F>,
+    ) -> usize {
+        let inner_tolerance = ε * config.inner.tolerance_mult;
+        let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
+        l1squared_unconstrained(y, g, τ * self.α(), 1.0, x, &config.inner, inner_it)
+    }
+
+    fn find_tolerance_violation_slack<G, BT>(
+        &self,
+        d: &mut BTFN<F, G, BT, N>,
+        τ: F,
+        ε: F,
+        skip_by_rough_check: bool,
+        config: &FBGenericConfig<F>,
+        slack: F,
+    ) -> Option<(Loc<F, N>, F, bool)>
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
+        let τα = τ * self.α();
+        let keep_below = τα + slack + ε;
+        let keep_above = -(τα + slack) - ε;
+        let maximise_above = τα + slack + ε * config.insertion_cutoff_factor;
+        let minimise_below = -(τα + slack) - ε * config.insertion_cutoff_factor;
+        let refinement_tolerance = ε * config.refinement.tolerance_mult;
+
+        // If the preliminary check indicates that we are in bounds, and if it otherwise matches
+        // the insertion strategy, skip insertion.
+        if skip_by_rough_check && Bounds(keep_above, keep_below).superset(&d.bounds()) {
+            None
+        } else {
+            // If the rough check didn't rule out insertion, find the extremal points.
+            let mx = d.maximise_above(
+                maximise_above,
+                refinement_tolerance,
+                config.refinement.max_steps,
+            );
+            let mi = d.minimise_below(
+                minimise_below,
+                refinement_tolerance,
+                config.refinement.max_steps,
+            );
+
+            match (mx, mi) {
+                (None, None) => None,
+                (Some((ξ, v_ξ)), None) => Some((ξ, v_ξ, keep_below >= v_ξ)),
+                (None, Some((ζ, v_ζ))) => Some((ζ, v_ζ, keep_above <= v_ζ)),
+                (Some((ξ, v_ξ)), Some((ζ, v_ζ))) => {
+                    if v_ξ - τα > τα - v_ζ {
+                        Some((ξ, v_ξ, keep_below >= v_ξ))
+                    } else {
+                        Some((ζ, v_ζ, keep_above <= v_ζ))
+                    }
+                }
+            }
+        }
+    }
+
+    fn verify_merge_candidate<G, BT>(
+        &self,
+        d: &mut BTFN<F, G, BT, N>,
+        μ: &RNDM<F, N>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+    ) -> bool
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
+        let τα = τ * self.α();
+        let refinement_tolerance = ε * config.refinement.tolerance_mult;
+        let merge_tolerance = config.merge_tolerance_mult * ε;
+        let keep_below = τα + merge_tolerance;
+        let keep_above = -τα - merge_tolerance;
+        let keep_supp_pos_above = τα - merge_tolerance;
+        let keep_supp_neg_below = -τα + merge_tolerance;
+        let bnd = d.bounds();
+
+        return ((bnd.lower() >= keep_supp_pos_above && bnd.upper() <= keep_supp_neg_below)
+            || μ
+                .iter_spikes()
+                .all(|&DeltaMeasure { α: β, ref x }| match β.partial_cmp(&0.0) {
+                    Some(Greater) => d.apply(x) >= keep_supp_pos_above,
+                    Some(Less) => d.apply(x) <= keep_supp_neg_below,
+                    _ => true,
+                }))
+            && (bnd.upper() <= keep_below
+                || d.has_upper_bound(
+                    keep_below,
+                    refinement_tolerance,
+                    config.refinement.max_steps,
+                ))
+            && (bnd.lower() >= keep_above
+                || d.has_lower_bound(
+                    keep_above,
+                    refinement_tolerance,
+                    config.refinement.max_steps,
+                ));
+    }
+
+    fn verify_merge_candidate_radonsq<G, BT>(
+        &self,
+        d: &mut BTFN<F, G, BT, N>,
+        μ: &RNDM<F, N>,
+        τ: F,
+        ε: F,
+        config: &FBGenericConfig<F>,
+        radon_μ: &RNDM<F, N>,
+    ) -> bool
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
+        let τα = τ * self.α();
+        let refinement_tolerance = ε * config.refinement.tolerance_mult;
+        let merge_tolerance = config.merge_tolerance_mult * ε;
+        let slack = radon_μ.norm(Radon);
+        let bnd = d.bounds();
+
+        return {
+            μ.both_matching(radon_μ).all(|(α, rα, x)| {
+                let v = d.apply(x);
+                let (l1, u1) = match α.partial_cmp(&0.0).unwrap_or(Equal) {
+                    Greater => (τα, τα),
+                    Equal => (-τα, τα),
+                    Less => (-τα, -τα),
+                };
+                let (l2, u2) = match rα.partial_cmp(&0.0).unwrap_or(Equal) {
+                    Greater => (slack, slack),
+                    Equal => (-slack, slack),
+                    Less => (-slack, -slack),
+                };
+                (l1 + l2 - merge_tolerance <= v) && (v <= u1 + u2 + merge_tolerance)
+            })
+        } && {
+            let keep_below = τα + slack + merge_tolerance;
+            bnd.upper() <= keep_below
+                || d.has_upper_bound(
+                    keep_below,
+                    refinement_tolerance,
+                    config.refinement.max_steps,
+                )
+        } && {
+            let keep_above = -τα - slack - merge_tolerance;
+            bnd.lower() >= keep_above
+                || d.has_lower_bound(
+                    keep_above,
+                    refinement_tolerance,
+                    config.refinement.max_steps,
+                )
+        };
+    }
+
+    fn target_bounds(&self, τ: F, ε: F) -> Option<Bounds<F>> {
+        let τα = τ * self.α();
+        Some(Bounds(-τα - ε, τα + ε))
+    }
+
+    fn tolerance_scaling(&self) -> F {
+        self.α()
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F: Float + ToNalgebraRealField, const N: usize> SlidingRegTerm<F, N> for RadonRegTerm<F>
+where
+    Cube<F, N>: P2Minimise<Loc<F, N>, F>,
+{
+    fn goodness<G, BT>(
+        &self,
+        d: &mut BTFN<F, G, BT, N>,
+        _μ: &RNDM<F, N>,
+        y: &Loc<F, N>,
+        z: &Loc<F, N>,
+        τ: F,
+        ε: F,
+        _config: &FBGenericConfig<F>,
+    ) -> F
+    where
+        BT: BTSearch<F, N, Agg = Bounds<F>>,
+        G: SupportGenerator<F, N, Id = BT::Data>,
+        G::SupportType: Mapping<Loc<F, N>, Codomain = F> + LocalAnalysis<F, Bounds<F>, N>,
+    {
+        let α = self.α();
+        let w = |x| {
+            let dx = d.apply(x);
+            ((-ε + dx) / (τ * α)).max(1.0.min((ε + dx) / (τ * α)))
+        };
+        w(z) - w(y)
+    }
+
+    fn radon_norm_bound(&self, b: F) -> F {
+        b / self.α()
+    }
+}
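+
+// A summary sketch of the bounds encoded by the two implementations above:
+// writing τα for τ · α, the non-negativity constrained term accepts d when
+// d ≥ −τα − ε everywhere and d ≤ −τα + ε on supp(μ), while the unconstrained
+// Radon term requires −τα − ε ≤ d ≤ τα + ε everywhere, with d ≈ τα on
+// positive spikes and d ≈ −τα on negative spikes (up to the merge tolerance).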
--- a/src/run.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/run.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -26,53 +26,116 @@
     AlgIteratorOptions,
     Verbose,
     AlgIteratorFactory,
+    LoggingIteratorFactory,
+    TimingIteratorFactory,
+    BasicAlgIteratorFactory,
 };
 use alg_tools::logger::Logger;
-use alg_tools::error::DynError;
+use alg_tools::error::{
+    DynError,
+    DynResult,
+};
 use alg_tools::tabledump::TableDump;
 use alg_tools::sets::Cube;
-use alg_tools::mapping::RealMapping;
+use alg_tools::mapping::{
+    RealMapping,
+    DifferentiableMapping,
+    DifferentiableRealMapping,
+    Instance
+};
 use alg_tools::nalgebra_support::ToNalgebraRealField;
 use alg_tools::euclidean::Euclidean;
-use alg_tools::norms::L1;
-use alg_tools::lingrid::lingrid;
+use alg_tools::lingrid::{lingrid, LinSpace};
 use alg_tools::sets::SetOrd;
+use alg_tools::linops::{RowOp, IdOp /*, ZeroOp*/};
+use alg_tools::discrete_gradient::{Grad, ForwardNeumann};
+use alg_tools::convex::Zero;
+use alg_tools::maputil::map3;
+use alg_tools::direct_product::Pair;
 
 use crate::kernels::*;
 use crate::types::*;
 use crate::measures::*;
-use crate::measures::merging::SpikeMerging;
+use crate::measures::merging::{SpikeMerging,SpikeMergingMethod};
 use crate::forward_model::*;
+use crate::forward_model::sensor_grid::{
+    SensorGrid,
+    SensorGridBT,
+    //SensorGridBTFN,
+    Sensor,
+    Spread,
+};
+
 use crate::fb::{
     FBConfig,
+    FBGenericConfig,
     pointsource_fb_reg,
-    FBMetaAlgorithm,
-    FBGenericConfig,
+    pointsource_fista_reg,
+};
+use crate::sliding_fb::{
+    SlidingFBConfig,
+    TransportConfig,
+    pointsource_sliding_fb_reg
+};
+use crate::sliding_pdps::{
+    SlidingPDPSConfig,
+    pointsource_sliding_pdps_pair
+};
+use crate::forward_pdps::{
+    ForwardPDPSConfig,
+    pointsource_forward_pdps_pair
 };
 use crate::pdps::{
     PDPSConfig,
-    L2Squared,
     pointsource_pdps_reg,
 };
 use crate::frank_wolfe::{
     FWConfig,
     FWVariant,
     pointsource_fw_reg,
-    WeightOptim,
+    //WeightOptim,
 };
-use crate::subproblem::InnerSettings;
+use crate::subproblem::{InnerSettings, InnerMethod};
 use crate::seminorms::*;
 use crate::plot::*;
 use crate::{AlgorithmOverrides, CommandLineArgs};
 use crate::tolerance::Tolerance;
-use crate::regularisation::{Regularisation, RadonRegTerm, NonnegRadonRegTerm};
+use crate::regularisation::{
+    Regularisation,
+    RadonRegTerm,
+    NonnegRadonRegTerm
+};
+use crate::dataterm::{
+    L1,
+    L2Squared,
+};
+use crate::prox_penalty::{
+    RadonSquared,
+    //ProxPenalty,
+};
+use alg_tools::norms::{L2, NormExponent};
+use alg_tools::operator_arithmetic::Weighted;
+use anyhow::anyhow;
+
+/// Available proximal terms
+#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
+pub enum ProxTerm {
+    /// Partial-to-wave operator 𝒟.
+    Wave,
+    /// Radon-norm squared
+    RadonSquared
+}
 
 /// Available algorithms and their configurations
 #[derive(Copy, Clone, Debug, Serialize, Deserialize)]
 pub enum AlgorithmConfig<F : Float> {
-    FB(FBConfig<F>),
+    FB(FBConfig<F>, ProxTerm),
+    FISTA(FBConfig<F>, ProxTerm),
     FW(FWConfig<F>),
-    PDPS(PDPSConfig<F>),
+    PDPS(PDPSConfig<F>, ProxTerm),
+    SlidingFB(SlidingFBConfig<F>, ProxTerm),
+    ForwardPDPS(ForwardPDPSConfig<F>, ProxTerm),
+    SlidingPDPS(SlidingPDPSConfig<F>, ProxTerm),
 }
 
 fn unpack_tolerance<F : Float>(v : &Vec<F>) -> Tolerance<F> {
@@ -83,6 +146,13 @@
 impl<F : ClapFloat> AlgorithmConfig<F> {
     /// Override supported parameters based on the command line.
     pub fn cli_override(self, cli : &AlgorithmOverrides<F>) -> Self {
+        let override_merging = |g : SpikeMergingMethod<F>| {
+            SpikeMergingMethod {
+                enabled : cli.merge.unwrap_or(g.enabled),
+                radius : cli.merge_radius.unwrap_or(g.radius),
+                interp : cli.merge_interp.unwrap_or(g.interp),
+            }
+        };
         let override_fb_generic = |g : FBGenericConfig<F>| {
             FBGenericConfig {
                 bootstrap_insertions : cli.bootstrap_insertions
@@ -90,37 +160,74 @@
                                           .map_or(g.bootstrap_insertions,
                                                   |n| Some((n[0], n[1]))),
                 merge_every : cli.merge_every.unwrap_or(g.merge_every),
-                merging : cli.merging.clone().unwrap_or(g.merging),
-                final_merging : cli.final_merging.clone().unwrap_or(g.final_merging),
+                merging : override_merging(g.merging),
+                final_merging : cli.final_merging.unwrap_or(g.final_merging),
+                fitness_merging : cli.fitness_merging.unwrap_or(g.fitness_merging),
                 tolerance: cli.tolerance.as_ref().map(unpack_tolerance).unwrap_or(g.tolerance),
                 .. g
             }
         };
+        let override_transport = |g : TransportConfig<F>| {
+            TransportConfig {
+                θ0 : cli.theta0.unwrap_or(g.θ0),
+                tolerance_mult_con: cli.transport_tolerance_pos.unwrap_or(g.tolerance_mult_con),
+                adaptation: cli.transport_adaptation.unwrap_or(g.adaptation),
+                .. g
+            }
+        };
 
         use AlgorithmConfig::*;
         match self {
-            FB(fb) => FB(FBConfig {
+            FB(fb, prox) => FB(FBConfig {
                 τ0 : cli.tau0.unwrap_or(fb.τ0),
-                insertion : override_fb_generic(fb.insertion),
+                generic : override_fb_generic(fb.generic),
                 .. fb
-            }),
-            PDPS(pdps) => PDPS(PDPSConfig {
+            }, prox),
+            FISTA(fb, prox) => FISTA(FBConfig {
+                τ0 : cli.tau0.unwrap_or(fb.τ0),
+                generic : override_fb_generic(fb.generic),
+                .. fb
+            }, prox),
+            PDPS(pdps, prox) => PDPS(PDPSConfig {
                 τ0 : cli.tau0.unwrap_or(pdps.τ0),
                 σ0 : cli.sigma0.unwrap_or(pdps.σ0),
                 acceleration : cli.acceleration.unwrap_or(pdps.acceleration),
-                insertion : override_fb_generic(pdps.insertion),
+                generic : override_fb_generic(pdps.generic),
                 .. pdps
-            }),
+            }, prox),
             FW(fw) => FW(FWConfig {
-                merging : cli.merging.clone().unwrap_or(fw.merging),
+                merging : override_merging(fw.merging),
                 tolerance : cli.tolerance.as_ref().map(unpack_tolerance).unwrap_or(fw.tolerance),
                 .. fw
-            })
+            }),
+            SlidingFB(sfb, prox) => SlidingFB(SlidingFBConfig {
+                τ0 : cli.tau0.unwrap_or(sfb.τ0),
+                transport : override_transport(sfb.transport),
+                insertion : override_fb_generic(sfb.insertion),
+                .. sfb
+            }, prox),
+            SlidingPDPS(spdps, prox) => SlidingPDPS(SlidingPDPSConfig {
+                τ0 : cli.tau0.unwrap_or(spdps.τ0),
+                σp0 : cli.sigmap0.unwrap_or(spdps.σp0),
+                σd0 : cli.sigma0.unwrap_or(spdps.σd0),
+                //acceleration : cli.acceleration.unwrap_or(pdps.acceleration),
+                transport : override_transport(spdps.transport),
+                insertion : override_fb_generic(spdps.insertion),
+                .. spdps
+            }, prox),
+            ForwardPDPS(fpdps, prox) => ForwardPDPS(ForwardPDPSConfig {
+                τ0 : cli.tau0.unwrap_or(fpdps.τ0),
+                σp0 : cli.sigmap0.unwrap_or(fpdps.σp0),
+                σd0 : cli.sigma0.unwrap_or(fpdps.σd0),
+                //acceleration : cli.acceleration.unwrap_or(pdps.acceleration),
+                insertion : override_fb_generic(fpdps.insertion),
+                .. fpdps
+            }, prox),
         }
     }
 }
 
-/// Helper struct for tagging and [`AlgorithmConfig`] or [`Experiment`] with a name.
+/// Helper struct for tagging an [`AlgorithmConfig`] or [`ExperimentV2`] with a name.
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct Named<Data> {
     pub name : String,
@@ -146,24 +253,89 @@
     /// The μPDPS primal-dual proximal splitting method
     #[clap(name = "pdps")]
     PDPS,
+    /// The sliding FB method
+    #[clap(name = "sliding_fb", alias = "sfb")]
+    SlidingFB,
+    /// The sliding PDPS method
+    #[clap(name = "sliding_pdps", alias = "spdps")]
+    SlidingPDPS,
+    /// The PDPS method with a forward step for the smooth function
+    #[clap(name = "forward_pdps", alias = "fpdps")]
+    ForwardPDPS,
+
+    // Radon variants
+
+    /// The μFB forward-backward method with radon-norm squared proximal term
+    #[clap(name = "radon_fb")]
+    RadonFB,
+    /// The μFISTA inertial forward-backward method with radon-norm squared proximal term
+    #[clap(name = "radon_fista")]
+    RadonFISTA,
+    /// The μPDPS primal-dual proximal splitting method with radon-norm squared proximal term
+    #[clap(name = "radon_pdps")]
+    RadonPDPS,
+    /// The sliding FB method with radon-norm squared proximal term
+    #[clap(name = "radon_sliding_fb", alias = "radon_sfb")]
+    RadonSlidingFB,
+    /// The sliding PDPS method with radon-norm squared proximal term
+    #[clap(name = "radon_sliding_pdps", alias = "radon_spdps")]
+    RadonSlidingPDPS,
+    /// The PDPS method with a forward step for the smooth function with radon-norm squared proximal term
+    #[clap(name = "radon_forward_pdps", alias = "radon_fpdps")]
+    RadonForwardPDPS,
 }
 
 impl DefaultAlgorithm {
     /// Returns the algorithm configuration corresponding to the algorithm shorthand
     pub fn default_config<F : Float>(&self) -> AlgorithmConfig<F> {
         use DefaultAlgorithm::*;
+        let radon_insertion = FBGenericConfig {
+            merging : SpikeMergingMethod{ interp : false, .. Default::default() },
+            inner : InnerSettings {
+                method : InnerMethod::PDPS, // SSN not implemented
+                .. Default::default()
+            },
+            .. Default::default()
+        };
         match *self {
-            FB => AlgorithmConfig::FB(Default::default()),
-            FISTA => AlgorithmConfig::FB(FBConfig{
-                meta : FBMetaAlgorithm::InertiaFISTA,
-                .. Default::default()
-            }),
+            FB => AlgorithmConfig::FB(Default::default(), ProxTerm::Wave),
+            FISTA => AlgorithmConfig::FISTA(Default::default(), ProxTerm::Wave),
             FW => AlgorithmConfig::FW(Default::default()),
             FWRelax => AlgorithmConfig::FW(FWConfig{
                 variant : FWVariant::Relaxed,
                 .. Default::default()
             }),
-            PDPS => AlgorithmConfig::PDPS(Default::default()),
+            PDPS => AlgorithmConfig::PDPS(Default::default(), ProxTerm::Wave),
+            SlidingFB => AlgorithmConfig::SlidingFB(Default::default(), ProxTerm::Wave),
+            SlidingPDPS => AlgorithmConfig::SlidingPDPS(Default::default(), ProxTerm::Wave),
+            ForwardPDPS => AlgorithmConfig::ForwardPDPS(Default::default(), ProxTerm::Wave),
+
+            // Radon variants
+
+            RadonFB => AlgorithmConfig::FB(
+                FBConfig{ generic : radon_insertion, ..Default::default() },
+                ProxTerm::RadonSquared
+            ),
+            RadonFISTA => AlgorithmConfig::FISTA(
+                FBConfig{ generic : radon_insertion, ..Default::default() },
+                ProxTerm::RadonSquared
+            ),
+            RadonPDPS => AlgorithmConfig::PDPS(
+                PDPSConfig{ generic : radon_insertion, ..Default::default() },
+                ProxTerm::RadonSquared
+            ),
+            RadonSlidingFB => AlgorithmConfig::SlidingFB(
+                SlidingFBConfig{ insertion : radon_insertion, ..Default::default() },
+                ProxTerm::RadonSquared
+            ),
+            RadonSlidingPDPS => AlgorithmConfig::SlidingPDPS(
+                SlidingPDPSConfig{ insertion : radon_insertion, ..Default::default() },
+                ProxTerm::RadonSquared
+            ),
+            RadonForwardPDPS => AlgorithmConfig::ForwardPDPS(
+                ForwardPDPSConfig{ insertion : radon_insertion, ..Default::default() },
+                ProxTerm::RadonSquared
+            ),
         }
     }
 
@@ -201,6 +373,12 @@
     Iter,
 }
 
+impl Default for PlotLevel {
+    fn default() -> Self {
+        Self::Data
+    }
+}
+
 type DefaultBT<F, const N : usize> = BT<
     DynamicDepth,
     F,
@@ -223,7 +401,8 @@
     iter : usize,
     cpu_time : f64,
     value : F,
-    post_value : F,
+    relative_value : F,
+    //post_value : F,
     n_spikes : usize,
     inner_iters : usize,
     merged : usize,
@@ -278,7 +457,7 @@
 /// Struct for experiment configurations
 #[derive(Debug, Clone, Serialize)]
 pub struct ExperimentV2<F, NoiseDistr, S, K, P, const N : usize>
-where F : Float,
+where F : Float + ClapFloat,
       [usize; N] : Serialize,
       NoiseDistr : Distribution<F>,
       S : Sensor<F, N>,
@@ -300,7 +479,7 @@
     /// Kernel $ρ$ of $𝒟$.
     pub kernel : K,
     /// True point sources
-    pub μ_hat : DiscreteMeasure<Loc<F, N>, F>,
+    pub μ_hat : RNDM<F, N>,
     /// Regularisation term and parameter
     pub regularisation : Regularisation<F>,
     /// For plotting : how wide should the kernels be plotted
@@ -308,8 +487,27 @@
     /// Data term
     pub dataterm : DataTerm,
     /// A map of default configurations for algorithms
-    #[serde(skip)]
-    pub algorithm_defaults : HashMap<DefaultAlgorithm, AlgorithmConfig<F>>,
+    pub algorithm_overrides : HashMap<DefaultAlgorithm, AlgorithmOverrides<F>>,
+    /// Default merge radius
+    pub default_merge_radius : F,
+}
+
+#[derive(Debug, Clone, Serialize)]
+pub struct ExperimentBiased<F, NoiseDistr, S, K, P, B, const N : usize>
+where F : Float + ClapFloat,
+      [usize; N] : Serialize,
+      NoiseDistr : Distribution<F>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      K : SimpleConvolutionKernel<F, N>,
+      B : Mapping<Loc<F, N>, Codomain = F> + Serialize + std::fmt::Debug,
+{
+    /// Basic setup
+    pub base : ExperimentV2<F, NoiseDistr, S, K, P, N>,
+    /// Weight of TV term
+    pub λ : F,
+    /// Bias function
+    pub bias : B,
 }
 
 /// Trait for runnable experiments
@@ -319,41 +517,190 @@
               algs : Option<Vec<Named<AlgorithmConfig<F>>>>) -> DynError;
 
     /// Return algorithm default config
-    fn algorithm_defaults(&self, alg : DefaultAlgorithm, cli : &AlgorithmOverrides<F>)
-    -> Named<AlgorithmConfig<F>>;
+    fn algorithm_overrides(&self, alg : DefaultAlgorithm) -> AlgorithmOverrides<F>;
+}
+
+/// Helper function to print experiment start message and save setup.
+/// Returns saving prefix.
+fn start_experiment<E, S>(
+    experiment : &Named<E>,
+    cli : &CommandLineArgs,
+    stats : S,
+) -> DynResult<String>
+where
+    E : Serialize + std::fmt::Debug,
+    S : Serialize,
+{
+    let Named { name : experiment_name, data } = experiment;
+
+    println!("{}\n{}",
+             format!("Performing experiment {}…", experiment_name).cyan(),
+             format!("Experiment settings: {}", serde_json::to_string(&data)?).bright_black());
+
+    // Set up output directory
+    let prefix = format!("{}/{}/", cli.outdir, experiment_name);
+
+    // Save experiment configuration and statistics
+    let mkname_e = |t| format!("{prefix}{t}.json");
+    std::fs::create_dir_all(&prefix)?;
+    write_json(mkname_e("experiment"), experiment)?;
+    write_json(mkname_e("config"), cli)?;
+    write_json(mkname_e("stats"), &stats)?;
+
+    Ok(prefix)
+}
+
+/// Error codes for running an algorithm on an experiment.
+enum RunError {
+    /// Algorithm not implemented for this experiment
+    NotImplemented,
 }
 
-// *** macro boilerplate ***
-macro_rules! impl_experiment {
-($type:ident, $reg_field:ident, $reg_convert:path) => {
-// *** macro ***
-impl<F, NoiseDistr, S, K, P, const N : usize> RunnableExperiment<F> for
-Named<$type<F, NoiseDistr, S, K, P, N>>
-where F : ClapFloat + nalgebra::RealField + ToNalgebraRealField<MixedType=F>,
-      [usize; N] : Serialize,
-      S : Sensor<F, N> + Copy + Serialize + std::fmt::Debug,
-      P : Spread<F, N> + Copy + Serialize + std::fmt::Debug,
-      Convolution<S, P>: Spread<F, N> + Bounded<F> + LocalAnalysis<F, Bounds<F>, N> + Copy,
-      AutoConvolution<P> : BoundedBy<F, K>,
-      K : SimpleConvolutionKernel<F, N> + LocalAnalysis<F, Bounds<F>, N> 
-          + Copy + Serialize + std::fmt::Debug,
-      Cube<F, N>: P2Minimise<Loc<F, N>, F> + SetOrd,
-      PlotLookup : Plotting<N>,
-      DefaultBT<F, N> : SensorGridBT<F, S, P, N, Depth=DynamicDepth> + BTSearch<F, N>,
-      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
-      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
-      NoiseDistr : Distribution<F> + Serialize + std::fmt::Debug {
+use RunError::*;
+
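+/// Iterator factory type handed to the `do_alg` closure of [`do_runall`]:
+/// it times each iteration and logs the timed iteration records.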
+type DoRunAllIt<'a, F, const N : usize> = LoggingIteratorFactory<
+    'a,
+    Timed<IterInfo<F, N>>,
+    TimingIteratorFactory<BasicAlgIteratorFactory<IterInfo<F, N>>>
+>;
+
+/// Helper function to run all algorithms on an experiment.
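+///
+/// For each algorithm, `do_alg` performs the actual computation, returning the
+/// reconstructed measure together with additional data `Z`, or
+/// [`RunError::NotImplemented`] if the combination of settings is unsupported;
+/// `save_extra` persists the additional data. The collected logs are written
+/// out at the end via [`save_logs`].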
+fn do_runall<F : Float + for<'b> Deserialize<'b>, Z, const N : usize>(
+    experiment_name : &String,
+    prefix : &String,
+    cli : &CommandLineArgs,
+    algorithms : Vec<Named<AlgorithmConfig<F>>>,
+    plotgrid : LinSpace<Loc<F, N>, [usize; N]>,
+    mut save_extra : impl FnMut(String, Z) -> DynError,
+    mut do_alg : impl FnMut(
+        &AlgorithmConfig<F>,
+        DoRunAllIt<F, N>,
+        SeqPlotter<F, N>,
+        String,
+    ) -> Result<(RNDM<F, N>, Z), RunError>,
+) ->  DynError
+where
+    PlotLookup : Plotting<N>,
+{
+    let mut logs = Vec::new();
+
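+    // Unless an explicit verbosity interval is given on the command line,
+    // report at logarithmically spaced iterations.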
+    let iterator_options = AlgIteratorOptions{
+            max_iter : cli.max_iter,
+            verbose_iter : cli.verbose_iter
+                                .map_or(Verbose::LogarithmicCap{base : 10, cap : 2},
+                                        |n| Verbose::Every(n)),
+            quiet : cli.quiet,
+    };
+
+    // Run the algorithm(s)
+    for named @ Named { name : alg_name, data : alg } in algorithms.iter() {
+        let this_prefix = format!("{}{}/", prefix, alg_name);
+
+        // Create Logger and IteratorFactory
+        let mut logger = Logger::new();
+        let iterator = iterator_options.instantiate()
+                                        .timed()
+                                        .into_log(&mut logger);
+
+        let running = if !cli.quiet {
+            format!("{}\n{}\n{}\n",
+                    format!("Running {} on experiment {}…", alg_name, experiment_name).cyan(),
+                    format!("Iteration settings: {}", serde_json::to_string(&iterator_options)?).bright_black(),
+                    format!("Algorithm settings: {}", serde_json::to_string(&alg)?).bright_black())
+        } else {
+            "".to_string()
+        };
+        //
+        // The following is for postprocessing, which is currently disabled.
+        //
+        // let reg : Box<dyn WeightOptim<_, _, _, N>> = match regularisation {
+        //     Regularisation::Radon(α) => Box::new(RadonRegTerm(α)),
+        //     Regularisation::NonnegRadon(α) => Box::new(NonnegRadonRegTerm(α)),
+        // };
+        //let findim_data = reg.prepare_optimise_weights(&opA, &b);
+        //let inner_config : InnerSettings<F> = Default::default();
+        //let inner_it = inner_config.iterator_options;
+
+        // Create plotter and directory if needed.
+        let plot_count = if cli.plot >= PlotLevel::Iter { 2000 } else { 0 };
+        let plotter = SeqPlotter::new(this_prefix, plot_count, plotgrid.clone());
+
+        let start = Instant::now();
+        let start_cpu = ProcessTime::now();
 
-    fn algorithm_defaults(&self, alg : DefaultAlgorithm, cli : &AlgorithmOverrides<F>)
-    -> Named<AlgorithmConfig<F>> {
-        alg.to_named(
-            self.data
-                .algorithm_defaults
-                .get(&alg)
-                .map_or_else(|| alg.default_config(),
-                            |config| config.clone())
-                .cli_override(cli)
-        )
+        let (μ, z) = match do_alg(alg, iterator, plotter, running) {
+            Ok(μ) => μ,
+            Err(RunError::NotImplemented) => {
+                let msg = format!("Algorithm “{alg_name}” not implemented for {experiment_name}. \
+                                   Skipping.").red();
+                eprintln!("{}", msg);
+                continue
+            }
+        };
+
+        let elapsed = start.elapsed().as_secs_f64();
+        let cpu_time = start_cpu.elapsed().as_secs_f64();
+
+        println!("{}", format!("Elapsed {elapsed}s (CPU time {cpu_time}s)… ").yellow());
+
+        // Save results
+        println!("{}", "Saving results …".green());
+
+        let mkname = |t| format!("{prefix}{alg_name}_{t}");
+
+        write_json(mkname("config.json"), &named)?;
+        write_json(mkname("stats.json"), &AlgorithmStats { cpu_time, elapsed })?;
+        μ.write_csv(mkname("reco.txt"))?;
+        save_extra(mkname(""), z)?;
+        //logger.write_csv(mkname("log.txt"))?;
+        logs.push((mkname("log.txt"), logger));
+    }
+
+    save_logs(logs, format!("{prefix}valuerange.json"), cli.load_valuerange)
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, NoiseDistr, S, K, P, /*PreadjointCodomain, */ const N : usize> RunnableExperiment<F> for
+Named<ExperimentV2<F, NoiseDistr, S, K, P, N>>
+where
+    F : ClapFloat + nalgebra::RealField + ToNalgebraRealField<MixedType=F>
+        + Default + for<'b> Deserialize<'b>,
+    [usize; N] : Serialize,
+    S : Sensor<F, N> + Copy + Serialize + std::fmt::Debug,
+    P : Spread<F, N> + Copy + Serialize + std::fmt::Debug,
+    Convolution<S, P>: Spread<F, N> + Bounded<F> + LocalAnalysis<F, Bounds<F>, N> + Copy
+                        // TODO: should not have differentiability as a requirement, but
+                        // decide availability of sliding based on it.
+                        //+ for<'b> Differentiable<&'b Loc<F, N>, Output = Loc<F, N>>,
+                        // TODO: very weird that Rust only compiles with Differentiable
+                        // instead of the above one on references, which is required by
+                        // pointsource_sliding_fb_reg.
+                        + DifferentiableRealMapping<F, N>
+                        + Lipschitz<L2, FloatType=F>,
+    for<'b> <Convolution<S, P> as DifferentiableMapping<Loc<F,N>>>::Differential<'b> : Lipschitz<L2, FloatType=F>, // TODO: should not be required generally, only for sliding_fb.
+    AutoConvolution<P> : BoundedBy<F, K>,
+    K : SimpleConvolutionKernel<F, N>
+        + LocalAnalysis<F, Bounds<F>, N>
+        + Copy + Serialize + std::fmt::Debug,
+    Cube<F, N>: P2Minimise<Loc<F, N>, F> + SetOrd,
+    PlotLookup : Plotting<N>,
+    DefaultBT<F, N> : SensorGridBT<F, S, P, N, Depth=DynamicDepth> + BTSearch<F, N>,
+    BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
+    RNDM<F, N> : SpikeMerging<F>,
+    NoiseDistr : Distribution<F> + Serialize + std::fmt::Debug,
+    // DefaultSG<F, S, P, N> : ForwardModel<RNDM<F, N>, F, PreadjointCodomain = PreadjointCodomain, Observable=DVector<F::MixedType>>,
+    // PreadjointCodomain : Space + Bounded<F> + DifferentiableRealMapping<F, N>,
+    // DefaultSeminormOp<F, K, N> : ProxPenalty<F, PreadjointCodomain, RadonRegTerm<F>, N>,
+    // DefaultSeminormOp<F, K, N> : ProxPenalty<F, PreadjointCodomain, NonnegRadonRegTerm<F>, N>,
+    // RadonSquared : ProxPenalty<F, PreadjointCodomain, RadonRegTerm<F>, N>,
+    // RadonSquared : ProxPenalty<F, PreadjointCodomain, NonnegRadonRegTerm<F>, N>,
+{
+
+    fn algorithm_overrides(&self, alg : DefaultAlgorithm) -> AlgorithmOverrides<F> {
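+        // Always supply the experiment's default merge radius; all other
+        // settings come from the experiment's stored per-algorithm overrides,
+        // if any.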
+        AlgorithmOverrides {
+            merge_radius : Some(self.data.default_merge_radius),
+            .. self.data.algorithm_overrides.get(&alg).cloned().unwrap_or(Default::default())
+        }
     }
 
     fn runall(&self, cli : &CommandLineArgs,
@@ -361,31 +708,15 @@
         // Get experiment configuration
         let &Named {
             name : ref experiment_name,
-            data : $type {
+            data : ExperimentV2 {
                 domain, sensor_count, ref noise_distr, sensor, spread, kernel,
-                ref μ_hat, /*regularisation,*/ kernel_plot_width, dataterm, noise_seed,
+                ref μ_hat, regularisation, kernel_plot_width, dataterm, noise_seed,
                 ..
             }
         } = self;
-        #[allow(deprecated)]
-        let regularisation = $reg_convert(self.data.$reg_field);
-
-        println!("{}\n{}",
-                 format!("Performing experiment {}…", experiment_name).cyan(),
-                 format!("{:?}", &self.data).bright_black());
-
-        // Set up output directory
-        let prefix = format!("{}/{}/", cli.outdir, self.name);
 
         // Set up algorithms
-        let iterator_options = AlgIteratorOptions{
-                max_iter : cli.max_iter,
-                verbose_iter : cli.verbose_iter
-                                  .map_or(Verbose::Logarithmic(10),
-                                          |n| Verbose::Every(n)),
-                quiet : cli.quiet,
-        };
-        let algorithms = match (algs, self.data.dataterm) {
+        let algorithms = match (algs, dataterm) {
             (Some(algs), _) => algs,
             (None, DataTerm::L2Squared) => vec![DefaultAlgorithm::FB.get_named()],
             (None, DataTerm::L1) => vec![DefaultAlgorithm::PDPS.get_named()],
@@ -407,186 +738,492 @@
         // overloading log10 and conflicting with standard NumTraits one.
         let stats = ExperimentStats::new(&b, &noise);
 
-        // Save experiment configuration and statistics
-        let mkname_e = |t| format!("{prefix}{t}.json", prefix = prefix, t = t);
-        std::fs::create_dir_all(&prefix)?;
-        write_json(mkname_e("experiment"), self)?;
-        write_json(mkname_e("config"), cli)?;
-        write_json(mkname_e("stats"), &stats)?;
+        let prefix = start_experiment(&self, cli, stats)?;
 
         plotall(cli, &prefix, &domain, &sensor, &kernel, &spread,
                 &μ_hat, &op𝒟, &opA, &b_hat, &b, kernel_plot_width)?;
 
-        // Run the algorithm(s)
-        for named @ Named { name : alg_name, data : alg } in algorithms.iter() {
-            let this_prefix = format!("{}{}/", prefix, alg_name);
+        let plotgrid = lingrid(&domain, &[if N==1 { 1000 } else { 100 }; N]);
+
+        let save_extra = |_, ()| Ok(());
 
-            let running = || if !cli.quiet {
-                println!("{}\n{}\n{}",
-                        format!("Running {} on experiment {}…", alg_name, experiment_name).cyan(),
-                        format!("{:?}", iterator_options).bright_black(),
-                        format!("{:?}", alg).bright_black());
-            };
-            let not_implemented = || {
-                let msg = format!("Algorithm “{alg_name}” not implemented for \
-                                   dataterm {dataterm:?} and regularisation {regularisation:?}. \
-                                   Skipping.").red();
-                eprintln!("{}", msg);
-            };
-            // Create Logger and IteratorFactory
-            let mut logger = Logger::new();
-            let reg : Box<dyn WeightOptim<_, _, _, N>> = match regularisation {
-                Regularisation::Radon(α) => Box::new(RadonRegTerm(α)),
-                Regularisation::NonnegRadon(α) => Box::new(NonnegRadonRegTerm(α)),
-            };
-            let findim_data = reg.prepare_optimise_weights(&opA, &b);
-            let inner_config : InnerSettings<F> = Default::default();
-            let inner_it = inner_config.iterator_options;
-            let logmap = |iter, Timed { cpu_time, data }| {
-                let IterInfo {
-                    value,
-                    n_spikes,
-                    inner_iters,
-                    merged,
-                    pruned,
-                    postprocessing,
-                    this_iters,
-                    ..
-                } = data;
-                let post_value = match (postprocessing, dataterm) {
-                    (Some(mut μ), DataTerm::L2Squared) => {
-                        // Comparison postprocessing is only implemented for the case handled
-                        // by the FW variants.
-                        reg.optimise_weights(
-                            &mut μ, &opA, &b, &findim_data, &inner_config,
-                            inner_it
-                        );
-                        dataterm.value_at_residual(opA.apply(&μ) - &b)
-                            + regularisation.apply(&μ)
-                    },
-                    _ => value,
-                };
-                CSVLog {
-                    iter,
-                    value,
-                    post_value,
-                    n_spikes,
-                    cpu_time : cpu_time.as_secs_f64(),
-                    inner_iters,
-                    merged,
-                    pruned,
-                    this_iters
-                }
-            };
-            let iterator = iterator_options.instantiate()
-                                           .timed()
-                                           .mapped(logmap)
-                                           .into_log(&mut logger);
-            let plotgrid = lingrid(&domain, &[if N==1 { 1000 } else { 100 }; N]);
-
-            // Create plotter and directory if needed.
-            let plot_count = if cli.plot >= PlotLevel::Iter { 2000 } else { 0 };
-            let plotter = SeqPlotter::new(this_prefix, plot_count, plotgrid);
-
-            // Run the algorithm
-            let start = Instant::now();
-            let start_cpu = ProcessTime::now();
+        do_runall(experiment_name, &prefix, cli, algorithms, plotgrid, save_extra,
+            |alg, iterator, plotter, running|
+        {
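+            // Dispatch on the algorithm as well as the regularisation, data,
+            // and proximal terms; unsupported combinations report NotImplemented.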
             let μ = match alg {
-                AlgorithmConfig::FB(ref algconfig) => {
-                    match (regularisation, dataterm) {
-                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => {
-                            running();
+                AlgorithmConfig::FB(ref algconfig, prox) => {
+                    match (regularisation, dataterm, prox) {
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared, ProxTerm::Wave) => Ok({
+                            print!("{running}");
                             pointsource_fb_reg(
                                 &opA, &b, NonnegRadonRegTerm(α), &op𝒟, algconfig,
                                 iterator, plotter
                             )
-                        },
-                        (Regularisation::Radon(α), DataTerm::L2Squared) => {
-                            running();
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared, ProxTerm::Wave) => Ok({
+                            print!("{running}");
                             pointsource_fb_reg(
                                 &opA, &b, RadonRegTerm(α), &op𝒟, algconfig,
                                 iterator, plotter
                             )
-                        },
-                        _ => {
-                            not_implemented();
-                            continue
-                        }
+                        }),
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared, ProxTerm::RadonSquared) => Ok({
+                            print!("{running}");
+                            pointsource_fb_reg(
+                                &opA, &b, NonnegRadonRegTerm(α), &RadonSquared, algconfig,
+                                iterator, plotter
+                            )
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared, ProxTerm::RadonSquared) => Ok({
+                            print!("{running}");
+                            pointsource_fb_reg(
+                                &opA, &b, RadonRegTerm(α), &RadonSquared, algconfig,
+                                iterator, plotter
+                            )
+                        }),
+                        _ => Err(NotImplemented)
                     }
                 },
-                AlgorithmConfig::PDPS(ref algconfig) => {
-                    running();
-                    match (regularisation, dataterm) {
-                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => {
+                AlgorithmConfig::FISTA(ref algconfig, prox) => {
+                    match (regularisation, dataterm, prox) {
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared, ProxTerm::Wave) => Ok({
+                            print!("{running}");
+                            pointsource_fista_reg(
+                                &opA, &b, NonnegRadonRegTerm(α), &op𝒟, algconfig,
+                                iterator, plotter
+                            )
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared, ProxTerm::Wave) => Ok({
+                            print!("{running}");
+                            pointsource_fista_reg(
+                                &opA, &b, RadonRegTerm(α), &op𝒟, algconfig,
+                                iterator, plotter
+                            )
+                        }),
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared, ProxTerm::RadonSquared) => Ok({
+                            print!("{running}");
+                            pointsource_fista_reg(
+                                &opA, &b, NonnegRadonRegTerm(α), &RadonSquared, algconfig,
+                                iterator, plotter
+                            )
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared, ProxTerm::RadonSquared) => Ok({
+                            print!("{running}");
+                            pointsource_fista_reg(
+                                &opA, &b, RadonRegTerm(α), &RadonSquared, algconfig,
+                                iterator, plotter
+                            )
+                        }),
+                        _ => Err(NotImplemented),
+                    }
+                },
+                AlgorithmConfig::SlidingFB(ref algconfig, prox) => {
+                    match (regularisation, dataterm, prox) {
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared, ProxTerm::Wave) => Ok({
+                            print!("{running}");
+                            pointsource_sliding_fb_reg(
+                                &opA, &b, NonnegRadonRegTerm(α), &op𝒟, algconfig,
+                                iterator, plotter
+                            )
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared, ProxTerm::Wave) => Ok({
+                            print!("{running}");
+                            pointsource_sliding_fb_reg(
+                                &opA, &b, RadonRegTerm(α), &op𝒟, algconfig,
+                                iterator, plotter
+                            )
+                        }),
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared, ProxTerm::RadonSquared) => Ok({
+                            print!("{running}");
+                            pointsource_sliding_fb_reg(
+                                &opA, &b, NonnegRadonRegTerm(α), &RadonSquared, algconfig,
+                                iterator, plotter
+                            )
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared, ProxTerm::RadonSquared) => Ok({
+                            print!("{running}");
+                            pointsource_sliding_fb_reg(
+                                &opA, &b, RadonRegTerm(α), &RadonSquared, algconfig,
+                                iterator, plotter
+                            )
+                        }),
+                        _ => Err(NotImplemented),
+                    }
+                },
+                AlgorithmConfig::PDPS(ref algconfig, prox) => {
+                    print!("{running}");
+                    match (regularisation, dataterm, prox) {
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared, ProxTerm::Wave) => Ok({
                             pointsource_pdps_reg(
                                 &opA, &b, NonnegRadonRegTerm(α), &op𝒟, algconfig,
                                 iterator, plotter, L2Squared
                             )
-                        },
-                        (Regularisation::Radon(α),DataTerm::L2Squared) => {
+                        }),
+                        (Regularisation::Radon(α),DataTerm::L2Squared, ProxTerm::Wave) => Ok({
                             pointsource_pdps_reg(
                                 &opA, &b, RadonRegTerm(α), &op𝒟, algconfig,
                                 iterator, plotter, L2Squared
                             )
-                        },
-                        (Regularisation::NonnegRadon(α), DataTerm::L1) => {
+                        }),
+                        (Regularisation::NonnegRadon(α), DataTerm::L1, ProxTerm::Wave) => Ok({
                             pointsource_pdps_reg(
                                 &opA, &b, NonnegRadonRegTerm(α), &op𝒟, algconfig,
                                 iterator, plotter, L1
                             )
-                        },
-                        (Regularisation::Radon(α), DataTerm::L1) => {
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L1, ProxTerm::Wave) => Ok({
                             pointsource_pdps_reg(
                                 &opA, &b, RadonRegTerm(α), &op𝒟, algconfig,
                                 iterator, plotter, L1
                             )
-                        },
+                        }),
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared, ProxTerm::RadonSquared) => Ok({
+                            pointsource_pdps_reg(
+                                &opA, &b, NonnegRadonRegTerm(α), &RadonSquared, algconfig,
+                                iterator, plotter, L2Squared
+                            )
+                        }),
+                        (Regularisation::Radon(α),DataTerm::L2Squared, ProxTerm::RadonSquared) => Ok({
+                            pointsource_pdps_reg(
+                                &opA, &b, RadonRegTerm(α), &RadonSquared, algconfig,
+                                iterator, plotter, L2Squared
+                            )
+                        }),
+                        (Regularisation::NonnegRadon(α), DataTerm::L1, ProxTerm::RadonSquared) => Ok({
+                            pointsource_pdps_reg(
+                                &opA, &b, NonnegRadonRegTerm(α), &RadonSquared, algconfig,
+                                iterator, plotter, L1
+                            )
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L1, ProxTerm::RadonSquared) => Ok({
+                            pointsource_pdps_reg(
+                                &opA, &b, RadonRegTerm(α), &RadonSquared, algconfig,
+                                iterator, plotter, L1
+                            )
+                        }),
+                        // _ => Err(NotImplemented),
                     }
                 },
                 AlgorithmConfig::FW(ref algconfig) => {
                     match (regularisation, dataterm) {
-                        (Regularisation::Radon(α), DataTerm::L2Squared) => {
-                            running();
+                        (Regularisation::Radon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
                             pointsource_fw_reg(&opA, &b, RadonRegTerm(α),
                                                algconfig, iterator, plotter)
-                        },
-                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => {
-                            running();
+                        }),
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => Ok({
+                            print!("{running}");
                             pointsource_fw_reg(&opA, &b, NonnegRadonRegTerm(α),
                                                algconfig, iterator, plotter)
-                        },
-                        _ => {
-                            not_implemented();
-                            continue
-                        }
+                        }),
+                        _ => Err(NotImplemented),
                     }
-                }
-            };
-
-            let elapsed = start.elapsed().as_secs_f64();
-            let cpu_time = start_cpu.elapsed().as_secs_f64();
-
-            println!("{}", format!("Elapsed {elapsed}s (CPU time {cpu_time}s)… ").yellow());
-
-            // Save results
-            println!("{}", "Saving results…".green());
-
-            let mkname = |t| format!("{prefix}{alg_name}_{t}");
-
-            write_json(mkname("config.json"), &named)?;
-            write_json(mkname("stats.json"), &AlgorithmStats { cpu_time, elapsed })?;
-            μ.write_csv(mkname("reco.txt"))?;
-            logger.write_csv(mkname("log.txt"))?;
-        }
-
-        Ok(())
+                },
+                _ => Err(NotImplemented),
+            }?;
+            Ok((μ, ()))
+        })
     }
 }
-// *** macro end boiler plate ***
-}}
-// *** actual code ***
+
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, NoiseDistr, S, K, P, B, /*PreadjointCodomain,*/ const N : usize> RunnableExperiment<F> for
+Named<ExperimentBiased<F, NoiseDistr, S, K, P, B, N>>
+where
+    F : ClapFloat + nalgebra::RealField + ToNalgebraRealField<MixedType=F>
+        + Default + for<'b> Deserialize<'b>,
+    [usize; N] : Serialize,
+    S : Sensor<F, N> + Copy + Serialize + std::fmt::Debug,
+    P : Spread<F, N> + Copy + Serialize + std::fmt::Debug,
+    Convolution<S, P>: Spread<F, N> + Bounded<F> + LocalAnalysis<F, Bounds<F>, N> + Copy
+                        // TODO: should not have differentiability as a requirement, but
+                        // decide availability of sliding based on it.
+                        //+ for<'b> Differentiable<&'b Loc<F, N>, Output = Loc<F, N>>,
+                        // TODO: very weird that Rust only compiles with Differentiable
+                        // instead of the above one on references, which is required by
+                        // pointsource_sliding_fb_reg.
+                        + DifferentiableRealMapping<F, N>
+                        + Lipschitz<L2, FloatType=F>,
+    for<'b> <Convolution<S, P> as DifferentiableMapping<Loc<F,N>>>::Differential<'b> : Lipschitz<L2, FloatType=F>, // TODO: should not be required generally, only for sliding_fb.
+    AutoConvolution<P> : BoundedBy<F, K>,
+    K : SimpleConvolutionKernel<F, N>
+        + LocalAnalysis<F, Bounds<F>, N>
+        + Copy + Serialize + std::fmt::Debug,
+    Cube<F, N>: P2Minimise<Loc<F, N>, F> + SetOrd,
+    PlotLookup : Plotting<N>,
+    DefaultBT<F, N> : SensorGridBT<F, S, P, N, Depth=DynamicDepth> + BTSearch<F, N>,
+    BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
+    RNDM<F, N> : SpikeMerging<F>,
+    NoiseDistr : Distribution<F> + Serialize + std::fmt::Debug,
+    B : Mapping<Loc<F, N>, Codomain = F> + Serialize + std::fmt::Debug,
+    // DefaultSG<F, S, P, N> : ForwardModel<RNDM<F, N>, F, PreadjointCodomain = PreadjointCodomain, Observable=DVector<F::MixedType>>,
+    // PreadjointCodomain : Bounded<F> + DifferentiableRealMapping<F, N>,
+    // DefaultSeminormOp<F, K, N> :  ProxPenalty<F, PreadjointCodomain, RadonRegTerm<F>, N>,
+    // DefaultSeminormOp<F, K, N> :  ProxPenalty<F, PreadjointCodomain, NonnegRadonRegTerm<F>, N>,
+    // RadonSquared : ProxPenalty<F, PreadjointCodomain, RadonRegTerm<F>, N>,
+    // RadonSquared : ProxPenalty<F, PreadjointCodomain, NonnegRadonRegTerm<F>, N>,
+{
+
+    fn algorithm_overrides(&self, alg : DefaultAlgorithm) -> AlgorithmOverrides<F> {
+        AlgorithmOverrides {
+            merge_radius : Some(self.data.base.default_merge_radius),
+            .. self.data.base.algorithm_overrides.get(&alg).cloned().unwrap_or(Default::default())
+        }
+    }
+
+    fn runall(&self, cli : &CommandLineArgs,
+              algs : Option<Vec<Named<AlgorithmConfig<F>>>>) -> DynError {
+        // Get experiment configuration
+        let &Named {
+            name : ref experiment_name,
+            data : ExperimentBiased {
+                λ,
+                ref bias,
+                base : ExperimentV2 {
+                    domain, sensor_count, ref noise_distr, sensor, spread, kernel,
+                    ref μ_hat, regularisation, kernel_plot_width, dataterm, noise_seed,
+                    ..
+                }
+            }
+        } = self;
+
+        // Set up algorithms
+        let algorithms = match (algs, dataterm) {
+            (Some(algs), _) => algs,
+            _ => vec![DefaultAlgorithm::SlidingPDPS.get_named()],
+        };
+
+        // Set up operators
+        let depth = DynamicDepth(8);
+        let opA = DefaultSG::new(domain, sensor_count, sensor, spread, depth);
+        let op𝒟 = DefaultSeminormOp::new(depth, domain, kernel);
+        let opAext = RowOp(opA.clone(), IdOp::new());
+        let fnR = Zero::new();
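+        // Grid spacing h: the largest per-dimension sensor spacing, used as
+        // the step of the finite-difference gradient opKz below.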
+        let h = map3(domain.span_start(), domain.span_end(), sensor_count,
+                     |a, b, n| (b-a)/F::cast_from(n))
+                    .into_iter()
+                    .reduce(NumTraitsFloat::max)
+                    .unwrap();
+        let z = DVector::zeros(sensor_count.iter().product());
+        let opKz = Grad::new_for(&z, h, sensor_count, ForwardNeumann).unwrap();
+        let y = opKz.apply(&z);
+        let fnH = Weighted{ base_fn : L1.as_mapping(), weight : λ};  // TODO: L_{2,1}
+        // let zero_y = y.clone();
+        // let zeroBTFN = opA.preadjoint().apply(&zero_y);
+        // let opKμ = ZeroOp::new(&zero_y, zeroBTFN);
+
+        // Set up random number generator.
+        let mut rng = StdRng::seed_from_u64(noise_seed);
+
+        // Generate the data and calculate SSNR statistic
+        let bias_vec = DVector::from_vec(opA.grid()
+                                            .into_iter()
+                                            .map(|v| bias.apply(v))
+                                            .collect::<Vec<F>>());
+        let b_hat : DVector<_> = opA.apply(μ_hat) + &bias_vec;
+        let noise = DVector::from_distribution(b_hat.len(), &noise_distr, &mut rng);
+        let b = &b_hat + &noise;
+        // Need to wrap calc_ssnr into a function to hide ultra-lame nalgebra::RealField
+        // overloading log10 and conflicting with standard NumTraits one.
+        let stats = ExperimentStats::new(&b, &noise);
+
+        let prefix = start_experiment(&self, cli, stats)?;
+
+        plotall(cli, &prefix, &domain, &sensor, &kernel, &spread,
+                &μ_hat, &op𝒟, &opA, &b_hat, &b, kernel_plot_width)?;
+
+        opA.write_observable(&bias_vec, format!("{prefix}bias"))?;
+
+        let plotgrid = lingrid(&domain, &[if N==1 { 1000 } else { 100 }; N]);
+
+        let save_extra = |prefix, z| opA.write_observable(&z, format!("{prefix}z"));
 
-impl_experiment!(ExperimentV2, regularisation, std::convert::identity);
+        // Run the algorithms
+        do_runall(experiment_name, &prefix, cli, algorithms, plotgrid, save_extra,
+            |alg, iterator, plotter, running|
+        {
+            let Pair(μ, z) = match alg {
+                AlgorithmConfig::ForwardPDPS(ref algconfig, prox) => {
+                    match (regularisation, dataterm, prox) {
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared, ProxTerm::Wave) => Ok({
+                            print!("{running}");
+                            pointsource_forward_pdps_pair(
+                                &opAext, &b, NonnegRadonRegTerm(α), &op𝒟, algconfig,
+                                iterator, plotter,
+                                /* opKμ, */ &opKz, &fnR, &fnH, z.clone(), y.clone(),
+                            )
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared, ProxTerm::Wave) => Ok({
+                            print!("{running}");
+                            pointsource_forward_pdps_pair(
+                                &opAext, &b, RadonRegTerm(α), &op𝒟, algconfig,
+                                iterator, plotter,
+                                /* opKμ, */ &opKz, &fnR, &fnH, z.clone(), y.clone(),
+                            )
+                        }),
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared, ProxTerm::RadonSquared) => Ok({
+                            print!("{running}");
+                            pointsource_forward_pdps_pair(
+                                &opAext, &b, NonnegRadonRegTerm(α), &RadonSquared, algconfig,
+                                iterator, plotter,
+                                /* opKμ, */ &opKz, &fnR, &fnH, z.clone(), y.clone(),
+                            )
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared, ProxTerm::RadonSquared) => Ok({
+                            print!("{running}");
+                            pointsource_forward_pdps_pair(
+                                &opAext, &b, RadonRegTerm(α), &RadonSquared, algconfig,
+                                iterator, plotter,
+                                /* opKμ, */ &opKz, &fnR, &fnH, z.clone(), y.clone(),
+                            )
+                        }),
+                        _ => Err(NotImplemented)
+                    }
+                },
+                AlgorithmConfig::SlidingPDPS(ref algconfig, prox) => {
+                    match (regularisation, dataterm, prox) {
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared, ProxTerm::Wave) => Ok({
+                            print!("{running}");
+                            pointsource_sliding_pdps_pair(
+                                &opAext, &b, NonnegRadonRegTerm(α), &op𝒟, algconfig,
+                                iterator, plotter,
+                                /* opKμ, */ &opKz, &fnR, &fnH, z.clone(), y.clone(),
+                            )
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared, ProxTerm::Wave) => Ok({
+                            print!("{running}");
+                            pointsource_sliding_pdps_pair(
+                                &opAext, &b, RadonRegTerm(α), &op𝒟, algconfig,
+                                iterator, plotter,
+                                /* opKμ, */ &opKz, &fnR, &fnH, z.clone(), y.clone(),
+                            )
+                        }),
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared, ProxTerm::RadonSquared) => Ok({
+                            print!("{running}");
+                            pointsource_sliding_pdps_pair(
+                                &opAext, &b, NonnegRadonRegTerm(α), &RadonSquared, algconfig,
+                                iterator, plotter,
+                                /* opKμ, */ &opKz, &fnR, &fnH, z.clone(), y.clone(),
+                            )
+                        }),
+                        (Regularisation::Radon(α), DataTerm::L2Squared, ProxTerm::RadonSquared) => Ok({
+                            print!("{running}");
+                            pointsource_sliding_pdps_pair(
+                                &opAext, &b, RadonRegTerm(α), &RadonSquared, algconfig,
+                                iterator, plotter,
+                                /* opKμ, */ &opKz, &fnR, &fnH, z.clone(), y.clone(),
+                            )
+                        }),
+                        _ => Err(NotImplemented)
+                    }
+                },
+                _ => Err(NotImplemented)
+            }?;
+            Ok((μ, z))
+        })
+    }
+}
+
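+/// Record of the initial (`ini`) and minimum (`min`) objective function
+/// values over one or more algorithm logs.
+///
+/// A minimal sketch of the intended semantics, with hypothetical values:
+/// ```ignore
+/// let a = ValueRange { ini : 1.0, min : 0.2 };
+/// let b = ValueRange { ini : 0.8, min : 0.1 };
+/// // The combined range spans the largest initial and smallest minimum value.
+/// assert_eq!(a.expand_with(b).ini, 1.0);
+/// assert_eq!(a.expand_with(b).min, 0.1);
+/// ```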
+#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
+struct ValueRange<F : Float> {
+    ini : F,
+    min : F,
+}
+
+impl<F : Float> ValueRange<F> {
+    fn expand_with(self, other : Self) -> Self {
+        ValueRange {
+            ini : self.ini.max(other.ini),
+            min : self.min.min(other.min),
+        }
+    }
+}
+
+/// Calculate the initial and minimum values over all the `logs`, and save each
+/// log into the corresponding file name given as the first element of its
+/// tuple, along with the overall value range.
+fn save_logs<F : Float + for<'b> Deserialize<'b>, const N : usize>(
+    logs : Vec<(String, Logger<Timed<IterInfo<F, N>>>)>,
+    valuerange_file : String,
+    load_valuerange : bool,
+) -> DynError {
+    // Process logs for relative values
+    println!("{}", "Processing logs…");
+
+    // Find minimum value and initial value within a single log
+    let proc_single_log = |log : &Logger<Timed<IterInfo<F, N>>>| {
+        let d = log.data();
+        let mi = d.iter()
+                  .map(|i| i.data.value)
+                  .reduce(NumTraitsFloat::min);
+        d.first()
+         .map(|i| i.data.value)
+         .zip(mi)
+         .map(|(ini, min)| ValueRange{ ini, min })
+    };
+
+    // Find minimum and maximum value over all logs
+    let mut v = logs.iter()
+                    .filter_map(|&(_, ref log)| proc_single_log(log))
+                    .reduce(|v1, v2| v1.expand_with(v2))
+                    .ok_or(anyhow!("No algorithms found"))?;
+
+    // Load existing range
+    if load_valuerange && std::fs::metadata(&valuerange_file).is_ok() {
+        let data = std::fs::read_to_string(&valuerange_file)?;
+        v = v.expand_with(serde_json::from_str(&data)?);
+    }
+
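+    // Convert each timed iteration record into a CSV row, attaching the
+    // objective value relative to the computed value range.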
+    let logmap = |Timed { cpu_time, iter, data }| {
+        let IterInfo {
+            value,
+            n_spikes,
+            inner_iters,
+            merged,
+            pruned,
+            //postprocessing,
+            this_iters,
+            ..
+        } = data;
+        // let post_value = match (postprocessing, dataterm) {
+        //     (Some(mut μ), DataTerm::L2Squared) => {
+        //         // Comparison postprocessing is only implemented for the case handled
+        //         // by the FW variants.
+        //         reg.optimise_weights(
+        //             &mut μ, &opA, &b, &findim_data, &inner_config,
+        //             inner_it
+        //         );
+        //         dataterm.value_at_residual(opA.apply(&μ) - &b)
+        //             + regularisation.apply(&μ)
+        //     },
+        //     _ => value,
+        // };
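+        // Normalised progress: 1 at the (worst) initial value, 0 at the best
+        // minimum over all logs.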
+        let relative_value = (value - v.min)/(v.ini - v.min);
+        CSVLog {
+            iter,
+            value,
+            relative_value,
+            //post_value,
+            n_spikes,
+            cpu_time : cpu_time.as_secs_f64(),
+            inner_iters,
+            merged,
+            pruned,
+            this_iters
+        }
+    };
+
+    println!("{}", "Saving logs …".green());
+
+    serde_json::to_writer_pretty(std::fs::File::create(&valuerange_file)?, &v)?;
+
+    for (name, logger) in logs {
+        logger.map(logmap).write_csv(name)?;
+    }
+
+    Ok(())
+}
+
 
 /// Plot experiment setup
 #[replace_float_literals(F::cast_from(literal))]
@@ -597,7 +1234,7 @@
     sensor : &Sensor,
     kernel : &Kernel,
     spread : &Spread,
-    μ_hat : &DiscreteMeasure<Loc<F, N>, F>,
+    μ_hat : &RNDM<F, N>,
     op𝒟 : &𝒟,
     opA : &A,
     b_hat : &A::Observable,
@@ -608,11 +1245,12 @@
       Sensor : RealMapping<F, N> + Support<F, N> + Clone,
       Spread : RealMapping<F, N> + Support<F, N> + Clone,
       Kernel : RealMapping<F, N> + Support<F, N>,
-      Convolution<Sensor, Spread> : RealMapping<F, N> + Support<F, N>,
+      Convolution<Sensor, Spread> : DifferentiableRealMapping<F, N> + Support<F, N>,
       𝒟 : DiscreteMeasureOp<Loc<F, N>, F>,
       𝒟::Codomain : RealMapping<F, N>,
-      A : ForwardModel<Loc<F, N>, F>,
-      A::PreadjointCodomain : RealMapping<F, N> + Bounded<F>,
+      A : ForwardModel<RNDM<F, N>, F>,
+      for<'a> &'a A::Observable : Instance<A::Observable>,
+      A::PreadjointCodomain : DifferentiableRealMapping<F, N> + Bounded<F>,
       PlotLookup : Plotting<N>,
       Cube<F, N> : SetOrd {
 
@@ -623,79 +1261,36 @@
     let base = Convolution(sensor.clone(), spread.clone());
 
     let resolution = if N==1 { 100 } else { 40 };
-    let pfx = |n| format!("{}{}", prefix, n);
+    let pfx = |n| format!("{prefix}{n}");
     let plotgrid = lingrid(&[[-kernel_plot_width, kernel_plot_width]; N].into(), &[resolution; N]);
 
-    PlotLookup::plot_into_file(sensor, plotgrid, pfx("sensor"), "sensor".to_string());
-    PlotLookup::plot_into_file(kernel, plotgrid, pfx("kernel"), "kernel".to_string());
-    PlotLookup::plot_into_file(spread, plotgrid, pfx("spread"), "spread".to_string());
-    PlotLookup::plot_into_file(&base, plotgrid, pfx("base_sensor"), "base_sensor".to_string());
+    PlotLookup::plot_into_file(sensor, plotgrid, pfx("sensor"));
+    PlotLookup::plot_into_file(kernel, plotgrid, pfx("kernel"));
+    PlotLookup::plot_into_file(spread, plotgrid, pfx("spread"));
+    PlotLookup::plot_into_file(&base, plotgrid, pfx("base_sensor"));
 
     let plotgrid2 = lingrid(&domain, &[resolution; N]);
 
     let ω_hat = op𝒟.apply(μ_hat);
     let noise =  opA.preadjoint().apply(opA.apply(μ_hat) - b);
-    PlotLookup::plot_into_file(&ω_hat, plotgrid2, pfx("omega_hat"), "ω̂".to_string());
-    PlotLookup::plot_into_file(&noise, plotgrid2, pfx("omega_noise"),
-                               "noise Aᵀ(Aμ̂ - b)".to_string());
+    PlotLookup::plot_into_file(&ω_hat, plotgrid2, pfx("omega_hat"));
+    PlotLookup::plot_into_file(&noise, plotgrid2, pfx("omega_noise"));
 
     let preadj_b =  opA.preadjoint().apply(b);
     let preadj_b_hat =  opA.preadjoint().apply(b_hat);
     //let bounds = preadj_b.bounds().common(&preadj_b_hat.bounds());
     PlotLookup::plot_into_file_spikes(
-        "Aᵀb".to_string(), &preadj_b,
-        "Aᵀb̂".to_string(), Some(&preadj_b_hat),
-        plotgrid2, None, &μ_hat,
+        Some(&preadj_b),
+        Some(&preadj_b_hat),
+        plotgrid2,
+        &μ_hat,
         pfx("omega_b")
     );
+    PlotLookup::plot_into_file(&preadj_b, plotgrid2, pfx("preadj_b"));
+    PlotLookup::plot_into_file(&preadj_b_hat, plotgrid2, pfx("preadj_b_hat"));
 
     // Save true solution and observables
-    let pfx = |n| format!("{}{}", prefix, n);
     μ_hat.write_csv(pfx("orig.txt"))?;
     opA.write_observable(&b_hat, pfx("b_hat"))?;
     opA.write_observable(&b, pfx("b_noisy"))
 }
-
-//
-// Deprecated interface
-//
-
-/// Struct for experiment configurations
-#[derive(Debug, Clone, Serialize)]
-pub struct Experiment<F, NoiseDistr, S, K, P, const N : usize>
-where F : Float,
-      [usize; N] : Serialize,
-      NoiseDistr : Distribution<F>,
-      S : Sensor<F, N>,
-      P : Spread<F, N>,
-      K : SimpleConvolutionKernel<F, N>,
-{
-    /// Domain $Ω$.
-    pub domain : Cube<F, N>,
-    /// Number of sensors along each dimension
-    pub sensor_count : [usize; N],
-    /// Noise distribution
-    pub noise_distr : NoiseDistr,
-    /// Seed for random noise generation (for repeatable experiments)
-    pub noise_seed : u64,
-    /// Sensor $θ$; $θ * ψ$ forms the forward operator $𝒜$.
-    pub sensor : S,
-    /// Spread $ψ$; $θ * ψ$ forms the forward operator $𝒜$.
-    pub spread : P,
-    /// Kernel $ρ$ of $𝒟$.
-    pub kernel : K,
-    /// True point sources
-    pub μ_hat : DiscreteMeasure<Loc<F, N>, F>,
-    /// Regularisation parameter
-    #[deprecated(note = "Use [`ExperimentV2`], which replaces `α` by more generic `regularisation`")]
-    pub α : F,
-    /// For plotting : how wide should the kernels be plotted
-    pub kernel_plot_width : F,
-    /// Data term
-    pub dataterm : DataTerm,
-    /// A map of default configurations for algorithms
-    #[serde(skip)]
-    pub algorithm_defaults : HashMap<DefaultAlgorithm, AlgorithmConfig<F>>,
-}
-
-impl_experiment!(Experiment, α, Regularisation::NonnegRadon);
--- a/src/seminorms.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/seminorms.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -12,9 +12,11 @@
 use alg_tools::bisection_tree::*;
 use alg_tools::mapping::RealMapping;
 use alg_tools::iter::{Mappable, FilterMapX};
-use alg_tools::linops::{Apply, Linear, BoundedLinear};
+use alg_tools::linops::{Mapping, Linear, BoundedLinear};
+use alg_tools::instance::Instance;
 use alg_tools::nalgebra_support::ToNalgebraRealField;
-use crate::measures::{DiscreteMeasure, DeltaMeasure, SpikeIter};
+use alg_tools::norms::Linfinity;
+use crate::measures::{DiscreteMeasure, DeltaMeasure, SpikeIter, Radon, RNDM};
 use nalgebra::DMatrix;
 use std::marker::PhantomData;
 use itertools::Itertools;
@@ -22,9 +24,12 @@
 /// Abstraction for operators $𝒟 ∈ 𝕃(𝒵(Ω); C_c(Ω))$.
 ///
 /// Here $𝒵(Ω) ⊂ ℳ(Ω)$ is the space of sums of delta measures, presented by [`DiscreteMeasure`].
-pub trait DiscreteMeasureOp<Domain, F> : BoundedLinear<DiscreteMeasure<Domain, F>, FloatType=F>
-where F : Float + ToNalgebraRealField,
-      Domain : 'static {
+pub trait DiscreteMeasureOp<Domain, F>
+    : BoundedLinear<DiscreteMeasure<Domain, F>, Radon, Linfinity, F>
+where
+    F : Float + ToNalgebraRealField,
+    Domain : 'static + Clone + PartialEq,
+{
     /// The output type of [`Self::preapply`].
     type PreCodomain;
 
@@ -38,7 +43,7 @@
     fn findim_matrix<'a, I>(&self, points : I) -> DMatrix<F::MixedType>
     where I : ExactSizeIterator<Item=&'a Domain> + Clone;
 
-    /// [`Apply::apply`] that typically returns an uninitialised [`PreBTFN`]
+    /// A version of [`Mapping::apply`] that typically returns an uninitialised [`PreBTFN`]
     /// instead of a full [`BTFN`].
     fn preapply(&self, μ : DiscreteMeasure<Domain, F>) -> Self::PreCodomain;
 }
@@ -73,7 +78,7 @@
 pub struct ConvolutionSupportGenerator<F : Float, K, const N : usize>
 where K : SimpleConvolutionKernel<F, N> {
     kernel : K,
-    centres : DiscreteMeasure<Loc<F, N>, F>,
+    centres : RNDM<F, N>,
 }
 
 impl<F : Float, K, const N : usize> ConvolutionSupportGenerator<F, K, N>
@@ -130,9 +135,9 @@
 where F : Float + ToNalgebraRealField,
       BT : BTImpl<F, N, Data=usize>,
       K : SimpleConvolutionKernel<F, N> {
-    /// Depth of the [`BT`] bisection tree for the outputs [`Apply::apply`].
+    /// Depth of the [`BT`] bisection tree for the outputs [`Mapping::apply`].
     depth : BT::Depth,
-    /// Domain of the [`BT`] bisection tree for the outputs [`Apply::apply`].
+    /// Domain of the [`BT`] bisection tree for the outputs [`Mapping::apply`].
     domain : Cube<F, N>,
     /// The convolution kernel
     kernel : K,
@@ -146,7 +151,7 @@
 
     /// Creates a new convolution operator $𝒟$ with `kernel` on `domain`.
     ///
-    /// The output of [`Apply::apply`] is a [`BT`] of given `depth`.
+    /// The output of [`Mapping::apply`] is a [`BT`] of given `depth`.
     pub fn new(depth : BT::Depth, domain : Cube<F, N>, kernel : K) -> Self {
         ConvolutionOp {
             depth : depth,
@@ -157,7 +162,7 @@
     }
 
     /// Returns the support generator for this convolution operator.
-    fn support_generator(&self, μ : DiscreteMeasure<Loc<F, N>, F>)
+    fn support_generator(&self, μ : RNDM<F, N>)
     -> ConvolutionSupportGenerator<F, K, N> {
 
         // TODO: can we avoid cloning μ?
@@ -173,94 +178,43 @@
     }
 }
 
-impl<F, K, BT, const N : usize> Apply<DiscreteMeasure<Loc<F, N>, F>>
+impl<F, K, BT, const N : usize> Mapping<RNDM<F, N>>
 for ConvolutionOp<F, K, BT, N>
-where F : Float + ToNalgebraRealField,
-      BT : BTImpl<F, N, Data=usize>,
-      K : SimpleConvolutionKernel<F, N>,
-      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+where
+    F : Float + ToNalgebraRealField,
+    BT : BTImpl<F, N, Data=usize>,
+    K : SimpleConvolutionKernel<F, N>,
+    Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N>
+{
 
-    type Output = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;
+    type Codomain = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;
 
-    fn apply(&self, μ : DiscreteMeasure<Loc<F, N>, F>) -> Self::Output {
-        let g = self.support_generator(μ);
+    fn apply<I>(&self, μ : I) -> Self::Codomain
+    where I : Instance<RNDM<F, N>> {
+        let g = self.support_generator(μ.own());
         BTFN::construct(self.domain.clone(), self.depth, g)
     }
 }
 
-impl<'a, F, K, BT, const N : usize> Apply<&'a DiscreteMeasure<Loc<F, N>, F>>
+/// [`ConvolutionOp`]s as linear operators over [`DiscreteMeasure`]s.
+impl<F, K, BT, const N : usize> Linear<RNDM<F, N>>
+for ConvolutionOp<F, K, BT, N>
+where
+    F : Float + ToNalgebraRealField,
+    BT : BTImpl<F, N, Data=usize>,
+    K : SimpleConvolutionKernel<F, N>,
+    Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N>
+{ }
+
+impl<F, K, BT, const N : usize>
+BoundedLinear<RNDM<F, N>, Radon, Linfinity, F>
 for ConvolutionOp<F, K, BT, N>
 where F : Float + ToNalgebraRealField,
       BT : BTImpl<F, N, Data=usize>,
       K : SimpleConvolutionKernel<F, N>,
       Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
 
-    type Output = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;
-
-    fn apply(&self, μ : &'a DiscreteMeasure<Loc<F, N>, F>) -> Self::Output {
-        self.apply(μ.clone())
-    }
-}
-
-/// [`ConvolutionOp`]s as linear operators over [`DiscreteMeasure`]s.
-impl<F, K, BT, const N : usize> Linear<DiscreteMeasure<Loc<F, N>, F>>
-for ConvolutionOp<F, K, BT, N>
-where F : Float + ToNalgebraRealField,
-      BT : BTImpl<F, N, Data=usize>,
-      K : SimpleConvolutionKernel<F, N>,
-      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
-    type Codomain = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;
-}
-
-impl<F, K, BT, const N : usize> Apply<DeltaMeasure<Loc<F, N>, F>>
-for ConvolutionOp<F, K, BT, N>
-where F : Float + ToNalgebraRealField,
-      BT : BTImpl<F, N, Data=usize>,
-      K : SimpleConvolutionKernel<F, N> {
-
-    type Output = Weighted<Shift<K, F, N>, F>;
-
-    #[inline]
-    fn apply(&self, δ : DeltaMeasure<Loc<F, N>, F>) -> Self::Output {
-        self.kernel.clone().shift(δ.x).weigh(δ.α)
-    }
-}
-
-impl<'a, F, K, BT, const N : usize> Apply<&'a DeltaMeasure<Loc<F, N>, F>>
-for ConvolutionOp<F, K, BT, N>
-where F : Float + ToNalgebraRealField,
-      BT : BTImpl<F, N, Data=usize>,
-      K : SimpleConvolutionKernel<F, N> {
-
-    type Output = Weighted<Shift<K, F, N>, F>;
-
-    #[inline]
-    fn apply(&self, δ : &'a DeltaMeasure<Loc<F, N>, F>) -> Self::Output {
-        self.kernel.clone().shift(δ.x).weigh(δ.α)
-    }
-}
-
-/// [`ConvolutionOp`]s as linear operators over [`DeltaMeasure`]s.
-///
-/// The codomain is different from the implementation for [`DiscreteMeasure`].
-impl<F, K, BT, const N : usize> Linear<DeltaMeasure<Loc<F, N>, F>>
-for ConvolutionOp<F, K, BT, N>
-where F : Float + ToNalgebraRealField,
-      BT : BTImpl<F, N, Data=usize>,
-      K : SimpleConvolutionKernel<F, N> {
-    type Codomain = Weighted<Shift<K, F, N>, F>;
-}
-
-impl<F, K, BT, const N : usize> BoundedLinear<DiscreteMeasure<Loc<F, N>, F>>
-for ConvolutionOp<F, K, BT, N>
-where F : Float + ToNalgebraRealField,
-      BT : BTImpl<F, N, Data=usize>,
-      K : SimpleConvolutionKernel<F, N>,
-      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
-
-    type FloatType = F;
-
-    fn opnorm_bound(&self) -> F {
+    fn opnorm_bound(&self, _ : Radon, _ : Linfinity) -> F {
         // With μ = ∑_i α_i δ_{x_i}, we have
         // |𝒟μ|_∞
         // = sup_z |∑_i α_i φ(z - x_i)|
@@ -292,10 +246,10 @@
         DMatrix::from_iterator(n, n, values)
     }
 
-    /// A version of [`Apply::apply`] that does not instantiate the [`BTFN`] codomain with
+    /// A version of [`Mapping::apply`] that does not instantiate the [`BTFN`] codomain with
     /// a bisection tree, instead returning a [`PreBTFN`]. This can improve performance when
     /// the output is to be added as the right-hand-side operand to a proper BTFN.
-    fn preapply(&self, μ : DiscreteMeasure<Loc<F, N>, F>) -> Self::PreCodomain {
+    fn preapply(&self, μ : RNDM<F, N>) -> Self::PreCodomain {
         BTFN::new_pre(self.support_generator(μ))
     }
 }
@@ -368,11 +322,3 @@
 
 make_convolutionsupportgenerator_unaryop!(Neg, neg);
 
-/// Trait for indicating that `Self` is Lipschitz with respect to the seminorm `D`.
-pub trait Lipschitz<D> {
-    /// The type of floats
-    type FloatType : Float;
-
-    /// Returns the Lipschitz factor of `self` with respect to the seminorm `D`.
-    fn lipschitz_factor(&self, seminorm : &D) -> Option<Self::FloatType>;
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/sliding_fb.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -0,0 +1,444 @@
+/*!
+Solver for the point source localisation problem using a sliding
+forward-backward splitting method.
+*/
+
+use numeric_literals::replace_float_literals;
+use serde::{Deserialize, Serialize};
+//use colored::Colorize;
+//use nalgebra::{DVector, DMatrix};
+use itertools::izip;
+use std::iter::Iterator;
+
+use alg_tools::euclidean::Euclidean;
+use alg_tools::iterate::AlgIteratorFactory;
+use alg_tools::mapping::{DifferentiableRealMapping, Instance, Mapping};
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::norms::Norm;
+
+use crate::forward_model::{AdjointProductBoundedBy, BoundedCurvature, ForwardModel};
+use crate::measures::merging::SpikeMerging;
+use crate::measures::{DiscreteMeasure, Radon, RNDM};
+use crate::types::*;
+//use crate::tolerance::Tolerance;
+use crate::dataterm::{calculate_residual, calculate_residual2, DataTerm, L2Squared};
+use crate::fb::*;
+use crate::plot::{PlotLookup, Plotting, SeqPlotter};
+use crate::regularisation::SlidingRegTerm;
+//use crate::transport::TransportLipschitz;
+
+/// Transport settings for [`pointsource_sliding_fb_reg`].
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[serde(default)]
+pub struct TransportConfig<F: Float> {
+    /// Transport step length $θ$ normalised to $(0, 1)$.
+    pub θ0: F,
+    /// Factor in $(0, 1)$ for decreasing transport to adapt to tolerance.
+    pub adaptation: F,
+    /// A posteriori transport tolerance multiplier (C_pos)
+    pub tolerance_mult_con: F,
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F: Float> TransportConfig<F> {
+    /// Check that the parameters are ok. Panics if not.
+    pub fn check(&self) {
+        assert!(self.θ0 > 0.0);
+        assert!(0.0 < self.adaptation && self.adaptation < 1.0);
+        assert!(self.tolerance_mult_con > 0.0);
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F: Float> Default for TransportConfig<F> {
+    fn default() -> Self {
+        TransportConfig {
+            θ0: 0.9,
+            adaptation: 0.9,
+            tolerance_mult_con: 100.0,
+        }
+    }
+}
+
+/// Settings for [`pointsource_sliding_fb_reg`].
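+///
+/// A minimal construction sketch (hypothetical parameter values, for illustration
+/// only, not tuned recommendations):
+///
+/// ```ignore
+/// let config = SlidingFBConfig::<f64> {
+///     τ0: 0.5,
+///     transport: TransportConfig { θ0: 0.5, ..Default::default() },
+///     ..Default::default()
+/// };
+/// config.transport.check();
+/// ```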
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[serde(default)]
+pub struct SlidingFBConfig<F: Float> {
+    /// Step length scaling
+    pub τ0: F,
+    /// Transport parameters
+    pub transport: TransportConfig<F>,
+    /// Generic parameters
+    pub insertion: FBGenericConfig<F>,
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F: Float> Default for SlidingFBConfig<F> {
+    fn default() -> Self {
+        SlidingFBConfig {
+            τ0: 0.99,
+            transport: Default::default(),
+            insertion: Default::default(),
+        }
+    }
+}
+
+/// Internal type of adaptive transport step length calculation
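+///
+/// For illustration, the fully adaptive variant could be initialised as below;
+/// the closure and the constants `θ0`, `τ` and `ℓ_r` are hypothetical stand-ins
+/// for the quantities computed in [`pointsource_sliding_fb_reg`]:
+///
+/// ```ignore
+/// let mut θ_or_adaptive = TransportStepLength::FullyAdaptive {
+///     l: 10.0 * f64::EPSILON, // initial estimate of the Lipschitz factor
+///     max_transport: 0.0,
+///     g: |ℓ_F, max_transport| θ0 / (τ * (ℓ_F + ℓ_r * max_transport)),
+/// };
+/// ```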
+pub(crate) enum TransportStepLength<F: Float, G: Fn(F, F) -> F> {
+    /// Fixed, known step length
+    #[allow(dead_code)]
+    Fixed(F),
+    /// Adaptive step length, only wrt. maximum transport.
+    /// Content of `l` depends on use case, while `g` calculates the step length from `l`.
+    AdaptiveMax { l: F, max_transport: F, g: G },
+    /// Adaptive step length.
+    /// Content of `l` depends on use case, while `g` calculates the step length from `l`.
+    FullyAdaptive { l: F, max_transport: F, g: G },
+}
+
+/// Construction of initial transport `γ1` from initial measure `μ` and `v=F'(μ)`
+/// with step length τ and transport step length `θ_or_adaptive`.
+#[replace_float_literals(F::cast_from(literal))]
+pub(crate) fn initial_transport<F, G, D, const N: usize>(
+    γ1: &mut RNDM<F, N>,
+    μ: &mut RNDM<F, N>,
+    τ: F,
+    θ_or_adaptive: &mut TransportStepLength<F, G>,
+    v: D,
+) -> (Vec<F>, RNDM<F, N>)
+where
+    F: Float + ToNalgebraRealField,
+    G: Fn(F, F) -> F,
+    D: DifferentiableRealMapping<F, N>,
+{
+    use TransportStepLength::*;
+
+    // Save current base point and shift μ to new positions. Idea is that
+    //  μ_base(_masses) = μ^k (vector of masses)
+    //  μ_base_minus_γ0 = μ^k - π_♯^0γ^{k+1}
+    //  γ1 = π_♯^1γ^{k+1}
+    //  μ = μ^{k+1}
+    let μ_base_masses: Vec<F> = μ.iter_masses().collect();
+    let mut μ_base_minus_γ0 = μ.clone(); // Weights will be set in the loop below.
+    // Construct μ^{k+1} and π_♯^1γ^{k+1} initial candidates
+    //let mut sum_norm_dv = 0.0;
+    let γ_prev_len = γ1.len();
+    assert!(μ.len() >= γ_prev_len);
+    γ1.extend(μ[γ_prev_len..].iter().cloned());
+
+    // Calculate initial transport and step length.
+    // First calculate initial transported weights
+    for (δ, ρ) in izip!(μ.iter_spikes(), γ1.iter_spikes_mut()) {
+        // If the old transport has the opposing sign, the new transport will be zero.
+        ρ.α = if (ρ.α > 0.0 && δ.α < 0.0) || (ρ.α < 0.0 && δ.α > 0.0) {
+            0.0
+        } else {
+            δ.α
+        };
+    }
+
+    // Calculate transport rays.
+    match *θ_or_adaptive {
+        Fixed(θ) => {
+            let θτ = τ * θ;
+            for (δ, ρ) in izip!(μ.iter_spikes(), γ1.iter_spikes_mut()) {
+                ρ.x = δ.x - v.differential(&δ.x) * (ρ.α.signum() * θτ);
+            }
+        }
+        AdaptiveMax {
+            l: ℓ_F,
+            ref mut max_transport,
+            g: ref calculate_θ,
+        } => {
+            *max_transport = max_transport.max(γ1.norm(Radon));
+            let θτ = τ * calculate_θ(ℓ_F, *max_transport);
+            for (δ, ρ) in izip!(μ.iter_spikes(), γ1.iter_spikes_mut()) {
+                ρ.x = δ.x - v.differential(&δ.x) * (ρ.α.signum() * θτ);
+            }
+        }
+        FullyAdaptive {
+            l: ref mut adaptive_ℓ_F,
+            ref mut max_transport,
+            g: ref calculate_θ,
+        } => {
+            *max_transport = max_transport.max(γ1.norm(Radon));
+            let mut θ = calculate_θ(*adaptive_ℓ_F, *max_transport);
+            // Do two runs through the spikes to update θ, breaking if the first run did not
+            // cause any changes.
+            for _i in 0..=1 {
+                let mut changes = false;
+                for (δ, ρ) in izip!(μ.iter_spikes(), γ1.iter_spikes_mut()) {
+                    let dv_x = v.differential(&δ.x);
+                    let g = &dv_x * (ρ.α.signum() * θ * τ);
+                    ρ.x = δ.x - g;
+                    let n = g.norm2();
+                    if n >= F::EPSILON {
+                        // Estimate Lipschitz factor of ∇v
+                        let this_ℓ_F = (dv_x - v.differential(&ρ.x)).norm2() / n;
+                        *adaptive_ℓ_F = adaptive_ℓ_F.max(this_ℓ_F);
+                        θ = calculate_θ(*adaptive_ℓ_F, *max_transport);
+                        changes = true
+                    }
+                }
+                if !changes {
+                    break;
+                }
+            }
+        }
+    }
+
+    // Set initial guess for μ=μ^{k+1}.
+    for (δ, ρ, &β) in izip!(μ.iter_spikes_mut(), γ1.iter_spikes(), μ_base_masses.iter()) {
+        if ρ.α.abs() > F::EPSILON {
+            δ.x = ρ.x;
+            //δ.α = ρ.α; // already set above
+        } else {
+            δ.α = β;
+        }
+    }
+    // Calculate μ^k-π_♯^0γ^{k+1} and v̆ = A_*(A[μ_transported + μ_transported_base]-b)
+    μ_base_minus_γ0.set_masses(
+        μ_base_masses
+            .iter()
+            .zip(γ1.iter_masses())
+            .map(|(&a, b)| a - b),
+    );
+    (μ_base_masses, μ_base_minus_γ0)
+}
+
+/// A posteriori transport adaptation.
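+///
+/// Returns `true` if the transport was acceptable without changes. For illustration
+/// (hypothetical numbers): if nγ = ‖γ1‖ = 2, nΔ = 0.5 and t = ε·tolerance_mult_con = 0.6,
+/// then nγ·nΔ = 1 > 0.6, so γ1 is scaled by adaptation·t/(nγ·nΔ) = 0.9·0.6 = 0.54
+/// (with the default adaptation = 0.9) and `false` is returned, making the caller retry.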
+#[replace_float_literals(F::cast_from(literal))]
+pub(crate) fn aposteriori_transport<F, const N: usize>(
+    γ1: &mut RNDM<F, N>,
+    μ: &mut RNDM<F, N>,
+    μ_base_minus_γ0: &mut RNDM<F, N>,
+    μ_base_masses: &Vec<F>,
+    extra: Option<F>,
+    ε: F,
+    tconfig: &TransportConfig<F>,
+) -> bool
+where
+    F: Float + ToNalgebraRealField,
+{
+    // 1. If π_♯^1γ^{k+1} = γ1 has non-zero mass at some point y, but μ = μ^{k+1} does not,
+    // then the ansatz ∇w̃_x(y) = w^{k+1}(y) may not be satisfied. So set the mass of γ1
+    // at that point to zero, and retry.
+    let mut all_ok = true;
+    for (α_μ, α_γ1) in izip!(μ.iter_masses(), γ1.iter_masses_mut()) {
+        if α_μ == 0.0 && *α_γ1 != 0.0 {
+            all_ok = false;
+            *α_γ1 = 0.0;
+        }
+    }
+
+    // 2. Bound ∫ B_ω(y, z) dλ(x, y, z)
+    //    through the estimate ≤ C ‖Δ‖‖γ^{k+1}‖ for Δ := μ^{k+1}-μ^k-(π_♯^1-π_♯^0)γ^{k+1},
+    //    which holds for some C if the convolution kernel in 𝒟 has a Lipschitz gradient.
+    let nγ = γ1.norm(Radon);
+    let nΔ = μ_base_minus_γ0.norm(Radon) + μ.dist_matching(&γ1) + extra.unwrap_or(0.0);
+    let t = ε * tconfig.tolerance_mult_con;
+    if nγ * nΔ > t {
+        // Since t/(nγ*nΔ)<1, and the constant tconfig.adaptation < 1,
+        // this will guarantee that eventually ‖γ‖ decreases sufficiently that we
+        // will not enter here.
+        *γ1 *= tconfig.adaptation * t / (nγ * nΔ);
+        all_ok = false
+    }
+
+    if !all_ok {
+        // Update weights for μ_base_minus_γ0 = μ^k - π_♯^0γ^{k+1}
+        μ_base_minus_γ0.set_masses(
+            μ_base_masses
+                .iter()
+                .zip(γ1.iter_masses())
+                .map(|(&a, b)| a - b),
+        );
+    }
+
+    all_ok
+}
+
+/// Iteratively solve the point source localisation problem using sliding forward-backward
+/// splitting
+///
+/// The parametrisation is as for [`pointsource_fb_reg`].
+/// Inertia is currently not supported.
+#[replace_float_literals(F::cast_from(literal))]
+pub fn pointsource_sliding_fb_reg<F, I, A, Reg, P, const N: usize>(
+    opA: &A,
+    b: &A::Observable,
+    reg: Reg,
+    prox_penalty: &P,
+    config: &SlidingFBConfig<F>,
+    iterator: I,
+    mut plotter: SeqPlotter<F, N>,
+) -> RNDM<F, N>
+where
+    F: Float + ToNalgebraRealField,
+    I: AlgIteratorFactory<IterInfo<F, N>>,
+    A: ForwardModel<RNDM<F, N>, F>
+        + AdjointProductBoundedBy<RNDM<F, N>, P, FloatType = F>
+        + BoundedCurvature<FloatType = F>,
+    for<'b> &'b A::Observable: std::ops::Neg<Output = A::Observable> + Instance<A::Observable>,
+    A::PreadjointCodomain: DifferentiableRealMapping<F, N>,
+    RNDM<F, N>: SpikeMerging<F>,
+    Reg: SlidingRegTerm<F, N>,
+    P: ProxPenalty<F, A::PreadjointCodomain, Reg, N>,
+    PlotLookup: Plotting<N>,
+{
+    // Check parameters
+    assert!(config.τ0 > 0.0, "Invalid step length parameter");
+    config.transport.check();
+
+    // Initialise iterates
+    let mut μ = DiscreteMeasure::new();
+    let mut γ1 = DiscreteMeasure::new();
+    let mut residual = -b; // Has to equal $Aμ-b$.
+
+    // Set up parameters
+    // let opAnorm = opA.opnorm_bound(Radon, L2);
+    //let max_transport = config.max_transport.scale
+    //                    * reg.radon_norm_bound(b.norm2_squared() / 2.0);
+    //let ℓ = opA.transport.lipschitz_factor(L2Squared) * max_transport;
+    let ℓ = 0.0;
+    let τ = config.τ0 / opA.adjoint_product_bound(prox_penalty).unwrap();
+    let (maybe_ℓ_F0, maybe_transport_lip) = opA.curvature_bound_components();
+    let transport_lip = maybe_transport_lip.unwrap();
+    let calculate_θ = |ℓ_F, max_transport| {
+        let ℓ_r = transport_lip * max_transport;
+        config.transport.θ0 / (τ * (ℓ + ℓ_F + ℓ_r))
+    };
+    let mut θ_or_adaptive = match maybe_ℓ_F0 {
+        //Some(ℓ_F0) => TransportStepLength::Fixed(calculate_θ(ℓ_F0 * b.norm2(), 0.0)),
+        Some(ℓ_F0) => TransportStepLength::AdaptiveMax {
+            l: ℓ_F0 * b.norm2(), // TODO: could estimate by computing the real residual
+            max_transport: 0.0,
+            g: calculate_θ,
+        },
+        None => TransportStepLength::FullyAdaptive {
+            l: 10.0 * F::EPSILON, // Start with something very small to estimate differentials
+            max_transport: 0.0,
+            g: calculate_θ,
+        },
+    };
+    // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled
+    // by τ compared to the conditional gradient approach.
+    let tolerance = config.insertion.tolerance * τ * reg.tolerance_scaling();
+    let mut ε = tolerance.initial();
+
+    // Statistics
+    let full_stats = |residual: &A::Observable, μ: &RNDM<F, N>, ε, stats| IterInfo {
+        value: residual.norm2_squared_div2() + reg.apply(μ),
+        n_spikes: μ.len(),
+        ε,
+        // postprocessing: config.insertion.postprocessing.then(|| μ.clone()),
+        ..stats
+    };
+    let mut stats = IterInfo::new();
+
+    // Run the algorithm
+    for state in iterator.iter_init(|| full_stats(&residual, &μ, ε, stats.clone())) {
+        // Calculate initial transport
+        let v = opA.preadjoint().apply(residual);
+        let (μ_base_masses, mut μ_base_minus_γ0) =
+            initial_transport(&mut γ1, &mut μ, τ, &mut θ_or_adaptive, v);
+
+        // Solve finite-dimensional subproblem several times until the dual variable for the
+        // regularisation term conforms to the assumptions made for the transport above.
+        let (maybe_d, _within_tolerances, mut τv̆) = 'adapt_transport: loop {
+            // Calculate τv̆ = τA_*(A[μ_transported + μ_transported_base]-b)
+            let residual_μ̆ = calculate_residual2(&γ1, &μ_base_minus_γ0, opA, b);
+            let mut τv̆ = opA.preadjoint().apply(residual_μ̆ * τ);
+
+            // Construct μ^{k+1} by solving finite-dimensional subproblems and insert new spikes.
+            let (maybe_d, within_tolerances) = prox_penalty.insert_and_reweigh(
+                &mut μ,
+                &mut τv̆,
+                &γ1,
+                Some(&μ_base_minus_γ0),
+                τ,
+                ε,
+                &config.insertion,
+                &reg,
+                &state,
+                &mut stats,
+            );
+
+            // A posteriori transport adaptation.
+            if aposteriori_transport(
+                &mut γ1,
+                &mut μ,
+                &mut μ_base_minus_γ0,
+                &μ_base_masses,
+                None,
+                ε,
+                &config.transport,
+            ) {
+                break 'adapt_transport (maybe_d, within_tolerances, τv̆);
+            }
+        };
+
+        stats.untransported_fraction = Some({
+            assert_eq!(μ_base_masses.len(), γ1.len());
+            let (a, b) = stats.untransported_fraction.unwrap_or((0.0, 0.0));
+            let source = μ_base_masses.iter().map(|v| v.abs()).sum();
+            (a + μ_base_minus_γ0.norm(Radon), b + source)
+        });
+        stats.transport_error = Some({
+            assert_eq!(μ_base_masses.len(), γ1.len());
+            let (a, b) = stats.transport_error.unwrap_or((0.0, 0.0));
+            (a + μ.dist_matching(&γ1), b + γ1.norm(Radon))
+        });
+
+        // Merge spikes.
+        // This crucially expects the merge routine to be stable with respect to spike locations,
+        // and not to perform any pruning. That is to be done below simultaneously for γ.
+        let ins = &config.insertion;
+        if ins.merge_now(&state) {
+            stats.merged += prox_penalty.merge_spikes(
+                &mut μ,
+                &mut τv̆,
+                &γ1,
+                Some(&μ_base_minus_γ0),
+                τ,
+                ε,
+                ins,
+                &reg,
+                Some(|μ̃: &RNDM<F, N>| L2Squared.calculate_fit_op(μ̃, opA, b)),
+            );
+        }
+
+        // Prune spikes with zero weight. To maintain correct ordering between μ and γ1, the
+        // latter also needs to be pruned when μ is.
+        // TODO: This could do with a two-vector Vec::retain to avoid copies.
+        let μ_new = DiscreteMeasure::from_iter(μ.iter_spikes().filter(|δ| δ.α != F::ZERO).cloned());
+        if μ_new.len() != μ.len() {
+            let mut μ_iter = μ.iter_spikes();
+            γ1.prune_by(|_| μ_iter.next().unwrap().α != F::ZERO);
+            stats.pruned += μ.len() - μ_new.len();
+            μ = μ_new;
+        }
+
+        // Update residual
+        residual = calculate_residual(&μ, opA, b);
+
+        let iter = state.iteration();
+        stats.this_iters += 1;
+
+        // Give statistics if requested
+        state.if_verbose(|| {
+            plotter.plot_spikes(iter, maybe_d.as_ref(), Some(&τv̆), &μ);
+            full_stats(
+                &residual,
+                &μ,
+                ε,
+                std::mem::replace(&mut stats, IterInfo::new()),
+            )
+        });
+
+        // Update main tolerance for next iteration
+        ε = tolerance.update(ε, iter);
+    }
+
+    postprocess(μ, &config.insertion, L2Squared, opA, b)
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/sliding_pdps.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -0,0 +1,373 @@
+/*!
+Solver for the point source localisation problem using a sliding
+primal-dual proximal splitting method.
+*/
+
+use numeric_literals::replace_float_literals;
+use serde::{Deserialize, Serialize};
+//use colored::Colorize;
+//use nalgebra::{DVector, DMatrix};
+use std::iter::Iterator;
+
+use alg_tools::convex::{Conjugable, Prox};
+use alg_tools::direct_product::Pair;
+use alg_tools::euclidean::Euclidean;
+use alg_tools::iterate::AlgIteratorFactory;
+use alg_tools::linops::{Adjointable, BoundedLinear, IdOp, AXPY, GEMV};
+use alg_tools::mapping::{DifferentiableRealMapping, Instance, Mapping};
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::norms::{Dist, Norm};
+use alg_tools::norms::{PairNorm, L2};
+
+use crate::forward_model::{AdjointProductPairBoundedBy, BoundedCurvature, ForwardModel};
+use crate::measures::merging::SpikeMerging;
+use crate::measures::{DiscreteMeasure, Radon, RNDM};
+use crate::types::*;
+// use crate::transport::TransportLipschitz;
+//use crate::tolerance::Tolerance;
+use crate::fb::*;
+use crate::plot::{PlotLookup, Plotting, SeqPlotter};
+use crate::regularisation::SlidingRegTerm;
+// use crate::dataterm::L2Squared;
+use crate::dataterm::{calculate_residual, calculate_residual2};
+use crate::sliding_fb::{
+    aposteriori_transport, initial_transport, TransportConfig, TransportStepLength,
+};
+
+/// Settings for [`pointsource_sliding_pdps_pair`].
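+///
+/// A construction sketch (hypothetical values; the solver asserts, among other
+/// conditions, that σp0·σd0 ≤ 1):
+///
+/// ```ignore
+/// let config = SlidingPDPSConfig::<f64> {
+///     σd0: 0.1,
+///     ..Default::default()
+/// };
+/// ```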
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[serde(default)]
+pub struct SlidingPDPSConfig<F: Float> {
+    /// Step length scaling for the measure variable μ.
+    pub τ0: F,
+    /// Primal step length scaling for the additional variable z.
+    pub σp0: F,
+    /// Dual step length scaling.
+    pub σd0: F,
+    /// Transport parameters
+    pub transport: TransportConfig<F>,
+    /// Generic parameters
+    pub insertion: FBGenericConfig<F>,
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F: Float> Default for SlidingPDPSConfig<F> {
+    fn default() -> Self {
+        SlidingPDPSConfig {
+            τ0: 0.99,
+            σd0: 0.05,
+            σp0: 0.99,
+            transport: TransportConfig {
+                θ0: 0.9,
+                ..Default::default()
+            },
+            insertion: Default::default(),
+        }
+    }
+}
+
+type MeasureZ<F, Z, const N: usize> = Pair<RNDM<F, N>, Z>;
+
+/// Iteratively solve the point source localisation problem with an additional variable
+/// using sliding primal-dual proximal splitting
+///
+/// The parametrisation is as for [`crate::forward_pdps::pointsource_forward_pdps_pair`].
+#[replace_float_literals(F::cast_from(literal))]
+pub fn pointsource_sliding_pdps_pair<
+    F,
+    I,
+    A,
+    S,
+    Reg,
+    P,
+    Z,
+    R,
+    Y,
+    /*KOpM, */ KOpZ,
+    H,
+    const N: usize,
+>(
+    opA: &A,
+    b: &A::Observable,
+    reg: Reg,
+    prox_penalty: &P,
+    config: &SlidingPDPSConfig<F>,
+    iterator: I,
+    mut plotter: SeqPlotter<F, N>,
+    //opKμ : KOpM,
+    opKz: &KOpZ,
+    fnR: &R,
+    fnH: &H,
+    mut z: Z,
+    mut y: Y,
+) -> MeasureZ<F, Z, N>
+where
+    F: Float + ToNalgebraRealField,
+    I: AlgIteratorFactory<IterInfo<F, N>>,
+    A: ForwardModel<MeasureZ<F, Z, N>, F, PairNorm<Radon, L2, L2>, PreadjointCodomain = Pair<S, Z>>
+        + AdjointProductPairBoundedBy<MeasureZ<F, Z, N>, P, IdOp<Z>, FloatType = F>
+        + BoundedCurvature<FloatType = F>,
+    S: DifferentiableRealMapping<F, N>,
+    for<'b> &'b A::Observable: std::ops::Neg<Output = A::Observable> + Instance<A::Observable>,
+    PlotLookup: Plotting<N>,
+    RNDM<F, N>: SpikeMerging<F>,
+    Reg: SlidingRegTerm<F, N>,
+    P: ProxPenalty<F, S, Reg, N>,
+    // KOpM : Linear<RNDM<F, N>, Codomain=Y>
+    //     + GEMV<F, RNDM<F, N>>
+    //     + Preadjointable<
+    //         RNDM<F, N>, Y,
+    //         PreadjointCodomain = S,
+    //     >
+    //     + TransportLipschitz<L2Squared, FloatType=F>
+    //     + AdjointProductBoundedBy<RNDM<F, N>, 𝒟, FloatType=F>,
+    // for<'b> KOpM::Preadjoint<'b> : GEMV<F, Y>,
+    // Since Z is Hilbert, we may just as well use adjoints for K_z.
+    KOpZ: BoundedLinear<Z, L2, L2, F, Codomain = Y>
+        + GEMV<F, Z>
+        + Adjointable<Z, Y, AdjointCodomain = Z>,
+    for<'b> KOpZ::Adjoint<'b>: GEMV<F, Y>,
+    Y: AXPY<F> + Euclidean<F, Output = Y> + Clone + ClosedAdd,
+    for<'b> &'b Y: Instance<Y>,
+    Z: AXPY<F, Owned = Z> + Euclidean<F, Output = Z> + Clone + Norm<F, L2> + Dist<F, L2>,
+    for<'b> &'b Z: Instance<Z>,
+    R: Prox<Z, Codomain = F>,
+    H: Conjugable<Y, F, Codomain = F>,
+    for<'b> H::Conjugate<'b>: Prox<Y>,
+{
+    // Check parameters
+    assert!(
+        config.τ0 > 0.0
+            && config.τ0 < 1.0
+            && config.σp0 > 0.0
+            && config.σp0 < 1.0
+            && config.σd0 > 0.0
+            && config.σp0 * config.σd0 <= 1.0,
+        "Invalid step length parameters"
+    );
+    config.transport.check();
+
+    // Initialise iterates
+    let mut μ = DiscreteMeasure::new();
+    let mut γ1 = DiscreteMeasure::new();
+    let mut residual = calculate_residual(Pair(&μ, &z), opA, b);
+    let zero_z = z.similar_origin();
+
+    // Set up parameters
+    // TODO: maybe this PairNorm doesn't make sense here?
+    // let opAnorm = opA.opnorm_bound(PairNorm(Radon, L2, L2), L2);
+    let bigθ = 0.0; //opKμ.transport_lipschitz_factor(L2Squared);
+    let bigM = 0.0; //opKμ.adjoint_product_bound(&op𝒟).unwrap().sqrt();
+    let nKz = opKz.opnorm_bound(L2, L2);
+    let ℓ = 0.0;
+    let opIdZ = IdOp::new();
+    let (l, l_z) = opA
+        .adjoint_product_pair_bound(prox_penalty, &opIdZ)
+        .unwrap();
+    // We need to satisfy
+    //
+    //     τσ_d M (1-σ_p L_z)/(1 - τ L) + [σ_p L_z + σ_p σ_d ‖K_z‖²] < 1
+    //
+    // with 1 > σ_p L_z and 1 > τ L.
+    //
+    // To do so, we first solve σ_p and σ_d from the standard PDPS step length condition
+    // [σ_p L_z + σ_p σ_d ‖K_z‖²] < 1; then we solve τ from the rest.
+    let σ_d = config.σd0 / nKz;
+    let σ_p = config.σp0 / (l_z + config.σd0 * nKz);
+    // Observe that 1 - [σ_p L_z + σ_p σ_d ‖K_z‖²] = 1 - σ_{p,0}.
+    // We get the condition τσ_d M (1-σ_p L_z) < (1-σ_{p,0})(1-τ L)
+    // ⟺ τ [ σ_d M (1-σ_p L_z) + (1-σ_{p,0}) L ] < (1-σ_{p,0}).
+    let φ = 1.0 - config.σp0;
+    let a = 1.0 - σ_p * l_z;
+    let τ = config.τ0 * φ / (σ_d * bigM * a + φ * l);
+    let ψ = 1.0 - τ * l;
+    let β = σ_p * config.σd0 * nKz / a; // σ_p * σ_d * (nKz * nK_z) / a;
+    assert!(β < 1.0);
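+    // Numeric sanity check (illustrative, not from the manuscript): with the defaults
+    // σ_{d,0} = 0.05 and σ_{p,0} = 0.99, and, say, ‖K_z‖ = 1 and L_z = 1, we get
+    // σ_d = 0.05 and σ_p = 0.99/1.05 ≈ 0.943, so that
+    // σ_p L_z + σ_p σ_d ‖K_z‖² = σ_p (L_z + σ_{d,0}‖K_z‖) = σ_{p,0} = 0.99 < 1.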
+    // Now we need κ‖K_μ(π_♯^1 - π_♯^0)γ‖^2 ≤ (1/θ - τ[ℓ_F + ℓ]) ∫ c_2 dγ for κ defined as:
+    let κ = τ * σ_d * ψ / ((1.0 - β) * ψ - τ * σ_d * bigM);
+    // The factor two in the manuscript disappears due to the definition of 𝚹 being
+    // for ‖x-y‖₂² instead of c_2(x, y)=‖x-y‖₂²/2.
+    let (maybe_ℓ_F0, maybe_transport_lip) = opA.curvature_bound_components();
+    let transport_lip = maybe_transport_lip.unwrap();
+    let calculate_θ = |ℓ_F, max_transport| {
+        let ℓ_r = transport_lip * max_transport;
+        config.transport.θ0 / (τ * (ℓ + ℓ_F + ℓ_r) + κ * bigθ * max_transport)
+    };
+    let mut θ_or_adaptive = match maybe_ℓ_F0 {
+        // We assume that the residual is decreasing.
+        Some(ℓ_F0) => TransportStepLength::AdaptiveMax {
+            l: ℓ_F0 * b.norm2(), // TODO: could estimate by computing the real residual
+            max_transport: 0.0,
+            g: calculate_θ,
+        },
+        None => TransportStepLength::FullyAdaptive {
+            l: F::EPSILON,
+            max_transport: 0.0,
+            g: calculate_θ,
+        },
+    };
+    // Acceleration is not currently supported
+    // let γ = dataterm.factor_of_strong_convexity();
+    let ω = 1.0;
+
+    // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled
+    // by τ compared to the conditional gradient approach.
+    let tolerance = config.insertion.tolerance * τ * reg.tolerance_scaling();
+    let mut ε = tolerance.initial();
+
+    let starH = fnH.conjugate();
+
+    // Statistics
+    let full_stats = |residual: &A::Observable, μ: &RNDM<F, N>, z: &Z, ε, stats| IterInfo {
+        value: residual.norm2_squared_div2()
+            + fnR.apply(z)
+            + reg.apply(μ)
+            + fnH.apply(/* opKμ.apply(μ) + */ opKz.apply(z)),
+        n_spikes: μ.len(),
+        ε,
+        // postprocessing: config.insertion.postprocessing.then(|| μ.clone()),
+        ..stats
+    };
+    let mut stats = IterInfo::new();
+
+    // Run the algorithm
+    for state in iterator.iter_init(|| full_stats(&residual, &μ, &z, ε, stats.clone())) {
+        // Calculate initial transport
+        let Pair(v, _) = opA.preadjoint().apply(&residual);
+        //opKμ.preadjoint().apply_add(&mut v, y);
+        // We want to proceed as in Example 4.12 but with v and v̆ as in §5.
+        // With A(ν, z) = A_μ ν + A_z z, following Example 5.1, we have
+        // P_ℳ[F'(ν, z) + Ξ(ν, z, y)]= A_ν^*[A_ν ν + A_z z] + K_μ ν = A_ν^*A(ν, z) + K_μ ν,
+        // where A_ν^* becomes a multiplier.
+        // This is much easier with K_μ = 0, which is the only reason why we are enforcing it.
+        // TODO: Write a version of initial_transport that can deal with K_μ ≠ 0.
+
+        let (μ_base_masses, mut μ_base_minus_γ0) =
+            initial_transport(&mut γ1, &mut μ, τ, &mut θ_or_adaptive, v);
+
+        // Solve finite-dimensional subproblem several times until the dual variable for the
+        // regularisation term conforms to the assumptions made for the transport above.
+        let (maybe_d, _within_tolerances, mut τv̆, z_new) = 'adapt_transport: loop {
+            // Calculate τv̆ = τA_*(A[μ_transported + μ_transported_base]-b)
+            let residual_μ̆ =
+                calculate_residual2(Pair(&γ1, &z), Pair(&μ_base_minus_γ0, &zero_z), opA, b);
+            let Pair(mut τv̆, τz̆) = opA.preadjoint().apply(residual_μ̆ * τ);
+            // opKμ.preadjoint().gemv(&mut τv̆, τ, y, 1.0);
+
+            // Construct μ^{k+1} by solving finite-dimensional subproblems and insert new spikes.
+            let (maybe_d, within_tolerances) = prox_penalty.insert_and_reweigh(
+                &mut μ,
+                &mut τv̆,
+                &γ1,
+                Some(&μ_base_minus_γ0),
+                τ,
+                ε,
+                &config.insertion,
+                &reg,
+                &state,
+                &mut stats,
+            );
+
+            // Do the z variable primal update here to be able to estimate B_{v̆^k-v^{k+1}}.
+            let mut z_new = τz̆;
+            opKz.adjoint().gemv(&mut z_new, -σ_p, &y, -σ_p / τ);
+            z_new = fnR.prox(σ_p, z_new + &z);
+
+            // A posteriori transport adaptation.
+            if aposteriori_transport(
+                &mut γ1,
+                &mut μ,
+                &mut μ_base_minus_γ0,
+                &μ_base_masses,
+                Some(z_new.dist(&z, L2)),
+                ε,
+                &config.transport,
+            ) {
+                break 'adapt_transport (maybe_d, within_tolerances, τv̆, z_new);
+            }
+        };
+
+        stats.untransported_fraction = Some({
+            assert_eq!(μ_base_masses.len(), γ1.len());
+            let (a, b) = stats.untransported_fraction.unwrap_or((0.0, 0.0));
+            let source = μ_base_masses.iter().map(|v| v.abs()).sum();
+            (a + μ_base_minus_γ0.norm(Radon), b + source)
+        });
+        stats.transport_error = Some({
+            assert_eq!(μ_base_masses.len(), γ1.len());
+            let (a, b) = stats.transport_error.unwrap_or((0.0, 0.0));
+            (a + μ.dist_matching(&γ1), b + γ1.norm(Radon))
+        });
+
+        // Merge spikes.
+        // This crucially expects the merge routine to be stable with respect to spike locations,
+        // and not to perform any pruning. That is to be done below simultaneously for γ.
+        let ins = &config.insertion;
+        if ins.merge_now(&state) {
+            stats.merged += prox_penalty.merge_spikes_no_fitness(
+                &mut μ,
+                &mut τv̆,
+                &γ1,
+                Some(&μ_base_minus_γ0),
+                τ,
+                ε,
+                ins,
+                &reg,
+                //Some(|μ̃ : &RNDM<F, N>| calculate_residual(Pair(μ̃, &z), opA, b).norm2_squared_div2()),
+            );
+        }
+
+        // Prune spikes with zero weight. To maintain correct ordering between μ and γ1, the
+        // latter also needs to be pruned when μ is.
+        // TODO: This could do with a two-vector Vec::retain to avoid copies.
+        let μ_new = DiscreteMeasure::from_iter(μ.iter_spikes().filter(|δ| δ.α != F::ZERO).cloned());
+        if μ_new.len() != μ.len() {
+            let mut μ_iter = μ.iter_spikes();
+            γ1.prune_by(|_| μ_iter.next().unwrap().α != F::ZERO);
+            stats.pruned += μ.len() - μ_new.len();
+            μ = μ_new;
+        }
+
+        // Do dual update
+        // opKμ.gemv(&mut y, σ_d*(1.0 + ω), &μ, 1.0);    // y = y + σ_d K[(1+ω)(μ,z)^{k+1}]
+        opKz.gemv(&mut y, σ_d * (1.0 + ω), &z_new, 1.0);
+        // opKμ.gemv(&mut y, -σ_d*ω, μ_base, 1.0);// y = y + σ_d K[(1+ω)(μ,z)^{k+1} - ω (μ,z)^k]-b
+        opKz.gemv(&mut y, -σ_d * ω, z, 1.0); // y = y + σ_d K[(1+ω)(μ,z)^{k+1} - ω (μ,z)^k]-b
+        y = starH.prox(σ_d, y);
+        z = z_new;
+
+        // Update residual
+        residual = calculate_residual(Pair(&μ, &z), opA, b);
+
+        // Update step length parameters
+        // let ω = pdpsconfig.acceleration.accelerate(&mut τ, &mut σ, γ);
+
+        // Give statistics if requested
+        let iter = state.iteration();
+        stats.this_iters += 1;
+
+        state.if_verbose(|| {
+            plotter.plot_spikes(iter, maybe_d.as_ref(), Some(&τv̆), &μ);
+            full_stats(
+                &residual,
+                &μ,
+                &z,
+                ε,
+                std::mem::replace(&mut stats, IterInfo::new()),
+            )
+        });
+
+        // Update main tolerance for next iteration
+        ε = tolerance.update(ε, iter);
+    }
+
+    let fit = |μ̃: &RNDM<F, N>| {
+        (opA.apply(Pair(μ̃, &z))-b).norm2_squared_div2()
+        //+ fnR.apply(z) + reg.apply(μ)
+        + fnH.apply(/* opKμ.apply(&μ̃) + */ opKz.apply(&z))
+    };
+
+    μ.merge_spikes_fitness(config.insertion.final_merging_method(), fit, |&v| v);
+    μ.prune();
+    Pair(μ, z)
+}
--- a/src/subproblem.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/subproblem.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -14,15 +14,12 @@
 
 pub mod nonneg;
 pub mod unconstrained;
+pub mod l1squared_unconstrained;
+pub mod l1squared_nonneg;
 
-#[deprecated(since = "1.0.1", note = "Moved to submodule nonneg")]
-pub use nonneg::{
-    quadratic_nonneg,
-    quadratic_nonneg_ssn,
-    quadratic_nonneg_fb
-};
 
-/// Method for solving finite-dimensional subproblems
+/// Method for solving finite-dimensional subproblems.
+/// Not all outer methods necessarily support all options.
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
 #[allow(dead_code)]
 pub enum InnerMethod {
@@ -30,6 +27,8 @@
     FB,
     /// Semismooth Newton
     SSN,
+    /// PDPS
+    PDPS,
 }
 
 /// Settings for the solution of finite-dimensional subproblems
@@ -37,8 +36,10 @@
 pub struct InnerSettings<F : Float> {
     /// Method
     pub method : InnerMethod,
-    /// Proportional step length (∈ [0, 1) for `InnerMethod::FB`).
-    pub τ0 : F,
+    /// Proportional step length ∈ [0, 1) for `InnerMethod::FB`.
+    pub fb_τ0 : F,
+    /// Proportional primal and dual step lengths for `InnerMethod::PDPS`.
+    pub pdps_τσ0 : (F, F),
     /// Fraction of `tolerance` given to inner algorithm
     pub tolerance_mult : F,
     /// Iterator options
@@ -50,7 +51,8 @@
 impl<F : Float> Default for InnerSettings<F> {
     fn default() -> Self {
         InnerSettings {
-            τ0 : 0.99,
+            fb_τ0 : 0.99,
+            pdps_τσ0 : (1.98, 0.5),
             iterator_options : AlgIteratorOptions {
                 // max_iter cannot be very small, as initially FB needs many iterations, although
                 // on later invocations even one or two tends to be enough
@@ -62,7 +64,7 @@
                 quiet : true,
                 .. Default::default()
             },
-            method : InnerMethod::FB,
+            method : InnerMethod::SSN,
             tolerance_mult : 0.01,
         }
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/subproblem/l1squared_nonneg.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -0,0 +1,437 @@
+/*!
+Iterative algorithms for solving the finite-dimensional subproblem with constraint.
+*/
+
+use nalgebra::DVector;
+use numeric_literals::replace_float_literals;
+use itertools::izip;
+//use std::iter::zip;
+use std::cmp::Ordering::*;
+
+use alg_tools::iterate::{
+    AlgIteratorFactory,
+    AlgIteratorState,
+};
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::norms::{Dist, L1};
+use alg_tools::nanleast::NaNLeast;
+
+use crate::types::*;
+use super::{
+    InnerMethod,
+    InnerSettings
+};
+use super::nonneg::nonneg_soft_thresholding;
+use super::l1squared_unconstrained::l1squared_prox;
+
+/// Return the maximum of `dist` and the distance of the interval `[lb, ub]` to zero.
+#[replace_float_literals(F::cast_from(literal))]
+pub(super) fn max_interval_dist_to_zero<F : Float>(dist : F, lb : F, ub : F) -> F {
+    if lb < 0.0 {
+        if ub > 0.0 {
+            dist
+        } else {
+            dist.max(-ub)
+        }
+    } else /* lb ≥ 0.0 */ {
+        dist.max(lb)
+    }
+}
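+
+// For illustration: max_interval_dist_to_zero(0.1, -0.5, -0.2) = 0.2, as the interval
+// lies at distance 0.2 from zero, while max_interval_dist_to_zero(0.1, -0.5, 0.3) = 0.1,
+// as that interval contains zero.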
+
+/// Returns the ∞-norm minimal subdifferential of $x ↦ (β/2)|x-y|_1^2 - g^⊤ x + λ\|x\|₁ + δ_{≥ 0}(x)$ at $x$.
+#[replace_float_literals(F::cast_from(literal))]
+fn min_subdifferential<F : Float + nalgebra::RealField>(
+    y : &DVector<F>,
+    x : &DVector<F>,
+    g : &DVector<F>,
+    λ : F,
+    β : F
+) -> F {
+    let mut val = 0.0;
+    let tmp = β*y.dist(x, L1);
+    for (&g_i, &x_i, y_i) in izip!(g.iter(), x.iter(), y.iter()) {
+        let (mut lb, mut ub) = (-g_i, -g_i);
+        match x_i.partial_cmp(y_i) {
+            Some(Greater) => { lb += tmp; ub += tmp },
+            Some(Less) => { lb -= tmp; ub -= tmp },
+            Some(Equal) => { lb -= tmp; ub += tmp },
+            None => {},
+        }
+        match x_i.partial_cmp(&0.0) {
+            Some(Greater) => { lb += λ; ub += λ },
+            // Less should not happen
+            Some(Less|Equal) => { lb = F::NEG_INFINITY; ub += λ },
+            None => {},
+        };
+        val = max_interval_dist_to_zero(val, lb, ub);
+    }
+    val
+}
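+
+// For illustration (scalar case, hypothetical values): with y = 0, x = 1, g = 2 and
+// λ = β = 1, we get tmp = β|y-x| = 1; x > y gives [lb, ub] = [-2+1, -2+1] = [-1, -1],
+// and x > 0 then shifts this by λ to [0, 0], so the returned value is 0. Indeed,
+// x = 1 minimises (β/2)|x-y|² - gx + λ|x| + δ_{≥0}(x) = x²/2 - x.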
+
+#[replace_float_literals(F::cast_from(literal))]
+fn lbd_soft_thresholding<F : Float>(v : F, λ : F, b : F) -> F
+{
+    match (b >= 0.0, v >= b) {
+        (true, false)  => b,
+        (true, true)   => b.max(v - λ),         // soft-to-b from above
+        (false, true)  => super::unconstrained::soft_thresholding(v, λ),
+        (false, false) => 0.0.min(b.max(v + λ)), // soft-to-0 with lower bound
+    }
+}
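+
+// For illustration, with λ = 1.0 and lower bound b = 0.5: v = 2.0 yields
+// b.max(v - λ) = 1.0; v = 0.7 yields 0.5 (locked to the bound); and v = 0.2,
+// lying below the nonnegative bound, also yields 0.5.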
+
+/// Calculate $prox_f(x)$ for $f(x)=\frac{β}{2}\norm{x-y}_1^2 + δ_{≥0}(x)$.
+///
+/// To derive an algorithm for this, we can use
+/// $prox_f(x) = y + prox_{f_0}(x - y)$ for
+/// $f_0(z)=\frac{β}{2}\norm{z}_1^2 + δ_{≥-y}(z)$.
+/// Now, the optimality conditions for $w = prox_{f_0}(x)$ are
+/// $$\tag{*}
+///     x ∈ w + β\norm{w}_1\sign w + N_{≥ -y}(w).
+/// $$
+/// If we know $\norm{w}_1$, then this is easily solved by lower-bounded soft-thresholding.
+/// We find this by sorting the elements by the distance to the 'locked' lower-bounded
+/// soft-thresholding target ($0$ or $-y_i$).
+/// Then we loop over this sorted vector, increasing our estimate of $\norm{w}_1$ as we decide
+/// that the soft-thresholding parameter $β\norm{w}_1$ has to be such that the passed elements
+/// will reach their locked value (after which they cannot change anymore for a larger
+/// soft-thresholding parameter). This has to be slightly more fine-grained to account
+/// for the case that $-y_i<0$ and $x_i < -y_i$.
+///
+/// Indeed, denoting by $x'$ and $w'$ the subsets of elements such that $w_i ≠ 0$ and $w_i > -y_i$,
+/// we can calculate, by applying $⟨\cdot, \sign w'⟩$ to the corresponding lines of (*), that
+/// $$
+///     \norm{x'} = \norm{w'} + β \norm{w}_1 m.
+/// $$
+/// Having a value for $t = \norm{w}_1-\norm{w'}_1$, we can then calculate
+/// $$
+///     \norm{x'}_1 + t = (1+β m)\norm{w}_1,
+/// $$
+/// from where we can calculate the soft-thresholding parameter $λ=β\norm{w}_1$.
+/// Since we do not actually know the unlocked elements, but just loop over all the possibilities
+/// for them, we have to check that $λ$ is above the current lower bound for this parameter
+/// (`shift` in the code), and below a value that would cause changes in the locked set
+/// (`max_shift` in the code).
+#[replace_float_literals(F::cast_from(literal))]
+pub(super) fn l1squared_nonneg_prox<F :Float + nalgebra::RealField>(
+    sorted : &mut Vec<(F, F, F, Option<(F, F)>)>,
+    x : &mut DVector<F>,
+    y : &DVector<F>,
+    β : F
+) {
+    // nalgebra double-definition bullshit workaround
+    //let max = alg_tools::NumTraitsFloat::max;
+    let abs = alg_tools::NumTraitsFloat::abs;
+
+    *x -= y;
+
+    for (az_x_i, &x_i, &y_i) in izip!(sorted.iter_mut(), x.iter(), y.iter()) {
+        // The first component of each az_x_i contains the distance of x_i to the
+        // soft-thresholding limit. If it is negative, it is always reached.
+        // The second component contains the absolute value of the result for that component
+        // w_i of the solution, if the soft-thresholding limit is reached.
+        // This is stored here due to the sorting, although could otherwise be computed directly.
+        // Likewise the third component contains the absolute value of x_i.
+        // The fourth component contains an initial lower bound.
+        let a_i = abs(x_i);
+        let b = -y_i;
+        *az_x_i = match (b >= 0.0, x_i >= b) {
+            (true, false)  => (x_i-b, b, a_i, None),  // w_i=b, so sorting element negative!
+            (true, true)   => (x_i-b, b, a_i, None),  // soft-to-b from above
+            (false, true)  => (a_i, 0.0, a_i, None),  // soft-to-0
+            (false, false) => (a_i, 0.0, a_i, Some((b, b-x_i))),   // soft-to-0 with initial limit
+        };
+    }
+    sorted.as_mut_slice()
+          .sort_unstable_by(|(a, _, _, _), (b, _, _, _)| NaNLeast(*a).cmp(&NaNLeast(*b)));
+
+    let mut nwlow = 0.0;
+    let mut shift = 0.0;
+    // This main loop is over different combinations of elements of the solution locked
+    // to the soft-thresholding lower bound (`0` or `-y_i`), in the sorted order of locking.
+    for (i, az_x_i) in izip!(0.., sorted.iter()) {
+        // This `'attempt` loop is over different combinations of elements locked to the
+        // lower bound (`-y_i ≤ 0`). It calculates `max_shift` as the maximum shift that
+        // can be done until the locking would change (or become non-strictly-complementary).
+        // If the main rule (*) gives an estimate of `λ` that stays below `max_shift`, it is
+        // accepted. Otherwise `shift` is updated to `max_shift`, and we attempt again,
+        // with the non-locking set that participates in the calculation of `λ` then including
+        // the elements that are no longer locked to the lower bound.
+        'attempt: loop {
+            let mut nwthis = 0.0; // contribution to ‖w‖ from elements with locking
+                                  // soft-thresholding parameter = `shift`
+            let mut nxmore = 0.0; // ‖x'‖ for those elements thought not to be locked to
+                                  // either the soft-thresholding limit or the lower bound
+            let mut nwlbd = 0.0;  // contribution to ‖w‖ from those elements locked to their
+                                  // lower bound
+            let mut m = 0;
+            let mut max_shift = F::INFINITY; // maximal shift for which our estimate of the set of
+                                             // unlocked elements is valid.
+            let mut max_shift_from_lbd = false; // Whether max_shift comes from the next element
+                                                // or from a lower bound being reached.
+            for az_x_j in sorted[i as usize..].iter() {
+                if az_x_j.0 <= shift {
+                    nwthis += az_x_j.1;
+                } else {
+                    match az_x_j.3 {
+                        Some((l, s)) if shift < s => {
+                            if max_shift > s {
+                                max_shift_from_lbd = true;
+                                max_shift = s;
+                            }
+                            nwlbd += -l;
+                        },
+                        _ => {
+                            nxmore += az_x_j.2;
+                            if m == 0 && max_shift > az_x_j.0 {
+                                max_shift = az_x_j.0;
+                                max_shift_from_lbd = false;
+                            }
+                            m += 1;
+                        }
+                    }
+                }
+            }
+
+            // We need ‖x'‖ = ‖w'‖ + β m ‖w‖, i.e. ‖x'‖ + (‖w‖-‖w'‖) = (1 + β m)‖w‖.
+            let tmp = β*(nxmore + (nwlow + nwthis + nwlbd))/(1.0 + β*F::cast_from(m));
+            if tmp > max_shift {
+                if max_shift_from_lbd {
+                    shift = max_shift;
+                    continue 'attempt;
+                } else {
+                    break 'attempt
+                }
+            } else if tmp < shift {
+                // TODO: this should probably be an assert!(false)
+                break 'attempt;
+            } else {
+                // success
+                x.zip_apply(y, |x_i, y_i| *x_i = y_i + lbd_soft_thresholding(*x_i, tmp, -y_i));
+                return
+            }
+        }
+        shift = az_x_i.0;
+        nwlow += az_x_i.1;
+    }
+    // TODO: this fallback has not been verified to be correct
+    x.zip_apply(y, |x_i, y_i| *x_i = y_i + lbd_soft_thresholding(*x_i, shift, -y_i));
+}
+
+/// Proximal point method implementation of [`l1squared_nonneg`].
+/// For detailed documentation of the inputs and outputs, refer to there.
+///
+/// The `λ` component of the model is handled in the gradient step instead of the proximal step
+/// for potential performance improvements.
+#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
+pub fn l1squared_nonneg_pp<F, I>(
+    y : &DVector<F::MixedType>,
+    g : &DVector<F::MixedType>,
+    λ_ : F,
+    β_ : F,
+    x : &mut DVector<F::MixedType>,
+    τ_ : F,
+    θ_ : F,
+    iterator : I
+) -> usize
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>
+{
+    let λ = λ_.to_nalgebra_mixed();
+    let β = β_.to_nalgebra_mixed();
+    let mut τ = τ_.to_nalgebra_mixed();
+    let θ = θ_.to_nalgebra_mixed();
+    let mut tmp = std::iter::repeat((0.0, 0.0, 0.0, None)).take(x.len()).collect();
+    let mut iters = 0;
+
+    iterator.iterate(|state| {
+        // Primal step: x^{k+1} = prox_{(τβ/2)|.-y|_1^2+δ_{≥0}}(x^k - τ(λ𝟙-g))
+        x.apply(|x_i| *x_i -= τ*λ);
+        x.axpy(τ, g, 1.0);
+        l1squared_nonneg_prox(&mut tmp, x, y, τ*β);
+        
+        iters += 1;
+        // This gives O(1/N^2) rates due to monotonicity of function values.
+        // Higher acceleration does not seem to be numerically stable.
+        τ += θ;
+
+        // This gives O(1/N^3) rates due to monotonicity of function values.
+        // Higher acceleration does not seem to be numerically stable.
+        //τ += F::cast_from(iters).to_nalgebra_mixed()*θ;
+
+        state.if_verbose(|| {
+            F::from_nalgebra_mixed(min_subdifferential(y, x, g, λ, β))
+        })
+    });
+
+    iters
+}
+
+/// PDPS implementation of [`l1squared_nonneg`].
+/// For detailed documentation of the inputs and outputs, refer to there.
+///
+/// The `λ` component of the model is handled in the proximal step instead of the gradient step
+/// for potential performance improvements.
+/// The parameter `θ` is used to rescale the operator (identity) of the PDPS model.
+#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
+pub fn l1squared_nonneg_pdps<F, I>(
+    y : &DVector<F::MixedType>,
+    g : &DVector<F::MixedType>,
+    λ_ : F,
+    β_ : F,
+    x : &mut DVector<F::MixedType>,
+    τ_ : F,
+    σ_ : F,
+    θ_ : F,
+    iterator : I
+) -> usize
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>
+{
+    let λ = λ_.to_nalgebra_mixed();
+    let β = β_.to_nalgebra_mixed();
+    let τ = τ_.to_nalgebra_mixed();
+    let σ = σ_.to_nalgebra_mixed();
+    let θ = θ_.to_nalgebra_mixed();
+    let mut w = DVector::zeros(x.len());
+    let mut tmp = DVector::zeros(x.len());
+    let mut xprev = x.clone();
+    let mut iters = 0;
+
+    iterator.iterate(|state| {
+        // Primal step: x^{k+1} = prox_{(τβ/2)|.-y|_1^2}(x^k - τ(θ w^k - g))
+        x.axpy(-τ*θ, &w, 1.0);
+        x.axpy(τ, g, 1.0);
+        l1squared_prox(&mut tmp, x, y, τ*β);
+        
+        // Dual step: w^{k+1} = proj_{[-∞,λ]}(w^k + σθ(2x^{k+1}-x^k))
+        w.axpy(2.0*σ*θ, x, 1.0);
+        w.axpy(-σ*θ, &xprev, 1.0);
+        w.apply(|w_i| *w_i = w_i.min(λ));
+        xprev.copy_from(x);
+        
+        iters +=1;
+
+        state.if_verbose(|| {
+            F::from_nalgebra_mixed(min_subdifferential(y, x, g, λ, β))
+        })
+    });
+
+    iters
+}
+
+/// Alternative PDPS implementation of [`l1squared_nonneg`].
+/// For detailed documentation of the inputs and outputs, refer to there.
+///
+/// By not dualising the 1-norm, this should produce more sparse solutions than
+/// [`l1squared_nonneg_pdps`].
+///
+/// The `λ` component of the model is handled in the proximal step instead of the gradient step
+/// for potential performance improvements.
+/// The parameter `θ` is used to rescale the operator (identity) of the PDPS model.
+/// We rewrite
+/// <div>$$
+///     \begin{split}
+///     & \min_{x ∈ ℝ^n} \frac{β}{2} |x-y|_1^2 - g^⊤ x + λ\|x\|₁ + δ_{≥ 0}(x) \\
+///     & = \min_{x ∈ ℝ^n} \max_{w} ⟨θ w, x⟩ - g^⊤ x + λ\|x\|₁ + δ_{≥ 0}(x)
+///      - \left(x ↦ \frac{β}{2θ} |x-y|_1^2 \right)^*(w).
+///     \end{split}
+/// $$</div>
+#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
+pub fn l1squared_nonneg_pdps_alt<F, I>(
+    y : &DVector<F::MixedType>,
+    g : &DVector<F::MixedType>,
+    λ_ : F,
+    β_ : F,
+    x : &mut DVector<F::MixedType>,
+    τ_ : F,
+    σ_ : F,
+    θ_ : F,
+    iterator : I
+) -> usize
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>
+{
+    let λ = λ_.to_nalgebra_mixed();
+    let τ = τ_.to_nalgebra_mixed();
+    let σ = σ_.to_nalgebra_mixed();
+    let θ = θ_.to_nalgebra_mixed();
+    let β = β_.to_nalgebra_mixed();
+    let σθ = σ*θ;
+    let τθ = τ*θ;
+    let mut w = DVector::zeros(x.len());
+    let mut tmp = DVector::zeros(x.len());
+    let mut xprev = x.clone();
+    let mut iters = 0;
+
+    iterator.iterate(|state| {
+        // Primal step: x^{k+1} = nonnegsoft_τλ(x^k - τ(θ w^k -g))
+        x.axpy(-τθ, &w, 1.0);
+        x.axpy(τ, g, 1.0);
+        x.apply(|x_i| *x_i = nonneg_soft_thresholding(*x_i, τ*λ));
+        
+        // Dual step: with g(x) = (β/(2θ))‖x-y‖₁² and q = w^k + σ(2x^{k+1}-x^k),
+        // we compute w^{k+1} = prox_{σg^*}(q)
+        //                    = q - σ prox_{g/σ}(q/σ)
+        //                    = q - σ prox_{(β/(2θσ))‖.-y‖₁²}(q/σ)
+        //                    = σ(q/σ - prox_{(β/(2θσ))‖.-y‖₁²}(q/σ))
+        // where q/σ = w^k/σ + (2x^{k+1}-x^k),
+        w /= σ;
+        w.axpy(2.0, x, 1.0);
+        w.axpy(-1.0, &xprev, 1.0);
+        xprev.copy_from(&w); // use xprev as temporary variable
+        l1squared_prox(&mut tmp, &mut xprev, y, β/σθ);
+        w -= &xprev;
+        w *= σ;
+        xprev.copy_from(x);
+        
+        iters += 1;
+
+        state.if_verbose(|| {
+            F::from_nalgebra_mixed(min_subdifferential(y, x, g, λ, β))
+        })
+    });
+
+    iters
+}
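+
+// Step length sanity check (illustrative): for K = θ·Id the standard PDPS condition
+// τσ‖K‖² < 1 becomes τσθ² < 1; with the defaults pdps_τσ0 = (1.98, 0.5) and θ = 1
+// used in `l1squared_nonneg` below, 1.98 · 0.5 = 0.99 < 1.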
+
+
+/// This function applies an iterative method for the solution of the problem
+/// <div>$$
+///     \min_{x ∈ ℝ^n} \frac{β}{2} |x-y|_1^2 - g^⊤ x + λ\|x\|₁ + δ_{≥ 0}(x).
+/// $$</div>
+///
+/// This function returns the number of iterations taken.
+#[replace_float_literals(F::cast_from(literal))]
+pub fn l1squared_nonneg<F, I>(
+    y : &DVector<F::MixedType>,
+    g : &DVector<F::MixedType>,
+    λ : F,
+    β : F,
+    x : &mut DVector<F::MixedType>,
+    inner : &InnerSettings<F>,
+    iterator : I
+) -> usize
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>
+{
+    match inner.method {
+        InnerMethod::PDPS => {
+            let inner_θ = 1.0;
+            // Estimate of ‖K‖ for K=θ\Id.
+            let normest = inner_θ;
+            let (inner_τ, inner_σ) = (inner.pdps_τσ0.0 / normest, inner.pdps_τσ0.1 / normest);
+            l1squared_nonneg_pdps_alt(y, g, λ, β, x, inner_τ, inner_σ, inner_θ, iterator)
+        },
+        InnerMethod::FB => {
+            // The Lipschitz factor of ∇[x ↦ λ∑x - g^⊤ x] = λ𝟙 - g is zero, so FB is just a
+            // proximal point method with no constraints on τ. We “accelerate” it by adding to τ
+            // the constant θ on each iteration. Exponential growth does not seem stable.
+            let inner_τ = inner.fb_τ0;
+            let inner_θ = inner_τ;
+            l1squared_nonneg_pp(y, g, λ, β, x, inner_τ, inner_θ, iterator)
+        },
+        other => unimplemented!("{other:?} is unimplemented"),
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/subproblem/l1squared_unconstrained.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -0,0 +1,271 @@
+/*!
+Iterative algorithms for solving the finite-dimensional subproblem without constraints.
+*/
+
+use nalgebra::DVector;
+use numeric_literals::replace_float_literals;
+use itertools::izip;
+use std::cmp::Ordering::*;
+
+use std::iter::zip;
+use alg_tools::iterate::{
+    AlgIteratorFactory,
+    AlgIteratorState,
+};
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::nanleast::NaNLeast;
+use alg_tools::norms::{Dist, L1};
+
+use crate::types::*;
+use super::{
+    InnerMethod,
+    InnerSettings
+};
+use super::unconstrained::soft_thresholding;
+use super::l1squared_nonneg::max_interval_dist_to_zero;
+
+/// Calculate $prox_f(x)$ for $f(x)=\frac{β}{2}\norm{x-y}_1^2$.
+///
+/// To derive an algorithm for this, we can assume that $y=0$, as
+/// $prox_f(x) = y + prox_{f_0}(x - y)$ for $f_0(z)=\frac{β}{2}\norm{z}_1^2$.
+/// Now, the optimality conditions for $w = prox_f(x)$ are
+/// $$\tag{*}
+///     0 ∈ w-x + β\norm{w}_1\sign w.
+/// $$
+/// Clearly then $w = \soft_{β\norm{w}_1}(x)$.
+/// Thus the components of $x$ with smallest absolute value will be zeroed out.
+/// Denoting by $w'$ the non-zero components, and by $x'$ the corresponding components
+/// of $x$, and by $m$ their count, multiplying the corresponding lines of (*) by $\sign x'$,
+/// we obtain
+/// $$
+///     \norm{x'}_1 = (1+βm)\norm{w'}_1.
+/// $$
+/// That is, $\norm{w}_1=\norm{w'}_1=\norm{x'}_1/(1+βm)$.
+/// Thus, sorting $x$ by absolute value, and sequentially in order eliminating the smallest
+/// elements, we can easily calculate what $\norm{w}_1$ should be for that choice, and
+/// then easily calculate $w = \soft_{β\norm{w}_1}(x)$. We just have to verify that
+/// the resulting $w$ has the same norm. There's a shortcut to this, as we work
+/// sequentially: just check that the smallest assumed-nonzero component $i$ satisfies the
+/// condition of soft-thresholding to remain non-zero: $|x_i|>β\norm{x'}_1/(1+βm)$.
+/// Clearly, if this condition fails for $x_i$, it will fail for all the components
+/// already excluded, while, if it holds, it will hold for all components not excluded.
+#[replace_float_literals(F::cast_from(literal))]
+pub(super) fn l1squared_prox<F :Float + nalgebra::RealField>(
+    sorted_abs : &mut DVector<F>,
+    x : &mut DVector<F>,
+    y : &DVector<F>,
+    β : F
+) {
+    sorted_abs.copy_from(x);
+    sorted_abs.axpy(-1.0, y, 1.0);
+    sorted_abs.apply(|z_i| *z_i = num_traits::abs(*z_i));
+    sorted_abs.as_mut_slice().sort_unstable_by(|a, b| NaNLeast(*a).cmp(&NaNLeast(*b)));
+
+    let mut n = sorted_abs.sum();
+    for (m, az_i) in zip((1..=x.len() as u32).rev(), sorted_abs) {
+        // test first
+        let tmp = β*n/(1.0 + β*F::cast_from(m));
+        if *az_i <= tmp {
+            // Fail
+            n -= *az_i;
+        } else {
+            // Success
+            x.zip_apply(y, |x_i, y_i| *x_i = y_i + soft_thresholding(*x_i-y_i, tmp));
+            return
+        }
+    }
+    // m = 0 should always work; in that case x is zero.
+    x.fill(0.0);
+}
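+
+// Worked example (hypothetical values): with y = 0, β = 1 and x = (3, 1), the pass
+// with m = 2 gives β·n/(1+β·m) = 4/3 ≥ 1, eliminating the smaller element; the pass
+// with m = 1 gives 3/2 = 1.5 < 3, so w = soft_{1.5}((3, 1)) = (1.5, 0), and indeed
+// β‖w‖₁ = 1.5.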
+
+/// Returns the ∞-norm minimal subdifferential of $x ↦ (β/2)|x-y|_1^2 - g^⊤ x + λ\|x\|₁$ at $x$.
+#[replace_float_literals(F::cast_from(literal))]
+fn min_subdifferential<F : Float + nalgebra::RealField>(
+    y : &DVector<F>,
+    x : &DVector<F>,
+    g : &DVector<F>,
+    λ : F,
+    β : F
+) -> F {
+    let mut val = 0.0;
+    let tmp = β*y.dist(x, L1);
+    for (&g_i, &x_i, y_i) in izip!(g.iter(), x.iter(), y.iter()) {
+        let (mut lb, mut ub) = (-g_i, -g_i);
+        match x_i.partial_cmp(y_i) {
+            Some(Greater) => { lb += tmp; ub += tmp },
+            Some(Less) => { lb -= tmp; ub -= tmp },
+            Some(Equal) => { lb -= tmp; ub += tmp },
+            None => {},
+        }
+        match x_i.partial_cmp(&0.0) {
+            Some(Greater) => { lb += λ; ub += λ },
+            Some(Less) => { lb -= λ; ub -= λ },
+            Some(Equal) => { lb -= λ; ub += λ },
+            None => {},
+        };
+        val = max_interval_dist_to_zero(val, lb, ub);
+    }
+    val
+}
+
+
+/// PDPS implementation of [`l1squared_unconstrained`].
+/// For detailed documentation of the inputs and outputs, refer to there.
+///
+/// The `λ` component of the model is handled in the proximal step instead of the gradient step
+/// for potential performance improvements.
+#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
+pub fn l1squared_unconstrained_pdps<F, I>(
+    y : &DVector<F::MixedType>,
+    g : &DVector<F::MixedType>,
+    λ_ : F,
+    β_ : F,
+    x : &mut DVector<F::MixedType>,
+    τ_ : F,
+    σ_ : F,
+    iterator : I
+) -> usize
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>
+{
+    let λ = λ_.to_nalgebra_mixed();
+    let β = β_.to_nalgebra_mixed();
+    let τ = τ_.to_nalgebra_mixed();
+    let σ = σ_.to_nalgebra_mixed();
+    let mut w = DVector::zeros(x.len());
+    let mut tmp = DVector::zeros(x.len());
+    let mut xprev = x.clone();
+    let mut iters = 0;
+
+    iterator.iterate(|state| {
+        // Primal step: x^{k+1} = prox_{(τβ/2)|.-y|_1^2}(x^k - τ(w^k - g))
+        x.axpy(-τ, &w, 1.0);
+        x.axpy(τ, g, 1.0);
+        l1squared_prox(&mut tmp, x, y, τ*β);
+        
+        // Dual step: w^{k+1} = proj_{[-λ,λ]}(w^k + σ(2x^{k+1}-x^k))
+        w.axpy(2.0*σ, x, 1.0);
+        w.axpy(-σ, &xprev, 1.0);
+        w.apply(|w_i| *w_i = num_traits::clamp(*w_i, -λ, λ));
+        xprev.copy_from(x);
+        
+        iters += 1;
+
+        state.if_verbose(|| {
+            F::from_nalgebra_mixed(min_subdifferential(y, x, g, λ, β))
+        })
+    });
+
+    iters
+}
+
+/// Alternative PDPS implementation of [`l1squared_unconstrained`].
+/// For detailed documentation of the inputs and outputs, see that function.
+///
+/// By not dualising the 1-norm, this should produce more sparse solutions than
+/// [`l1squared_unconstrained_pdps`].
+///
+/// The `λ` component of the model is handled in the proximal step instead of the gradient step
+/// for potential performance improvements.
+/// The parameter `θ` is used to rescale the operator (the identity) of the PDPS model.
+/// We rewrite
+/// <div>$$
+///     \begin{split}
+///     & \min_{x ∈ ℝ^n} \frac{β}{2} |x-y|_1^2 - g^⊤ x + λ\|x\|₁ \\
+///     & = \min_{x ∈ ℝ^n} \max_{w} ⟨θ w, x⟩ - g^⊤ x + λ\|x\|₁
+///      - \left(x ↦ \frac{β}{2θ} |x-y|_1^2 \right)^*(w).
+///     \end{split}
+/// $$</div>
+#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
+pub fn l1squared_unconstrained_pdps_alt<F, I>(
+    y : &DVector<F::MixedType>,
+    g : &DVector<F::MixedType>,
+    λ_ : F,
+    β_ : F,
+    x : &mut DVector<F::MixedType>,
+    τ_ : F,
+    σ_ : F,
+    θ_ : F,
+    iterator : I
+) -> usize
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>
+{
+    let λ = λ_.to_nalgebra_mixed();
+    let τ = τ_.to_nalgebra_mixed();
+    let σ = σ_.to_nalgebra_mixed();
+    let θ = θ_.to_nalgebra_mixed();
+    let β = β_.to_nalgebra_mixed();
+    let σθ = σ*θ;
+    let τθ = τ*θ;
+    let mut w = DVector::zeros(x.len());
+    let mut tmp = DVector::zeros(x.len());
+    let mut xprev = x.clone();
+    let mut iters = 0;
+
+    iterator.iterate(|state| {
+        // Primal step: x^{k+1} = soft_τλ(x^k - τ(θ w^k -g))
+        x.axpy(-τθ, &w, 1.0);
+        x.axpy(τ, g, 1.0);
+        x.apply(|x_i| *x_i = soft_thresholding(*x_i, τ*λ));
+        
+        // Dual step: with g(x) = (β/(2θ))‖x-y‖₁² and q = w^k + σ(2x^{k+1}-x^k),
+        // we compute, via the Moreau decomposition,
+        //     w^{k+1} = prox_{σg^*}(q)
+        //             = q - σ prox_{g/σ}(q/σ)
+        //             = q - σ prox_{(β/(2θσ))‖.-y‖₁²}(q/σ)
+        //             = σ(q/σ - prox_{(β/(2θσ))‖.-y‖₁²}(q/σ)),
+        // where q/σ = w^k/σ + (2x^{k+1}-x^k).
+        w /= σ;
+        w.axpy(2.0, x, 1.0);
+        w.axpy(-1.0, &xprev, 1.0);
+        xprev.copy_from(&w); // use xprev as temporary variable
+        l1squared_prox(&mut tmp, &mut xprev, y, β/σθ);
+        w -= &xprev;
+        w *= σ;
+        xprev.copy_from(x);
+        
+        iters += 1;
+
+        state.if_verbose(|| {
+            F::from_nalgebra_mixed(min_subdifferential(y, x, g, λ, β))
+        })
+    });
+
+    iters
+}
+
+
+/// This function applies an iterative method for the solution of the problem
+/// <div>$$
+///     \min_{x ∈ ℝ^n} \frac{β}{2} |x-y|_1^2 - g^⊤ x + λ\|x\|₁.
+/// $$</div>
+/// Only PDPS is supported.
+///
+/// This function returns the number of iterations taken.
+#[replace_float_literals(F::cast_from(literal))]
+pub fn l1squared_unconstrained<F, I>(
+    y : &DVector<F::MixedType>,
+    g : &DVector<F::MixedType>,
+    λ : F,
+    β : F,
+    x : &mut DVector<F::MixedType>,
+    inner : &InnerSettings<F>,
+    iterator : I
+) -> usize
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>
+{
+    // Estimate of ‖K‖ for K=θ Id.
+    let inner_θ = 1.0;
+    let normest = inner_θ;
+
+    let (inner_τ, inner_σ) = (inner.pdps_τσ0.0 / normest, inner.pdps_τσ0.1 / normest);
+
+    match inner.method {
+        InnerMethod::PDPS =>
+            l1squared_unconstrained_pdps_alt(y, g, λ, β, x, inner_τ, inner_σ, inner_θ, iterator),
+        other => unimplemented!("{other:?} is unimplemented"),
+    }
+}
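
To make the sequential thresholding search in `l1squared_prox` concrete, here is a minimal standalone sketch on plain `f64` slices. It is illustrative only: `l1squared_prox_demo` and its `main` are hypothetical names, the crate's NaN handling via `NaNLeast` is replaced by the assumption that the inputs contain no NaNs, and `DVector` operations become slice iterations.

// Minimal sketch of the prox of z ↦ (β/2)|z-y|₁², mirroring l1squared_prox:
// sort the values |x_i - y_i| ascending, then look for the smallest component
// that survives soft-thresholding by t = β·n/(1 + β·m), where m counts the
// surviving components and n sums their values.
fn l1squared_prox_demo(x: &[f64], y: &[f64], beta: f64) -> Vec<f64> {
    let soft = |v: f64, t: f64| {
        if v > t { v - t } else if v < -t { v + t } else { 0.0 }
    };
    let mut a: Vec<f64> = x.iter().zip(y).map(|(x_i, y_i)| (x_i - y_i).abs()).collect();
    a.sort_unstable_by(|p, q| p.partial_cmp(q).unwrap()); // assumes no NaNs
    let mut n: f64 = a.iter().sum();
    for (k, &az) in a.iter().enumerate() {
        let m = (a.len() - k) as f64; // components still assumed non-zero
        let t = beta * n / (1.0 + beta * m);
        if az > t {
            // The smallest survivor passes the test, hence all survivors do:
            // soft-threshold everything by t.
            return x.iter().zip(y).map(|(x_i, y_i)| y_i + soft(x_i - y_i, t)).collect();
        }
        n -= az; // exclude this component and retry with m - 1 survivors
    }
    y.to_vec() // every |x_i - y_i| was zero, so x = y is its own prox
}

fn main() {
    // Two components get excluded before the test succeeds with m = 1,
    // yielding [1.5, 0.0, 0.0].
    println!("{:?}", l1squared_prox_demo(&[3.0, -1.0, 0.5], &[0.0; 3], 1.0));
}

The dual step of `l1squared_unconstrained_pdps_alt` above is an instance of the Moreau decomposition, which for convex, proper, lower semicontinuous $g$ and $σ > 0$ states
<div>$$
    q = \mathop{\mathrm{prox}}_{σg^*}(q) + σ \mathop{\mathrm{prox}}_{g/σ}(q/σ),
$$</div>
from which the chain of identities in its inline comment follows by rearrangement.
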
--- a/src/subproblem/nonneg.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/subproblem/nonneg.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -6,6 +6,7 @@
 use numeric_literals::replace_float_literals;
 use itertools::{izip, Itertools};
 use colored::Colorize;
+use std::cmp::Ordering::*;
 
 use alg_tools::iter::Mappable;
 use alg_tools::error::NumericalError;
@@ -22,38 +23,42 @@
     InnerMethod,
     InnerSettings
 };
+use super::l1squared_nonneg::max_interval_dist_to_zero;
 
 /// Compute the proximal operator of $x \mapsto x + \delta\_{[0, \infty)}$, i.e.,
 /// the non-negativity constrained soft-thresholding operator.
 #[inline]
 #[replace_float_literals(F::cast_from(literal))]
-fn nonneg_soft_thresholding<F : Float>(v : F, λ : F) -> F {
+pub(super) fn nonneg_soft_thresholding<F : Float>(v : F, λ : F) -> F {
     (v - λ).max(0.0)
 }
 
-/// Returns the ∞-norm minimal subdifferential of $x ↦ x^⊤Ax - g^⊤ x + λ\vec 1^⊤ x δ_{≥ 0}(x)$
+/// Returns the ∞-norm minimal subdifferential of $x ↦ x^⊤Ax/2 - g^⊤ x + λ\vec 1^⊤ x + δ_{≥ 0}(x)$
 /// at $x$.
 ///
 /// `v` will be modified and cannot be trusted to contain useful values afterwards.
-#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
-fn min_subdifferential<F : Float + ToNalgebraRealField>(
-    v : &mut DVector<F::MixedType>,
-    mA : &DMatrix<F::MixedType>,
-    x : &DVector<F::MixedType>,
-    g : &DVector<F::MixedType>,
-    λ : F::MixedType
+#[replace_float_literals(F::cast_from(literal))]
+fn min_subdifferential<F : Float + nalgebra::RealField>(
+    v : &mut DVector<F>,
+    mA : &DMatrix<F>,
+    x : &DVector<F>,
+    g : &DVector<F>,
+    λ : F
 ) -> F {
     v.copy_from(g);
     mA.gemv(v, 1.0, x, -1.0);   // v =  Ax - g
     let mut val = 0.0;
     for (&v_i, &x_i) in izip!(v.iter(), x.iter()) {
-        // The subdifferential of the objective is $Ax - g + λ + ∂ δ_{≥ 0}(x)$.
-        let d = v_i + λ;
-        if x_i > 0.0 || d < 0.0 {
-            val = val.max(d.abs());
+        let (mut lb, mut ub) = (v_i, v_i);
+        match x_i.partial_cmp(&0.0) {
+            Some(Greater) => { lb += λ; ub += λ },
+            // Less should not happen
+            Some(Less|Equal) => { lb = F::NEG_INFINITY; ub += λ },
+            None => {},
         }
+        val = max_interval_dist_to_zero(val, lb, ub);
     }
-    F::from_nalgebra_mixed(val)
+    val
 }
 
 /// Forward-backward splitting implementation of [`quadratic_nonneg`].
@@ -98,7 +103,7 @@
         iters += 1;
 
         backup.map(|_| {
-            min_subdifferential(&mut v, mA, x, g, λ)
+            F::from_nalgebra_mixed(min_subdifferential(&mut v, mA, x, g, λ))
         })
     });
 
@@ -281,7 +286,7 @@
         // 4. Report solution quality
         state.if_verbose(|| {
             // Calculate subdifferential at the FB step `x` that hasn't had `s` added yet.
-            min_subdifferential(&mut v, mA, x, g, λ)
+            F::from_nalgebra_mixed(min_subdifferential(&mut v, mA, x, g, λ))
         })
     });
 
@@ -291,7 +296,7 @@
 /// This function applies an iterative method for the solution of the quadratic non-negativity
 /// constrained problem
 /// <div>$$
-///     \min_{x ∈ ℝ^n} \frac{1}{2} x^⊤Ax - g^⊤ x + λ{\vec 1}^⊤ x + c + δ_{≥ 0}(x).
+///     \min_{x ∈ ℝ^n} \frac{1}{2} x^⊤Ax - g^⊤ x + λ{\vec 1}^⊤ x + δ_{≥ 0}(x).
 /// $$</div>
 /// Semismooth Newton or forward-backward are supported based on the setting in `method`.
 /// The parameter `mA` is matrix $A$, and `g` and `λ` are as in the mathematical formulation.
@@ -307,27 +312,28 @@
 ///
 /// This function returns the number of iterations taken.
 pub fn quadratic_nonneg<F, I>(
-    method : InnerMethod,
     mA : &DMatrix<F::MixedType>,
     g : &DVector<F::MixedType>,
-    //c_ : F,
     λ : F,
     x : &mut DVector<F::MixedType>,
-    τ : F,
+    mA_normest : F,
+    inner : &InnerSettings<F>,
     iterator : I
 ) -> usize
 where F : Float + ToNalgebraRealField,
       I : AlgIteratorFactory<F>
 {
-    
-    match method {
+    let inner_τ = inner.fb_τ0 / mA_normest;
+
+    match inner.method {
         InnerMethod::FB =>
-            quadratic_nonneg_fb(mA, g, λ, x, τ, iterator),
+            quadratic_nonneg_fb(mA, g, λ, x, inner_τ, iterator),
         InnerMethod::SSN =>
-            quadratic_nonneg_ssn(mA, g, λ, x, τ, iterator).unwrap_or_else(|e| {
+            quadratic_nonneg_ssn(mA, g, λ, x, inner_τ, iterator).unwrap_or_else(|e| {
                 println!("{}", format!("{e}. Using FB fallback.").red());
                 let ins = InnerSettings::<F>::default();
-                quadratic_nonneg_fb(mA, g, λ, x, τ, ins.iterator_options)
-            })
+                quadratic_nonneg_fb(mA, g, λ, x, inner_τ, ins.iterator_options)
+            }),
+        other => unimplemented!("{other:?} is unimplemented"),
     }
 }
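
Both this file and unconstrained.rs now import `max_interval_dist_to_zero` from `super::l1squared_nonneg`, whose body lies outside this changeset. Judging purely from the call sites, a plausible standalone sketch is the following; the body is an assumption, not the crate's actual code.

// Hypothetical sketch: fold the distance from 0 to the interval [lb, ub]
// into the running maximum `val`. The distance is 0 when 0 ∈ [lb, ub].
fn max_interval_dist_to_zero(val: f64, lb: f64, ub: f64) -> f64 {
    let dist = if lb > 0.0 {
        lb
    } else if ub < 0.0 {
        -ub
    } else {
        0.0
    };
    val.max(dist)
}

fn main() {
    // 0 lies in [-1, 2], so the distance is 0 and val = 0.5 is kept.
    assert_eq!(max_interval_dist_to_zero(0.5, -1.0, 2.0), 0.5);
    // [1, 3] misses 0 by 1.0 > 0.5, so the maximum updates.
    assert_eq!(max_interval_dist_to_zero(0.5, 1.0, 3.0), 1.0);
    println!("ok");
}
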
--- a/src/subproblem/unconstrained.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/subproblem/unconstrained.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -23,11 +23,12 @@
     InnerMethod,
     InnerSettings
 };
+use super::l1squared_nonneg::max_interval_dist_to_zero;
 
 /// Compute the proximal operator of $x \mapsto |x|$, i.e., the soft-thresholding operator.
 #[inline]
 #[replace_float_literals(F::cast_from(literal))]
-fn soft_thresholding<F : Float>(v : F, λ : F) -> F {
+pub(crate) fn soft_thresholding<F : Float>(v : F, λ : F) -> F {
     if v > λ {
         v - λ
     } else if v < -λ {
@@ -40,27 +41,28 @@
 /// Returns the ∞-norm minimal subdifferential of $x ↦ x^⊤Ax/2 - g^⊤ x + λ\|x\|₁$ at $x$.
 ///
 /// `v` will be modified and cannot be trusted to contain useful values afterwards.
-#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
-fn min_subdifferential<F : Float + ToNalgebraRealField>(
-    v : &mut DVector<F::MixedType>,
-    mA : &DMatrix<F::MixedType>,
-    x : &DVector<F::MixedType>,
-    g : &DVector<F::MixedType>,
-    λ : F::MixedType
+#[replace_float_literals(F::cast_from(literal))]
+fn min_subdifferential<F : Float + nalgebra::RealField>(
+    v : &mut DVector<F>,
+    mA : &DMatrix<F>,
+    x : &DVector<F>,
+    g : &DVector<F>,
+    λ : F
 ) -> F {
     v.copy_from(g);
     mA.gemv(v, 1.0, x, -1.0);   // v =  Ax - g
     let mut val = 0.0;
     for (&v_i, &x_i) in izip!(v.iter(), x.iter()) {
-        // The subdifferential at x is $Ax - g + λ ∂‖·‖₁(x)$.
-        val = val.max(match x_i.partial_cmp(&0.0) {
-            Some(Greater) => v_i + λ,
-            Some(Less) => v_i - λ,
-            Some(Equal) => soft_thresholding(v_i, λ),
-            None => F::MixedType::nan(),
-        })
+        let (mut lb, mut ub) = (v_i, v_i);
+        match x_i.partial_cmp(&0.0) {
+            Some(Greater) => { lb += λ; ub += λ },
+            Some(Less) => { lb -= λ; ub -= λ },
+            Some(Equal) => { lb -= λ; ub += λ },
+            None => {},
+        }
+        val = max_interval_dist_to_zero(val, lb, ub);
     }
-    F::from_nalgebra_mixed(val)
+    val
 }
 
 
@@ -106,7 +108,7 @@
         iters += 1;
 
         backup.map(|_| {
-            min_subdifferential(&mut v, mA, x, g, λ)
+            F::from_nalgebra_mixed(min_subdifferential(&mut v, mA, x, g, λ))
         })
     });
 
@@ -242,7 +244,7 @@
         // 4. Report solution quality
         state.if_verbose(|| {
             // Calculate subdifferential at the FB step `x` that hasn't had `s` added yet.
-            min_subdifferential(&mut v, mA, x, g, λ)
+            F::from_nalgebra_mixed(min_subdifferential(&mut v, mA, x, g, λ))
         })
     });
 
@@ -251,7 +253,7 @@
 
 /// This function applies an iterative method for the solution of the problem
 /// <div>$$
-///     \min_{x ∈ ℝ^n} \frac{1}{2} x^⊤Ax - g^⊤ x + λ\|x\|₁ + c.
+///     \min_{x ∈ ℝ^n} \frac{1}{2} x^⊤Ax - g^⊤ x + λ\|x\|₁.
 /// $$</div>
 /// Semismooth Newton or forward-backward are supported based on the setting in `method`.
 /// The parameter `mA` is matrix $A$, and `g` and `λ` are as in the mathematical formulation.
@@ -262,27 +264,28 @@
 ///
 /// This function returns the number of iterations taken.
 pub fn quadratic_unconstrained<F, I>(
-    method : InnerMethod,
     mA : &DMatrix<F::MixedType>,
     g : &DVector<F::MixedType>,
-    //c_ : F,
     λ : F,
     x : &mut DVector<F::MixedType>,
-    τ : F,
+    mA_normest : F,
+    inner : &InnerSettings<F>,
     iterator : I
 ) -> usize
 where F : Float + ToNalgebraRealField,
       I : AlgIteratorFactory<F>
 {
+    let inner_τ = inner.fb_τ0 / mA_normest;
     
-    match method {
+    match inner.method {
         InnerMethod::FB =>
-            quadratic_unconstrained_fb(mA, g, λ, x, τ, iterator),
+            quadratic_unconstrained_fb(mA, g, λ, x, inner_τ, iterator),
         InnerMethod::SSN =>
-            quadratic_unconstrained_ssn(mA, g, λ, x, τ, iterator).unwrap_or_else(|e| {
+            quadratic_unconstrained_ssn(mA, g, λ, x, inner_τ, iterator).unwrap_or_else(|e| {
                 println!("{}", format!("{e}. Using FB fallback.").red());
                 let ins = InnerSettings::<F>::default();
-                quadratic_unconstrained_fb(mA, g, λ, x, τ, ins.iterator_options)
-            })
+                quadratic_unconstrained_fb(mA, g, λ, x, inner_τ, ins.iterator_options)
+            }),
+        other => unimplemented!("{other:?} is unimplemented"),
     }
 }
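
Since `soft_thresholding` is now `pub(crate)` and shared with the new ℓ¹-squared subproblems, a small self-contained `f64` sketch of it as the prox of $λ|\,·\,|$ may be useful; the `main` below is illustrative only.

// Plain-f64 mirror of the crate's soft_thresholding: the prox of λ|·|,
// shrinking v toward zero by λ and flattening [-λ, λ] onto 0.
fn soft_thresholding(v: f64, λ: f64) -> f64 {
    if v > λ {
        v - λ
    } else if v < -λ {
        v + λ
    } else {
        0.0
    }
}

fn main() {
    assert_eq!(soft_thresholding(3.0, 1.0), 2.0);
    assert_eq!(soft_thresholding(0.5, 1.0), 0.0);
    assert_eq!(soft_thresholding(-2.0, 1.0), -1.0);
    println!("ok");
}

The other notable interface change here is that `quadratic_unconstrained` now takes `mA_normest` and `inner` instead of a precomputed step: the forward-backward step length is chosen as `inner.fb_τ0 / mA_normest`, reflecting the usual requirement that the step be proportional to $1/L$ with $L = \|A\|$ the Lipschitz constant of $∇(x ↦ x^⊤Ax/2 - g^⊤x)$.
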
--- a/src/types.rs	Tue Aug 01 10:25:09 2023 +0300
+++ b/src/types.rs	Mon Feb 17 13:54:53 2025 -0500
@@ -4,7 +4,6 @@
 
 use colored::ColoredString;
 use serde::{Serialize, Deserialize};
-use clap::ValueEnum;
 use alg_tools::iterate::LogRepr;
 use alg_tools::euclidean::Euclidean;
 use alg_tools::norms::{Norm, L1};
@@ -13,7 +12,7 @@
 pub use alg_tools::loc::Loc;
 pub use alg_tools::sets::Cube;
 
-use crate::measures::DiscreteMeasure;
+// use crate::measures::DiscreteMeasure;
 
 /// [`Float`] with extra display and string conversion traits such that [`clap`] doesn't choke up.
 pub trait ClapFloat : Float
@@ -27,31 +26,74 @@
 pub struct IterInfo<F : Float, const N : usize> {
     /// Function value
     pub value : F,
-    /// Number of speaks
+    /// Number of spikes
     pub n_spikes : usize,
     /// Number of iterations this statistic covers
     pub this_iters : usize,
+    /// Number of spikes inserted since last IterInfo statistic
+    pub inserted : usize,
     /// Number of spikes removed by merging since last IterInfo statistic
     pub merged : usize,
     /// Number of spikes removed by pruning since last IterInfo statistic
     pub pruned : usize,
     /// Number of inner iterations since last IterInfo statistic
     pub inner_iters : usize,
+    /// Tuple of (untransported mass, source mass)
+    pub untransported_fraction : Option<(F, F)>,
+    /// Tuple of (|destination mass - untransported mass|, transported mass)
+    pub transport_error : Option<(F, F)>,
     /// Current tolerance
     pub ε : F,
-    /// Solve fin.dim problem for this measure to get the optimal `value`.
-    pub postprocessing : Option<DiscreteMeasure<Loc<F, N>, F>>,
+    // /// Solve fin.dim problem for this measure to get the optimal `value`.
+    // pub postprocessing : Option<RNDM<F, N>>,
 }
 
+impl<F : Float, const N : usize>  IterInfo<F, N> {
+    /// Initialise statistics with zeros. `ε` and `value` are unspecified.
+    pub fn new() -> Self {
+        IterInfo {
+            value : F::NAN,
+            n_spikes : 0,
+            this_iters : 0,
+            merged : 0,
+            inserted : 0,
+            pruned : 0,
+            inner_iters : 0,
+            ε : F::NAN,
+            // postprocessing : None,
+            untransported_fraction : None,
+            transport_error : None,
+        }
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
 impl<F, const N : usize> LogRepr for IterInfo<F, N> where F : LogRepr + Float {
     fn logrepr(&self) -> ColoredString {
-        format!("{}\t| N = {}, ε = {:.8}, inner_iters_mean = {}, merged+pruned_mean = {}+{}",
+        format!("{}\t| N = {}, ε = {:.8}, 𝔼inner_it = {}, 𝔼ins/mer/pru = {}/{}/{}{}{}",
                 self.value.logrepr(),
                 self.n_spikes,
                 self.ε,
-                self.inner_iters as float / self.this_iters as float,
-                self.merged as float / self.this_iters as float,
-                self.pruned as float / self.this_iters as float,
+                self.inner_iters as float / self.this_iters.max(1) as float,
+                self.inserted as float / self.this_iters.max(1) as float,
+                self.merged as float / self.this_iters.max(1) as float,
+                self.pruned as float / self.this_iters.max(1) as float,
+                match self.untransported_fraction {
+                    None => String::new(),
+                    Some((a, b)) => if b > 0.0 {
+                        format!(", untransported {:.2}%", 100.0*a/b)
+                    } else {
+                        String::new()
+                    }
+                },
+                match self.transport_error {
+                    None => String::new(),
+                    Some((a, b)) => if b > 0.0 {
+                        format!(", transport error {:.2}%", 100.0*a/b)
+                    } else {
+                        String::new()
+                    }
+                }
         ).as_str().into()
     }
 }
@@ -78,7 +120,7 @@
 }
 
 /// Data term type
-#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug, ValueEnum)]
+#[derive(Clone, Copy, PartialEq, Serialize, Deserialize, Debug)]
 pub enum DataTerm {
     /// $\\|z\\|\_2^2/2$
     L2Squared,
@@ -95,3 +137,24 @@
         }
     }
 }
+
+/// Type for indicating norm-2-squared data fidelity or transport cost.
+#[derive(Clone, Copy, Serialize, Deserialize)]
+pub struct L2Squared;
+
+/// Trait for indicating that `Self` is Lipschitz with respect to the (semi)norm `M`.
+pub trait Lipschitz<M> {
+    /// The type of floats
+    type FloatType : Float;
+
+    /// Returns the Lipschitz factor of `self` with respect to the (semi)norm `M`.
+    fn lipschitz_factor(&self, seminorm : M) -> Option<Self::FloatType>;
+}
+
+/// Trait for norm-bounded functions.
+pub trait NormBounded<M> {
+    /// The type of floats
+    type FloatType : Float;
+
+    /// Returns a bound on the values of this function object in the `M`-norm.
+    fn norm_bound(&self, m : M) -> Self::FloatType;
+}
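
To illustrate how the new `Lipschitz` trait is intended to be used, here is a self-contained sketch: `Scale`, `L2` and the `main` below are invented stand-ins (the real crate implements the trait for its own operators and seminorms), and the trait is re-declared locally without the `Float` bound so the snippet compiles on its own.

// Marker type standing in for a norm; the crate uses its own seminorm types.
struct L2;

// Toy operator x ↦ c·x.
struct Scale(f64);

// Local re-declaration of the trait from src/types.rs (without the Float bound).
trait Lipschitz<M> {
    type FloatType;
    fn lipschitz_factor(&self, seminorm: M) -> Option<Self::FloatType>;
}

impl Lipschitz<L2> for Scale {
    type FloatType = f64;
    // A linear scaling is Lipschitz with factor |c| in any norm.
    fn lipschitz_factor(&self, _seminorm: L2) -> Option<f64> {
        Some(self.0.abs())
    }
}

fn main() {
    let op = Scale(-3.0);
    assert_eq!(op.lipschitz_factor(L2), Some(3.0));
    println!("ok");
}
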
